from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import pendulum
import unittest
import time
from airflow import configuration, models, settings, AirflowException
from airflow.exceptions import AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import DAG, TaskInstance as TI
from airflow.models import State as ST
from airflow.models import DagModel, DagStat
from airflow.models import clear_task_instances
from airflow.models import XCom
from airflow.models import Connection
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from mock import patch
from parameterized import parameterized
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
def test_dag_topological_sort(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEqual(tuple(), dag.topological_sort())
def test_get_num_task_instances(self):
test_dag_id = 'test_get_num_task_instances_dag'
test_task_id = 'task_1'
test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE)
test_task = DummyOperator(task_id=test_task_id, dag=test_dag)
ti1 = TI(task=test_task, execution_date=DEFAULT_DATE)
ti1.state = None
ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti2.state = State.RUNNING
ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
ti3.state = State.QUEUED
ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3))
ti4.state = State.RUNNING
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(0, DAG.get_num_task_instances(test_dag_id, ['fakename'],
session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id],
session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id,
['fakename', test_task_id], session=session))
self.assertEqual(1, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None], session=session))
self.assertEqual(2, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[State.RUNNING], session=session))
self.assertEqual(3, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None, State.RUNNING], session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None, State.QUEUED, State.RUNNING], session=session))
session.close()
def test_render_template_field(self):
"""Tests if render_template from a field works"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE)
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict(foo='bar'))
self.assertEqual(result, 'bar')
def test_render_template_field_macro(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_macros=dict(foo='bar'))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict())
self.assertEqual(result, 'bar')
def test_user_defined_filters(self):
def jinja_udf(name):
return 'Hello %s' % name
dag = models.DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
jinja_env = dag.get_template_env()
self.assertIn('hello', jinja_env.filters)
self.assertEqual(jinja_env.filters['hello'], jinja_udf)
def test_render_template_field_filter(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
def jinja_udf(name):
return 'Hello %s' % name
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', "{{ 'world' | hello}}", dict())
self.assertEqual(result, 'Hello world')
class DagStatTest(unittest.TestCase):
def test_dagstats_crud(self):
DagStat.create(dag_id='test_dagstats_crud')
session = settings.Session()
qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud')
self.assertEqual(len(qry.all()), len(State.dag_states))
DagStat.set_dirty(dag_id='test_dagstats_crud')
res = qry.all()
for stat in res:
self.assertTrue(stat.dirty)
# create missing
DagStat.set_dirty(dag_id='test_dagstats_crud_2')
qry2 = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud_2')
self.assertEqual(len(qry2.all()), len(State.dag_states))
dag = DAG(
'test_dagstats_crud',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.FAILED,
external_trigger=False,
)
DagStat.update(dag_ids=['test_dagstats_crud'])
res = qry.all()
for stat in res:
if stat.state == State.FAILED:
self.assertEqual(stat.count, 1)
else:
self.assertEqual(stat.count, 0)
DagStat.update()
res = qry2.all()
for stat in res:
self.assertFalse(stat.dirty)
class DagRunTest(unittest.TestCase):
def create_dag_run(self, dag, state=State.RUNNING, task_states=None, execution_date=None):
now = timezone.utcnow()
if execution_date is None:
execution_date = now
dag_run = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=execution_date,
start_date=now,
state=state,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
timezone.datetime(2015, 1, 2, 3, 4, 5, 6))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
def test_dagrun_find(self):
session = settings.Session()
now = timezone.utcnow()
dag_id1 = "test_dagrun_find_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id1,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=True,
)
session.add(dag_run)
dag_id2 = "test_dagrun_find_not_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id2,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id1, external_trigger=True)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id1, external_trigger=False)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id2, external_trigger=True)))
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id2, external_trigger=False)))
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
dag = DAG(
dag_id='test_dagrun_success_when_all_skipped',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
dag_task2 = DummyOperator(
task_id='test_state_skipped1',
dag=dag)
dag_task3 = DummyOperator(
task_id='test_state_skipped2',
dag=dag)
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_success_conditions(self):
session = settings.Session()
dag = DAG(
'test_dagrun_success_conditions',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_success_conditions',
state=State.RUNNING,
execution_date=now,
start_date=now)
# op1 = root
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op3 = dr.get_task_instance(task_id=op3.task_id)
ti_op4 = dr.get_task_instance(task_id=op4.task_id)
# root is successful, but unfinished tasks
state = dr.update_state()
self.assertEqual(State.RUNNING, state)
# one has failed, but root is successful
ti_op2.set_state(state=State.FAILED, session=session)
ti_op3.set_state(state=State.SUCCESS, session=session)
ti_op4.set_state(state=State.SUCCESS, session=session)
state = dr.update_state()
self.assertEqual(State.SUCCESS, state)
def test_dagrun_deadlock(self):
session = settings.Session()
dag = DAG(
'test_dagrun_deadlock',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_deadlock',
state=State.RUNNING,
execution_date=now,
start_date=now)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.NONE, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
ti_op2.set_state(state=State.NONE, session=session)
op2.trigger_rule = 'invalid'
dr.update_state()
self.assertEqual(dr.state, State.FAILED)
def test_dagrun_no_deadlock(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock',
start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(task_id='dop', depends_on_past=True)
op2 = DummyOperator(task_id='tc', task_concurrency=1)
dag.clear()
dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_1',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
dr2 = dag.create_dagrun(run_id='test_dagrun_no_deadlock_2',
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
start_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti1_op1 = dr.get_task_instance(task_id='dop')
ti2_op1 = dr2.get_task_instance(task_id='dop')
ti2_op1 = dr.get_task_instance(task_id='tc')
ti2_op2 = dr.get_task_instance(task_id='tc')
ti1_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
ti2_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
def test_get_task_instance_on_empty_dagrun(self):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
dag = DAG(
dag_id='test_get_task_instance_on_empty_dagrun',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
session = settings.Session()
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too which we
# don't want
dag_run = models.DagRun(
dag_id=dag.dag_id,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance('test_short_circuit_false')
self.assertEqual(None, ti)
def test_get_latest_runs(self):
session = settings.Session()
dag = DAG(
dag_id='test_latest_runs_1',
start_date=DEFAULT_DATE)
dag_1_run_1 = self.create_dag_run(dag,
execution_date=timezone.datetime(2015, 1, 1))
dag_1_run_2 = self.create_dag_run(dag,
execution_date=timezone.datetime(2015, 1, 2))
dagruns = models.DagRun.get_latest_runs(session)
session.close()
for dagrun in dagruns:
if dagrun.dag_id == 'test_latest_runs_1':
self.assertEqual(dagrun.execution_date, timezone.datetime(2015, 1, 2))
def test_is_backfill(self):
dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE)
dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE)
dagrun.run_id = BackfillJob.ID_PREFIX + '_sfddsffds'
dagrun2 = self.create_dag_run(dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(dagrun.is_backfill)
self.assertFalse(dagrun2.is_backfill)
class DagBagTest(unittest.TestCase):
def test_get_existing_dag(self):
"""
test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
test that retrieving a non-existing dag id returns None without crashing
"""
dagbag = models.DagBag(include_examples=True)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_process_file_that_contains_multi_bytes_char(self):
"""
test that we're able to parse a file that contains a multi-byte char
"""
from tempfile import NamedTemporaryFile
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag()
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
@patch.object(DagModel,'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
Test that, once a DAG is loaded, it doesn't get refreshed again if it
hasn't been expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
processed_files = dagbag.process_file_calls
# Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs.
"""
dagbag = models.DagBag(include_examples=True)
expected = {
'example_bash_operator': 'example_bash_operator.py',
'example_subdag_operator': 'example_subdag_operator.py',
'example_subdag_operator.section-1': 'subdags/subdag.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(
dag.fileloc.endswith('airflow/example_dags/' + path))
class TaskInstanceTest(unittest.TestCase):
def test_set_task_dates(self):
"""
Test that tasks properly take start/end dates from DAGs
"""
dag = DAG('dag', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10))
op1 = DummyOperator(task_id='op_1', owner='test')
self.assertTrue(op1.start_date is None and op1.end_date is None)
# dag should assign its dates to op1 because op1 has no dates
dag.add_task(op1)
self.assertTrue(
op1.start_date == dag.start_date and op1.end_date == dag.end_date)
op2 = DummyOperator(
task_id='op_2',
owner='test',
start_date=DEFAULT_DATE - datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=11))
# dag should assign its dates to op2 because they are more restrictive
dag.add_task(op2)
self.assertTrue(
op2.start_date == dag.start_date and op2.end_date == dag.end_date)
op3 = DummyOperator(
task_id='op_3',
owner='test',
start_date=DEFAULT_DATE + datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=9))
# op3 should keep its dates because they are more restrictive
dag.add_task(op3)
self.assertTrue(
op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(
op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9))
def test_set_dag(self):
"""
Test assigning Operators to Dags, including deferred assignment
"""
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op = DummyOperator(task_id='op_1', owner='test')
# no dag assigned
self.assertFalse(op.has_dag())
self.assertRaises(AirflowException, getattr, op, 'dag')
# no improper assignment
with self.assertRaises(TypeError):
op.dag = 1
op.dag = dag
# no reassignment
with self.assertRaises(AirflowException):
op.dag = dag2
# but assigning the same dag is ok
op.dag = dag
self.assertIs(op.dag, dag)
self.assertIn(op, dag.tasks)
def test_infer_dag(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
# double check dags
self.assertEqual(
[i.has_dag() for i in [op1, op2, op3, op4]],
[False, False, True, True])
# can't combine operators with no dags
self.assertRaises(AirflowException, op1.set_downstream, op2)
# op2 should infer dag from op1
op1.dag = dag
op1.set_downstream(op2)
self.assertIs(op2.dag, dag)
# can't assign across multiple DAGs
self.assertRaises(AirflowException, op1.set_downstream, op4)
self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
def test_bitshift_compose_operators(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test')
op4 = DummyOperator(task_id='test_op_4', owner='test')
op5 = DummyOperator(task_id='test_op_5', owner='test')
# can't compose operators without dags
with self.assertRaises(AirflowException):
op1 >> op2
dag >> op1 >> op2 << op3
# make sure dag assignment carries through
# using __rrshift__
self.assertIs(op1.dag, dag)
self.assertIs(op2.dag, dag)
self.assertIs(op3.dag, dag)
# op2 should be downstream of both
self.assertIn(op2, op1.downstream_list)
self.assertIn(op2, op3.downstream_list)
# test dag assignment with __rlshift__
dag << op4
self.assertIs(op4.dag, dag)
# dag assignment with __rrshift__
dag >> op5
self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
@patch.object(TI, 'pool_full')
def test_run_pooling_task(self, mock_pool_full):
"""
test that running a task whose pool is reported full still updates the task
state (no dependency check in ti_deps anymore, so it also ends up SUCCESS)
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task')
task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
pool='test_run_pooling_task_pool', owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.SUCCESS)
@patch.object(TI, 'pool_full')
def test_run_pooling_task_with_mark_success(self, mock_pool_full):
"""
test that running a task with the mark_success param updates the task state to
SUCCESS without actually running the task.
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
task = DummyOperator(
task_id='test_run_pooling_task_with_mark_success_op',
dag=dag,
pool='test_run_pooling_task_with_mark_success_pool',
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run(mark_success=True)
self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
test that running a task which raises AirflowSkipException ends
up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(models.State.SKIPPED, ti.state)
def test_retry_delay(self):
"""
Test that retry delays are respected
"""
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=3),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
# second run -- still up for retry because retry_delay hasn't expired
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
# third run -- failed
time.sleep(3)
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
@patch.object(TI, 'pool_full')
def test_retry_handling(self, mock_pool_full):
"""
Test that task retries are handled properly
"""
# Mock the pool as having open slots, since the pool doesn't actually exist
mock_pool_full.return_value = False
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=0),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 1)
self.assertEqual(ti.try_number, 2)
# second run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 2)
self.assertEqual(ti.try_number, 3)
# Clear the TI state since you can't run a task with a FAILED state without
# clearing it first
dag.clear()
# third run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 3)
self.assertEqual(ti.try_number, 4)
# fourth run -- fail
run_with_error(ti)
ti.refresh_from_db()
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 4)
self.assertEqual(ti.try_number, 5)
def test_next_retry_datetime(self):
delay = datetime.timedelta(seconds=30)
max_delay = datetime.timedelta(minutes=60)
dag = models.DAG(dag_id='fail_dag')
task = BashOperator(
task_id='task_with_exp_backoff_and_max_delay',
bash_command='exit 1',
retries=3,
retry_delay=delay,
retry_exponential_backoff=True,
max_retry_delay=max_delay,
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=DEFAULT_DATE)
ti.end_date = pendulum.instance(timezone.utcnow())
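# With retry_exponential_backoff, the next retry should fall inside a window that
# roughly doubles with every attempt and is capped at max_retry_delay, which is
# what the range checks below verify.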
dt = ti.next_retry_datetime()
# between 30 * 2^-1 and 30 * 2^0 (15 and 30)
period = ti.end_date.add(seconds=30) - ti.end_date.add(seconds=15)
self.assertTrue(dt in period)
ti.try_number = 3
dt = ti.next_retry_datetime()
# between 30 * 2^2 and 30 * 2^3 (120 and 240)
period = ti.end_date.add(seconds=240) - ti.end_date.add(seconds=120)
self.assertTrue(dt in period)
ti.try_number = 5
dt = ti.next_retry_datetime()
# between 30 * 2^4 and 30 * 2^5 (480 and 960)
period = ti.end_date.add(seconds=960) - ti.end_date.add(seconds=480)
self.assertTrue(dt in period)
ti.try_number = 9
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
ti.try_number = 50
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
def test_depends_on_past(self):
dagbag = models.DagBag()
dag = dagbag.get_dag('test_depends_on_past')
dag.clear()
task = dag.tasks[0]
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(task, run_date)
# depends_on_past prevents the run
task.run(start_date=run_date, end_date=run_date)
ti.refresh_from_db()
self.assertIs(ti.state, None)
# ignore first depends_on_past to allow the run
task.run(
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
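# For example, the first row below, ['all_success', 5, 0, 0, 0, 0, True, None, True],
# reads: with trigger_rule=all_success and five successful upstream tasks, the
# dependency passes (expect_completed=True) and the TI state is left unchanged
# (expect_state=None).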
@parameterized.expand([
#
# Tests for all_success
#
['all_success', 5, 0, 0, 0, 0, True, None, True],
['all_success', 2, 0, 0, 0, 0, True, None, False],
['all_success', 2, 0, 1, 0, 0, True, ST.UPSTREAM_FAILED, False],
['all_success', 2, 1, 0, 0, 0, True, ST.SKIPPED, False],
#
# Tests for one_success
#
['one_success', 5, 0, 0, 0, 5, True, None, True],
['one_success', 2, 0, 0, 0, 2, True, None, True],
['one_success', 2, 0, 1, 0, 3, True, None, True],
['one_success', 2, 1, 0, 0, 3, True, None, True],
#
# Tests for all_failed
#
['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['all_failed', 0, 0, 5, 0, 5, True, None, True],
['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
#
# Tests for one_failed
#
['one_failed', 5, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 1, 0, 0, True, None, True],
['one_failed', 2, 1, 0, 0, 3, True, None, False],
['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
#
# Tests for done
#
['all_done', 5, 0, 0, 0, 5, True, None, True],
['all_done', 2, 0, 0, 0, 2, True, None, False],
['all_done', 2, 0, 1, 0, 3, True, None, False],
['all_done', 2, 1, 0, 0, 3, True, None, False]
])
def test_check_task_dependencies(self, trigger_rule, successes, skipped,
failed, upstream_failed, done,
flag_upstream_failed,
expect_state, expect_completed):
start_date = timezone.datetime(2016, 2, 1, 0, 0, 0)
dag = models.DAG('test-dag', start_date=start_date)
downstream = DummyOperator(task_id='downstream',
dag=dag, owner='airflow',
trigger_rule=trigger_rule)
for i in range(5):
task = DummyOperator(task_id='runme_{}'.format(i),
dag=dag, owner='airflow')
task.set_downstream(downstream)
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(downstream, run_date)
dep_results = TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=flag_upstream_failed)
completed = all([dep.passed for dep in dep_results])
self.assertEqual(completed, expect_completed)
self.assertEqual(ti.state, expect_state)
def test_xcom_pull_after_success(self):
"""
tests xcom set/clear relative to a task in a 'success' rerun scenario
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
# The second run and assert is to handle AIRFLOW-131 (don't clear on
# prior success)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
# execute, even if dependencies are ignored
ti.run(ignore_all_deps=True, mark_success=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Xcom IS finally cleared once task has executed
ti.run(ignore_all_deps=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
def test_xcom_pull_different_execution_date(self):
"""
tests xcom fetch behavior with different execution dates, using
both xcom_pull with "include_prior_dates" and without
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
exec_date += datetime.timedelta(days=1)
ti = TI(
task=task, execution_date=exec_date)
ti.run()
# We have set a new execution date (and did not pass in
# 'include_prior_dates'), which means this task should now have a cleared
# xcom value
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
# We *should* get a value using 'include_prior_dates'
self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
key=key,
include_prior_dates=True),
value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=timezone.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=timezone.utcnow())
with self.assertRaises(TestError):
ti.run()
def test_check_and_change_state_before_execution(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti._try_number, 0)
self.assertTrue(ti._check_and_change_state_before_execution())
# State should be running, and try_number column should be incremented
self.assertEqual(ti.state, State.RUNNING)
self.assertEqual(ti._try_number, 1)
def test_check_and_change_state_before_execution_dep_not_met(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2= DummyOperator(task_id='task2', dag=dag, start_date=DEFAULT_DATE)
task >> task2
ti = TI(
task=task2, execution_date=timezone.utcnow())
self.assertFalse(ti._check_and_change_state_before_execution())
def test_try_number(self):
"""
Test that the try_number accessor behaves correctly in various task states
"""
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(task=task, execution_date=timezone.utcnow())
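# try_number reports the stored attempt count plus one unless the task is
# currently RUNNING, so a brand-new instance reports 1 (its upcoming first try).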
self.assertEqual(1, ti.try_number)
ti.try_number = 2
ti.state = State.RUNNING
self.assertEqual(2, ti.try_number)
ti.state = State.SUCCESS
self.assertEqual(3, ti.try_number)
def test_get_num_running_task_instances(self):
session = settings.Session()
dag = models.DAG(dag_id='test_get_num_running_task_instances')
dag2 = models.DAG(dag_id='test_get_num_running_task_instances_dummy')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task', dag=dag2, start_date=DEFAULT_DATE)
ti1 = TI(task=task, execution_date=DEFAULT_DATE)
ti2 = TI(task=task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti3 = TI(task=task2, execution_date=DEFAULT_DATE)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.RUNNING
session.add(ti1)
session.add(ti2)
session.add(ti3)
session.commit()
self.assertEqual(1, ti1.get_num_running_task_instances(session=session))
self.assertEqual(1, ti2.get_num_running_task_instances(session=session))
self.assertEqual(1, ti3.get_num_running_task_instances(session=session))
class ClearTasksTest(unittest.TestCase):
def test_clear_task_instances(self):
dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='0', owner='test', dag=dag)
task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session, dag=dag)
session.commit()
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 3)
def test_clear_task_instances_without_task(self):
dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
self.assertFalse(dag.has_task(task0.task_id))
self.assertFalse(dag.has_task(task1.task_id))
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_clear_task_instances_without_dag(self):
dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task_0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_dag_clear(self):
dag = DAG('test_dag_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
# Next try to run will be try 1
self.assertEqual(ti0.try_number, 1)
ti0.run()
self.assertEqual(ti0.try_number, 2)
dag.clear()
ti0.refresh_from_db()
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.state, State.NONE)
self.assertEqual(ti0.max_tries, 1)
task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test',
dag=dag, retries=2)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
self.assertEqual(ti1.max_tries, 2)
ti1.try_number = 1
# Next try will be 2
ti1.run()
self.assertEqual(ti1.try_number, 3)
self.assertEqual(ti1.max_tries, 2)
dag.clear()
ti0.refresh_from_db()
ti1.refresh_from_db()
# after clearing the dag, ti1 should show attempt 3 of 5
self.assertEqual(ti1.max_tries, 4)
self.assertEqual(ti1.try_number, 3)
# after clearing the dag, ti0 should show attempt 2 of 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
def test_dags_clear(self):
# setup
session = settings.Session()
dags, tis = [], []
num_of_dags = 5
for i in range(num_of_dags):
dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test', dag=dag),
execution_date=DEFAULT_DATE)
dags.append(dag)
tis.append(ti)
# test clear all dags
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 0)
DAG.clear_dags(dags)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 1)
# test dry_run
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
DAG.clear_dags(dags, dry_run=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
# test only_failed
from random import randint
failed_dag_idx = randint(0, len(tis) - 1)
tis[failed_dag_idx].state = State.FAILED
session.merge(tis[failed_dag_idx])
session.commit()
DAG.clear_dags(dags, only_failed=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
if i != failed_dag_idx:
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
else:
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 2)
def test_operator_clear(self):
dag = DAG('test_operator_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
t1 = DummyOperator(task_id='bash_op', owner='test', dag=dag)
t2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1)
t2.set_upstream(t1)
ti1 = TI(task=t1, execution_date=DEFAULT_DATE)
ti2 = TI(task=t2, execution_date=DEFAULT_DATE)
ti2.run()
# Dependency not met
self.assertEqual(ti2.try_number, 1)
self.assertEqual(ti2.max_tries, 1)
t2.clear(upstream=True)
ti1.run()
ti2.run()
self.assertEqual(ti1.try_number, 2)
# max_tries is 0 because there is no task instance in db for ti1
# so clear won't change the max_tries.
self.assertEqual(ti1.max_tries, 0)
self.assertEqual(ti2.try_number, 2)
# try_number (0) + retries(1)
self.assertEqual(ti2.max_tries, 1)
def test_xcom_disable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test1"
dag_id = "test_dag1"
task_id = "test_task1"
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=False)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=False)
self.assertEqual(ret_value, json_obj)
def test_xcom_enable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test2"
dag_id = "test_dag2"
task_id = "test_task2"
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=True)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=True)
self.assertEqual(ret_value, json_obj)
def test_xcom_disable_pickle_type_fail_on_non_json(self):
class PickleRce(object):
def __reduce__(self):
return (os.system, ("ls -alt",))
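# A payload whose __reduce__ would run a shell command if it were ever
# unpickled; with pickling disabled, XCom.set must reject it as non-JSON.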
self.assertRaises(TypeError, XCom.set,
key="xcom_test3",
value=PickleRce(),
dag_id="test_dag3",
task_id="test_task3",
execution_date=timezone.utcnow(),
enable_pickling=False)
def test_xcom_get_many(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test4"
dag_id1 = "test_dag4"
task_id1 = "test_task4"
dag_id2 = "test_dag5"
task_id2 = "test_task5"
XCom.set(key=key,
value=json_obj,
dag_id=dag_id1,
task_id=task_id1,
execution_date=execution_date,
enable_pickling=True)
XCom.set(key=key,
value=json_obj,
dag_id=dag_id2,
task_id=task_id2,
execution_date=execution_date,
enable_pickling=True)
results = XCom.get_many(key=key,
execution_date=execution_date,
enable_pickling=True)
for result in results:
self.assertEqual(result.value, json_obj)
class ConnectionTest(unittest.TestCase):
@patch.object(configuration, 'get')
def test_connection_extra_no_encryption(self, mock_get):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
mock_get.return_value = 'cryptography_not_found_storing_passwords_in_plain_text'
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
@patch.object(configuration, 'get')
def test_connection_extra_with_encryption(self, mock_get):
"""
Tests extras on a new connection with encryption. The fernet key
is set to a base64 encoded string and the extra is encrypted.
"""
# 'dGVzdA==' is base64 encoded 'test'
mock_get.return_value = 'dGVzdA=='
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
# --- End of tests/models.py (repo: KL-WLCR/incubator-airflow, license: apache-2.0) ---
'output dimensionalities for each column'
import csv
import sys
import re
import math
from collections import defaultdict
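# Rough usage sketch (the CSV file name below is only an example):
#   python cols_dimensionality.py Train.csv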
def get_words( text ):
text = text.replace( "'", "" )
text = re.sub( r'\W+', ' ', text )
text = text.lower()
text = text.split()
words = []
for w in text:
if w in words:
continue
words.append( w )
return words
###
csv.field_size_limit( 1000000 )
input_file = sys.argv[1]
target_col = 'SalaryNormalized'
cols2tokenize = [ 'Title', 'FullDescription' ]
cols2binarize = [ 'Loc1', 'Loc2', 'Loc3', 'Loc4', 'ContractType', 'ContractTime', 'Company', 'Category', 'SourceName' ]
cols2drop = [ 'SalaryRaw' ]
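# Tokenized columns are counted by unique words and binarized columns by unique
# values; cols2drop is mapped to indexes below but not otherwise used in this script.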
###
i_f = open( input_file )
reader = csv.reader( i_f )
headers = reader.next()
target_index = headers.index( target_col )
indexes2tokenize = map( lambda x: headers.index( x ), cols2tokenize )
indexes2binarize = map( lambda x: headers.index( x ), cols2binarize )
indexes2drop = map( lambda x: headers.index( x ), cols2drop )
n = 0
unique_values = defaultdict( set )
for line in reader:
for i in indexes2binarize:
value = line[i]
unique_values[i].add( value )
for i in indexes2tokenize:
words = get_words( line[i] )
unique_values[i].update( words )
n += 1
if n % 10000 == 0:
print n
# print counts
for i in sorted( unique_values ):
l = len( unique_values[i] )
print "index: %s, count: %s" % ( i, l )
if l < 100:
pass
# print unique_values[i]
# --- End of optional/cols_dimensionality.py (repo: LiaoPan/kaggle-advertised-salaries, license: mit) ---
"""
Implementation of the DNS protocol for use in Pants.
"""
###############################################################################
# Imports
###############################################################################
import collections
import itertools
import os
import random
import socket
import struct
import sys
import time
from pants.engine import Engine
from pants.stream import Stream
from pants.datagram import Datagram
###############################################################################
# Logging
###############################################################################
import logging
log = logging.getLogger(__name__)
###############################################################################
# Constants
###############################################################################
# Return Values
DNS_TIMEOUT = -1
DNS_OK = 0
DNS_FORMATERROR = 1
DNS_SERVERFAILURE = 2
DNS_NAMEERROR = 3
DNS_NOTIMPLEMENTED = 4
DNS_REFUSED = 5
# DNS Listening Port
DNS_PORT = 53
# Query Types
(A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT,
RP, AFSDB, X25, ISDN, RT, NSAP, NSAP_PTR, SIG, KEY, PX, GPOS, AAAA, LOC, NXT,
EID, NIMLOC, SRV, ATMA, NAPTR, KX, CERT, A6, DNAME, SINK, OPT, APL, DS, SSHFP,
IPSECKEY, RRSIG, NSEC, DNSKEY, DHCID, NSEC3, NSEC3PARAM) = range(1,52)
QTYPES = "A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT, RP, AFSDB, X25, ISDN, RT, NSAP, NSAP_PTR, SIG, KEY, PX, GPOS, AAAA, LOC, NXT, EID, NIMLOC, SRV, ATMA, NAPTR, KX, CERT, A6, DNAME, SINK, OPT, APL, DS, SSHFP, IPSECKEY, RRSIG, NSEC, DNSKEY, DHCID, NSEC3, NSEC3PARAM".split(', ')
# OPCODEs
OP_QUERY = 0
OP_IQUERY = 1
OP_STATUS = 2
# Query Classes
IN = 1
# Default Servers
DEFAULT_SERVERS = [
'127.0.0.1',
'8.8.8.8'
]
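# These defaults are appended as fallbacks after any servers discovered from the
# operating system in list_dns_servers().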
# Internal Exception
class TooShortError(ValueError):
pass
# RDATA Declarations
RDATA_TYPES = {
A: (('address', 'ipv4'), ),
NS: 'name',
MD: 'name',
MF: 'name',
CNAME: 'name',
SOA: (('mname', 'name'), ('rname', 'name'), ('serial|refresh|retry|expire|minimum', '!LlllL')),
MB: 'name',
MG: 'name',
MR: 'name',
NULL: 'str',
WKS: (('address', 'ipv4'), ('protocol', '!B'), ('map', 'str')),
PTR: 'name',
HINFO: (('cpu', 'lstr'), ('os', 'lstr')),
MINFO: (('rmailbx', 'name'), ('emailbx', 'name')),
MX: (('preference', '!H'), ('name', 'name')),
TXT: 'strs',
RP: (('mbox', 'name'), ('txt', 'name')),
AAAA: (('address', 'ipv6'), ),
SRV: (('priority|weight|port', '!3H'), ('target', 'name')),
DNAME: 'name',
DNSKEY: (('flags|protocol|algorithm', '!H2B'), ('key', 'str')),
}
RDATA_TUPLES = {}
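# The loop below builds one namedtuple class per declared record type, e.g.
# MX_Record(preference, name) or A_Record(address), so parsed RDATA can be
# returned as structured values.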
for k,v in RDATA_TYPES.iteritems():
# Get the Name.
nm = '%s_Record' % QTYPES[k-1]
if v == 'strs':
continue
elif v == 'str':
RDATA_TUPLES[k] = collections.namedtuple(nm, ['value'])
continue
elif v == 'name':
RDATA_TUPLES[k] = collections.namedtuple(nm, ['name'])
continue
keys = []
for fn, ft in v:
if '|' in fn:
keys.extend(fn.split('|'))
else:
keys.append(fn)
RDATA_TUPLES[k] = collections.namedtuple(nm, keys)
###############################################################################
# OS-Specific DNS Server Listing and hosts Code
###############################################################################
if os.name == 'nt':
from ctypes import c_int, c_void_p, POINTER, windll, wintypes, \
create_string_buffer, c_char, c_char_p, c_size_t
DWORD = wintypes.DWORD
LPCWSTR = wintypes.LPCWSTR
DNS_CONFIG_DNS_SERVER_LIST = 6
DnsQueryConfig = windll.dnsapi.DnsQueryConfig
DnsQueryConfig.argtypes = [
c_int, # __in DNS_CONFIG_TYPE Config,
DWORD, # __in DWORD Flag,
LPCWSTR, # __in_opt PCWSTR pwsAdapterName,
c_void_p, # __in_opt PVOID pReserved,
POINTER(c_char), # __out PVOID pBuffer,
POINTER(DWORD), # __inout PDWORD pBufferLength
]
def list_dns_servers():
# First, figure out how much data we need.
needed = DWORD(0)
result = DnsQueryConfig(DNS_CONFIG_DNS_SERVER_LIST,
0, None, None, None, needed)
if result == 0:
if needed.value == 0:
# No results, apparently.
return DEFAULT_SERVERS[:]
else:
result = 234
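# 234 is ERROR_MORE_DATA, the expected result when asking only for the
# required buffer size.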
if result != 234:
raise Exception("Unexpected result calling DnsQueryConfig, %d." % result)
# Now, call it.
buf = create_string_buffer(needed.value)
result = DnsQueryConfig(DNS_CONFIG_DNS_SERVER_LIST,
0, None, None, buf, needed)
if result == 234:
# Set the number of IPs to the space we have.
ips = (needed.value - 4) / 4
else:
# The first DWORD of the returned IP4_ARRAY is the address count.
ips = struct.unpack('I',buf[0:4])[0]
# Extract each 4-byte address and format it in dotted-quad notation.
out = []
for i in xrange(ips):
start = (i+1) * 4
out.append(socket.inet_ntoa(buf[start:start+4]))
out.extend(DEFAULT_SERVERS)
return out
# Additional Functions
if not hasattr(socket, 'inet_pton') and hasattr(windll, 'ws2_32') and hasattr(windll.ws2_32, 'inet_pton'):
_inet_pton = windll.ws2_32.inet_pton
_inet_pton.argtypes = [
c_int, # __in INT Family,
c_char_p, # __in PCTSTR pszAddrString,
POINTER(c_char), # __out PVOID pAddrBuf
]
def inet_pton(address_family, ip_string):
"""
Convert an IP address from its family-specific string format to a
packed, binary format. inet_pton() is useful when a library or
network protocol calls for an object of type ``struct in_addr`` or
``struct in6_addr``.
=============== ============
Argument Description
=============== ============
address_family Supported values are ``socket.AF_INET`` and ``socket.AF_INET6``.
ip_string The IP address to pack.
=============== ============
"""
if not address_family in (socket.AF_INET, socket.AF_INET6):
raise socket.error(97, os.strerror(97))
if address_family == socket.AF_INET:
bytes = 5
else:
bytes = 17
buf = create_string_buffer(bytes)
result = _inet_pton(address_family, ip_string, buf)
if result == 0:
raise socket.error("illegal IP address string passed to inet_pton")
elif result != 1:
raise socket.error("unknown error calling inet_pton")
return buf.raw[:bytes-1]
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop') and hasattr(windll, 'ws2_32') and hasattr(windll.ws2_32, 'inet_ntop'):
_inet_ntop = windll.ws2_32.inet_ntop
_inet_ntop.argtypes = [
c_int, # __in INT Family,
POINTER(c_char), # __in PVOID pAddr,
c_char_p, # __out PTSTR pStringBuf,
c_size_t, # __in size_t StringBufSize
]
def inet_ntop(address_family, packed_ip):
"""
Convert a packed IP address (a string of some number of characters)
to its standard, family-specific string representation (for
example, ``'7.10.0.5'`` or ``'5aef:2b::8'``). inet_ntop() is useful
when a library or network protocol returns an object of type
``struct in_addr`` or ``struct in6_addr``.
=============== ============
Argument Description
=============== ============
address_family Supported values are ``socket.AF_INET`` and ``socket.AF_INET6``.
packed_ip The IP address to unpack.
=============== ============
"""
if not address_family in (socket.AF_INET, socket.AF_INET6):
raise socket.error(97, os.strerror(97))
if address_family == socket.AF_INET:
bytes = 17
else:
bytes = 47
buf = create_string_buffer(bytes)
result = _inet_ntop(address_family, packed_ip, buf, bytes)
if not result:
raise socket.error("unknown error calling inet_ntop")
return buf.value
socket.inet_ntop = inet_ntop
host_path = os.path.join(os.path.expandvars("%SystemRoot%"), "system32", "drivers", "etc", "hosts")
else:
# *nix is way easier. Parse resolv.conf.
def list_dns_servers():
out = []
try:
with open('/etc/resolv.conf','r') as f:
for l in f.readlines():
if l.startswith('nameserver '):
out.append(l[11:].strip())
except IOError:
pass
out.extend(DEFAULT_SERVERS)
return out
host_path = "/etc/hosts"
###############################################################################
# Hosts
###############################################################################
hosts = {A: {}, AAAA: {}}
host_m = None
host_time = None
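# load_hosts() re-reads the system hosts file into the caches above, skipping
# the parse when the file's mtime and size are unchanged since the last read.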
def load_hosts():
global host_m
global host_time
host_time = time.time()
try:
stat = os.stat(host_path)
if hosts and host_m is not None:
if host_m == (stat.st_mtime, stat.st_size):
return
hosts[A].clear()
hosts[AAAA].clear()
with open(host_path, 'r') as f:
for l in f.readlines():
l = l.strip().split(None, 1)
if len(l) < 2 or l[0].startswith('#') or not all(l):
continue
ip = l[0].strip()
host = [x.strip() for x in l[1].split()]
try:
socket.inet_aton(ip)
for h in host:
hosts[A][h] = ip
except socket.error:
if hasattr(socket, 'inet_pton'):
try:
socket.inet_pton(socket.AF_INET6, ip)
for h in host:
hosts[AAAA][h] = ip
except socket.error:
continue
host_m = (stat.st_mtime, stat.st_size)
except (OSError, ValueError):
pass
if not 'localhost' in hosts[A]:
hosts[A]['localhost'] = '127.0.0.1'
if not 'localhost' in hosts[AAAA]:
hosts[AAAA]['localhost'] = '::1'
load_hosts()
###############################################################################
# DNSMessage Class
###############################################################################
class DNSMessage(object):
"""
This class stores all the information used in a DNS message, and can either
generate valid messages to be sent to a server or read messages from a
server.
To convert an instance of DNSMessage into a byte string for sending to the
server, simply use str() on it. To read a message from the server into an
instance of DNSMessage, use DNSMessage.from_string().
"""
__slots__ = ('id','qr','opcode','aa','tc','rd','ra','rcode','server',
'questions','answers','authrecords','additional')
def __init__(self, id=None, qr=False, opcode=OP_QUERY, aa=False, tc=False,
rd=True, ra=True, rcode=DNS_OK):
self.id = id
self.qr = qr
self.opcode = opcode
self.aa = aa
self.tc = tc
self.rd = rd
self.ra = ra
self.rcode = rcode
self.server = None
self.questions = []
self.answers = []
self.authrecords = []
self.additional = []
def __str__(self):
return self.to_string()
def to_string(self, limit=None):
"""
Render the DNSMessage as a string of bytes that can be sent to a DNS
server. If a *limit* is specified and the length of the string exceeds
        that limit, the truncation (TC) flag will automatically be set to True.
========= ======== ============
Argument Default Description
========= ======== ============
limit None *Optional.* The maximum size of the message to generate, in bytes.
========= ======== ============
"""
out = ""
## Body
for q in self.questions:
qname, qtype, qclass = q
for part in qname.split('.'):
out += chr(len(part)) + part
out += '\x00' + struct.pack('!2H', qtype, qclass)
for q in itertools.chain(self.answers, self.authrecords, self.additional):
name, typ, clss, ttl, rdata = q
for part in name.split('.'):
out += chr(len(part)) + part
out += '\x00%s%s' % (
struct.pack('!2HIH', typ, clss, ttl, len(rdata)),
rdata
)
## Header
if limit:
tc = len(out) + 12 > limit
out = out[:(limit-12)]
else:
tc = self.tc
byte3 = (self.qr << 7) | (self.opcode << 3) | (self.aa << 2) | \
(tc << 1) | self.rd
byte4 = (self.ra << 7) | self.rcode
hdr = struct.pack('!H2B4H', self.id, byte3, byte4, len(self.questions),
len(self.answers), len(self.authrecords), len(self.additional))
return hdr + out
@classmethod
def from_string(cls, data):
"""
Create a DNSMessage instance containing the provided data in a usable
format.
========= ============
Argument Description
========= ============
data The data to parse into a DNSMessage instance.
========= ============
"""
if len(data) < 12:
raise TooShortError
self = cls()
full_data = data
self.id, byte3, byte4, qdcount, ancount, nscount, arcount = \
struct.unpack('!H2B4H', data[:12])
self.qr = bool(byte3 >> 7)
self.opcode = (byte3 & 120) >> 3
self.aa = bool((byte3 & 4) >> 2)
self.tc = bool((byte3 & 2) >> 1)
self.rd = bool(byte3 & 1)
self.ra = bool(byte4 >> 7)
self.rcode = byte4 & 15
data = data[12:]
try:
for i in xrange(qdcount):
qname, qtype, qclass, bytes = readQuery(data, full_data)
data = data[bytes:]
self.questions.append((qname, qtype, qclass))
for i in xrange(ancount):
name, typ, clss, ttl, rdata, bytes = readAnswer(data, full_data)
data = data[bytes:]
self.answers.append((name, typ, clss, ttl, rdata))
for i in xrange(nscount):
name, typ, clss, ttl, rdata, bytes = readAnswer(data, full_data)
data = data[bytes:]
self.authrecords.append((name, typ, clss, ttl, rdata))
for i in xrange(arcount):
name, typ, clss, ttl, rdata, bytes = readAnswer(data, full_data)
data = data[bytes:]
self.additional.append((name, typ, clss, ttl, rdata))
except TooShortError:
if not self.tc:
raise
return self
###############################################################################
# Message Reading Functions
###############################################################################
def readName(data, full_data=None):
"""
Read a QNAME from the bytes of a DNS message.
"""
if not data:
raise TooShortError
orig = len(data)
name = None
while True:
if not data:
raise TooShortError
l = ord(data[0])
if full_data and l & 0xC0 == 0xC0:
offset, = struct.unpack('!H', data[:2])
offset ^= 0xC000
if name:
name += '.%s' % readName(full_data[offset:], full_data)[0]
else:
name = readName(full_data[offset:], full_data)[0]
data = data[2:]
break
elif l == 0:
data = data[1:]
break
if len(data) < 1 + l:
raise TooShortError
if name:
name += '.%s' % data[1:l+1]
else:
name = data[1:l+1]
data = data[1+l:]
return name, orig - len(data)
def readAnswer(data, full_data):
"""
Read an answer (or similarly formatted record) from a DNS message.
"""
if not data:
raise TooShortError
orig = len(data)
name, bytes = readName(data, full_data)
data = data[bytes:]
if len(data) < 10:
raise TooShortError
typ, clss, ttl, rdlength = struct.unpack('!2HIH', data[:10])
data = data[10:]
if not data or len(data) < rdlength:
raise TooShortError
rdata = readRDATA(data[:rdlength], full_data, typ)
data = data[rdlength:]
return name, typ, clss, ttl, rdata, orig - len(data)
def readQuery(data, full_data):
"""
Read a query from a DNS message.
"""
if not data:
raise TooShortError
orig = len(data)
qname, bytes = readName(data, full_data)
data = data[bytes:]
if len(data) < 4:
raise TooShortError
qtype, qclass = struct.unpack('!2H', data[:4])
return qname, qtype, qclass, (orig - len(data)) + 4
def readRDATA(data, full_data, qtype):
"""
Read RDATA for a given QTYPE into an easy-to-use namedtuple.
"""
if not qtype in RDATA_TYPES:
return data
format = RDATA_TYPES[qtype]
    # Special case for TXT.
if format == 'strs':
values = []
while data:
l = ord(data[0])
values.append(data[1:1+l:])
data = data[1+l:]
return tuple(values)
tup = RDATA_TUPLES[qtype]
if format == 'name':
return tup(readName(data, full_data)[0])
values = []
for fn, ft in format:
if ft == 'ipv4':
values.append(socket.inet_ntoa(data[:4]))
data = data[4:]
elif ft == 'ipv6':
if hasattr(socket, 'inet_ntop'):
values.append(socket.inet_ntop(socket.AF_INET6, data[:16]))
else:
values.append(data[:16])
data = data[16:]
elif ft == 'lstr':
l = ord(data[0])
values.append(data[1:1+l])
data = data[1+l:]
elif ft == 'name':
v, bytes = readName(data, full_data)
data = data[bytes:]
values.append(v)
elif ft == 'str':
values.append(data)
data = ''
else:
sz = struct.calcsize(ft)
values.extend(struct.unpack(ft, data[:sz]))
data = data[sz:]
return tup(*values)
###############################################################################
# _DNSStream Class
###############################################################################
class _DNSStream(Stream):
"""
A subclass of Stream that makes things way easier inside Resolver.
"""
def __init__(self, resolver, id, **kwargs):
Stream.__init__(self, **kwargs)
self.resolver = resolver
self.id = id
self.response = ''
def on_connect(self):
if not self.id in self.resolver._messages:
if self.id in self.resolver._tcp:
del self.resolver._tcp[self.id]
self.close()
return
message = str(self.resolver._messages[self.id][1])
self._wait_for_write_event = True
self.write(message)
def on_read(self, data):
if not self.id in self.resolver._messages:
if self.id in self.resolver._tcp:
del self.resolver._tcp[self.id]
self.close()
return
self.response += data
try:
m = DNSMessage.from_string(self.response)
except TooShortError:
return
if self.remote_address and isinstance(self.remote_address, tuple):
m.server = '%s:%d' % self.remote_address
if self.id in self.resolver._tcp:
del self.resolver._tcp[self.id]
self.close()
self.resolver.receive_message(m)
###############################################################################
# Resolver Class
###############################################################################
class Resolver(object):
"""
The Resolver class generates DNS messages, sends them to remote servers,
and processes any responses. The bulk of the heavy lifting is done in
DNSMessage and the RDATA handling functions, however.
========= ============
Argument Description
========= ============
servers *Optional.* A list of DNS servers to query. If a list isn't provided, Pants will attempt to retrieve a list of servers from the OS, falling back to a list of default servers if none are available.
engine *Optional.* The :class:`pants.engine.Engine` instance to use.
========= ============
"""
def __init__(self, servers=None, engine=None):
self.servers = servers or list_dns_servers()
self.engine = engine or Engine.instance()
# Internal State
self._messages = {}
self._cache = {}
self._queries = {}
self._tcp = {}
self._udp = None
self._last_id = -1
def _safely_call(self, callback, *args, **kwargs):
try:
callback(*args, **kwargs)
except Exception:
log.exception('Error calling callback for DNS result.')
def _error(self, message, err=DNS_TIMEOUT):
if not message in self._messages:
return
if message in self._tcp:
try:
self._tcp[message].close()
except Exception:
pass
del self._tcp[message]
callback, message, df_timeout, media, data = self._messages[message]
del self._messages[message.id]
try:
df_timeout.cancel()
except Exception:
pass
if err == DNS_TIMEOUT and data:
self._safely_call(callback, DNS_OK, data)
else:
self._safely_call(callback, err, None)
def _init_udp(self):
"""
Create a new Datagram instance and listen on a socket.
"""
self._udp = Datagram(engine=self.engine)
self._udp.on_read = self.receive_message
start = port = random.randrange(10005, 65535)
while True:
try:
self._udp.listen(('',port))
break
except Exception:
port += 1
if port > 65535:
port = 10000
if port == start:
raise Exception("Can't listen on any port.")
def send_message(self, message, callback=None, timeout=10, media=None):
"""
Send an instance of DNSMessage to a DNS server, and call the provided
callback when a response is received, or if the action times out.
========= ======== ============
Argument Default Description
========= ======== ============
message The :class:`DNSMessage` instance to send to the server.
callback None *Optional.* The function to call once the response has been received or the attempt has timed out.
timeout 10 *Optional.* How long, in seconds, to wait before timing out.
media None *Optional.* Whether to use UDP or TCP. UDP is used by default.
========= ======== ============
"""
while message.id is None or message.id in self._messages:
self._last_id += 1
if self._last_id > 65535:
self._last_id = 0
message.id = self._last_id
# Timeout in timeout seconds.
df_timeout = self.engine.defer(timeout, self._error, message.id)
# Send the Message
msg = str(message)
if media is None:
media = 'udp'
#if len(msg) > 512:
# media = 'tcp'
#else:
# media = 'udp'
# Store Info
self._messages[message.id] = callback, message, df_timeout, media, None
if media == 'udp':
if self._udp is None:
self._init_udp()
try:
self._udp.write(msg, (self.servers[0], DNS_PORT))
except Exception:
# Pants gummed up. Try again.
self._next_server(message.id)
self.engine.defer(0.5, self._next_server, message.id)
else:
tcp = self._tcp[message.id] = _DNSStream(self, message.id)
tcp.connect((self.servers[0], DNS_PORT))
def _next_server(self, id):
if not id in self._messages or id in self._tcp:
return
# Cycle the list.
self.servers.append(self.servers.pop(0))
msg = str(self._messages[id][1])
try:
self._udp.write(msg, (self.servers[0], DNS_PORT))
except Exception:
try:
self._udp.close()
except Exception:
pass
del self._udp
self._init_udp()
self._udp.write(msg, (self.servers[0], DNS_PORT))
def receive_message(self, data):
if not isinstance(data, DNSMessage):
try:
data = DNSMessage.from_string(data)
except TooShortError:
if len(data) < 2:
return
                # struct.unpack returns a tuple; take the single id value.
                id = struct.unpack("!H", data[:2])[0]
if not id in self._messages:
return
self._error(id, err=DNS_FORMATERROR)
return
if not data.id in self._messages:
return
callback, message, df_timeout, media, _ = self._messages[data.id]
#if data.tc and media == 'udp':
# self._messages[data.id] = callback, message, df_timeout, 'tcp', data
# tcp = self._tcp[data.id] = _DNSStream(self, message.id)
# tcp.connect((self.servers[0], DNS_PORT))
# return
if not data.server:
if self._udp and isinstance(self._udp.remote_address, tuple):
data.server = '%s:%d' % self._udp.remote_address
else:
data.server = '%s:%d' % (self.servers[0], DNS_PORT)
try:
df_timeout.cancel()
except Exception:
pass
del self._messages[data.id]
self._safely_call(callback, DNS_OK, data)
def query(self, name, qtype=A, qclass=IN, callback=None, timeout=10, allow_cache=True, allow_hosts=True):
"""
Make a DNS request of the given QTYPE for the given name.
============ ======== ============
Argument Default Description
============ ======== ============
name The name to query.
qtype A *Optional.* The QTYPE to query.
qclass IN *Optional.* The QCLASS to query.
callback None *Optional.* The function to call when a response for the query has been received, or when the request has timed out.
timeout 10 *Optional.* The time, in seconds, to wait before timing out.
allow_cache True *Optional.* Whether or not to use the cache. If you expect to be performing thousands of requests, you may want to disable the cache to avoid excess memory usage.
allow_hosts True *Optional.* Whether or not to use any records gathered from the OS hosts file.
============ ======== ============
"""
if not isinstance(qtype, (list,tuple)):
qtype = (qtype, )
if allow_hosts:
if host_time + 30 < time.time():
load_hosts()
cname = None
if name in self._cache and CNAME in self._cache[name]:
cname = self._cache[name][CNAME]
result = []
if AAAA in qtype and name in hosts[AAAA]:
result.append(hosts[AAAA][name])
if A in qtype and name in hosts[A]:
result.append(hosts[A][name])
if result:
if callback:
self._safely_call(callback, DNS_OK, cname, None, tuple(result))
return
if allow_cache and name in self._cache:
cname = self._cache[name].get(CNAME, None)
tm = time.time()
result = []
min_ttl = sys.maxint
for t in qtype:
death, ttl, rdata = self._cache[name][(t, qclass)]
if death < tm:
del self._cache[name][(t, qclass)]
continue
min_ttl = min(ttl, min_ttl)
if rdata:
result.extend(rdata)
if callback:
self._safely_call(callback, DNS_OK, cname, min_ttl,
tuple(result))
return
# Build a message and add our question.
m = DNSMessage()
m.questions.append((name, qtype[0], qclass))
# Make the function for handling our response.
def handle_response(status, data):
cname = None
# TTL is 30 by default, so answers with no records we want will be
# repeated, but not too often.
ttl = sys.maxint
if not data:
self._safely_call(callback, status, None, None, None)
return
rdata = {}
final_rdata = []
for (aname, atype, aclass, attl, ardata) in data.answers:
if atype == CNAME:
cname = ardata[0]
if atype in qtype and aclass == qclass:
ttl = min(attl, ttl)
if len(ardata) == 1:
rdata.setdefault(atype, []).append(ardata[0])
final_rdata.append(ardata[0])
else:
rdata.setdefault(atype, []).append(ardata)
final_rdata.append(ardata)
final_rdata = tuple(final_rdata)
ttl = min(30, ttl)
if allow_cache:
if not name in self._cache:
self._cache[name] = {}
if cname:
self._cache[name][CNAME] = cname
for t in qtype:
self._cache[name][(t, qclass)] = time.time() + ttl, ttl, rdata.get(t, [])
if data.rcode != DNS_OK:
status = data.rcode
self._safely_call(callback, status, cname, ttl, final_rdata)
# Send it, so we get an ID.
self.send_message(m, handle_response)
resolver = Resolver()
###############################################################################
# Helper Functions
###############################################################################
query = resolver.query
send_message = resolver.send_message
def gethostbyaddr(ip_address, callback, timeout=10):
"""
Returns a tuple ``(hostname, aliaslist, ipaddrlist)``, functioning similarly
to :func:`socket.gethostbyaddr`. When the information is available, it will
be passed to callback. If the attempt fails, the callback will be called
with None instead.
=========== ======== ============
Argument Default Description
=========== ======== ============
ip_address The IP address to look up information on.
callback The function to call when a result is available.
timeout 10 *Optional.* How long, in seconds, to wait before timing out.
=========== ======== ============
"""
is_ipv6 = False
if hasattr(socket, 'inet_pton'):
try:
addr = socket.inet_pton(socket.AF_INET6, ip_address)
is_ipv6 = True
except socket.error:
try:
addr = socket.inet_pton(socket.AF_INET, ip_address)
except socket.error:
raise ValueError("%r is not a valid IP address." % ip_address)
else:
try:
addr = socket.inet_aton(ip_address)
except socket.error:
is_ipv6 = True
if is_ipv6:
if not hasattr(socket, 'inet_pton'):
raise ImportError("socket lacks inet_pton.")
addr = socket.inet_pton(socket.AF_INET6, ip_address)
name = ''.join('%02x' % ord(c) for c in addr)
name = '.'.join(reversed(name)) + '.ip6.arpa'
else:
name = '.'.join(reversed(ip_address.split('.'))) + '.in-addr.arpa'
def handle_response(status, cname, ttl, rdata):
if status != DNS_OK:
res = None
else:
if not rdata:
res = None
else:
res = rdata[0], [name] + list(rdata[1:]), [ip_address]
try:
callback(res)
except Exception:
log.exception('Error calling callback for gethostbyaddr.')
resolver.query(name, qtype=PTR, callback=handle_response, timeout=timeout)
def gethostbyname(hostname, callback, timeout=10):
"""
Translate a host name to an IPv4 address, functioning similarly to
:func:`socket.gethostbyname`. When the information becomes available, it
will be passed to callback. If the underlying query fails, the callback
will be called with None instead.
========= ======== ============
Argument Default Description
========= ======== ============
hostname The hostname to look up information on.
callback The function to call when a result is available.
timeout 10 *Optional.* How long, in seconds, to wait before timing out.
========= ======== ============
"""
def handle_response(status, cname, ttl, rdata):
if status != DNS_OK or not rdata:
res = None
else:
res = rdata[0]
try:
callback(res)
except Exception:
log.exception('Error calling callback for gethostbyname.')
resolver.query(hostname, qtype=A, callback=handle_response, timeout=timeout)
def gethostbyname_ex(hostname, callback, timeout=10):
"""
Translate a host name to an IPv4 address, functioning similarly to
    :func:`socket.gethostbyname_ex`, returning a tuple
``(hostname, aliaslist, ipaddrlist)``. When the information becomes
available, it will be passed to callback. If the underlying query fails,
the callback will be called with None instead.
========= ======== ============
Argument Default Description
========= ======== ============
hostname The hostname to look up information on.
callback The function to call when a result is available.
timeout 10 *Optional.* How long, in seconds, to wait before timing out.
========= ======== ============
"""
def handle_response(status, cname, ttl, rdata):
if status != DNS_OK or not rdata:
res = None
else:
if cname != hostname:
res = cname, [hostname], list(rdata)
else:
res = cname, [], list(rdata)
try:
callback(res)
except Exception:
log.exception('Error calling callback for gethostbyname_ex.')
resolver.query(hostname, qtype=A, callback=handle_response, timeout=timeout)
###############################################################################
# Synchronous Support
###############################################################################
class Synchroniser(object):
__slots__ = ('_parent',)
def __init__(self, parent):
self._parent = parent
def __getattr__(self, key):
if key.startswith('_'):
return object.__getattribute__(self, key)
func = self._parent[key]
if not callable(func):
raise ValueError("%r isn't callable." % key)
def doer(*a, **kw):
if Engine.instance()._running:
raise RuntimeError("synchronous calls cannot be made while Pants is already running.")
data = []
def callback(*a,**kw):
if kw:
if a:
a = a + (kw, )
else:
a = kw
if isinstance(a, tuple) and len(a) == 1:
a = a[0]
data.append(a)
Engine.instance().stop()
kw['callback'] = callback
func(*a, **kw)
Engine.instance().start()
return data[0]
doer.__name__ = func.__name__
return doer
sync = synchronous = Synchroniser(globals())
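# A hedged usage sketch for the synchronous wrapper above: attribute access on
# ``sync`` looks up the module-level function of the same name, runs it on a
# temporary engine loop, and returns what would otherwise be passed to the
# callback. It must not be used while the Pants engine is already running.
#
#     ip = sync.gethostbyname('example.com')
#     status, cname, ttl, rdata = sync.query('example.com', qtype=A)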
|
{
"content_hash": "d4de807e197724078a19307429f086f3",
"timestamp": "",
"source": "github",
"line_count": 1173,
"max_line_length": 313,
"avg_line_length": 31.86018755328218,
"alnum_prop": 0.48905597773734344,
"repo_name": "ecdavis/pants",
"id": "fc3c922cf1e1fa791b30248fd74cce7e2fb958bb",
"size": "38139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pants/util/dns.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2236"
},
{
"name": "HTML",
"bytes": "496"
},
{
"name": "JavaScript",
"bytes": "1198"
},
{
"name": "Python",
"bytes": "673768"
}
],
"symlink_target": ""
}
|
from src.Chart import Chart
from src.ColorSelector import ColorSelector
import os
def export_charts(title, desc, charts):
export = ''
export += '<html>'
export += get_header()
export += '<br/><div class="container">'
export += '<div class="row">'
export += '<div class="col-md-6"><div class="jumbotron">' + title + '</div></div>'
export += '<div class="col-md-6">'+desc+'</div>'
export += '</div><div class="row">'
export += '<div id="ALL_CHARTS">'
for i in range(len(charts)):
if i == 0:
export += "<div class='col-md-12'>"+create_chart_div(charts[i], i)+"</div>"
else:
export += "<div class='col-md-6'>"+create_chart_div(charts[i], i)+"</div>"
export += '</div></div>'
export += '<script>correctAllGraphs()</script>'
for i in range(len(charts)):
export += write_chart_script(charts[i], i)
export += '<script> allData = ['
for i in range(len(charts)):
export += 'data'+str(i)
if i < len(charts) -1:
export += ','
export += '];createCompleteDownload()</script>'
export += '<br/><br/><br/>'
export += write_footer()
export += '</div>'
export += '</html>'
return export
def get_header():
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # Read the bundled assets with context managers so the files are closed.
    with open(os.path.join(__location__, 'script.js'), 'r') as scripts, \
            open(os.path.join(__location__, 'style.css'), 'r') as styles, \
            open(os.path.join(__location__, 'helper.js'), 'r') as helper:
        header = '<head>'
        header += '<script>' + scripts.read().replace('\n', '') + '</script>'
        header += '<script>' + helper.read() + '</script>'
        header += '<style>' + styles.read().replace('\n', '') + '</style>'
        header += '</head>'
    return header
def create_chart_div(chart, index):
div = '<div class="panel panel-default"><div class="panel-heading"><h3 class="panel-title">'
div += chart.get_name() + '</h3></div><div class="panel-body">'
div += '<canvas class="pychart" id="c' +str(index) + '" width="100%" height="400"></canvas>'
div += '</div></div>'
return div
def write_chart_script(chart, index):
script = '\n<script>\n'
script += 'var chartref = document.getElementById("c'+str(index)+'");\n'
if chart.get_chart_type() == "Line" or chart.get_chart_type() == "Bar" or chart.get_chart_type() == "Radar":
script += write_line_chart_data(chart, index)
if chart.get_chart_type() == "Pie":
script += write_pie_chart_data(chart, index)
script += '\n</script>\n'
return script
def write_pie_chart_data(chart, index):
selector = ColorSelector()
script = "var data"+str(index)+" = ["
for i in range(len(chart.get_all_data_from_set(chart.get_data_sets()[0]))):
color = selector.get_random_color()
data_val = chart.get_all_data_from_set(chart.get_data_sets()[0])[i]
script += '{value:'+str(data_val)+',color:"#'+color+'", highlight: "#'+color+'", label: "'+str(chart.get_data_labels()[i])+'" }'
if i != (len(chart.get_all_data_from_set(chart.get_data_sets()[0]))) - 1:
script += ','
script += "];\n"
script += 'var myPieChart = new Chart(chartref.getContext("2d")).Pie(data'+str(index)+');'
return script
def write_line_chart_data(chart, index):
"""
:param chart :
:return:
"""
# create a selector for getting colors
selector = ColorSelector()
# Variable declaration that stores chart info
script = "var data"+str(index)+" = {\n"
# get correct labels
labels = []
if chart.can_sort_data_numerically():
labels = chart.get_sorted_labels()
else:
labels = chart.get_data_labels()
script += 'labels:'+str(labels) + ', '
# Print out the data set info
script += "datasets : ["
for s in range(len(chart.get_data_sets())):
data_set = chart.get_data_sets()[s]
data = []
if chart.can_sort_data_numerically():
data = chart.sort_data_set_by_label(data_set)
else:
data = chart.get_all_data_from_set(data_set)
color = selector.get_random_color()
script += '{\n'
script += 'data: ' + str(data) + ',\n'
script += 'label:"' + data_set + '",\n'
script += 'fillColor:' + '"rgba(220,220,220,0.0)" ,\n'
script += 'strokeColor:' + '"#'+color + '",\n'
script += 'pointColor:' + '"#'+color + '",\n'
script += 'pointStrokeColor:' + '"#'+color + '",\n'
script += 'pointHighlightFill:' + '"#'+color + '",\n'
script += 'pointHighlightStroke:' + "'#"+color + "'\n"
script += '}'
if s < len(chart.get_data_sets())-1:
script += ","
script += "]};\n"
if chart.get_chart_type() == "Line":
script += "var myLineChart = new Chart(chartref.getContext('2d')).Line(data"+str(index)+");\n"
elif chart.get_chart_type() == "Bar":
script += "var myBarChart = new Chart(chartref.getContext('2d')).Bar(data"+str(index)+");\n"
elif chart.get_chart_type() == "Radar":
script += "var myRadarChart = new Chart(chartref.getContext('2d')).Radar(data"+str(index)+");\n"
return script
def write_footer():
footer = '<nav class="navbar navbar-default navbar-fixed-bottom">'\
'<div class="container-fluid">'\
'<p class="text-center" style="margin-top:10px" > <a href="#" onclick="allDownload()" class="navbar-link">Download</a></p>'\
'</div>'\
'</nav>'
return footer
|
{
"content_hash": "05469c71dfbf7d527795becef9dc6bd1",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 137,
"avg_line_length": 27.271844660194176,
"alnum_prop": 0.5494838020647917,
"repo_name": "EliCDavis/PyChart",
"id": "3fde20acd4ae14d5c732f042256b671cf592bfa3",
"size": "5618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Export.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "12928"
}
],
"symlink_target": ""
}
|
import numbers
import numpy as np
from scipy.sparse import issparse
from warnings import warn
from ..tree import ExtraTreeRegressor
from ..utils import (
check_random_state,
check_array,
gen_batches,
get_chunk_n_rows,
)
from ..utils.fixes import _joblib_parallel_args
from ..utils.validation import check_is_fitted, _num_samples
from ..base import OutlierMixin
from ._bagging import BaseBagging
__all__ = ["IsolationForest"]
class IsolationForest(OutlierMixin, BaseBagging):
"""
Isolation Forest Algorithm.
Return the anomaly score of each sample using the IsolationForest algorithm
The IsolationForest 'isolates' observations by randomly selecting a feature
and then randomly selecting a split value between the maximum and minimum
values of the selected feature.
Since recursive partitioning can be represented by a tree structure, the
number of splittings required to isolate a sample is equivalent to the path
length from the root node to the terminating node.
This path length, averaged over a forest of such random trees, is a
measure of normality and our decision function.
Random partitioning produces noticeably shorter paths for anomalies.
Hence, when a forest of random trees collectively produce shorter path
lengths for particular samples, they are highly likely to be anomalies.
Read more in the :ref:`User Guide <isolation_forest>`.
.. versionadded:: 0.18
Parameters
----------
n_estimators : int, default=100
The number of base estimators in the ensemble.
max_samples : "auto", int or float, default="auto"
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
- If "auto", then `max_samples=min(256, n_samples)`.
If max_samples is larger than the number of samples provided,
all samples will be used for all trees (no sampling).
contamination : 'auto' or float, default='auto'
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Used when fitting to define the threshold
on the scores of the samples.
- If 'auto', the threshold is determined as in the
original paper.
- If float, the contamination should be in the range (0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : bool, default=False
If True, individual trees are fit on random subsets of the training
data sampled with replacement. If False, sampling without replacement
is performed.
n_jobs : int, default=None
The number of jobs to run in parallel for both :meth:`fit` and
:meth:`predict`. ``None`` means 1 unless in a
:obj:`joblib.parallel_backend` context. ``-1`` means using all
processors. See :term:`Glossary <n_jobs>` for more details.
random_state : int, RandomState instance or None, default=None
Controls the pseudo-randomness of the selection of the feature
and split values for each branching step and each tree in the forest.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
verbose : int, default=0
Controls the verbosity of the tree building process.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
.. versionadded:: 0.21
Attributes
----------
base_estimator_ : ExtraTreeRegressor instance
The child estimator template used to create the collection of
fitted sub-estimators.
estimators_ : list of ExtraTreeRegressor instances
The collection of fitted sub-estimators.
estimators_features_ : list of ndarray
The subset of drawn features for each base estimator.
estimators_samples_ : list of ndarray
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
max_samples_ : int
The actual number of samples.
offset_ : float
Offset used to define the decision function from the raw scores. We
have the relation: ``decision_function = score_samples - offset_``.
``offset_`` is defined as follows. When the contamination parameter is
set to "auto", the offset is equal to -0.5 as the scores of inliers are
close to 0 and the scores of outliers are close to -1. When a
contamination parameter different than "auto" is provided, the offset
is defined in such a way we obtain the expected number of outliers
(samples with decision function < 0) in training.
.. versionadded:: 0.20
n_features_ : int
The number of features when ``fit`` is performed.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Notes
-----
The implementation is based on an ensemble of ExtraTreeRegressor. The
maximum depth of each tree is set to ``ceil(log_2(n))`` where
:math:`n` is the number of samples used to build the tree
(see (Liu et al., 2008) for more details).
References
----------
.. [1] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation forest."
Data Mining, 2008. ICDM'08. Eighth IEEE International Conference on.
.. [2] Liu, Fei Tony, Ting, Kai Ming and Zhou, Zhi-Hua. "Isolation-based
anomaly detection." ACM Transactions on Knowledge Discovery from
Data (TKDD) 6.1 (2012): 3.
See Also
    --------
sklearn.covariance.EllipticEnvelope : An object for detecting outliers in a
Gaussian distributed dataset.
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
sklearn.neighbors.LocalOutlierFactor : Unsupervised Outlier Detection
using Local Outlier Factor (LOF).
Examples
--------
>>> from sklearn.ensemble import IsolationForest
>>> X = [[-1.1], [0.3], [0.5], [100]]
>>> clf = IsolationForest(random_state=0).fit(X)
>>> clf.predict([[0.1], [0], [90]])
array([ 1, 1, -1])
"""
def __init__(
self,
*,
n_estimators=100,
max_samples="auto",
contamination="auto",
max_features=1.0,
bootstrap=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
):
super().__init__(
base_estimator=ExtraTreeRegressor(
max_features=1, splitter="random", random_state=random_state
),
# here above max_features has no links with self.max_features
bootstrap=bootstrap,
bootstrap_features=False,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
)
self.contamination = contamination
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by iforest")
def _parallel_args(self):
# ExtraTreeRegressor releases the GIL, so it's more efficient to use
# a thread-based backend rather than a process-based backend so as
# to avoid suffering from communication overhead and extra memory
# copies.
return _joblib_parallel_args(prefer="threads")
def fit(self, X, y=None, sample_weight=None):
"""
Fit estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Fitted estimator.
"""
X = self._validate_data(X, accept_sparse=["csc"])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
# ensure that max_sample is in [1, n_samples]:
n_samples = X.shape[0]
if self.contamination != "auto":
if not (0.0 < self.contamination <= 0.5):
raise ValueError(
"contamination must be in (0, 0.5], got: %f" % self.contamination
)
if isinstance(self.max_samples, str):
if self.max_samples == "auto":
max_samples = min(256, n_samples)
else:
raise ValueError(
"max_samples (%s) is not supported."
'Valid choices are: "auto", int or'
"float"
% self.max_samples
)
elif isinstance(self.max_samples, numbers.Integral):
if self.max_samples > n_samples:
warn(
"max_samples (%s) is greater than the "
"total number of samples (%s). max_samples "
"will be set to n_samples for estimation."
% (self.max_samples, n_samples)
)
max_samples = n_samples
else:
max_samples = self.max_samples
else: # float
if not 0.0 < self.max_samples <= 1.0:
raise ValueError(
"max_samples must be in (0, 1], got %r" % self.max_samples
)
max_samples = int(self.max_samples * X.shape[0])
self.max_samples_ = max_samples
max_depth = int(np.ceil(np.log2(max(max_samples, 2))))
super()._fit(
X, y, max_samples, max_depth=max_depth, sample_weight=sample_weight
)
if self.contamination == "auto":
# 0.5 plays a special role as described in the original paper.
# we take the opposite as we consider the opposite of their score.
self.offset_ = -0.5
return self
# else, define offset_ wrt contamination parameter
self.offset_ = np.percentile(self.score_samples(X), 100.0 * self.contamination)
return self
def predict(self, X):
"""
Predict if a particular sample is an outlier or not.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
For each observation, tells whether or not (+1 or -1) it should
be considered as an inlier according to the fitted model.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
return is_inlier
def decision_function(self, X):
"""
Average anomaly score of X of the base classifiers.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
a n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal. Negative scores represent outliers,
positive scores represent inliers.
"""
# We subtract self.offset_ to make 0 be the threshold value for being
# an outlier:
return self.score_samples(X) - self.offset_
def score_samples(self, X):
"""
Opposite of the anomaly score defined in the original paper.
The anomaly score of an input sample is computed as
the mean anomaly score of the trees in the forest.
The measure of normality of an observation given a tree is the depth
of the leaf containing this observation, which is equivalent to
the number of splittings required to isolate this point. In case of
several observations n_left in the leaf, the average path length of
a n_left samples isolation tree is added.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
scores : ndarray of shape (n_samples,)
The anomaly score of the input samples.
The lower, the more abnormal.
"""
# code structure from ForestClassifier/predict_proba
check_is_fitted(self)
# Check data
X = self._validate_data(X, accept_sparse="csr", reset=False)
# Take the opposite of the scores as bigger is better (here less
# abnormal)
return -self._compute_chunked_score_samples(X)
def _compute_chunked_score_samples(self, X):
n_samples = _num_samples(X)
if self._max_features == X.shape[1]:
subsample_features = False
else:
subsample_features = True
# We get as many rows as possible within our working_memory budget
# (defined by sklearn.get_config()['working_memory']) to store
# self._max_features in each row during computation.
#
# Note:
# - this will get at least 1 row, even if 1 row of score will
# exceed working_memory.
# - this does only account for temporary memory usage while loading
# the data needed to compute the scores -- the returned scores
# themselves are 1D.
chunk_n_rows = get_chunk_n_rows(
row_bytes=16 * self._max_features, max_n_rows=n_samples
)
slices = gen_batches(n_samples, chunk_n_rows)
scores = np.zeros(n_samples, order="f")
for sl in slices:
# compute score on the slices of test samples:
scores[sl] = self._compute_score_samples(X[sl], subsample_features)
return scores
def _compute_score_samples(self, X, subsample_features):
"""
        Compute the score of each sample in X going through the extra trees.
Parameters
----------
X : array-like or sparse matrix
Data matrix.
subsample_features : bool
Whether features should be subsampled.
"""
n_samples = X.shape[0]
depths = np.zeros(n_samples, order="f")
for tree, features in zip(self.estimators_, self.estimators_features_):
X_subset = X[:, features] if subsample_features else X
leaves_index = tree.apply(X_subset)
node_indicator = tree.decision_path(X_subset)
n_samples_leaf = tree.tree_.n_node_samples[leaves_index]
depths += (
np.ravel(node_indicator.sum(axis=1))
+ _average_path_length(n_samples_leaf)
- 1.0
)
denominator = len(self.estimators_) * _average_path_length([self.max_samples_])
scores = 2 ** (
# For a single training sample, denominator and depth are 0.
# Therefore, we set the score manually to 1.
-np.divide(
depths, denominator, out=np.ones_like(depths), where=denominator != 0
)
)
return scores
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
}
}
def _average_path_length(n_samples_leaf):
"""
The average path length in a n_samples iTree, which is equal to
the average path length of an unsuccessful BST search since the
latter has the same structure as an isolation tree.
Parameters
----------
n_samples_leaf : array-like of shape (n_samples,)
The number of training samples in each test sample leaf, for
each estimators.
Returns
-------
average_path_length : ndarray of shape (n_samples,)
"""
n_samples_leaf = check_array(n_samples_leaf, ensure_2d=False)
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.0
average_path_length[mask_2] = 1.0
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
|
{
"content_hash": "a1e299bd3f6b21bf13f8a6409699a985",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 87,
"avg_line_length": 36.35852713178294,
"alnum_prop": 0.605671339480838,
"repo_name": "shyamalschandra/scikit-learn",
"id": "375a1b60874df229c057bac38757a2d7389cf994",
"size": "18918",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/_iforest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394788"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6271288"
},
{
"name": "Shell",
"bytes": "6747"
}
],
"symlink_target": ""
}
|
import Globals
try:
from collections import OrderedDict
except ImportError: # pragma: no coverage
from ordereddict import OrderedDict
from AccessControl import ClassSecurityInfo
from Products.CMFPlone.utils import safe_unicode
class AlphaBatch(object):
"""Object used to batch results alphabetically.
"""
security = ClassSecurityInfo()
JOKER = '*'
vocab = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
JOKER]
threshold = 30
def __init__(self, results, context, request):
"""Take the results and the request.
"""
self.context = context
self.results = results
self.currentresults = []
self.pagemap = OrderedDict()
self.request = request
self.showBatch = True
if len(results) < self.threshold:
self.showBatch = False
self.currentresults = results
else:
self.initialize()
def initialize(self):
"""Initialize this batch object.
"""
current = self.request.get('currentPage', None)
pointer = 0
hasResults = len(self.results)
nonresults = []
for term in self.vocab:
# full init for term
self.pagemap[term] = dict()
self.pagemap[term]['value'] = term
self.pagemap[term]['visible'] = False
self.pagemap[term]['current'] = False
# special handling for joker
if term == self.JOKER:
continue
# assume alpha sorted results here
for result in self.results[pointer:]:
title = safe_unicode(result['fullname']).upper()
# title.replace(u'ü',u'Ü')
# title.replace(u'ö',u'Ö')
# title.replace(u'ä',u'Ä')
currentTerm = title and title[0] or None
if currentTerm is None or currentTerm not in self.vocab:
nonresults.append(result)
pointer += 1
continue
if title.startswith(term):
self.pagemap[term]['visible'] = True
if current is None:
current = term
if term == current:
self.currentresults.append(result)
pointer += 1
else:
break
# check for current after processing
if term == current:
self.pagemap[term]['current'] = True
if nonresults:
self.pagemap[self.JOKER]['visible'] = True
if current == '*':
self.currentresults = nonresults
self.pagemap[self.JOKER]['current'] = True
security.declarePublic('showBatch')
def showBatch(self):
"""Return True if results reaches threshold.
"""
return self.showBatch
security.declarePublic('getPages')
def getPages(self):
"""Return a list of dicts containing page definitions.
"""
return self.pagemap.values()
security.declarePublic('getResults')
def getResults(self):
"""Return the current result.
"""
return self.currentresults
Globals.InitializeClass(AlphaBatch)
|
{
"content_hash": "f634fb2c2efb926c16555413bfa0a466",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 77,
"avg_line_length": 31.31858407079646,
"alnum_prop": 0.5049448996891778,
"repo_name": "collective/Products.UserAndGroupSelectionWidget",
"id": "4508cbe084d5d7ca4ab56bf4ac2a70c324e1132f",
"size": "3569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Products/UserAndGroupSelectionWidget/alphabatch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2839"
},
{
"name": "Python",
"bytes": "45492"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
}
|
"""Tools to parse and validate a MongoDB URI."""
from urllib import unquote_plus
from pymongo.common import validate
from pymongo.errors import (ConfigurationError,
InvalidURI,
UnsupportedOption)
SCHEME = 'mongodb://'
SCHEME_LEN = len(SCHEME)
DEFAULT_PORT = 27017
def _partition(entity, sep):
"""Python2.4 doesn't have a partition method so we provide
our own that mimics str.partition from later releases.
Split the string at the first occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing the string itself, followed
by two empty strings.
"""
parts = entity.split(sep, 1)
if len(parts) == 2:
return parts[0], sep, parts[1]
else:
return entity, '', ''
def _rpartition(entity, sep):
"""Python2.4 doesn't have an rpartition method so we provide
our own that mimics str.rpartition from later releases.
Split the string at the last occurrence of sep, and return a
3-tuple containing the part before the separator, the separator
itself, and the part after the separator. If the separator is not
found, return a 3-tuple containing two empty strings, followed
by the string itself.
"""
idx = entity.rfind(sep)
if idx == -1:
return '', '', entity
return entity[:idx], sep, entity[idx + 1:]
def parse_userinfo(userinfo):
"""Validates the format of user information in a MongoDB URI.
Reserved characters like ':', '/', '+' and '@' must be escaped
following RFC 2396.
Returns a 2-tuple containing the unescaped username followed
by the unescaped password.
    :Parameters:
- `userinfo`: A string of the form <username>:<password>
.. versionchanged:: 2.2
Now uses `urllib.unquote_plus` so `+` characters must be escaped.
"""
if '@' in userinfo or userinfo.count(':') > 1:
raise InvalidURI("':' or '@' characters in a username or password "
"must be escaped according to RFC 2396.")
user, _, passwd = _partition(userinfo, ":")
# No password is expected with GSSAPI authentication.
if not user:
raise InvalidURI("The empty string is not valid username.")
user = unquote_plus(user)
passwd = unquote_plus(passwd)
return user, passwd
def parse_ipv6_literal_host(entity, default_port):
"""Validates an IPv6 literal host:port string.
Returns a 2-tuple of IPv6 literal followed by port where
port is default_port if it wasn't specified in entity.
:Parameters:
- `entity`: A string that represents an IPv6 literal enclosed
in braces (e.g. '[::1]' or '[::1]:27017').
- `default_port`: The port number to use when one wasn't
specified in entity.
"""
if entity.find(']') == -1:
raise ConfigurationError("an IPv6 address literal must be "
"enclosed in '[' and ']' according "
"to RFC 2732.")
i = entity.find(']:')
if i == -1:
return entity[1:-1], default_port
return entity[1: i], entity[i + 2:]
def parse_host(entity, default_port=DEFAULT_PORT):
"""Validates a host string
Returns a 2-tuple of host followed by port where port is default_port
if it wasn't specified in the string.
:Parameters:
- `entity`: A host or host:port string where host could be a
hostname or IP address.
- `default_port`: The port number to use when one wasn't
specified in entity.
"""
host = entity
port = default_port
if entity[0] == '[':
host, port = parse_ipv6_literal_host(entity, default_port)
elif entity.find(':') != -1:
if entity.count(':') > 1:
raise ConfigurationError("Reserved characters such as ':' must be "
"escaped according RFC 2396. An IPv6 "
"address literal must be enclosed in '[' "
"and ']' according to RFC 2732.")
host, port = host.split(':', 1)
if isinstance(port, basestring):
if not port.isdigit():
raise ConfigurationError("Port number must be an integer.")
port = int(port)
return host, port
def validate_options(opts):
"""Validates and normalizes options passed in a MongoDB URI.
Returns a new dictionary of validated and normalized options.
:Parameters:
- `opts`: A dict of MongoDB URI options.
"""
normalized = {}
for option, value in opts.iteritems():
option, value = validate(option, value)
# str(option) to ensure that a unicode URI results in plain 'str'
# option names. 'normalized' is then suitable to be passed as kwargs
# in all Python versions.
normalized[str(option)] = value
return normalized
def _parse_options(opts, delim):
"""Helper method for split_options which creates the options dict.
Also handles the creation of a list of dicts for the URI tag_sets/
readpreferencetags portion."""
options = {}
for opt in opts.split(delim):
key, val = opt.split("=")
if key.lower() == 'readpreferencetags':
options.setdefault('readpreferencetags', []).append(val)
else:
options[key] = val
if 'readpreferencetags' in options:
tag_sets = []
for tag_set in options['readpreferencetags']:
if tag_set == '':
tag_sets.append({})
continue
try:
tag_sets.append(dict([tag.split(":")
for tag in tag_set.split(",")]))
except Exception:
raise ValueError("%s not a valid value "
"for readpreferencetags" % (tag_set,))
options['readpreferencetags'] = tag_sets
return options
def split_options(opts):
"""Takes the options portion of a MongoDB URI, validates each option
and returns the options in a dictionary. The option names will be returned
lowercase even if camelCase options are used.
:Parameters:
- `opt`: A string representing MongoDB URI options.
"""
and_idx = opts.find("&")
semi_idx = opts.find(";")
try:
if and_idx >= 0 and semi_idx >= 0:
raise InvalidURI("Can not mix '&' and ';' for option separators.")
elif and_idx >= 0:
options = _parse_options(opts, "&")
elif semi_idx >= 0:
options = _parse_options(opts, ";")
elif opts.find("=") != -1:
options = _parse_options(opts, None)
else:
raise ValueError
except ValueError:
raise InvalidURI("MongoDB URI options are key=value pairs.")
return validate_options(options)
def split_hosts(hosts, default_port=DEFAULT_PORT):
"""Takes a string of the form host1[:port],host2[:port]... and
splits it into (host, port) tuples. If [:port] isn't present the
default_port is used.
    Returns a list of 2-tuples containing the host name (or IP) followed by
port number.
:Parameters:
- `hosts`: A string of the form host1[:port],host2[:port],...
- `default_port`: The port number to use when one wasn't specified
for a host.
"""
nodes = []
for entity in hosts.split(','):
if not entity:
raise ConfigurationError("Empty host "
"(or extra comma in host list).")
port = default_port
# Unix socket entities don't have ports
if entity.endswith('.sock'):
port = None
nodes.append(parse_host(entity, port))
return nodes
def parse_uri(uri, default_port=DEFAULT_PORT):
"""Parse and validate a MongoDB URI.
Returns a dict of the form::
{
'nodelist': <list of (host, port) tuples>,
'username': <username> or None,
'password': <password> or None,
'database': <database name> or None,
'collection': <collection name> or None,
'options': <dict of MongoDB URI options>
}
:Parameters:
- `uri`: The MongoDB URI to parse.
- `default_port`: The port number to use when one wasn't specified
for a host in the URI.
"""
if not uri.startswith(SCHEME):
raise InvalidURI("Invalid URI scheme: URI "
"must begin with '%s'" % (SCHEME,))
scheme_free = uri[SCHEME_LEN:]
if not scheme_free:
raise InvalidURI("Must provide at least one hostname or IP.")
nodes = None
user = None
passwd = None
dbase = None
collection = None
options = {}
# Check for unix domain sockets in the uri
if '.sock' in scheme_free:
host_part, _, path_part = _rpartition(scheme_free, '/')
try:
parse_uri('%s%s' % (SCHEME, host_part))
except (ConfigurationError, InvalidURI):
host_part = scheme_free
path_part = ""
else:
host_part, _, path_part = _partition(scheme_free, '/')
if not path_part and '?' in host_part:
raise InvalidURI("A '/' is required between "
"the host list and any options.")
if '@' in host_part:
userinfo, _, hosts = _rpartition(host_part, '@')
user, passwd = parse_userinfo(userinfo)
else:
hosts = host_part
nodes = split_hosts(hosts, default_port=default_port)
if path_part:
if path_part[0] == '?':
opts = path_part[1:]
else:
dbase, _, opts = _partition(path_part, '?')
if '.' in dbase:
dbase, collection = dbase.split('.', 1)
if opts:
options = split_options(opts)
return {
'nodelist': nodes,
'username': user,
'password': passwd,
'database': dbase,
'collection': collection,
'options': options
}
if __name__ == '__main__':
import pprint
import sys
try:
pprint.pprint(parse_uri(sys.argv[1]))
except (InvalidURI, UnsupportedOption), e:
print e
sys.exit(0)
|
{
"content_hash": "604f0d255fea43e5ac756b29f3a51a3a",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 79,
"avg_line_length": 33.29712460063898,
"alnum_prop": 0.5797351755900979,
"repo_name": "deathping1994/sendmail-api",
"id": "50d7420fc5834f2a5d468482cbe663676127b843",
"size": "11004",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/pymongo/uri_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "20694"
},
{
"name": "CSS",
"bytes": "6111"
},
{
"name": "HTML",
"bytes": "449"
},
{
"name": "JavaScript",
"bytes": "6345"
},
{
"name": "Python",
"bytes": "5285925"
},
{
"name": "Shell",
"bytes": "3759"
}
],
"symlink_target": ""
}
|
import logging
import unittest
import os
import re
import time
import grpc
from google.rpc import code_pb2, status_pb2
import cirque.proto.capability_pb2 as capability_pb2
import cirque.proto.device_pb2 as device_pb2
import cirque.proto.service_pb2 as service_pb2
import cirque.proto.service_pb2_grpc as service_pb2_grpc
from cirque.proto.device_pb2 import DeviceSpecification
from cirque.common.cirquelog import CirqueLog
from cirque.common.utils import sleep_time
from cirque.proto.capability_pb2 import (WeaveCapability, ThreadCapability,
WiFiCapability, XvncCapability,
InteractiveCapability,
LanAccessCapability)
DEVICE_CONFIG = {
'device0': {
'device_type': 'Generic Node',
'base_image': 'generic_node_image',
'wifi_capability': WiFiCapability(),
'thread_capability': ThreadCapability(rcp_mode=True),
'interactive_capability': InteractiveCapability()
},
'device1': {
'device_type': 'GenericNode',
'base_image': 'generic_node_image',
'wifi_capability': WiFiCapability(),
'thread_capability': ThreadCapability(rcp_mode=True),
'interactive_capability': InteractiveCapability()
},
'wifi-ap': {
'device_type': 'wifi_ap',
'base_image': 'mac80211_ap_image',
}
}
class TestGrpcVirtualHome(unittest.TestCase):
@classmethod
def setUpClass(cls):
CirqueLog.setup_cirque_logger(level=logging.INFO)
cls.logger = CirqueLog.get_cirque_logger('GrpcVirtualHome')
cls.channel = grpc.insecure_channel('localhost:50051')
cls.stub = service_pb2_grpc.CirqueServiceStub(cls.channel)
@classmethod
def tearDownClass(cls):
cls.logger.info('tearing down test class')
home_ids = cls.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id
cls.logger.info('created home ids: {}'.format(home_ids))
for home_id in home_ids:
cls.logger.info('stopping home: {}'.format(home_id))
cls.stub.StopCirqueHome(
service_pb2.StopCirqueHomeRequest(home_id=home_id))
cls.channel.close()
def setUp(self):
pass
def test_001_create_home(self):
home_id = self.stub.CreateCirqueHome(
service_pb2.CreateCirqueHomeRequest()).home_id
self.logger.info('\nhome id: {} created!'.format(home_id))
self.assertTrue(
home_id == self.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id[0],
            'created home_id could not be found in the cirque service!')
def test_002_create_devices(self):
home_id = self.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id[0]
device_ids = set()
device_config = create_device_from_config()
for _ in DEVICE_CONFIG:
device_id = self.stub.CreateCirqueDevice(
service_pb2.CreateCirqueDeviceRequest(
home_id=home_id,
specification=device_pb2.DeviceSpecification(
**next(device_config)))).device.device_id
device_ids.add(device_id)
self.logger.info('\ncreated device ids:')
list(map(print, device_ids))
devices = self.stub.ListCirqueHomeDevices(
service_pb2.ListCirqueHomeDevicesRequest(home_id=home_id))
device_ids_from_request = set(
[device.device_id for device in devices.devices])
self.assertTrue(device_ids == device_ids_from_request,
                        'devices created did not match the devices returned by the query command!')
def test_003_connect_to_thread_network(self):
home_id = self.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id[0]
devices = self.stub.ListCirqueHomeDevices(
service_pb2.ListCirqueHomeDevicesRequest(home_id=home_id))
devices = [
d for d in devices.devices
if d.device_specification.base_image == 'generic_node_image'
]
self.logger.info('Running commands to form Thread network')
for device in devices:
self.stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device.device_id,
command="bash -c 'ot-ctl panid 0x1234 && \
ot-ctl ifconfig up && \
ot-ctl thread start'"))
self.logger.info('Waiting for Thread network to be formed...')
time.sleep(10)
roles = set()
for device in devices:
reply = self.stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device.device_id,
command='ot-ctl state'))
roles.add(reply.output.split()[0])
self.assertIn('leader', roles)
self.assertTrue('router' in roles or 'child' in roles)
def test_004_scan_available_wifi_network(self):
home_id = self.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id[0]
devices = self.stub.ListCirqueHomeDevices(
service_pb2.ListCirqueHomeDevicesRequest(home_id=home_id))
device_ids = [
device.device_id
for device in devices.devices
if device.device_specification.device_type != 'wifi_ap'
]
ssid = [
device.device_description.ssid
for device in devices.devices
if device.device_specification.device_type == 'wifi_ap'
][0]
self.logger.info('\nwifi ap ssid: {}'.format(ssid))
network_scan_command = 'iwlist wlan0 scanning'
for device_id in device_ids:
ret = self.stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device_id,
command=network_scan_command,
streaming=False))
ssid_from_dev = re.search(r'\s+ESSID:"(.+)"', ret.output).group(1)
self.logger.info('\nssid: {} scanned by\ndevice: {}'.format(
ssid_from_dev, device_id))
self.assertTrue(ssid == ssid_from_dev,
'ssid from device did not match what is from wifi_ap!!')
def test_005_connect_to_desired_wifi_network(self):
home_id = self.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id[0]
devices = self.stub.ListCirqueHomeDevices(
service_pb2.ListCirqueHomeDevicesRequest(home_id=home_id))
device_ids = [
device.device_id
for device in devices.devices
if device.device_specification.device_type != 'wifi_ap'
]
ssid, psk = [(device.device_description.ssid, device.device_description.psk)
for device in devices.devices
if device.device_specification.device_type == 'wifi_ap'][0]
self.logger.info('\nwifi ap ssid: {}, psk: {}'.format(ssid, psk))
for device_id in device_ids:
self.logger.info('\ndevice: {}\n connecting to desired ssid: {}'.format(
device_id, ssid))
write_psk_to_wpa_supplicant_config(self.logger, self.stub, home_id,
device_id, ssid, psk)
kill_existing_wpa_supplicant(self.logger, self.stub, home_id, device_id)
start_wpa_supplicant(self.logger, self.stub, home_id, device_id)
time.sleep(5)
def test_006_device_connectivity(self):
home_id = self.stub.ListCirqueHomes(
service_pb2.ListCirqueHomesRequest()).home_id[0]
devices = self.stub.ListCirqueHomeDevices(
service_pb2.ListCirqueHomeDevicesRequest(home_id=home_id))
device_ids = [
device.device_id
for device in devices.devices
if device.device_specification.device_type != 'wifi_ap'
]
device_addrs = list()
request_addr_command = 'dhcpcd wlan0'
for device_id in device_ids:
self.logger.info(
'\nrequesting ip address from wifi ap by\ndevice:{}'.format(
device_id))
ret = self.stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device_id,
command=request_addr_command,
streaming=False))
ipaddr = re.search(
r'wlan0: leased (\d+\.\d+\.\d+\.\d+) for (\d+) seconds',
ret.output).group(1)
self.logger.info('\nip address requested: {}'.format(ipaddr))
device_addrs.append((device_id, ipaddr))
self.logger.info('\npinging from device: {} to\ndevice: {}'.format(
device_addrs[0][0], device_addrs[1][0]))
ping_command = 'ping -c 3 {}'
ret = self.stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device_addrs[0][0],
command=ping_command.format(device_addrs[1][1]),
streaming=False))
loss = re.search(r'(\d+)% packet loss', ret.output).group(1)
self.assertNotEqual(
loss, '100', 'unable to ping device: {}, no wifi connectivity!!'.format(
device_addrs[1][1]))
self.logger.info('ping loss rate: {}%'.format(loss))
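# Helper functions used by the tests above. create_device_from_config yields
# the capability dicts from DEVICE_CONFIG one at a time, in iteration order,
# so each CreateCirqueDeviceRequest is built from a distinct specification.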
def create_device_from_config():
for device in DEVICE_CONFIG:
yield DEVICE_CONFIG[device]
def write_psk_to_wpa_supplicant_config(logger, stub, home_id, device_id, ssid,
psk):
logger.info(
'\ndevice id: {}\nwriting ssid, psk to wpa_supplicant config'.format(
device_id))
write_psk_command = ''.join([
"sh -c 'wpa_passphrase {} {} >> ".format(ssid, psk),
"/etc/wpa_supplicant/wpa_supplicant.conf'"
])
return stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device_id,
command=write_psk_command,
streaming=False))
def kill_existing_wpa_supplicant(logger, stub, home_id, device_id):
logger.info('\ndevice id: {}\nkill existing wpa_supplicant'.format(device_id))
kill_wpa_supplicant_command = 'killall wpa_supplicant'
return stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device_id,
command=kill_wpa_supplicant_command,
streaming=False))
def start_wpa_supplicant(logger, stub, home_id, device_id):
logger.info(
'\ndevice id: {}\nstarting wpa_supplicant on device'.format(device_id))
start_wpa_supplicant_command = ''.join([
'wpa_supplicant -B -i wlan0 ',
'-c /etc/wpa_supplicant/wpa_supplicant.conf ',
'-f /var/log/wpa_supplicant.log -t -dd'
])
return stub.ExecuteDeviceCommand(
service_pb2.ExecuteDeviceCommandRequest(
home_id=home_id,
device_id=device_id,
command=start_wpa_supplicant_command,
streaming=False))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestGrpcVirtualHome)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "ccbdcc2576be5e720b77ddb8918ce55e",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 80,
"avg_line_length": 32.544910179640716,
"alnum_prop": 0.6354185832566698,
"repo_name": "openweave/cirque",
"id": "e1ae9662a0926eec92f914a312b10dfa701848fb",
"size": "12331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/test_grpc_virtual_home.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "5415"
},
{
"name": "Python",
"bytes": "116998"
},
{
"name": "Shell",
"bytes": "8702"
},
{
"name": "Starlark",
"bytes": "568"
}
],
"symlink_target": ""
}
|
from ..core import *
import ctypes
lib.ElPermutationMetaSet.argtypes = [c_void_p,c_void_p,c_void_p]
lib.ElPermutationMetaClear.argtypes = [c_void_p]
lib.ElPermutationMetaTotalSend.argtypes = [c_void_p,POINTER(c_int)]
lib.ElPermutationMetaTotalRecv.argtypes = [c_void_p,POINTER(c_int)]
lib.ElPermutationMetaScaleUp.argtypes = [c_void_p,iType]
lib.ElPermutationMetaScaleDown.argtypes = [c_void_p,iType]
class PermutationMeta(ctypes.Structure):
_fields_ = [("align",iType),("comm",mpi.Comm),
("sendCounts",POINTER(iType)),("sendDispls",POINTER(iType)),
("recvCounts",POINTER(iType)),("recvDispls",POINTER(iType)),
("numSendIdx",iType), ("numRecvIdx",iType),
("sendIdx",POINTER(iType)), ("sendRanks",POINTER(iType)),
("recvIdx",POINTER(iType)), ("recvRanks",POINTER(iType))]
def __init__(self,p,pInv):
if type(p) is not DistMatrix or type(pInv) is not DistMatrix:
raise Exception('Types of p and pInv must be DistMatrix')
if p.tag != iTag or pInv.tag != iTag:
raise Exception('p and pInv must be integral')
lib.ElPermutationMetaSet(p.obj,pInv.obj,pointer(self))
def Set(self,p,pInv):
if type(p) is not DistMatrix or type(pInv) is not DistMatrix:
raise Exception('Types of p and pInv must be DistMatrix')
if p.tag != iTag or pInv.tag != iTag:
raise Exception('p and pInv must be integral')
lib.ElPermutationMetaSet(p.obj,pInv.obj,pointer(self))
def Clear(self,p,pInv):
lib.ElPermutationMetaClear(pointer(self))
def TotalSend(self):
total = c_int()
lib.ElPermutationMetaTotalSend(pointer(self),pointer(total))
return total
def TotalRecv(self):
total = c_int()
lib.ElPermutationMetaTotalRecv(pointer(self),pointer(total))
return total
def ScaleUp(self,length):
lib.ElPermutationMetaScaleUp(pointer(self),length)
def ScaleDown(self,length):
lib.ElPermutationMetaScaleDown(pointer(self),length)
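# Minimal usage sketch (assumes an integer-tagged DistMatrix pair p/pInv, e.g.
# as produced by PivotsToPartialPermutation below; not part of the original
# module):
#
#   meta = PermutationMeta(p, pInv)   # build the communication metadata once
#   PermuteRowsMeta(A, meta)          # ...and reuse it across several matrices
#   meta.Clear(p, pInv)               # release the buffers held by the C library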
# Apply column pivots
# ===================
lib.ElApplyColPivots_i.argtypes = \
lib.ElApplyColPivots_s.argtypes = \
lib.ElApplyColPivots_d.argtypes = \
lib.ElApplyColPivots_c.argtypes = \
lib.ElApplyColPivots_z.argtypes = \
lib.ElApplyColPivotsDist_i.argtypes = \
lib.ElApplyColPivotsDist_s.argtypes = \
lib.ElApplyColPivotsDist_d.argtypes = \
lib.ElApplyColPivotsDist_c.argtypes = \
lib.ElApplyColPivotsDist_z.argtypes = \
[c_void_p,c_void_p,iType]
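# The chained assignments above register one ctypes signature for every
# per-datatype entry point (i/s/d/c/z, sequential and distributed) in a single
# statement; the same pattern repeats for each family of routines below.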
def ApplyColPivots(A,pivots,offset=0):
if type(A) is not type(pivots):
raise Exception('Types of A and pivots must match')
if pivots.tag != iTag:
raise Exception('pivots must be integral')
args = [A.obj,pivots.obj,offset]
if type(A) is Matrix:
if A.tag == iTag: lib.ElApplyColPivots_i(*args)
elif A.tag == sTag: lib.ElApplyColPivots_s(*args)
elif A.tag == dTag: lib.ElApplyColPivots_d(*args)
elif A.tag == cTag: lib.ElApplyColPivots_c(*args)
elif A.tag == zTag: lib.ElApplyColPivots_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElApplyColPivotsDist_i(*args)
elif A.tag == sTag: lib.ElApplyColPivotsDist_s(*args)
elif A.tag == dTag: lib.ElApplyColPivotsDist_d(*args)
elif A.tag == cTag: lib.ElApplyColPivotsDist_c(*args)
elif A.tag == zTag: lib.ElApplyColPivotsDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Apply row pivots
# ================
lib.ElApplyRowPivots_i.argtypes = \
lib.ElApplyRowPivots_s.argtypes = \
lib.ElApplyRowPivots_d.argtypes = \
lib.ElApplyRowPivots_c.argtypes = \
lib.ElApplyRowPivots_z.argtypes = \
lib.ElApplyRowPivotsDist_i.argtypes = \
lib.ElApplyRowPivotsDist_s.argtypes = \
lib.ElApplyRowPivotsDist_d.argtypes = \
lib.ElApplyRowPivotsDist_c.argtypes = \
lib.ElApplyRowPivotsDist_z.argtypes = \
[c_void_p,c_void_p,iType]
def ApplyRowPivots(A,pivots,offset=0):
if type(A) is not type(pivots):
raise Exception('Types of A and pivots must match')
if pivots.tag != iTag:
raise Exception('pivots must be integral')
args = [A.obj,pivots.obj,offset]
if type(A) is Matrix:
if A.tag == iTag: lib.ElApplyRowPivots_i(*args)
elif A.tag == sTag: lib.ElApplyRowPivots_s(*args)
elif A.tag == dTag: lib.ElApplyRowPivots_d(*args)
elif A.tag == cTag: lib.ElApplyRowPivots_c(*args)
elif A.tag == zTag: lib.ElApplyRowPivots_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElApplyRowPivotsDist_i(*args)
elif A.tag == sTag: lib.ElApplyRowPivotsDist_s(*args)
elif A.tag == dTag: lib.ElApplyRowPivotsDist_d(*args)
elif A.tag == cTag: lib.ElApplyRowPivotsDist_c(*args)
elif A.tag == zTag: lib.ElApplyRowPivotsDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Apply symmetric pivots
# ======================
lib.ElApplySymmetricPivots_i.argtypes = \
lib.ElApplySymmetricPivots_s.argtypes = \
lib.ElApplySymmetricPivots_d.argtypes = \
lib.ElApplySymmetricPivotsDist_i.argtypes = \
lib.ElApplySymmetricPivotsDist_s.argtypes = \
lib.ElApplySymmetricPivotsDist_d.argtypes = \
[c_uint,c_void_p,c_void_p,iType]
lib.ElApplySymmetricPivots_c.argtypes = \
lib.ElApplySymmetricPivots_z.argtypes = \
lib.ElApplySymmetricPivotsDist_c.argtypes = \
lib.ElApplySymmetricPivotsDist_z.argtypes = \
[c_uint,c_void_p,c_void_p,bType,iType]
def ApplySymmetricPivots(uplo,A,p,conjugate=False,offset=0):
if type(A) is not type(p):
raise Exception('Types of A and p must match')
if p.tag != iTag:
raise Exception('p must be integral')
  args = [uplo,A.obj,p.obj,offset]
  argsCpx = [uplo,A.obj,p.obj,conjugate,offset]
if type(A) is Matrix:
if A.tag == iTag: lib.ElApplySymmetricPivots_i(*args)
elif A.tag == sTag: lib.ElApplySymmetricPivots_s(*args)
elif A.tag == dTag: lib.ElApplySymmetricPivots_d(*args)
elif A.tag == cTag: lib.ElApplySymmetricPivots_c(*argsCpx)
elif A.tag == zTag: lib.ElApplySymmetricPivots_z(*argsCpx)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElApplySymmetricPivotsDist_i(*args)
elif A.tag == sTag: lib.ElApplySymmetricPivotsDist_s(*args)
elif A.tag == dTag: lib.ElApplySymmetricPivotsDist_d(*args)
elif A.tag == cTag: lib.ElApplySymmetricPivotsDist_c(*argsCpx)
elif A.tag == zTag: lib.ElApplySymmetricPivotsDist_z(*argsCpx)
else: DataExcept()
else: TypeExcept()
lib.ElApplyInverseSymmetricPivots_i.argtypes = \
lib.ElApplyInverseSymmetricPivots_s.argtypes = \
lib.ElApplyInverseSymmetricPivots_d.argtypes = \
lib.ElApplyInverseSymmetricPivotsDist_i.argtypes = \
lib.ElApplyInverseSymmetricPivotsDist_s.argtypes = \
lib.ElApplyInverseSymmetricPivotsDist_d.argtypes = \
[c_uint,c_void_p,c_void_p,iType]
lib.ElApplyInverseSymmetricPivots_c.argtypes = \
lib.ElApplyInverseSymmetricPivots_z.argtypes = \
lib.ElApplyInverseSymmetricPivotsDist_c.argtypes = \
lib.ElApplyInverseSymmetricPivotsDist_z.argtypes = \
[c_uint,c_void_p,c_void_p,bType,iType]
def ApplyInverseSymmetricPivots(uplo,A,p,conjugate=False,offset=0):
if type(A) is not type(p):
raise Exception('Types of A and p must match')
if p.tag != iTag:
raise Exception('p must be integral')
  args = [uplo,A.obj,p.obj,offset]
  argsCpx = [uplo,A.obj,p.obj,conjugate,offset]
if type(A) is Matrix:
if A.tag == iTag: lib.ElApplyInverseSymmetricPivots_i(*args)
elif A.tag == sTag: lib.ElApplyInverseSymmetricPivots_s(*args)
elif A.tag == dTag: lib.ElApplyInverseSymmetricPivots_d(*args)
elif A.tag == cTag: lib.ElApplyInverseSymmetricPivots_c(*argsCpx)
elif A.tag == zTag: lib.ElApplyInverseSymmetricPivots_z(*argsCpx)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElApplyInverseSymmetricPivotsDist_i(*args)
elif A.tag == sTag: lib.ElApplyInverseSymmetricPivotsDist_s(*args)
elif A.tag == dTag: lib.ElApplyInverseSymmetricPivotsDist_d(*args)
elif A.tag == cTag: lib.ElApplyInverseSymmetricPivotsDist_c(*argsCpx)
elif A.tag == zTag: lib.ElApplyInverseSymmetricPivotsDist_z(*argsCpx)
else: DataExcept()
else: TypeExcept()
# Explicit permutation
# ====================
lib.ElExplicitPermutation.argtypes = [c_void_p,c_void_p]
lib.ElExplicitPermutationDist.argtypes = [c_void_p,c_void_p]
def ExplicitPermutation(p):
if p.tag != iTag:
raise Exception('p must be integral')
if type(p) is Matrix:
P = Matrix(iTag)
lib.ElExplicitPermutation(p.obj,P.obj)
return P
elif type(p) is DistMatrix:
P = DistMatrix(iTag,MC,MR,p.Grid())
lib.ElExplicitPermutationDist(p.obj,P.obj)
return P
else: TypeExcept()
# Invert permutation
# ==================
lib.ElInvertPermutation.argtypes = [c_void_p,c_void_p]
lib.ElInvertPermutationDist.argtypes = [c_void_p,c_void_p]
def InvertPermutation(p):
if p.tag != iTag:
raise Exception('p must be integral')
if type(p) is Matrix:
pInv = Matrix(iTag)
lib.ElInvertPermutation(p.obj,pInv.obj)
return pInv
elif type(p) is DistMatrix:
pInv = DistMatrix(iTag,VC,STAR,p.Grid())
lib.ElInvertPermutationDist(p.obj,pInv.obj)
return pInv
else: TypeExcept()
# Parity of a permutation
# =======================
lib.ElPermutationParity.argtypes = [c_void_p,POINTER(bType)]
lib.ElPermutationParityDist.argtypes = [c_void_p,POINTER(bType)]
def PermutationParity(p):
if p.tag != iTag:
raise Exception('p must be integral')
parity = bType()
if type(p) is Matrix:
lib.ElPermutationParity(p.obj,pointer(parity))
elif type(p) is DistMatrix:
lib.ElPermutationParityDist(p.obj,pointer(parity))
else: TypeExcept()
return parity
# Permute columns
# ===============
lib.ElPermuteCols_i.argtypes = \
lib.ElPermuteCols_s.argtypes = \
lib.ElPermuteCols_d.argtypes = \
lib.ElPermuteCols_c.argtypes = \
lib.ElPermuteCols_z.argtypes = \
lib.ElPermuteColsDist_i.argtypes = \
lib.ElPermuteColsDist_s.argtypes = \
lib.ElPermuteColsDist_d.argtypes = \
lib.ElPermuteColsDist_c.argtypes = \
lib.ElPermuteColsDist_z.argtypes = \
[c_void_p,c_void_p]
lib.ElPermuteColsBoth_i.argtypes = \
lib.ElPermuteColsBoth_s.argtypes = \
lib.ElPermuteColsBoth_d.argtypes = \
lib.ElPermuteColsBoth_c.argtypes = \
lib.ElPermuteColsBoth_z.argtypes = \
lib.ElPermuteColsBothDist_i.argtypes = \
lib.ElPermuteColsBothDist_s.argtypes = \
lib.ElPermuteColsBothDist_d.argtypes = \
lib.ElPermuteColsBothDist_c.argtypes = \
lib.ElPermuteColsBothDist_z.argtypes = \
[c_void_p,c_void_p,c_void_p]
def PermuteCols(A,p,pInv=None):
if type(A) is Matrix:
    if pInv is None:
if A.tag == iTag: lib.ElPermuteCols_i(A.obj,p.obj)
elif A.tag == sTag: lib.ElPermuteCols_s(A.obj,p.obj)
elif A.tag == dTag: lib.ElPermuteCols_d(A.obj,p.obj)
elif A.tag == cTag: lib.ElPermuteCols_c(A.obj,p.obj)
elif A.tag == zTag: lib.ElPermuteCols_z(A.obj,p.obj)
else: DataExcept()
else:
if type(pInv) != Matrix or pInv.tag != iTag:
raise Exception('pInv must be an integer Matrix')
if A.tag == iTag: lib.ElPermuteColsBoth_i(A.obj,p.obj,pInv.obj)
elif A.tag == sTag: lib.ElPermuteColsBoth_s(A.obj,p.obj,pInv.obj)
elif A.tag == dTag: lib.ElPermuteColsBoth_d(A.obj,p.obj,pInv.obj)
elif A.tag == cTag: lib.ElPermuteColsBoth_c(A.obj,p.obj,pInv.obj)
elif A.tag == zTag: lib.ElPermuteColsBoth_z(A.obj,p.obj,pInv.obj)
else: DataExcept()
elif type(A) is DistMatrix:
    if pInv is None:
if A.tag == iTag: lib.ElPermuteColsDist_i(A.obj,p.obj)
elif A.tag == sTag: lib.ElPermuteColsDist_s(A.obj,p.obj)
elif A.tag == dTag: lib.ElPermuteColsDist_d(A.obj,p.obj)
elif A.tag == cTag: lib.ElPermuteColsDist_c(A.obj,p.obj)
elif A.tag == zTag: lib.ElPermuteColsDist_z(A.obj,p.obj)
else: DataExcept()
else:
      if type(pInv) != DistMatrix or pInv.tag != iTag:
        raise Exception('pInv must be an integer DistMatrix')
if A.tag == iTag: lib.ElPermuteColsBothDist_i(A.obj,p.obj,pInv.obj)
elif A.tag == sTag: lib.ElPermuteColsBothDist_s(A.obj,p.obj,pInv.obj)
elif A.tag == dTag: lib.ElPermuteColsBothDist_d(A.obj,p.obj,pInv.obj)
elif A.tag == cTag: lib.ElPermuteColsBothDist_c(A.obj,p.obj,pInv.obj)
elif A.tag == zTag: lib.ElPermuteColsBothDist_z(A.obj,p.obj,pInv.obj)
else: DataExcept()
else: TypeExcept()
lib.ElPermuteColsMetaDist_i.argtypes = \
lib.ElPermuteColsMetaDist_s.argtypes = \
lib.ElPermuteColsMetaDist_d.argtypes = \
lib.ElPermuteColsMetaDist_c.argtypes = \
lib.ElPermuteColsMetaDist_z.argtypes = \
[c_void_p,POINTER(PermutationMeta)]
def PermuteColsMeta(A,meta):
if type(A) is DistMatrix:
if A.tag == iTag: lib.ElPermuteColsMetaDist_i(A.obj,pointer(meta))
elif A.tag == sTag: lib.ElPermuteColsMetaDist_s(A.obj,pointer(meta))
elif A.tag == dTag: lib.ElPermuteColsMetaDist_d(A.obj,pointer(meta))
elif A.tag == cTag: lib.ElPermuteColsMetaDist_c(A.obj,pointer(meta))
elif A.tag == zTag: lib.ElPermuteColsMetaDist_z(A.obj,pointer(meta))
else: DataExcept()
else: TypeExcept()
# Permute rows
# ============
lib.ElPermuteRows_i.argtypes = \
lib.ElPermuteRows_s.argtypes = \
lib.ElPermuteRows_d.argtypes = \
lib.ElPermuteRows_c.argtypes = \
lib.ElPermuteRows_z.argtypes = \
lib.ElPermuteRowsDist_i.argtypes = \
lib.ElPermuteRowsDist_s.argtypes = \
lib.ElPermuteRowsDist_d.argtypes = \
lib.ElPermuteRowsDist_c.argtypes = \
lib.ElPermuteRowsDist_z.argtypes = \
[c_void_p,c_void_p]
lib.ElPermuteRowsBoth_i.argtypes = \
lib.ElPermuteRowsBoth_s.argtypes = \
lib.ElPermuteRowsBoth_d.argtypes = \
lib.ElPermuteRowsBoth_c.argtypes = \
lib.ElPermuteRowsBoth_z.argtypes = \
lib.ElPermuteRowsBothDist_i.argtypes = \
lib.ElPermuteRowsBothDist_s.argtypes = \
lib.ElPermuteRowsBothDist_d.argtypes = \
lib.ElPermuteRowsBothDist_c.argtypes = \
lib.ElPermuteRowsBothDist_z.argtypes = \
[c_void_p,c_void_p,c_void_p]
def PermuteRows(A,p,pInv=None):
if type(A) is Matrix:
    if pInv is None:
args = [A.obj,p.obj]
if A.tag == iTag: lib.ElPermuteRows_i(*args)
elif A.tag == sTag: lib.ElPermuteRows_s(*args)
elif A.tag == dTag: lib.ElPermuteRows_d(*args)
elif A.tag == cTag: lib.ElPermuteRows_c(*args)
elif A.tag == zTag: lib.ElPermuteRows_z(*args)
else: DataExcept()
else:
if type(pInv) != Matrix or pInv.tag != iTag:
raise Exception('pInv must be integral')
args = [A.obj,p.obj,pInv.obj]
if A.tag == iTag: lib.ElPermuteRowsBoth_i(*args)
elif A.tag == sTag: lib.ElPermuteRowsBoth_s(*args)
elif A.tag == dTag: lib.ElPermuteRowsBoth_d(*args)
elif A.tag == cTag: lib.ElPermuteRowsBoth_c(*args)
elif A.tag == zTag: lib.ElPermuteRowsBoth_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
    if pInv is None:
args = [A.obj,p.obj]
if A.tag == iTag: lib.ElPermuteRowsDist_i(*args)
elif A.tag == sTag: lib.ElPermuteRowsDist_s(*args)
elif A.tag == dTag: lib.ElPermuteRowsDist_d(*args)
elif A.tag == cTag: lib.ElPermuteRowsDist_c(*args)
elif A.tag == zTag: lib.ElPermuteRowsDist_z(*args)
else: DataExcept()
else:
      if type(pInv) != DistMatrix or pInv.tag != iTag:
        raise Exception('pInv must be an integer DistMatrix')
args = [A.obj,p.obj,pInv.obj]
if A.tag == iTag: lib.ElPermuteRowsBothDist_i(*args)
elif A.tag == sTag: lib.ElPermuteRowsBothDist_s(*args)
elif A.tag == dTag: lib.ElPermuteRowsBothDist_d(*args)
elif A.tag == cTag: lib.ElPermuteRowsBothDist_c(*args)
elif A.tag == zTag: lib.ElPermuteRowsBothDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElPermuteRowsMetaDist_i.argtypes = \
lib.ElPermuteRowsMetaDist_s.argtypes = \
lib.ElPermuteRowsMetaDist_d.argtypes = \
lib.ElPermuteRowsMetaDist_c.argtypes = \
lib.ElPermuteRowsMetaDist_z.argtypes = \
[c_void_p,POINTER(PermutationMeta)]
def PermuteRowsMeta(A,meta):
args = [A.obj,pointer(meta)]
if type(A) is DistMatrix:
if A.tag == iTag: lib.ElPermuteRowsMetaDist_i(*args)
elif A.tag == sTag: lib.ElPermuteRowsMetaDist_s(*args)
elif A.tag == dTag: lib.ElPermuteRowsMetaDist_d(*args)
elif A.tag == cTag: lib.ElPermuteRowsMetaDist_c(*args)
elif A.tag == zTag: lib.ElPermuteRowsMetaDist_z(*args)
else: DataExcept()
else: TypeExcept()
# Pivot parity
# ============
lib.ElPivotParity.argtypes = [c_void_p,iType,POINTER(bType)]
lib.ElPivotParityDist.argtypes = [c_void_p,iType,POINTER(bType)]
def PivotParity(p,offset=0):
if p.tag != iTag:
raise Exception('p must be integral')
parity = bType()
args = [p.obj,offset,pointer(parity)]
if type(p) is Matrix: lib.ElPivotParity(*args)
elif type(p) is DistMatrix: lib.ElPivotParityDist(*args)
else: TypeExcept()
return parity
# Convert a pivot sequence to a partial permutation vector
# ========================================================
lib.ElPivotsToPartialPermutation.argtypes = \
lib.ElPivotsToPartialPermutationDist.argtypes = \
[c_void_p,c_void_p,c_void_p,iType]
def PivotsToPartialPermutation(pivots,offset=0):
if pivots.tag != iTag:
raise Exception('pivots must be integral')
if type(pivots) is Matrix:
p = Matrix(iTag)
pInv = Matrix(iTag)
lib.ElPivotsToPartialPermutation(pivots.obj,p.obj,pInv.obj,offset)
return p, pInv
elif type(pivots) is DistMatrix:
p = DistMatrix(iTag,VC,STAR,pivots.Grid())
pInv = DistMatrix(iTag,VC,STAR,pivots.Grid())
lib.ElPivotsToPartialPermutationDist(pivots.obj,p.obj,pInv.obj,offset)
return p, pInv
else: TypeExcept()
# Convert a pivot sequence to a permutation vector
# ================================================
lib.ElPivotsToPermutation.argtypes = \
lib.ElPivotsToPermutationDist.argtypes = \
[c_void_p,c_void_p,iType]
def PivotsToPermutation(pivots,offset=0):
if pivots.tag != iTag:
raise Exception('pivots must be integral')
if type(pivots) is Matrix:
p = Matrix(iTag)
lib.ElPivotsToPermutation(pivots.obj,p.obj,offset)
return p
elif type(pivots) is DistMatrix:
p = DistMatrix(iTag,VC,STAR,pivots.Grid())
lib.ElPivotsToPermutationDist(pivots.obj,p.obj,offset)
return p
else: TypeExcept()
lib.ElPivotsToInversePermutation.argtypes = \
lib.ElPivotsToInversePermutationDist.argtypes = \
[c_void_p,c_void_p,iType]
def PivotsToInversePermutation(pivots,offset=0):
if pivots.tag != iTag:
raise Exception('pivots must be integral')
if type(pivots) is Matrix:
pInv = Matrix(iTag)
lib.ElPivotsToInversePermutation(pivots.obj,pInv.obj,offset)
return pInv
elif type(pivots) is DistMatrix:
pInv = DistMatrix(iTag,VC,STAR,pivots.Grid())
lib.ElPivotsToInversePermutationDist(pivots.obj,pInv.obj,offset)
return pInv
else: TypeExcept()
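# Illustrative sketch (hypothetical variables): converting factorization pivots
# into explicit row permutations and applying them,
#
#   p = PivotsToPermutation(pivots)           # pivots from, e.g., an LU routine
#   pInv = PivotsToInversePermutation(pivots)
#   PermuteRows(B, p, pInv)                   # permute B with both directions available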
|
{
"content_hash": "ea9aa221f246d936e8bd27ffe8bfb906",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 75,
"avg_line_length": 39.54255319148936,
"alnum_prop": 0.6957223567393059,
"repo_name": "justusc/Elemental",
"id": "1b92a41a1ffb79168c92c4e6af70441f9f010a1b",
"size": "18849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/lapack_like/perm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "760573"
},
{
"name": "C++",
"bytes": "7177017"
},
{
"name": "CMake",
"bytes": "186926"
},
{
"name": "Makefile",
"bytes": "333"
},
{
"name": "Matlab",
"bytes": "13306"
},
{
"name": "Python",
"bytes": "942707"
},
{
"name": "Ruby",
"bytes": "1393"
},
{
"name": "Shell",
"bytes": "1335"
},
{
"name": "TeX",
"bytes": "23728"
}
],
"symlink_target": ""
}
|
from datetime import datetime
GAME_VIDEO_BASE_URL = "http://www.nfl.com/feeds-rs/videos/byGameCenter/{0}.json"
LIVE_UPDATE_BASE_URL = "http://www.nfl.com/liveupdate/game-center/{0}/{0}_gtd.json"
class Game(object):
def __init__(self, id_, h, v):
self.id_ = id_
self.date = self.id_[:-2]
self.home = h
self.vis = v
self.latest_play_id = ""
self.latest_clip_id = ""
self.videos = {}
def is_today(self):
        return self.date == datetime.today().strftime('%Y%m%d')
def video_url(self):
return GAME_VIDEO_BASE_URL.format(self.id_)
def live_update_url(self):
return LIVE_UPDATE_BASE_URL.format(self.id_)
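# Minimal usage sketch (the game id and team codes below are made up for
# illustration):
#
#   game = Game('2017091000', 'NE', 'KC')
#   if game.is_today():
#       feed = game.live_update_url()   # JSON feed polled for new plays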
|
{
"content_hash": "df96ac5f213c2ccc68543ba8cf22a6c8",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 83,
"avg_line_length": 28.16,
"alnum_prop": 0.5894886363636364,
"repo_name": "twbarber/nfl-highlight-bot",
"id": "73a6559d84f7e8e31ac4003082676f9872226364",
"size": "704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nflh/games.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6455"
}
],
"symlink_target": ""
}
|
from .alloy_composition_descriptor import AlloyCompositionDescriptor
from .categorical_descriptor import CategoricalDescriptor
from .inorganic_descriptor import InorganicDescriptor
from .int_descriptor import IntDescriptor
from .organic_descriptor import OrganicDescriptor
from .real_descriptor import RealDescriptor
from .formulation_descriptor import FormulationDescriptor
|
{
"content_hash": "830b7ecf6dea1dbf28db2e08d7c9cefc",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 53.57142857142857,
"alnum_prop": 0.8853333333333333,
"repo_name": "CitrineInformatics/python-citrination-client",
"id": "c7c6d5acf77500a56a683ebd5472777799bb3d6d",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "citrination_client/views/descriptors/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "391180"
}
],
"symlink_target": ""
}
|
import os
import subprocess
def returns_on_fail(return_value):
def decorator(func):
def inner(*args, **kwargs):
try:
return func(*args, **kwargs)
except subprocess.CalledProcessError:
return return_value
return inner
return decorator
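# returns_on_fail(value) wraps a VCS query so that a CalledProcessError raised
# by the underlying subprocess call degrades to `value` instead of propagating.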
class BaseVCSClient(object):
@property
def base_command(self):
raise NotImplementedError
def is_repository(self):
raise NotImplementedError
def get_hash(self):
raise NotImplementedError
def get_short_hash(self):
raise NotImplementedError
def get_current_branch_name(self):
raise NotImplementedError
def get_author_name(self):
raise NotImplementedError
def get_author_email(self):
raise NotImplementedError
def get_author_info(self):
raise NotImplementedError
def get_committer_name(self):
raise NotImplementedError
def get_committer_email(self):
raise NotImplementedError
def get_committer_info(self):
raise NotImplementedError
def get_date(self):
raise NotImplementedError
def get_message(self):
raise NotImplementedError
def _get_cwd(self):
return os.getcwd()
def _execute_vcs(self, main, *options):
commands = [self.base_command, main] + list(options)
raw_data = subprocess.check_output(commands)
return raw_data.decode()
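# A concrete client only needs base_command plus the getters, e.g. this
# illustrative sketch (not part of this module):
#
#   class GitClient(BaseVCSClient):
#       base_command = 'git'
#
#       @returns_on_fail(False)
#       def is_repository(self):
#           return self._execute_vcs('rev-parse', '--is-inside-work-tree').strip() == 'true'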
|
{
"content_hash": "e3b2bae7d8191332f310e4e1c7d81c49",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 60,
"avg_line_length": 23.126984126984127,
"alnum_prop": 0.6424159231297186,
"repo_name": "giginet/django-debug-toolbar-vcs-info",
"id": "04c90089be67415534742db48c1ba30b43cd928a",
"size": "1457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vcs_info_panel/clients/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "685"
},
{
"name": "Python",
"bytes": "25856"
}
],
"symlink_target": ""
}
|
import os, errno, json
import modules
from modules.misctools import mkdir_p
import datetime
bot=None
def logger(messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):
path=os.path.join('logs','incoming',jid)
if not os.path.exists(path):
os.makedirs(path)
filepath=os.path.join(path,messageId)
with open (filepath,'w') as messagefile:
json.dump((messageId,jid,messageContent,timestamp,wantsReceipt,pushName,isBroadcast),messagefile)
global bot
if wantsReceipt and bot.sendReceipts:
bot.methodsInterface.call("message_ack",(jid,messageId))
def onMessageReceived(messageId, jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast):
messageobject=messageId,jid, messageContent, timestamp, wantsReceipt, pushName, isBroadcast
formattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y %H:%M')
print pushName,formattedDate,':',messageContent,
#messageContent=messageContent.decode('utf8')
logger(*messageobject)
#modules.sender.message_queue(jid,messageContent)
def onGroupMessageReceived(messageId, jid, msgauthor, messageContent, timestamp, wantsReceipt, pushName):
messageobject=messageId,jid, msgauthor, messageContent, timestamp, wantsReceipt, pushName
formattedDate = datetime.datetime.fromtimestamp(timestamp).strftime('%d-%m-%Y %H:%M')
print jid,'('+pushName,formattedDate+'): ',messageContent
if wantsReceipt and bot.sendReceipts:
bot.methodsInterface.call("message_ack", (jid, messageId))
logger(*messageobject)
def setup(parent):
parent.signalsInterface.registerListener("message_received", onMessageReceived)
parent.signalsInterface.registerListener("group_messageReceived", onGroupMessageReceived)
global bot
bot=parent
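# setup() is the module entry point: it registers the two listeners on the
# bot's signalsInterface and keeps a module-level reference to the bot so the
# handlers can acknowledge messages. logger() persists every incoming message
# as a JSON file under logs/incoming/<jid>/<messageId>.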
|
{
"content_hash": "c61545d81998957798b21037aabbba26",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 43.3,
"alnum_prop": 0.7927251732101617,
"repo_name": "siyei/python-whatsapp-bot",
"id": "589550a331bc7c6c873138618412625fd67b203d",
"size": "1732",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/logger.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
@package mi.instrument.mclane.driver
@file marine-integrations/mi/instrument/mclane/driver.py
@author Dan Mergens
@brief Driver base class for McLane instruments
Release notes:
initial version
"""
import datetime
__author__ = 'Dan Mergens'
__license__ = 'Apache 2.0'
import re
import time
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.util import dict_equal
from mi.core.exceptions import SampleException, \
InstrumentParameterException, \
InstrumentProtocolException, \
InstrumentTimeoutException
from mi.core.instrument.instrument_protocol import \
CommandResponseInstrumentProtocol, \
RE_PATTERN, \
DEFAULT_CMD_TIMEOUT
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import \
DriverEvent, \
DriverAsyncEvent, \
DriverProtocolState, \
DriverParameter, \
ResourceAgentState
from mi.core.instrument.data_particle import \
DataParticle, \
DataParticleKey, \
CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
NEWLINE = '\r\n'
CONTROL_C = '\x03'
NUM_PORTS = 24 # number of collection bags
# default timeout.
INTER_CHARACTER_DELAY = .2 # works
# INTER_CHARACTER_DELAY = .02 - too fast
# INTER_CHARACTER_DELAY = .04
PUMP_RATE_ERROR = 1.15 # PPS is off in its flow rate measurement by 14.5% - TODO - check RAS data
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
FLUSH = 'DRIVER_STATE_FLUSH'
FILL = 'DRIVER_STATE_FILL'
CLEAR = 'DRIVER_STATE_CLEAR'
RECOVERY = 'DRIVER_STATE_RECOVERY' # for recovery after pump failure
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
DISCOVER = DriverEvent.DISCOVER
INIT_PARAMS = DriverEvent.INIT_PARAMS
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
GET = DriverEvent.GET
SET = DriverEvent.SET
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
# ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
FLUSH = 'DRIVER_EVENT_FLUSH'
FILL = 'DRIVER_EVENT_FILL'
CLEAR = 'DRIVER_EVENT_CLEAR'
PUMP_STATUS = 'DRIVER_EVENT_PUMP_STATUS'
INSTRUMENT_FAILURE = 'DRIVER_EVENT_INSTRUMENT_FAILURE'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
# ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
CLEAR = ProtocolEvent.CLEAR
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
DISCOVER = DriverEvent.DISCOVER
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
FLUSH_VOLUME = "flush_volume"
FLUSH_FLOWRATE = "flush_flowrate"
FLUSH_MINFLOW = "flush_minflow"
FILL_VOLUME = "fill_volume"
FILL_FLOWRATE = "fill_flowrate"
FILL_MINFLOW = "fill_minflow"
CLEAR_VOLUME = "clear_volume"
CLEAR_FLOWRATE = "clear_flowrate"
CLEAR_MINFLOW = "clear_minflow"
class McLaneCommand(BaseEnum):
"""
Instrument command strings - case insensitive
"""
GO = NEWLINE
CONTROL_C = CONTROL_C
CLOCK = 'clock' # set the clock date and time
BATTERY = 'battery' # display battery voltage
HOME = 'home' # set the port to the home port (0)
FORWARD = 'forward' # start forward pump operation < volume flowrate minflow [time] >
REVERSE = 'reverse' # reverse pump operation < volume flowrate minflow [time] >
PORT = 'port' # display current port or set valve to supplied position
CAPACITY = 'capacity' # pump max flow rate mL/min
COPYRIGHT = 'copyright' # display version, release and copyright notice
class Prompt(BaseEnum):
"""
Device i/o prompts.
"""
CR_NL = '\r\n'
PERIOD = '.'
SUSPENDED = 'Suspended ... '
ENTER_CTRL_C = 'Enter ^C now to wake up ...'
COMMAND_INPUT = '>'
UNRECOGNIZED_COMMAND = '] unrecognized command'
class McLaneResponse(BaseEnum):
"""
Expected device response strings
"""
HOME = re.compile(r'Port: 00')
PORT = re.compile(r'Port: (\d+)') # e.g. Port: 01
# e.g. 03/25/14 20:24:02 PPS ML13003-01>
READY = re.compile(r'(\d+/\d+/\d+\s+\d+:\d+:\d+\s+)(RAS|PPS)\s+(.*)>')
# Result 00 | 75 100 25 4 | 77.2 98.5 99.1 47 031514 001813 | 29.8 1
# Result 00 | 10 100 75 60 | 10.0 85.5 100.0 7 032814 193855 | 30.0 1
PUMP = re.compile(r'(Status|Result).*(\d+)' + NEWLINE)
# Battery: 30.1V [Alkaline, 18V minimum]
BATTERY = re.compile(r'Battery:\s+(\d*\.\d+)V\s+\[.*\]') # battery voltage
# Capacity: Maxon 250mL
CAPACITY = re.compile(r'Capacity:\s(Maxon|Pittman)\s+(\d+)mL') # pump make and capacity
# McLane Research Laboratories, Inc.
# CF2 Adaptive Water Transfer System
# Version 2.02 of Jun 7 2013 18:17
# Configured for: Maxon 250ml pump
VERSION = re.compile(
r'McLane .*$' + NEWLINE +
r'CF2 .*$' + NEWLINE +
r'Version\s+(\S+)\s+of\s+(.*)$' + NEWLINE + # version and release date
r'.*$'
)
class Timeout(BaseEnum):
"""
Timeouts for commands # TODO - calculate based on flow rate & volume
"""
HOME = 30
PORT = 10 + 2 # average time to advance to next port is 10 seconds, any more indicates skipping of a port
FLUSH = 103 + 5
FILL = 2728 + 30
CLEAR = 68 + 5
CLOCK = INTER_CHARACTER_DELAY * 30 + 1
#####
# Codes for pump termination
TerminationCodes = {
0: 'Pumping in progress',
1: 'Volume reached',
2: 'Time limit reached',
3: 'Min flow reached',
4: 'Low battery',
5: 'Stopped by user',
6: 'Pump would not start',
7: 'Sudden flow obstruction',
8: 'Sudden obstruction with slip',
9: 'Sudden pressure release'
}
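# TerminationCodes maps the numeric status code reported at the end of a pump
# Result line to a human-readable reason; TerminationCodeEnum below provides
# the same codes as named constants for use in the state handlers.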
class TerminationCodeEnum(BaseEnum):
PUMP_IN_PROGRESS = 0
VOLUME_REACHED = 1
TIME_LIMIT_REACHED = 2
MIN_FLOW_REACHED = 3
LOW_BATTERY = 4
STOPPED_BY_USER = 5
PUMP_WOULD_NOT_START = 6
SUDDEN_FLOW_OBSTRUCTION = 7
SUDDEN_OBSTRUCTION_WITH_SLIP = 8
SUDDEN_PRESSURE_RELEASE = 9
class McLaneDataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
# TODO - define which commands will be published to user
RAW = CommonDataParticleType.RAW
MCLANE_PARSED = 'mclane_parsed'
PUMP_STATUS = 'pump_status'
VOLTAGE_STATUS = 'battery'
VERSION_INFO = 'version'
###############################################################################
# Data Particles
###############################################################################
class McLaneSampleDataParticleKey(BaseEnum):
PORT = 'port_number'
VOLUME_COMMANDED = 'commanded_volume'
FLOW_RATE_COMMANDED = 'commanded_flowrate'
MIN_FLOW_COMMANDED = 'commanded_min_flowrate'
TIME_LIMIT = 'commanded_timelimit'
VOLUME_ACTUAL = 'cumulative_volume'
FLOW_RATE_ACTUAL = 'flowrate'
MIN_FLOW_ACTUAL = 'minimum_flowrate'
TIMER = 'elapsed_time'
TIME = 'date_time_string'
BATTERY = 'battery_voltage'
CODE = 'sampling_status_code'
# data particle for forward, reverse, and result commands
# e.g.:
# --- command --- -------- result -------------
# Result port | vol flow minf tlim | vol flow minf secs date-time | batt code
# Status 00 | 75 100 25 4 | 1.5 90.7 90.7* 1 031514 001727 | 29.9 0
class McLaneSampleDataParticle(DataParticle):
@staticmethod
def regex():
"""
get the compiled regex pattern
@return: compiled re
"""
exp = str(r'(?P<status>Status|Result)' + # status is incremental, result is the last return from the command
'\s*(?P<port>\d+)\s*\|' + # PORT
'\s*(?P<commanded_volume>\d+)' + # VOLUME_COMMANDED
'\s*(?P<commanded_flow_rate>\d+)' + # FLOW RATE COMMANDED
'\s*(?P<commanded_min_flowrate>\d+)' + # MIN RATE COMMANDED
'\s*(?P<time_limit>\d+)\s*\|' + # TLIM - TODO
'\s*(?P<volume>\d*\.?\d+)' + # VOLUME (actual)
'\s*(?P<flow_rate>\d*\.?\d+)' + # FLOW RATE (actual)
'\s*(?P<min_flow>\d*\.?\d+)' + # MIN RATE (actual)
'\*?' +
'\s*(?P<timer>\d+)' + # elapsed time (seconds)
'\s*(?P<time>\d+\s*\d+)\s*\|' + # MMDDYY HHMMSS (current date and time)
'\s*(?P<voltage>\d*\.?\d+)' + # voltage (battery)
'\s*(?P<code>\d+)' + # code enumeration
'\s*' + NEWLINE)
return exp
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
return re.compile(McLaneSampleDataParticle.regex())
    def _build_parsed_values(self):
        match = McLaneSampleDataParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("RASFL_SampleDataParticle: No regex match of parsed sample data: [%s]", self.raw_data)
        timestamp = datetime.datetime.strptime(match.group('time'), '%m%d%y %H%M%S')
        timestamp = (timestamp - datetime.datetime(1900, 1, 1)).total_seconds()
        self.set_internal_timestamp(timestamp)
result = [
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.PORT,
DataParticleKey.VALUE: int(match.group('port'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.VOLUME_COMMANDED,
DataParticleKey.VALUE: int(match.group('commanded_volume'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.FLOW_RATE_COMMANDED,
DataParticleKey.VALUE: int(match.group('commanded_flow_rate'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.MIN_FLOW_COMMANDED,
DataParticleKey.VALUE: int(match.group('commanded_min_flowrate'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIME_LIMIT,
DataParticleKey.VALUE: int(match.group('time_limit'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.VOLUME_ACTUAL,
DataParticleKey.VALUE: float(match.group('volume'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.FLOW_RATE_ACTUAL,
DataParticleKey.VALUE: float(match.group('flow_rate'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.MIN_FLOW_ACTUAL,
DataParticleKey.VALUE: float(match.group('min_flow'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIMER,
DataParticleKey.VALUE: int(match.group('timer'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.TIME,
DataParticleKey.VALUE: str(match.group('time'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.BATTERY,
DataParticleKey.VALUE: float(match.group('voltage'))},
{DataParticleKey.VALUE_ID: McLaneSampleDataParticleKey.CODE,
DataParticleKey.VALUE: int(match.group('code'))}]
return result
###########################################################################
# Protocol
###########################################################################
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class McLaneProtocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
# __metaclass__ = get_logging_metaclass(log_level='debug')
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
handlers = {
ProtocolState.UNKNOWN: [
(ProtocolEvent.ENTER, self._handler_unknown_enter),
(ProtocolEvent.DISCOVER, self._handler_unknown_discover),
],
ProtocolState.COMMAND: [
(ProtocolEvent.ENTER, self._handler_command_enter),
(ProtocolEvent.INIT_PARAMS, self._handler_command_init_params),
(ProtocolEvent.START_DIRECT, self._handler_command_start_direct),
(ProtocolEvent.CLOCK_SYNC, self._handler_sync_clock),
(ProtocolEvent.ACQUIRE_SAMPLE, self._handler_command_acquire),
# (ProtocolEvent.ACQUIRE_STATUS, self._handler_command_status),
(ProtocolEvent.CLEAR, self._handler_command_clear),
(ProtocolEvent.GET, self._handler_get),
(ProtocolEvent.SET, self._handler_command_set),
],
ProtocolState.FLUSH: [
(ProtocolEvent.ENTER, self._handler_flush_enter),
(ProtocolEvent.FLUSH, self._handler_flush_flush),
(ProtocolEvent.PUMP_STATUS, self._handler_flush_pump_status),
(ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
],
ProtocolState.FILL: [
(ProtocolEvent.ENTER, self._handler_fill_enter),
(ProtocolEvent.FILL, self._handler_fill_fill),
(ProtocolEvent.PUMP_STATUS, self._handler_fill_pump_status),
(ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
],
ProtocolState.CLEAR: [
(ProtocolEvent.ENTER, self._handler_clear_enter),
(ProtocolEvent.CLEAR, self._handler_clear_clear),
(ProtocolEvent.PUMP_STATUS, self._handler_clear_pump_status),
(ProtocolEvent.INSTRUMENT_FAILURE, self._handler_all_failure),
],
ProtocolState.RECOVERY: [
(ProtocolEvent.ENTER, self._handler_recovery_enter),
],
ProtocolState.DIRECT_ACCESS: [
(ProtocolEvent.ENTER, self._handler_direct_access_enter),
(ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct),
(ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct),
],
}
for state in handlers:
for event, handler in handlers[state]:
self._protocol_fsm.add_handler(state, event, handler)
# Add build handlers for device commands - we are only using simple commands
for cmd in McLaneCommand.list():
self._add_build_handler(cmd, self._build_command)
# Add response handlers for device commands.
# self._add_response_handler(McLaneCommand.BATTERY, self._parse_battery_response)
# self._add_response_handler(McLaneCommand.CLOCK, self._parse_clock_response)
# self._add_response_handler(McLaneCommand.PORT, self._parse_port_response)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
self._chunker = StringChunker(McLaneProtocol.sieve_function)
self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
self._sent_cmds = None
# TODO - reset next_port on mechanical refresh of the PPS filters - how is the driver notified?
# TODO - need to persist state for next_port to save driver restart
self.next_port = 1 # next available port
self._second_attempt = False
@staticmethod
def sieve_function(raw_data):
"""
The method that splits samples and status
"""
matchers = []
return_list = []
matchers.append(McLaneSampleDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# implement virtual methods from base class.
########################################################################
def _set_params(self, *args, **kwargs):
"""
Issue commands to the instrument to set various parameters. If
startup is set to true that means we are setting startup values
and immutable parameters can be set. Otherwise only READ_WRITE
parameters can be set.
must be overloaded in derived classes
@param params dictionary containing parameter name and value pairs
@param startup flag - true indicates initializing, false otherwise
"""
params = args[0]
# check for attempt to set readonly parameters (read-only or immutable set outside startup)
self._verify_not_readonly(*args, **kwargs)
old_config = self._param_dict.get_config()
for (key, val) in params.iteritems():
log.debug("KEY = " + str(key) + " VALUE = " + str(val))
self._param_dict.set_value(key, val)
new_config = self._param_dict.get_config()
log.debug('new config: %s\nold config: %s', new_config, old_config)
# check for parameter change
if not dict_equal(old_config, new_config):
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
def apply_startup_params(self):
"""
Apply startup parameters
"""
# fn = "apply_startup_params"
# config = self.get_startup_config()
# log.debug("%s: startup config = %s", fn, config)
#
# for param in Parameter.list():
# if param in config:
# self._param_dict.set_value(param, config[param])
#
# log.debug("%s: new parameters", fn)
# for x in config:
# log.debug(" parameter %s: %s", x, config[x])
if self.get_current_state() != DriverProtocolState.COMMAND:
raise InstrumentProtocolException('cannot set parameters outside command state')
self._set_params(self.get_startup_config(), True)
########################################################################
# Instrument commands.
########################################################################
def _do_cmd_resp(self, cmd, *args, **kwargs):
"""
Perform a command-response on the device. Overrides the base class so it will
return the regular expression groups without concatenating them into a string.
@param cmd The command to execute.
@param args positional arguments to pass to the build handler.
@param write_delay kwarg for the amount of delay in seconds to pause
between each character. If none supplied, the DEFAULT_WRITE_DELAY
value will be used.
@param timeout optional wakeup and command timeout via kwargs.
@param response_regex kwarg with a compiled regex for the response to
match. Groups that match will be returned as a tuple.
@retval response The parsed response result.
@raises InstrumentTimeoutException if the response did not occur in time.
@raises InstrumentProtocolException if command could not be built or if response
was not recognized.
"""
# Get timeout and initialize response.
timeout = kwargs.get('timeout', DEFAULT_CMD_TIMEOUT)
response_regex = kwargs.get('response_regex', None) # required argument
write_delay = INTER_CHARACTER_DELAY
retval = None
if not response_regex:
raise InstrumentProtocolException('missing required keyword argument "response_regex"')
if response_regex and not isinstance(response_regex, RE_PATTERN):
raise InstrumentProtocolException('Response regex is not a compiled pattern!')
# Get the build handler.
build_handler = self._build_handlers.get(cmd, None)
if not build_handler:
raise InstrumentProtocolException('Cannot build command: %s' % cmd)
cmd_line = build_handler(cmd, *args)
# Wakeup the device, pass up exception if timeout
prompt = self._wakeup(timeout)
# Clear line and prompt buffers for result.
self._linebuf = ''
self._promptbuf = ''
# Send command.
log.debug('_do_cmd_resp: %s, timeout=%s, write_delay=%s, response_regex=%s',
repr(cmd_line), timeout, write_delay, response_regex)
for char in cmd_line:
self._connection.send(char)
time.sleep(write_delay)
# Wait for the prompt, prepare result and return, timeout exception
return self._get_response(timeout, response_regex=response_regex)
def _do_cmd_home(self):
"""
Move valve to the home port
@retval True if successful, False if unable to return home
"""
func = '_do_cmd_home'
log.debug('--- djm --- command home')
port = int(self._do_cmd_resp(McLaneCommand.PORT, response_regex=McLaneResponse.PORT)[0])
log.debug('--- djm --- at port: %d', port)
if port != 0:
log.debug('--- djm --- going home')
self._do_cmd_resp(McLaneCommand.HOME, response_regex=McLaneResponse.HOME, timeout=Timeout.HOME)
port = int(self._do_cmd_resp(McLaneCommand.PORT, response_regex=McLaneResponse.PORT)[0])
if port != 0:
log.error('Unable to return to home port')
return False
return True
def _do_cmd_flush(self, *args, **kwargs):
"""
Flush the home port in preparation for collecting a sample. This clears the intake port so that
the sample taken will be new.
This only starts the flush. The remainder of the flush is monitored by got_chunk.
"""
flush_volume = self._param_dict.get(Parameter.FLUSH_VOLUME)
flush_flowrate = self._param_dict.get(Parameter.FLUSH_FLOWRATE)
flush_minflow = self._param_dict.get(Parameter.FLUSH_MINFLOW)
if not self._do_cmd_home():
self._async_raise_fsm_event(ProtocolEvent.INSTRUMENT_FAILURE)
log.debug('--- djm --- flushing home port, %d %d %d',
                  flush_volume, flush_flowrate, flush_minflow)
self._do_cmd_no_resp(McLaneCommand.FORWARD, flush_volume, flush_flowrate, flush_minflow)
def _do_cmd_fill(self, *args, **kwargs):
"""
Fill the sample at the next available port
"""
log.debug('--- djm --- collecting sample in port %d', self.next_port)
fill_volume = self._param_dict.get(Parameter.FILL_VOLUME)
fill_flowrate = self._param_dict.get(Parameter.FILL_FLOWRATE)
fill_minflow = self._param_dict.get(Parameter.FILL_MINFLOW)
log.debug('--- djm --- collecting sample in port %d', self.next_port)
reply = self._do_cmd_resp(McLaneCommand.PORT, self.next_port, response_regex=McLaneResponse.PORT)
log.debug('--- djm --- port returned:\n%r', reply)
self.next_port += 1 # succeed or fail, we can't use this port again
# TODO - commit next_port to the agent for persistent data store
self._do_cmd_no_resp(McLaneCommand.FORWARD, fill_volume, fill_flowrate, fill_minflow)
def _do_cmd_clear(self, *args, **kwargs):
"""
Clear the home port
"""
self._do_cmd_home()
clear_volume = self._param_dict.get(Parameter.CLEAR_VOLUME)
clear_flowrate = self._param_dict.get(Parameter.CLEAR_FLOWRATE)
clear_minflow = self._param_dict.get(Parameter.CLEAR_MINFLOW)
log.debug('--- djm --- clearing home port, %d %d %d',
clear_volume, clear_flowrate, clear_minflow)
self._do_cmd_no_resp(McLaneCommand.REVERSE, clear_volume, clear_flowrate, clear_minflow)
########################################################################
# Generic handlers.
########################################################################
def _handler_pass(self, *args, **kwargs):
pass
def _handler_all_failure(self, *args, **kwargs):
log.error('Instrument failure detected. Entering recovery mode.')
return ProtocolState.RECOVERY, ResourceAgentState.BUSY
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
# TODO - read persistent data (next port)
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; can only be COMMAND (instrument has no AUTOSAMPLE mode).
@retval (next_state, result), (ProtocolState.COMMAND, None) if successful.
"""
# force to command mode, this instrument has no autosample mode
return ProtocolState.COMMAND, ResourceAgentState.IDLE
########################################################################
# Flush
########################################################################
def _handler_flush_enter(self, *args, **kwargs):
"""
Enter the flush state. Trigger FLUSH event.
"""
log.debug('--- djm --- entering FLUSH state')
self._second_attempt = False
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._async_raise_fsm_event(ProtocolEvent.FLUSH)
def _handler_flush_flush(self, *args, **kwargs):
"""
Begin flushing the home port. Subsequent flushing will be monitored and sent to the flush_pump_status
handler.
"""
log.debug('--- djm --- in FLUSH state')
next_state = ProtocolState.FILL
next_agent_state = ResourceAgentState.BUSY
# 2. Set to home port
# 3. flush intake (home port)
# 4. wait 30 seconds
# 1. Get next available port (if no available port, bail)
log.debug('--- djm --- Flushing home port')
self._do_cmd_flush()
return None, (ResourceAgentState.BUSY, None)
def _handler_flush_pump_status(self, *args, **kwargs):
"""
Manage pump status update during flush. Status updates indicate continued pumping, Result updates
indicate completion of command. Check the termination code for success.
@args match object containing the regular expression match of the status line.
"""
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
next_state = None
next_agent_state = None
log.debug('--- djm --- received pump status: pump status: %s, code: %d', pump_status, code)
if pump_status == 'Result':
log.debug('--- djm --- flush completed - %s', TerminationCodes[code])
if code == TerminationCodeEnum.SUDDEN_FLOW_OBSTRUCTION:
log.info('Encountered obstruction during flush, attempting to clear')
self._async_raise_fsm_event(ProtocolEvent.CLEAR)
else:
next_state = ProtocolState.FILL
next_agent_state = ResourceAgentState.BUSY
# elif pump_status == 'Status':
return next_state, next_agent_state
def _handler_flush_clear(self, *args, **kwargs):
"""
Attempt to clear home port after stoppage has occurred during flush.
This is only performed once. On the second stoppage, the driver will enter recovery mode.
"""
log.debug('--- djm --- handling clear request during flush')
if self._second_attempt:
return ProtocolState.RECOVERY, ResourceAgentState.BUSY
self._second_attempt = True
self._do_cmd_clear()
return None, None
########################################################################
# Fill
########################################################################
def _handler_fill_enter(self, *args, **kwargs):
"""
Enter the fill state. Trigger FILL event.
"""
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._async_raise_fsm_event(ProtocolEvent.FILL)
def _handler_fill_fill(self, *args, **kwargs):
"""
Send the fill command and process the first response
"""
next_state = None
next_agent_state = None
result = None
log.debug('Entering PHIL PHIL')
# 5. switch to collection port (next available)
# 6. collect sample (4000 ml)
# 7. wait 2 minutes
if self.next_port > NUM_PORTS:
log.error('Unable to collect RAS sample - %d containers full', NUM_PORTS)
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
else:
self._do_cmd_fill()
return next_state, (next_agent_state, result)
def _handler_fill_pump_status(self, *args, **kwargs):
"""
Process pump status updates during filter collection.
"""
next_state = None
next_agent_state = None
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
        if pump_status == 'Result':
            if code != TerminationCodeEnum.VOLUME_REACHED:
                next_state = ProtocolState.RECOVERY
            else:
                next_state = ProtocolState.CLEAR  # all done
# if pump_status == 'Status':
# TODO - check for bag rupture (> 93% flow rate near end of sample collect- RAS only)
return next_state, next_agent_state
########################################################################
# Clear
########################################################################
def _handler_clear_enter(self, *args, **kwargs):
"""
Enter the clear state. Trigger the CLEAR event.
"""
self._second_attempt = False
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._async_raise_fsm_event(ProtocolEvent.CLEAR)
def _handler_clear_clear(self, *args, **kwargs):
"""
        Send the clear command. If an obstruction is encountered, trigger a FLUSH; if that also fails, the driver enters RECOVERY mode.
"""
log.debug('--- djm --- clearing home port')
# 8. return to home port
# 9. reverse flush 75 ml to pump water from exhaust line through intake line
self._do_cmd_clear()
return None, None
def _handler_clear_pump_status(self, *args, **kwargs):
"""
Parse pump status during clear action.
"""
next_state = None
next_agent_state = None
match = args[0]
pump_status = match.group('status')
code = int(match.group('code'))
if pump_status == 'Result':
if code != TerminationCodeEnum.VOLUME_REACHED:
log.error('Encountered obstruction during clear. Attempting flush...')
self._async_raise_fsm_event(ProtocolEvent.FLUSH)
else:
log.debug('--- djm --- clear complete')
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
# if Status, nothing to do
return next_state, next_agent_state
def _handler_clear_flush(self, *args, **kwargs):
"""
Attempt to recover from failed attempt to clear by flushing home port. Only try once.
"""
log.info('Attempting to flush main port during clear')
if self._second_attempt:
return ProtocolState.RECOVERY, ResourceAgentState.BUSY
self._second_attempt = True
self._do_cmd_flush()
return None, None
########################################################################
    # Command handlers.
    # Implemented only to make direct access (DA) possible; the instrument has no actual command mode.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
"""
# Command device to update parameters and send a config change event if needed.
self._update_params()
self._protocol_fsm.on_event(ProtocolEvent.INIT_PARAMS)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_init_params(self, *args, **kwargs):
"""
Setup initial parameters.
"""
self._init_params()
return None, None
def _handler_command_set(self, *args, **kwargs):
"""
Set instrument parameters
"""
log.debug('handler command set called')
startup = False
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('set command requires a parameter dictionary.')
try:
startup = args[1]
except IndexError:
pass
if not isinstance(params, dict):
raise InstrumentParameterException('set parameters is not a dictionary')
self._set_params(params, startup)
return None, None
# changed = False
# for key, value in params.items():
# log.info('Command:set - setting parameter %s to %s', key, value)
# if not Parameter.has(key):
# raise InstrumentProtocolException('Attempt to set undefined parameter: %s', key)
# old_value = self._param_dict.get(key)
# if old_value != value:
# changed = True
# self._param_dict.set_value(key, value)
#
# if changed:
# self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
#
# next_state = None
# result = None
# return next_state, result
def _handler_command_start_direct(self, *args, **kwargs):
"""
Start direct access.
"""
log.debug('--- djm --- entered _handler_command_start_direct with args: %s', args)
result = None
next_state = ProtocolState.DIRECT_ACCESS
next_agent_state = ResourceAgentState.DIRECT_ACCESS
return next_state, (next_agent_state, result)
########################################################################
# Recovery handlers.
########################################################################
# TODO - not sure how to determine how to exit from this state. Probably requires a driver reset.
def _handler_recovery_enter(self, *args, **kwargs):
"""
Error recovery mode. The instrument failed to respond to a command and now requires the user to perform
diagnostics and correct before proceeding.
"""
log.debug('--- djm --- entered recovery mode')
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_execute_direct(self, data):
self._do_cmd_direct(data)
return None, None
def _handler_direct_access_stop_direct(self, *args, **kwargs):
return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)
########################################################################
# general handlers.
########################################################################
def get_timestamp_delayed(self, fmt, delay=0):
"""
Return a formatted date string of the current utc time,
but the string return is delayed until the next second
transition.
Formatting:
http://docs.python.org/library/time.html#time.strftime
@param fmt: strftime() format string
@return: formatted date string
@raise ValueError if format is None
"""
if not fmt:
raise ValueError
now = datetime.datetime.utcnow() + datetime.timedelta(seconds=delay)
time.sleep((1e6 - now.microsecond) / 1e6)
now = datetime.datetime.utcnow() + datetime.timedelta(seconds=delay)
return now.strftime(fmt)
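    # Example (illustrative only): get_timestamp_delayed("%m/%d/%Y %H:%M:%S") blocks until
    # just after the next second boundary, so a call made at 17:14:55.9 would return a
    # string such as "03/20/2014 17:14:56"; the concrete values here are hypothetical.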
def _handler_sync_clock(self, *args, **kwargs):
"""
sync clock close to a second edge
        @retval (next_state, (next_agent_state, result)) tuple; result contains the reported instrument time.
        @throws InstrumentTimeoutException if device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
cmd_len = len('clock 03/20/2014 17:14:55' + NEWLINE)
delay = cmd_len * INTER_CHARACTER_DELAY
time_format = "%m/%d/%Y %H:%M:%S"
str_val = self.get_timestamp_delayed(time_format, delay)
# str_val = time.strftime(time_format, time.gmtime(time.time() + self._clock_set_offset))
log.debug("Setting instrument clock to '%s'", str_val)
ras_time = self._do_cmd_resp(McLaneCommand.CLOCK, str_val, response_regex=McLaneResponse.READY)[0]
return None, (None, {'time': ras_time})
def _handler_command_acquire(self, *args, **kwargs):
self._handler_sync_clock()
return ProtocolState.FLUSH, ResourceAgentState.BUSY
# def _handler_command_status(self, *args, **kwargs):
# # get the following:
# # - VERSION
# # - CAPACITY (pump flow)
# # - BATTERY
# # - CODES (termination codes)
# # - COPYRIGHT (termination codes)
# return None, ResourceAgentState.COMMAND
def _handler_command_clear(self, *args, **kwargs):
return ProtocolState.CLEAR, ResourceAgentState.BUSY
########################################################################
# Private helpers.
########################################################################
def _wakeup(self, wakeup_timeout=10, response_timeout=3):
"""
        Overridden because waking this instrument up is a multi-step process
        requiring two different requests.
@param wakeup_timeout The timeout to wake the device.
@param response_timeout The time to look for response to a wakeup attempt.
@throw InstrumentTimeoutException if the device could not be woken.
"""
sleep_time = .1
command = McLaneCommand.GO
# Grab start time for overall wakeup timeout.
starttime = time.time()
while True:
# Clear the prompt buffer.
log.debug("_wakeup: clearing promptbuf: %s", self._promptbuf)
self._promptbuf = ''
# Send a command and wait delay amount for response.
log.debug('_wakeup: Sending command %s, delay=%s', command.encode("hex"), response_timeout)
for char in command:
self._connection.send(char)
time.sleep(INTER_CHARACTER_DELAY)
sleep_amount = 0
while True:
time.sleep(sleep_time)
if self._promptbuf.find(Prompt.COMMAND_INPUT) != -1:
# instrument is awake
log.debug('_wakeup: got command input prompt %s', Prompt.COMMAND_INPUT)
# add inter-character delay which _do_cmd_resp() incorrectly doesn't add to
# the start of a transmission
time.sleep(INTER_CHARACTER_DELAY)
return Prompt.COMMAND_INPUT
if self._promptbuf.find(Prompt.ENTER_CTRL_C) != -1:
command = McLaneCommand.CONTROL_C
break
if self._promptbuf.find(Prompt.PERIOD) == 0:
command = McLaneCommand.CONTROL_C
break
sleep_amount += sleep_time
if sleep_amount >= response_timeout:
log.debug("_wakeup: expected response not received, buffer=%s", self._promptbuf)
break
if time.time() > starttime + wakeup_timeout:
                raise InstrumentTimeoutException(
                    "_wakeup(): instrument failed to wake up in %d seconds" % wakeup_timeout)
def _build_command(self, cmd, *args):
return cmd + ' ' + ' '.join([str(x) for x in args]) + NEWLINE
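    # Example (illustrative only, the command name is hypothetical): _build_command('port', 5)
    # returns 'port 5' followed by NEWLINE.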
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
        Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')
def _build_param_dict(self):
"""
        Populate the parameter dictionary with McLane instrument parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(Parameter.FLUSH_VOLUME,
r'Flush Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=150,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="flush_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FLUSH_FLOWRATE,
r'Flush Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="flush_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FLUSH_MINFLOW,
r'Flush Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="flush_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_VOLUME,
r'Fill Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=4000,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="fill_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_FLOWRATE,
r'Fill Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="fill_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.FILL_MINFLOW,
r'Fill Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="fill_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_VOLUME,
r'Reverse Volume: (.*)mL',
None,
self._int_to_string,
type=ParameterDictType.INT,
# default_value=100,
default_value=10, # djm - fast test value
units='mL',
startup_param=True,
display_name="clear_volume",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_FLOWRATE,
r'Reverse Flow Rate: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=100,
units='mL/min',
startup_param=True,
display_name="clear_flow_rate",
visibility=ParameterDictVisibility.IMMUTABLE)
self._param_dict.add(Parameter.CLEAR_MINFLOW,
r'Reverse Min Flow: (.*)mL/min',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=75,
units='mL/min',
startup_param=True,
display_name="clear_min_flow",
visibility=ParameterDictVisibility.IMMUTABLE)
def _update_params(self):
"""
Update the parameter dictionary.
"""
log.debug("_update_params:")
# def _parse_battery_response(self, response, prompt):
# """
# Parse handler for battery command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if battery command misunderstood.
# """
# log.debug("_parse_battery_response: response=%s, prompt=%s", response, prompt)
# if prompt == Prompt.UNRECOGNIZED_COMMAND:
# raise InstrumentProtocolException('battery command not recognized: %s.' % response)
#
# if not self._param_dict.update(response):
# raise InstrumentProtocolException('battery command not parsed: %s.' % response)
#
# return
#
# def _parse_clock_response(self, response, prompt):
# """
# Parse handler for clock command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if clock command misunderstood.
# @retval the joined string from the regular expression match
# """
# # extract current time from response
# log.debug('--- djm --- parse_clock_response: response: %r', response)
# ras_time_string = ' '.join(response.split()[:2])
# time_format = "%m/%d/%y %H:%M:%S"
# ras_time = time.strptime(ras_time_string, time_format)
# ras_time = list(ras_time)
# ras_time[-1] = 0 # tm_isdst field set to 0 - using GMT, no DST
#
# return tuple(ras_time)
#
# def _parse_port_response(self, response, prompt):
# """
# Parse handler for port command.
# @param response command response string.
# @param prompt prompt following command response.
# @throws InstrumentProtocolException if port command misunderstood.
# @retval the joined string from the regular expression match
# """
# # extract current port from response
# log.debug('--- djm --- parse_port_response: response: %r', response)
# port = int(response)
#
# return port
|
{
"content_hash": "ab3c58c59e4018cabc9014f1b1d12408",
"timestamp": "",
"source": "github",
"line_count": 1255,
"max_line_length": 120,
"avg_line_length": 40.23824701195219,
"alnum_prop": 0.5660508128873839,
"repo_name": "ronkyo/mi-instrument",
"id": "b92f8511a269221d68dc322b7a4b926f04668567",
"size": "50499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/instrument/mclane/driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6398834"
}
],
"symlink_target": ""
}
|
import ast
import os
import re
try:
from distutils.util import get_platform
is_windows = get_platform().startswith("win")
except ImportError:
    # Don't break install if distutils is incompatible in some way;
    # probably overly defensive.
is_windows = False
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
# Set environment variable to 1 to build as library for Galaxy instead
# of as stand-alone app.
DEFAULT_PULSAR_GALAXY_LIB = 0
PULSAR_GALAXY_LIB = os.environ.get("PULSAR_GALAXY_LIB", "%d" % DEFAULT_PULSAR_GALAXY_LIB) == "1"
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
if os.path.exists("requirements.txt"):
requirements = [r for r in open("requirements.txt").read().split("\n") if ";" not in r]
else:
# In tox, it will cover them anyway.
requirements = []
if PULSAR_GALAXY_LIB:
requirements = [r for r in requirements if not r.startswith("galaxy-")]
test_requirements = [
# TODO: put package test requirements here
]
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('pulsar/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
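# Example (illustrative): a line such as  __version__ = '0.14.13'  in pulsar/__init__.py is
# matched by _version_re, and ast.literal_eval turns the quoted literal into the plain
# string "0.14.13" (the version number shown here is hypothetical).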
if is_windows:
scripts = ["scripts/pulsar.bat"]
else:
scripts = ["scripts/pulsar"]
name = "pulsar-app" if not PULSAR_GALAXY_LIB else "pulsar-galaxy-lib"
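# Usage sketch (illustrative): `PULSAR_GALAXY_LIB=1 pip install .` builds the
# "pulsar-galaxy-lib" distribution with the galaxy-* requirements stripped, while a plain
# `pip install .` builds the stand-alone "pulsar-app" package.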
setup(
name=name,
version=version,
description='Distributed job execution application built for Galaxy (http://galaxyproject.org/).',
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
author='Galaxy Project',
author_email='jmchilton@gmail.com',
url='https://github.com/galaxyproject/pulsar',
packages=[
'pulsar',
'pulsar.cache',
'pulsar.client',
'pulsar.client.test',
'pulsar.client.staging',
'pulsar.client.transport',
'pulsar.managers',
'pulsar.managers.base',
'pulsar.managers.staging',
'pulsar.managers.util',
'pulsar.managers.util.cli',
'pulsar.managers.util.cli.job',
'pulsar.managers.util.cli.shell',
'pulsar.managers.util.condor',
'pulsar.managers.util.drmaa',
'pulsar.managers.util.job_script',
'pulsar.mesos',
'pulsar.messaging',
'pulsar.scripts',
'pulsar.tools',
'pulsar.util',
'pulsar.util.pastescript',
'pulsar.web',
],
entry_points='''
[console_scripts]
pulsar-main=pulsar.main:main
pulsar-check=pulsar.client.test.check:main
pulsar-config=pulsar.scripts.config:main
pulsar-drmaa-launch=pulsar.scripts.drmaa_launch:main
pulsar-drmaa-kill=pulsar.scripts.drmaa_kill:main
pulsar-chown-working-directory=pulsar.scripts.chown_working_directory:main
pulsar-submit=pulsar.scripts.submit:main
pulsar-run=pulsar.scripts.run:main
_pulsar-conda-init=pulsar.scripts._conda_init:main
_pulsar-configure-slurm=pulsar.scripts._configure_slurm:main
_pulsar-configure-galaxy-cvmfs=pulsar.scripts._configure_galaxy_cvmfs:main
''',
scripts=scripts,
package_data={'pulsar': [
'managers/util/job_script/DEFAULT_JOB_FILE_TEMPLATE.sh',
'managers/util/job_script/CLUSTER_SLOTS_STATEMENT.sh',
'scripts/cvmfs_data/*',
]},
package_dir={'pulsar': 'pulsar'},
include_package_data=True,
install_requires=requirements,
extras_require={
'web': ['Paste', 'PasteScript'],
'galaxy_extended_metadata': ['galaxy-job-execution>=19.9.0.dev0', 'galaxy-util[template]'],
},
license="Apache License 2.0",
zip_safe=False,
keywords='pulsar',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
test_suite='test',
tests_require=test_requirements
)
|
{
"content_hash": "fedae804fe19af4cecf160d61b87b2fb",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 102,
"avg_line_length": 33.65151515151515,
"alnum_prop": 0.6391265195857722,
"repo_name": "natefoo/pulsar",
"id": "be1c332481bee3c1e3d3cb7ac4d67e80a1d9efee",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "241"
},
{
"name": "Dockerfile",
"bytes": "5135"
},
{
"name": "Makefile",
"bytes": "5889"
},
{
"name": "Python",
"bytes": "659280"
},
{
"name": "Shell",
"bytes": "17011"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class SayingsConfig(AppConfig):
name = 'sayings'
|
{
"content_hash": "659d2959584a8737657e9131d23ef27f",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 17.8,
"alnum_prop": 0.7528089887640449,
"repo_name": "migdall/quickblog",
"id": "9b6439361a3e3f8dce64560b554f1408baa88808",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sayings/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1463"
},
{
"name": "HTML",
"bytes": "7950"
},
{
"name": "JavaScript",
"bytes": "3611"
},
{
"name": "Python",
"bytes": "16775"
}
],
"symlink_target": ""
}
|
import io
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from implib2.imp_eeprom import EEPROM
class TestEEPROM:
def test_init_ReadsData(self):
data = io.StringIO(u'255\n255\n255')
with patch('implib2.imp_eeprom.open', create=True) as mock_open:
mock_open.return_value = data
eeprom = EEPROM('test.epr')
assert eeprom._data.getvalue() == b'\xff\xff\xff'
def test_init_ReadsDataWithHeader(self):
data = io.StringIO(u'; some = header\n255\n255\n255')
with patch('implib2.imp_eeprom.open', create=True) as mock_open:
mock_open.return_value = data
eeprom = EEPROM('test.epr')
assert eeprom._data.getvalue() == b'\xff\xff\xff'
assert eeprom.some == 'header'
def test_init_ReadsDataWithHeaderHasSpace(self):
data = io.StringIO(u'; some bla = header\n255\n255\n255')
with patch('implib2.imp_eeprom.open', create=True) as mock_open:
mock_open.return_value = data
eeprom = EEPROM('test.epr')
assert eeprom._data.getvalue() == b'\xff\xff\xff'
assert eeprom.some_bla == 'header'
def test_iterating_OnePage(self):
data = io.StringIO(u'255\n' * 250)
with patch('implib2.imp_eeprom.open', create=True) as mock_open:
mock_open.return_value = data
eeprom = EEPROM('test.epr')
for no, page in enumerate(eeprom):
assert len(page) == 250
assert page == b'\xff' * 250
assert no == 0
    def test_iterating_TwoAndAHalfPages(self):
data = io.StringIO(u'255\n' * 625)
with patch('implib2.imp_eeprom.open', create=True) as mock_open:
mock_open.return_value = data
eeprom = EEPROM('test.epr')
for no, page in enumerate(eeprom):
assert no in [0, 1, 2]
if no in [0, 1]:
assert len(page) == 250
assert page == b'\xff' * 250
else:
assert len(page) == 125
assert page == b'\xff' * 125
|
{
"content_hash": "288251b081477be63352ce684c2d00cc",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 72,
"avg_line_length": 34.145161290322584,
"alnum_prop": 0.5762871988663203,
"repo_name": "mhubig/implib2",
"id": "b1b2e04f3afc22f5bb74f3dd7d185363eb818b61",
"size": "2142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_eeprom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5616"
},
{
"name": "Makefile",
"bytes": "1939"
},
{
"name": "Python",
"bytes": "128109"
},
{
"name": "Shell",
"bytes": "570"
}
],
"symlink_target": ""
}
|
def voter_guide_followers_retrieve_doc_template_values(url_root):
"""
Show documentation about voterGuideFollowersRetrieve
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
{
'name': 'maximum_number_to_retrieve',
'value': 'integer', # boolean, integer, long, string
'description': 'Defaults to 20 voter guides. Enter a value to set your own limit.',
},
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
# {
# 'code': '',
# 'description': '',
# },
]
try_now_link_variables_dict = {
# 'organization_we_vote_id': 'wv85org1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "voter_guides": list\n' \
' [\n' \
' "voter_guide_display_name": string (Name of this org or person),\n' \
                   '    "voter_guide_owner_type": string (ORGANIZATION, PUBLIC_FIGURE, VOTER),\n' \
' "we_vote_id": string (We Vote ID of the voter guide),\n' \
' "organization_we_vote_id": string (We Vote ID for the org that owns the voter guide),\n' \
' "public_figure_we_vote_id": string (We Vote ID for the person that owns the voter guide),\n' \
                   '    "voter_guide_image_url_large": string (URL for the large voter guide image),\n' \
                   '    "voter_guide_image_url_medium": string (URL for the medium voter guide image),\n' \
                   '    "voter_guide_image_url_tiny": string (URL for the tiny voter guide image),\n' \
' "last_updated": string (time in this format %Y-%m-%d %H:%M:%S),\n' \
' "google_civic_election_id": integer,\n' \
' "twitter_description": string,\n' \
' "twitter_followers_count": integer,\n' \
                   '    "twitter_handle": string,\n' \
' "owner_voter_id": integer TO BE DEPRECATED,\n' \
' ],\n' \
' "google_civic_election_id": integer,\n' \
' "maximum_number_to_retrieve": integer,\n' \
'}\n'
template_values = {
'api_name': 'voterGuideFollowersRetrieve',
'api_slug': 'voterGuideFollowersRetrieve',
'api_introduction':
"",
'try_now_link': 'apis_v1:voterGuideFollowersRetrieveView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
|
{
"content_hash": "1cc3762808d162a02a2dc353bbf03065",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 119,
"avg_line_length": 46.40909090909091,
"alnum_prop": 0.5012242899118511,
"repo_name": "wevote/WeVoteServer",
"id": "ab38b38219ef21111e1f85bc84f383fa55f3a936",
"size": "4217",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apis_v1/documentation_source/voter_guide_followers_retrieve_doc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3612"
},
{
"name": "HTML",
"bytes": "1559624"
},
{
"name": "JavaScript",
"bytes": "26822"
},
{
"name": "Procfile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "11943600"
},
{
"name": "Shell",
"bytes": "587"
}
],
"symlink_target": ""
}
|
"""Sana mDS Django admin interface
:Authors: Sana dev team
:Version: 2.0
"""
from django.contrib import admin
from django.forms.models import modelformset_factory
from .models import *
class UuidHackInline(admin.StackedInline):
    def __init__(self, *args):
        super(UuidHackInline, self).__init__(*args)
        # NOTE: 'request' is not available in __init__; filtering observations by
        # encounter would have to happen in a get_queryset(self, request) override.
        # self.queryset = Observation.objects.filter(encounter=request)
    #def formfield_for_foreignkey(self, db_field, request, **kwargs):
    #    if db_field.name == "uuid":
    #        #e = Encounter.objects.get(pk=kwargs['object_id'])
    #        kwargs["id__exact"] = db_field
    #        return super(admin.StackedInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class DeviceAdmin(admin.ModelAdmin):
readonly_fields = ['uuid']
list_display = ['name', 'uuid']
list_filter = ['name',]
class ProcedureAdmin(admin.ModelAdmin):
readonly_fields = ['uuid']
list_display = ['title', 'author', 'uuid']
class RestAdmin(admin.TabularInline):
app_label="REST Services"
inlines = []
class RelationshipAdmin(admin.TabularInline):
model = Relationship
fk_name = 'to_concept'
list_display_links = []
class ConceptAdmin(admin.ModelAdmin):
inlines = [
RelationshipAdmin,
]
readonly_fields = ['uuid']
list_display = ['name', 'uuid']
list_filter = ['name',]
class ObservationAdmin(admin.ModelAdmin):
exclude = ('_complex_progress',)
readonly_fields = ['_complex_size','uuid','value']
list_display = ['question', 'concept','value', 'subject','device','modified', 'encounter']
list_filter = ['node','concept', 'modified', 'encounter']
class ObservationInline(UuidHackInline):
model = Observation
#formset = modelformset_factory(Observation)
#exclude = ('_complex_progress',)
#readonly_fields = ['_complex_size','uuid','value']
class EncounterAdmin(admin.ModelAdmin):
readonly_fields = ['uuid','device','procedure','subject','observer',]
exclude = ['concept',]
#inlines = [ ObservationInline,]
list_display = ['subject', 'procedure', 'modified','uuid',"observer",]
class EncounterInline(admin.StackedInline):
model = Encounter
class ObserverAdmin(admin.ModelAdmin):
readonly_fields = ['uuid',]
list_display = ['user', 'uuid']
inlines = [ EncounterInline, ]
class SubjectAdmin(admin.ModelAdmin):
readonly_fields = ['uuid',]
list_display = ['system_id','given_name', 'family_name', 'uuid', "image"]
class SubjectInline(admin.StackedInline):
model = Subject
class SurgicalSubjectAdmin(admin.ModelAdmin):
readonly_fields = ['uuid',]
list_display = ['system_id','given_name', 'family_name', 'uuid', "image"]
class LocationAdmin(admin.ModelAdmin):
readonly_fields = ['uuid',]
model = Location
list_display = ('code','name',)
#list_filter = ('name',)
admin.site.register(Concept, ConceptAdmin)
admin.site.register(Relationship)
admin.site.register(RelationshipCategory)
admin.site.register(Device, DeviceAdmin)
admin.site.register(Encounter, EncounterAdmin)
admin.site.register(Observation,ObservationAdmin)
admin.site.register(Location,LocationAdmin)
admin.site.register(Notification)
admin.site.register(Observer,ObserverAdmin)
admin.site.register(Procedure,ProcedureAdmin)
admin.site.register(Subject,SubjectAdmin)
admin.site.register(SurgicalSubject,SurgicalSubjectAdmin)
admin.site.register(Event)
admin.site.register(Surgeon)
admin.site.register(SurgicalAdvocate)
#admin.site.register(ClientEventLog, ClientEventLogAdmin)
|
{
"content_hash": "16dfa7adcd7d59c2d4875986a200b902",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 102,
"avg_line_length": 31.723214285714285,
"alnum_prop": 0.6963129749507458,
"repo_name": "dekatzenel/team-k",
"id": "1c9a93eb313a8a3953017bcf620d818de9c5d22c",
"size": "3553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mds/core/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import plistlib
import sys
#==== This should be modified to match your environment ====
REPO_ROOT = '/Volumes/its_munki_repo'
#-----------------------------------------------------------
manifest_path = os.path.join(REPO_ROOT, 'manifests')
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
def includesTitle(manifest, software, section):
''' given software title and manifest dict, check to see if
title is referenced. Returns true or false
'''
isReferenced = False
try:
with open(os.path.join(manifest_path, manifest), 'rb') as fp:
manifest_plist = plistlib.load(fp)
for title in manifest_plist.get(section):
if title == software:
isReferenced = True
except:
print("** Unable to load : " + manifest)
return isReferenced
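# Example (illustrative, file and title names are hypothetical):
# includesTitle('site_default', 'Firefox', 'managed_installs') returns True only if the
# manifest's 'managed_installs' list contains the title 'Firefox'.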
# Before we get too far, lets check to make we can reach the manifest path
if not os.path.exists(manifest_path):
print('\n*** ERROR : Manifests not found at ',str(manifest_path),' ***')
print(' Check to make sure the munki repo is mounted before running script')
quit()
# Walk the manifest path and build a list of manifests
manifest_list = []
for root, dirs, files in os.walk(manifest_path):
for file in files:
if str(file)[0] != '.': # Ignore any dot files
manifest_list.append(file)
# Get the software title and manifest section from the script arguments and check for inclusions
if len(sys.argv) == 3:
softwareTitle = (sys.argv[1])
manifestSection = (sys.argv[2])
else:
print("** Error - please provide title and manifest section ")
exit(1)
for m in manifest_list:
if includesTitle(m,softwareTitle, manifestSection):
print(m + " has " + softwareTitle + " in " + manifestSection)
|
{
"content_hash": "882f599748ce4b5f86fbad3e8ab15086",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 83,
"avg_line_length": 34.48076923076923,
"alnum_prop": 0.6257668711656442,
"repo_name": "vmiller/Munki-Helper-Scripts",
"id": "412c345be8c3619223813893856bfe468e06d55c",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "searchmanifest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5938"
}
],
"symlink_target": ""
}
|
import sys, codecs
import unittest, re
from httplib import HTTPException
from test import test_support
from StringIO import StringIO
class TestBase:
encoding = '' # codec name
codec = None # codec tuple (with 4 elements)
tstring = '' # string to test StreamReader
codectests = None # must set. codec test tuple
roundtriptest = 1 # set if roundtrip is possible with unicode
has_iso10646 = 0 # set if this encoding contains whole iso10646 map
xmlcharnametest = None # string to test xmlcharrefreplace
unmappedunicode = u'\udeee' # a unicode codepoint that is not mapped.
def setUp(self):
if self.codec is None:
self.codec = codecs.lookup(self.encoding)
self.encode = self.codec.encode
self.decode = self.codec.decode
self.reader = self.codec.streamreader
self.writer = self.codec.streamwriter
self.incrementalencoder = self.codec.incrementalencoder
self.incrementaldecoder = self.codec.incrementaldecoder
def test_chunkcoding(self):
for native, utf8 in zip(*[StringIO(f).readlines()
for f in self.tstring]):
u = self.decode(native)[0]
self.assertEqual(u, utf8.decode('utf-8'))
if self.roundtriptest:
self.assertEqual(native, self.encode(u)[0])
def test_errorhandle(self):
for source, scheme, expected in self.codectests:
if type(source) == type(''):
func = self.decode
else:
func = self.encode
if expected:
result = func(source, scheme)[0]
self.assertEqual(result, expected)
else:
self.assertRaises(UnicodeError, func, source, scheme)
def test_xmlcharrefreplace(self):
if self.has_iso10646:
return
s = u"\u0b13\u0b23\u0b60 nd eggs"
self.assertEqual(
self.encode(s, "xmlcharrefreplace")[0],
"ଓଣୠ nd eggs"
)
def test_customreplace_encode(self):
if self.has_iso10646:
return
from htmlentitydefs import codepoint2name
def xmlcharnamereplace(exc):
if not isinstance(exc, UnicodeEncodeError):
raise TypeError("don't know how to handle %r" % exc)
l = []
for c in exc.object[exc.start:exc.end]:
if ord(c) in codepoint2name:
l.append(u"&%s;" % codepoint2name[ord(c)])
else:
l.append(u"&#%d;" % ord(c))
return (u"".join(l), exc.end)
codecs.register_error("test.xmlcharnamereplace", xmlcharnamereplace)
if self.xmlcharnametest:
sin, sout = self.xmlcharnametest
else:
sin = u"\xab\u211c\xbb = \u2329\u1234\u232a"
sout = "«ℜ» = ⟨ሴ⟩"
self.assertEqual(self.encode(sin,
"test.xmlcharnamereplace")[0], sout)
def test_callback_wrong_objects(self):
def myreplace(exc):
return (ret, exc.end)
codecs.register_error("test.cjktest", myreplace)
for ret in ([1, 2, 3], [], None, object(), 'string', ''):
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_long_index(self):
def myreplace(exc):
return (u'x', long(exc.end))
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdxefgh', 9))
def myreplace(exc):
return (u'x', sys.maxint + 1)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_None_index(self):
def myreplace(exc):
return (u'x', None)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(TypeError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_callback_backward_index(self):
def myreplace(exc):
if myreplace.limit > 0:
myreplace.limit -= 1
return (u'REPLACED', 0)
else:
return (u'TERMINAL', exc.end)
myreplace.limit = 3
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'),
('abcdREPLACEDabcdREPLACEDabcdREPLACEDabcdTERMINALefgh', 9))
def test_callback_forward_index(self):
def myreplace(exc):
return (u'REPLACED', exc.end + 2)
codecs.register_error("test.cjktest", myreplace)
self.assertEqual(self.encode(u'abcd' + self.unmappedunicode + u'efgh',
'test.cjktest'), ('abcdREPLACEDgh', 9))
def test_callback_index_outofbound(self):
def myreplace(exc):
return (u'TERM', 100)
codecs.register_error("test.cjktest", myreplace)
self.assertRaises(IndexError, self.encode, self.unmappedunicode,
'test.cjktest')
def test_incrementalencoder(self):
UTF8Reader = codecs.getreader('utf-8')
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = StringIO()
encoder = self.incrementalencoder()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
e = encoder.encode(data)
ostream.write(e)
self.assertEqual(ostream.getvalue(), self.tstring[0])
def test_incrementaldecoder(self):
UTF8Writer = codecs.getwriter('utf-8')
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = StringIO(self.tstring[0])
ostream = UTF8Writer(StringIO())
decoder = self.incrementaldecoder()
while 1:
data = istream.read(sizehint)
if not data:
break
else:
u = decoder.decode(data)
ostream.write(u)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_incrementalencoder_error_callback(self):
inv = self.unmappedunicode
e = self.incrementalencoder()
self.assertRaises(UnicodeEncodeError, e.encode, inv, True)
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
e.reset()
def tempreplace(exc):
return (u'called', exc.end)
codecs.register_error('test.incremental_error_callback', tempreplace)
e.errors = 'test.incremental_error_callback'
self.assertEqual(e.encode(inv, True), 'called')
# again
e.errors = 'ignore'
self.assertEqual(e.encode(inv, True), '')
def test_streamreader(self):
UTF8Writer = codecs.getwriter('utf-8')
for name in ["read", "readline", "readlines"]:
for sizehint in [None, -1] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = self.reader(StringIO(self.tstring[0]))
ostream = UTF8Writer(StringIO())
func = getattr(istream, name)
while 1:
data = func(sizehint)
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[1])
def test_streamwriter(self):
readfuncs = ('read', 'readline', 'readlines')
UTF8Reader = codecs.getreader('utf-8')
for name in readfuncs:
for sizehint in [None] + range(1, 33) + \
[64, 128, 256, 512, 1024]:
istream = UTF8Reader(StringIO(self.tstring[1]))
ostream = self.writer(StringIO())
func = getattr(istream, name)
while 1:
if sizehint is not None:
data = func(sizehint)
else:
data = func()
if not data:
break
if name == "readlines":
ostream.writelines(data)
else:
ostream.write(data)
self.assertEqual(ostream.getvalue(), self.tstring[0])
class TestBase_Mapping(unittest.TestCase):
pass_enctest = []
pass_dectest = []
supmaps = []
def __init__(self, *args, **kw):
unittest.TestCase.__init__(self, *args, **kw)
try:
self.open_mapping_file().close() # test it to report the error early
except (IOError, HTTPException):
self.skipTest("Could not retrieve "+self.mapfileurl)
def open_mapping_file(self):
return test_support.open_urlresource(self.mapfileurl)
def test_mapping_file(self):
if self.mapfileurl.endswith('.xml'):
self._test_mapping_file_ucm()
else:
self._test_mapping_file_plain()
def _test_mapping_file_plain(self):
_unichr = lambda c: eval("u'\\U%08x'" % int(c, 16))
unichrs = lambda s: u''.join(_unichr(c) for c in s.split('+'))
urt_wa = {}
with self.open_mapping_file() as f:
for line in f:
if not line:
break
data = line.split('#')[0].strip().split()
if len(data) != 2:
continue
csetval = eval(data[0])
if csetval <= 0x7F:
csetch = chr(csetval & 0xff)
elif csetval >= 0x1000000:
csetch = chr(csetval >> 24) + chr((csetval >> 16) & 0xff) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x10000:
csetch = chr(csetval >> 16) + \
chr((csetval >> 8) & 0xff) + chr(csetval & 0xff)
elif csetval >= 0x100:
csetch = chr(csetval >> 8) + chr(csetval & 0xff)
else:
continue
unich = unichrs(data[1])
if unich == u'\ufffd' or unich in urt_wa:
continue
urt_wa[unich] = csetch
self._testpoint(csetch, unich)
def _test_mapping_file_ucm(self):
with self.open_mapping_file() as f:
ucmdata = f.read()
uc = re.findall('<a u="([A-F0-9]{4})" b="([0-9A-F ]+)"/>', ucmdata)
for uni, coded in uc:
unich = unichr(int(uni, 16))
codech = ''.join(chr(int(c, 16)) for c in coded.split())
self._testpoint(codech, unich)
def test_mapping_supplemental(self):
for mapping in self.supmaps:
self._testpoint(*mapping)
def _testpoint(self, csetch, unich):
if (csetch, unich) not in self.pass_enctest:
try:
self.assertEqual(unich.encode(self.encoding), csetch)
except UnicodeError, exc:
self.fail('Encoding failed while testing %s -> %s: %s' % (
repr(unich), repr(csetch), exc.reason))
if (csetch, unich) not in self.pass_dectest:
try:
self.assertEqual(csetch.decode(self.encoding), unich)
except UnicodeError, exc:
self.fail('Decoding failed while testing %s -> %s: %s' % (
repr(csetch), repr(unich), exc.reason))
def load_teststring(encoding):
from test import cjkencodings_test
return cjkencodings_test.teststring[encoding]
|
{
"content_hash": "39cbbb88fa8dc11281b9f7eed3d8f924",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 81,
"avg_line_length": 38.08,
"alnum_prop": 0.5245636716224952,
"repo_name": "xxd3vin/spp-sdk",
"id": "0639032a2dd475efb7522e89a3633e81173e40a5",
"size": "12480",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opt/Python27/Lib/test/test_multibytecodec_support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "759663"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "56155"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "3065"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "JavaScript",
"bytes": "163687"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Pascal",
"bytes": "8738"
},
{
"name": "Python",
"bytes": "22177886"
},
{
"name": "Shell",
"bytes": "15704"
},
{
"name": "Tcl",
"bytes": "2065501"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from django.test import TestCase, Client
from bs4 import BeautifulSoup
from django.core import mail
from django.contrib.auth.models import User
import factory
class UserFactory(factory.Factory):
class Meta:
model = User
username = 'testsubject1'
first_name = 'Chell'
last_name = '[REDACTED]'
email = 'testsubject1@mailinator.com'
class LoginLogoutTestCase(TestCase):
def setUp(self):
user = UserFactory()
user.set_password('longfallboots')
user.save()
def test_login(self):
client = Client()
response = client.get('/')
soup = BeautifulSoup(response.content, 'html5lib')
self.assertTrue(soup.find('a', text='Login'))
self.assertFalse(soup.find('div', id='logged_in_user'))
self.assertFalse(soup.find('a', text='Logout'))
response = client.post(
'/accounts/login/',
{'username': 'testsubject1', 'password': 'longfallboots'},
follow=True
)
self.assertEqual(response.status_code, 200)
user = User.objects.get(username='testsubject1')
soup = BeautifulSoup(response.content, 'html5lib')
self.assertIn(
user.username,
soup.find('div', id='logged_in_user').text
)
self.assertFalse(soup.find('div', text='Logout'))
def test_logout(self):
client = Client()
client.login(username='testsubject1', password='longfallboots')
response = client.get('/accounts/logout/')
self.assertEqual(response.status_code, 200)
soup = BeautifulSoup(response.content, 'html5lib')
self.assertTrue(soup.find('a', text='Login'))
self.assertFalse(soup.find('a', text='Logout'))
class RegistrationTestCase(TestCase):
def setUp(self):
user = UserFactory()
user.set_password('longfallboots')
user.save()
def test_new_registration(self):
client = Client()
username = 'companion_cube'
email = 'do_not_incinerate@mailinator.com'
response = client.post(
'/accounts/register/',
{
'username': username,
'email': email,
'password1': 'iamsentient',
'password2': 'iamsentient',
},
follow=True,
)
user = User.objects.get(username=username)
self.assertFalse(user.is_active)
# check redirect page works
soup = BeautifulSoup(response.content, 'html5lib')
self.assertTrue(soup.find(
text="Please check your email to "
"complete the registration process."
))
# check mail.outbox
domain = 'example.com'
link_lead = domain + '/accounts/activate/'
self.assertEqual(len(mail.outbox), 1)
self.assertIn(domain, mail.outbox[0].body)
self.assertIn(link_lead, mail.outbox[0].body)
self.assertIn(email, mail.outbox[0].recipients())
def test_existing_registration(self):
client = Client()
user = User.objects.get(username='testsubject1')
response = client.post(
'/accounts/register/',
{
'username': user.username,
'email': user.email,
'password1': 'longfallboots',
'password2': 'longfallboots',
},
follow=True,
)
self.assertIn(
'A user with that username already exists',
response.content
)
|
{
"content_hash": "f1b99c52425f69c7a9afc799fe625f15",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 71,
"avg_line_length": 31.83783783783784,
"alnum_prop": 0.5789473684210527,
"repo_name": "gatita/django-imager",
"id": "7ab587967ae7bf7ea62f1a49bed7bd8bb3c7a907",
"size": "3534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imagersite/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10537"
},
{
"name": "HTML",
"bytes": "27190"
},
{
"name": "JavaScript",
"bytes": "54533"
},
{
"name": "Python",
"bytes": "58895"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2016-2020 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# Hervé BREDIN - http://herve.niderb.fr
import yaml
from pathlib import Path
import warnings
import pandas as pd
from pyannote.core import Segment, Timeline, Annotation
from .protocol.protocol import ProtocolFile
from .config import get_database_yml
from typing import Text
from typing import Union
from typing import Dict
from typing import List
DatabaseName = Text
PathTemplate = Text
class PyannoteDatabaseException(Exception):
pass
class FileFinder:
"""Database file finder
Parameters
----------
database_yml : str, optional
Path to database configuration file in YAML format (see below).
When not provided, pyannote.database will first use file 'database.yml'
in current working directory if it exists. If it does not exist, it will
use the path provided by the PYANNOTE_DATABASE_CONFIG environment
variable. If empty or not set, defaults to '~/.pyannote/database.yml'.
Configuration file
------------------
Here are a few examples of what is expected in the configuration file.
# support for multiple databases
database1: /path/to/database1/{uri}.wav
database2: /path/to/database2/{uri}.wav
# files are spread over multiple directory
database3:
- /path/to/database3/1/{uri}.wav
- /path/to/database3/2/{uri}.wav
# supports * (and **) globbing
database4: /path/to/database4/*/{uri}.wav
See also
--------
pathlib.Path.glob
"""
def __init__(self, database_yml: Text = None):
super().__init__()
self.database_yml = get_database_yml(database_yml=database_yml)
with open(self.database_yml, "r") as fp:
config = yaml.load(fp, Loader=yaml.SafeLoader)
self.config_: Dict[DatabaseName, Union[PathTemplate, List[PathTemplate]]] = {
str(database): path
for database, path in config.get("Databases", dict()).items()
}
def __call__(self, current_file: ProtocolFile) -> Path:
"""Look for current file
Parameter
---------
current_file : ProtocolFile
Protocol file.
Returns
-------
path : Path
Path to file.
Raises
------
FileNotFoundError when the file could not be found or when more than one
matching file were found.
"""
uri = current_file["uri"]
database = current_file["database"]
# read
path_templates = self.config_[database]
if isinstance(path_templates, Text):
path_templates = [path_templates]
searched = []
found = []
for path_template in path_templates:
path = Path(path_template.format(uri=uri, database=database))
if not path.is_absolute():
path = self.database_yml.parent / path
searched.append(path)
# paths with "*" or "**" patterns are split into two parts,
# - the root part (from the root up to the first occurrence of *)
# - the pattern part (from the first occurrence of * to the end)
# which is looked for (inside root) using Path.glob
# Example with path = '/path/to/**/*/file.wav'
# root = '/path/to'
# pattern = '**/*/file.wav'
if "*" in str(path):
parts = path.parent.parts
for p, part in enumerate(parts):
if "*" in part:
break
root = path.parents[len(parts) - p]
pattern = str(path.relative_to(root))
found_ = root.glob(pattern)
found.extend(found_)
# a path without "*" patterns is supposed to be an actual file
elif path.is_file():
found.append(path)
if len(found) == 1:
return found[0]
if len(found) == 0:
msg = f'Could not find file "{uri}" in the following location(s):'
for path in searched:
msg += f"\n - {path}"
raise FileNotFoundError(msg)
if len(found) > 1:
msg = (
f'Looked for file "{uri}" and found more than one '
f"({len(found)}) matching locations: "
)
for path in found:
msg += f"\n - {path}"
raise FileNotFoundError(msg)
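# Usage sketch (illustrative only, not part of the public API). It assumes a hypothetical
# database.yml containing:
#   Databases:
#     MyDatabase: /data/{uri}.wav
# The helper below is never called by the library; names and paths are assumptions.
def _file_finder_usage_example():
    finder = FileFinder(database_yml="database.yml")
    current_file = {"uri": "recording01", "database": "MyDatabase"}
    return finder(current_file)  # expected: Path("/data/recording01.wav")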
def get_unique_identifier(item):
"""Return unique item identifier
The complete format is {database}/{uri}_{channel}:
* prefixed by "{database}/" only when `item` has a 'database' key.
* suffixed by "_{channel}" only when `item` has a 'channel' key.
Parameters
----------
item : dict
Item as yielded by pyannote.database protocols
Returns
-------
identifier : str
Unique item identifier
"""
IDENTIFIER = ""
# {database}/{uri}_{channel}
database = item.get("database", None)
if database is not None:
IDENTIFIER += f"{database}/"
IDENTIFIER += item['uri']
channel = item.get("channel", None)
if channel is not None:
IDENTIFIER += f"_{channel:d}"
return IDENTIFIER
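# Example (illustrative): {"database": "MyDB", "uri": "file1", "channel": 2} yields
# "MyDB/file1_2"; an item with only a "uri" key yields just "file1".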
# This function is used in custom.py
def get_annotated(current_file):
"""Get part of the file that is annotated.
Parameters
----------
current_file : `dict`
File generated by a `pyannote.database` protocol.
Returns
-------
annotated : `pyannote.core.Timeline`
Part of the file that is annotated. Defaults to
`current_file["annotated"]`. When it does not exist, try to use the
full audio extent. When that fails, use "annotation" extent.
"""
# if protocol provides 'annotated' key, use it
if "annotated" in current_file:
annotated = current_file["annotated"]
return annotated
# if it does not, but does provide 'audio' key
# try and use wav duration
if "duration" in current_file:
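        # "duration" may be provided by a lazy preprocessor; resolving it can raise
        # ImportError when the underlying audio backend is missing (assumed intent).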
try:
duration = current_file["duration"]
except ImportError:
pass
else:
annotated = Timeline([Segment(0, duration)])
msg = '"annotated" was approximated by [0, audio duration].'
warnings.warn(msg)
return annotated
extent = current_file["annotation"].get_timeline().extent()
annotated = Timeline([extent])
msg = (
'"annotated" was approximated by "annotation" extent. '
'Please provide "annotated" directly, or at the very '
'least, use a "duration" preprocessor.'
)
warnings.warn(msg)
return annotated
def get_label_identifier(label, current_file):
"""Return unique label identifier
Parameters
----------
label : str
Database-internal label
current_file
Yielded by pyannote.database protocols
Returns
-------
unique_label : str
Global label
"""
    # TODO. when the "true" name of a person is used,
    # do not prepend database name.
database = current_file["database"]
return database + "|" + label
def load_rttm(file_rttm, keep_type="SPEAKER"):
"""Load RTTM file
Parameter
---------
file_rttm : `str`
Path to RTTM file.
keep_type : str, optional
Only keep lines with this type (field #1 in RTTM specs).
Defaults to "SPEAKER".
Returns
-------
annotations : `dict`
Speaker diarization as a {uri: pyannote.core.Annotation} dictionary.
"""
names = [
"type",
"uri",
"NA2",
"start",
"duration",
"NA3",
"NA4",
"speaker",
"NA5",
"NA6",
]
dtype = {"uri": str, "start": float, "duration": float, "speaker": str}
data = pd.read_csv(
file_rttm,
names=names,
dtype=dtype,
delim_whitespace=True,
keep_default_na=True,
)
annotations = dict()
for uri, turns in data.groupby("uri"):
annotation = Annotation(uri=uri)
for i, turn in turns.iterrows():
if turn.type != keep_type:
continue
segment = Segment(turn.start, turn.start + turn.duration)
annotation[segment, i] = turn.speaker
annotations[uri] = annotation
return annotations
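# Usage sketch (illustrative only): load a hypothetical "reference.rttm" file and inspect
# the speaker labels of each returned pyannote.core.Annotation. The helper is never called.
def _load_rttm_usage_example():
    annotations = load_rttm("reference.rttm")
    for uri, annotation in annotations.items():
        print(uri, annotation.labels())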
def load_stm(file_stm):
"""Load STM file (speaker-info only)
Parameter
---------
file_stm : str
Path to STM file
Returns
-------
annotations : `dict`
Speaker diarization as a {uri: pyannote.core.Annotation} dictionary.
"""
dtype = {"uri": str, "speaker": str, "start": float, "end": float}
data = pd.read_csv(
file_stm,
delim_whitespace=True,
usecols=[0, 2, 3, 4],
dtype=dtype,
names=list(dtype))
annotations = dict()
for uri, turns in data.groupby("uri"):
annotation = Annotation(uri=uri)
for i, turn in turns.iterrows():
segment = Segment(turn.start, turn.end)
annotation[segment, i] = turn.speaker
annotations[uri] = annotation
return annotations
def load_mdtm(file_mdtm):
"""Load MDTM file
Parameter
---------
file_mdtm : `str`
Path to MDTM file.
Returns
-------
annotations : `dict`
Speaker diarization as a {uri: pyannote.core.Annotation} dictionary.
"""
names = ["uri", "NA1", "start", "duration", "NA2", "NA3", "NA4", "speaker"]
dtype = {"uri": str, "start": float, "duration": float, "speaker": str}
data = pd.read_csv(
file_mdtm,
names=names,
dtype=dtype,
delim_whitespace=True,
keep_default_na=False,
)
annotations = dict()
for uri, turns in data.groupby("uri"):
annotation = Annotation(uri=uri)
for i, turn in turns.iterrows():
segment = Segment(turn.start, turn.start + turn.duration)
annotation[segment, i] = turn.speaker
annotations[uri] = annotation
return annotations
def load_uem(file_uem):
"""Load UEM file
Parameter
---------
file_uem : `str`
Path to UEM file.
Returns
-------
timelines : `dict`
Evaluation map as a {uri: pyannote.core.Timeline} dictionary.
"""
names = ["uri", "NA1", "start", "end"]
dtype = {"uri": str, "start": float, "end": float}
data = pd.read_csv(file_uem, names=names, dtype=dtype, delim_whitespace=True)
timelines = dict()
for uri, parts in data.groupby("uri"):
segments = [Segment(part.start, part.end) for i, part in parts.iterrows()]
timelines[uri] = Timeline(segments=segments, uri=uri)
return timelines
def load_lab(path, uri: str = None) -> Annotation:
"""Load LAB file
Parameter
---------
file_lab : `str`
Path to LAB file
Returns
-------
data : `pyannote.core.Annotation`
"""
names = ["start", "end", "label"]
dtype = {"start": float, "end": float, "label": str}
data = pd.read_csv(path, names=names, dtype=dtype, delim_whitespace=True)
annotation = Annotation(uri=uri)
for i, turn in data.iterrows():
segment = Segment(turn.start, turn.end)
annotation[segment, i] = turn.label
return annotation
def load_lst(file_lst):
"""Load LST file
LST files provide a list of URIs (one line per URI)
Parameter
---------
file_lst : `str`
Path to LST file.
Returns
-------
uris : `list`
List or uris
"""
with open(file_lst, mode="r") as fp:
lines = fp.readlines()
return [line.strip() for line in lines]
def load_mapping(mapping_txt):
"""Load mapping file
Parameter
---------
mapping_txt : `str`
Path to mapping file
Returns
-------
mapping : `dict`
{1st field: 2nd field} dictionary
"""
with open(mapping_txt, mode="r") as fp:
lines = fp.readlines()
mapping = dict()
for line in lines:
key, value, *left = line.strip().split()
mapping[key] = value
return mapping
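# Example (illustrative): a mapping file line "spk01 Alice" yields {"spk01": "Alice"};
# any extra fields after the second one are ignored.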
class LabelMapper(object):
"""Label mapper for use as pyannote.database preprocessor
Parameters
----------
mapping : `dict`
Mapping dictionary as used in `Annotation.rename_labels()`.
keep_missing : `bool`, optional
In case a label has no mapping, a `ValueError` will be raised.
Set "keep_missing" to True to keep those labels unchanged instead.
Usage
-----
>>> mapping = {'Hadrien': 'MAL', 'Marvin': 'MAL',
... 'Wassim': 'CHI', 'Herve': 'GOD'}
>>> preprocessors = {'annotation': LabelMapper(mapping=mapping)}
>>> protocol = get_protocol('AMI.SpeakerDiarization.MixHeadset',
preprocessors=preprocessors)
"""
def __init__(self, mapping, keep_missing=False):
self.mapping = mapping
self.keep_missing = keep_missing
def __call__(self, current_file):
if not self.keep_missing:
missing = set(current_file["annotation"].labels()) - set(self.mapping)
            if missing:
label = missing.pop()
msg = (
f'No mapping found for label "{label}". Set "keep_missing" '
f"to True to keep labels with no mapping."
)
raise ValueError(msg)
return current_file["annotation"].rename_labels(mapping=self.mapping)
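# Minimal standalone sketch (labels and path are illustrative only): besides
# its use as a pyannote.database preprocessor, LabelMapper can be applied
# directly to a file dictionary whose "annotation" entry comes from one of the
# loaders above; keep_missing=True leaves unmapped labels untouched instead of
# raising ValueError.
def _example_label_mapper():
    mapping = {"spk_A": "MAL", "spk_B": "FEM"}
    current_file = {"annotation": load_mdtm("hypothesis.mdtm")["rec01"]}
    return LabelMapper(mapping, keep_missing=True)(current_file)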
|
{
"content_hash": "612173ab3fff78dfdb2278cb7e428076",
"timestamp": "",
"source": "github",
"line_count": 531,
"max_line_length": 85,
"avg_line_length": 27.657250470809792,
"alnum_prop": 0.5843660629170638,
"repo_name": "pyannote/pyannote-database",
"id": "8a7de9cc1884ff51e36d4c6e8fa1128fa7e38947",
"size": "14687",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pyannote/database/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201733"
}
],
"symlink_target": ""
}
|
import os
from subprocess import Popen, PIPE
import networkx as nx
import matplotlib.pyplot as plt
def solve_it(input_data):
    # Writes the input data to a temporary file
tmp_file_name = 'tmp.data'
tmp_file = open(tmp_file_name, 'w')
tmp_file.write(input_data)
tmp_file.close()
    # Runs the solver binary, feeding it the temporary file on stdin: ./Solver < tmp.data
tmp_file = open(tmp_file_name, 'r')
process = Popen(['./Solver'], stdin=tmp_file, stdout=PIPE)
(stdout, stderr) = process.communicate()
# print stdout
    # removes the temporary file
os.remove(tmp_file_name)
# G = nx.Graph()
# lines = input_data.split('\n')
# [N,E] = lines[0].split()
# G.add_nodes_from(range(int(N)))
# for l in lines[1:-1]:
# a = l.split(' ')
# G.add_edge(int(a[0]),int(a[1]))
# nx.draw(G)
# plt.show()
return stdout.strip()
import sys
if __name__ == '__main__':
if len(sys.argv) > 1:
file_location = sys.argv[1].strip()
input_data_file = open(file_location, 'r')
input_data = ''.join(input_data_file.readlines())
input_data_file.close()
        print(solve_it(input_data))
    else:
        print('This test requires an input file. Please select one from the data directory. (e.g. python solver.py ./data/gc_4_1)')
|
{
"content_hash": "8703f009c10bc36846fc160b78e035a6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 131,
"avg_line_length": 28.08695652173913,
"alnum_prop": 0.6006191950464397,
"repo_name": "srmanikandasriram/discrete-optimization-coursera",
"id": "20cea6a26b696bfae30d5cb3ff8ae0e172d554fa",
"size": "1335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "coloring/solver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "9117"
},
{
"name": "Java",
"bytes": "2525"
},
{
"name": "Python",
"bytes": "40023"
}
],
"symlink_target": ""
}
|
__author__ = 'vden'
from model.group import Group
from model.contact import Contact
import random
def test_add_contact_in_group(app, orm):
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="test"))
groups = orm.get_group_list()
group = random.choice(groups)
index = 0
for m in groups:
if m.id == group.id:
            break
index = index+1
if len(orm.get_contacts_not_in_group(group)) == 0:
app.contact.create(Contact(firstname="test"))
contacts = orm.get_contacts_not_in_group(group)
contact = random.choice(contacts)
old_contacts = orm.get_contacts_in_group(group)
app.contact.add_contact_in_group(index+1, contact)
new_contacts = orm.get_contacts_in_group(group)
old_contacts.append(contact)
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
|
{
"content_hash": "a65644ebd619ba7d12abd58bc511a3b3",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 101,
"avg_line_length": 33.370370370370374,
"alnum_prop": 0.6625971143174251,
"repo_name": "vdenPython/python_training",
"id": "4f0ac71d039d9041ab483260b4f8560433564876",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/teat_add_contact_in_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35693"
}
],
"symlink_target": ""
}
|
"""
Pagination fields
"""
# pylint: disable=no-init, too-few-public-methods, no-self-use
from collections import OrderedDict
from rest_framework import serializers
from rest_framework import pagination
from rest_framework.views import Response
from rest_framework.templatetags.rest_framework import replace_query_param
# DRF 2.4.X compatibility.
ReadOnlyField = getattr(serializers, 'ReadOnlyField', serializers.Field)
class NextPageLinkField(ReadOnlyField):
"""
Field that returns a link to the next page in paginated results.
"""
page_field = 'page'
def to_representation(self, value):
if not value.has_next():
return None
page = value.next_page_number()
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
return replace_query_param(url, self.page_field, page)
class NextPageField(ReadOnlyField):
"""
    Field that returns the next page number in paginated results.
"""
page_field = 'page'
def to_representation(self, value):
if not value.has_next():
return None
return value.next_page_number()
class PreviousPageLinkField(ReadOnlyField):
"""
Field that returns a link to the previous page in paginated results.
"""
page_field = 'page'
def to_representation(self, value):
if not value.has_previous():
return None
page = value.previous_page_number()
request = self.context.get('request')
url = request and request.build_absolute_uri() or ''
return replace_query_param(url, self.page_field, page)
class PreviousPageField(ReadOnlyField):
"""
Field that returns the previous page number in paginated results.
"""
page_field = 'page'
def to_representation(self, value):
if not value.has_previous():
return None
return value.previous_page_number()
class PageField(ReadOnlyField):
"""
Field that returns the current page number in paginated results.
"""
page_field = 'page'
def to_representation(self, value):
return value.number
# compatibility for DRF 3.0 and older
try:
BasePagination = pagination.PageNumberPagination
except AttributeError:
BasePagination = pagination.BasePaginationSerializer
class PaginationSerializer(BasePagination):
"""
Pagination serializer.
"""
next = NextPageField(source='*')
next_link = NextPageLinkField(source='*')
page = PageField(source='*')
previous = PreviousPageField(source='*')
previous_link = PreviousPageLinkField(source='*')
count = ReadOnlyField(source='paginator.count')
total = ReadOnlyField(source='paginator.num_pages')
class EmberPaginationSerializer(PaginationSerializer):
"""
Backwards compatibility for name change
"""
pass
class PageNumberPagination(BasePagination):
"""
A json-api compatible pagination format
"""
def build_link(self, index):
if not index:
return None
url = self.request and self.request.build_absolute_uri() or ''
return replace_query_param(url, 'page', index)
def get_paginated_response(self, data):
next = None
previous = None
if self.page.has_next():
next = self.page.next_page_number()
if self.page.has_previous():
previous = self.page.previous_page_number()
return Response({
'results': data,
'meta': {
'pagination': OrderedDict([
('page', self.page.number),
('pages', self.page.paginator.num_pages),
('count', self.page.paginator.count),
])
},
'links': OrderedDict([
('first', self.build_link(1)),
('last', self.build_link(self.page.paginator.num_pages)),
('next', self.build_link(next)),
('prev', self.build_link(previous))
])
})
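# Shape sketch of the payload built by get_paginated_response above (URLs and
# numbers are illustrative only): "results" carries the serialized page,
# "meta.pagination" the page/pages/count integers, and "links" the
# first/last/next/prev URLs with the "page" query parameter rewritten, e.g.
#   {
#       "results": [...],
#       "meta": {"pagination": {"page": 2, "pages": 5, "count": 42}},
#       "links": {
#           "first": "https://api.example.org/items?page=1",
#           "last": "https://api.example.org/items?page=5",
#           "next": "https://api.example.org/items?page=3",
#           "prev": "https://api.example.org/items?page=1"
#       }
#   }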
|
{
"content_hash": "102f52597f8118a1b5f320a3ca534825",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 74,
"avg_line_length": 28.401408450704224,
"alnum_prop": 0.6273245722787008,
"repo_name": "hnakamur/django-rest-framework-json-api",
"id": "85d660af6b512fd3d320e06fec665aee311c2aee",
"size": "4033",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "rest_framework_json_api/pagination.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "347076"
}
],
"symlink_target": ""
}
|
import sqlalchemy
from sqlalchemy import Column
from pp.db import Base
class TestTable(Base):
__tablename__ = 'test'
id = Column(sqlalchemy.types.String(36), primary_key=True, nullable=False, unique=True)
foo = Column(sqlalchemy.types.String(200), nullable=False)
def init():
"""Called to do the initial metadata set up.
Returns a list of the tables, mappers and declarative base classes this
module implements.
"""
declarative_bases = [TestTable]
tables = []
mappers = []
return (declarative_bases, tables, mappers)
|
{
"content_hash": "bd08ee079435788c9e6d8c7990c8eb01",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 91,
"avg_line_length": 25.772727272727273,
"alnum_prop": 0.6931216931216931,
"repo_name": "pythonpro-dev/pp-db",
"id": "b5e7d66a7cf6da8b8ae4c93f09bb74034b85e3e4",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pp/db/tests/backup_test_db.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27198"
}
],
"symlink_target": ""
}
|
"""Starter script for All cinder services.
This script attempts to start all the cinder services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import eventlet
eventlet.monkey_patch()
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from cinder import i18n
i18n.enable_lazy()
# Need to register global_opts
from cinder.cmd import volume as volume_cmd
from cinder.common import config # noqa
from cinder.db import api as session
from cinder.i18n import _LE, _, _LW
from cinder import objects
from cinder import rpc
from cinder import service
from cinder import utils
from cinder import version
CONF = cfg.CONF
# TODO(e0ne): get rid of code duplication in cinder.cmd module in Mitaka
def main():
objects.register_all()
gmr_opts.set_defaults(CONF)
CONF(sys.argv[1:], project='cinder',
version=version.version_string())
config.set_middleware_defaults()
logging.setup(CONF, "cinder")
LOG = logging.getLogger('cinder.all')
versionutils.report_deprecated_feature(LOG, _(
'cinder-all is deprecated in Newton and will be removed in Ocata.'))
utils.monkey_patch()
gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
rpc.init(CONF)
launcher = service.process_launcher()
# cinder-api
try:
server = service.WSGIService('osapi_volume')
launcher.launch_service(server, workers=server.workers or 1)
except (Exception, SystemExit):
LOG.exception(_LE('Failed to load osapi_volume'))
for binary in ['cinder-scheduler', 'cinder-backup']:
try:
launcher.launch_service(service.Service.create(binary=binary))
except (Exception, SystemExit):
LOG.exception(_LE('Failed to load %s'), binary)
# cinder-volume
try:
if CONF.enabled_backends:
for backend in CONF.enabled_backends:
CONF.register_opt(volume_cmd.host_opt, group=backend)
backend_host = getattr(CONF, backend).backend_host
host = "%s@%s" % (backend_host or CONF.host, backend)
server = service.Service.create(host=host,
service_name=backend,
binary='cinder-volume',
coordination=True)
# Dispose of the whole DB connection pool here before
# starting another process. Otherwise we run into cases
# where child processes share DB connections which results
# in errors.
session.dispose_engine()
launcher.launch_service(server)
else:
LOG.warning(_LW('Configuration for cinder-volume does not specify '
'"enabled_backends", using DEFAULT as backend. '
'Support for DEFAULT section to configure drivers '
'will be removed in the next release.'))
server = service.Service.create(binary='cinder-volume',
coordination=True)
launcher.launch_service(server)
except (Exception, SystemExit):
LOG.exception(_LE('Failed to load cinder-volume'))
launcher.wait()
|
{
"content_hash": "200cf70b4b4b6d7fc616a6f9f3da4c3c",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 36.43434343434343,
"alnum_prop": 0.6343221513723316,
"repo_name": "bswartz/cinder",
"id": "a5708f05bc3693caa79ca8fc42f0d507576bc2c0",
"size": "4392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/cmd/all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16345375"
},
{
"name": "Shell",
"bytes": "8187"
}
],
"symlink_target": ""
}
|
class Query:
def __init__(self):
self._parameters = {}
self._aliased_parameters = {}
self.param_order = None
self.orders = []
def add(self, name, **kwargs):
return self.add_param(
Parameter(name=name, **kwargs)
)
@property
def params(self):
return self._parameters.values()
def add_param(self, parameter):
self._parameters[parameter.name] = parameter
self._aliased_parameters[parameter.alias] = parameter
def has_param(self, name):
return name in self._parameters
def get_param(self, name):
return self._parameters[name]
def get_aliased_param(self, name):
return self._aliased_parameters[name]
def has_param_with_alias(self, name):
return name in self._aliased_parameters
def add_order(self, order):
self.orders.append(order)
class Parameter:
def __init__(self, name, **kwargs):
self.name = name
self.filter = kwargs.get('filter', None)
self.value = kwargs.get('value', None)
self.alias = kwargs.get('alias', name)
def __eq__(self, other):
other = getattr(other, 'name', other)
return other == self.name
class Order:
def __init__(self, param, direction=None):
self.name = param
self._direction = 'asc'
self.direction = direction
@property
def direction(self):
return self._direction
@direction.setter
def direction(self, value):
if value not in ('asc', 'desc'):
value = 'asc'
self._direction = value
class BindingOperation:
def __init__(self, left, right):
self.left = left
self.right = right
def __eq__(self, other):
return (
self.left == other.left and
self.right == other.right
)
class And(BindingOperation):
def __eq__(self, other):
if not isinstance(other, And):
return False
return super().__eq__(other)
class Or(BindingOperation):
def __eq__(self, other):
if not isinstance(other, Or):
return False
return super().__eq__(other)
class Not:
def __init__(self, inner):
self.inner = inner
def __eq__(self, other):
if not isinstance(other, Not):
return False
return self.inner == other.inner
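# Minimal usage sketch (parameter and alias names are illustrative only):
# building a query by hand with the value objects defined above.
def _example_query():
    query = Query()
    query.add('name', filter='like', value='doe%', alias='n')
    query.add('age', filter='gt', value='21')
    query.add_order(Order('name', direction='desc'))
    # boolean expression over parameter aliases
    query.param_order = And('n', Not('age'))
    return query.has_param_with_alias('n'), query.get_param('age').value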
|
{
"content_hash": "a7f1d292e394d258fd7c4d5dd0af3775",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 61,
"avg_line_length": 24.151515151515152,
"alnum_prop": 0.5692179004600586,
"repo_name": "cbrand/python-filterparams",
"id": "8a4fc30600950e8c3c091e7bd6094aee89cb6dce",
"size": "2419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/filterparams/obj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25230"
}
],
"symlink_target": ""
}
|
import signal
def initialize_worker():
"""Initialize Worker
Sets worker processes to ignore SIGINT.
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
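# Minimal usage sketch (pool size and work items are illustrative only):
# passing initialize_worker as the Pool initializer keeps SIGINT handling in
# the parent, so Ctrl-C interrupts the coordinating process rather than the
# workers.
def _example_pool():
    import multiprocessing
    pool = multiprocessing.Pool(processes=2, initializer=initialize_worker)
    try:
        return pool.map(abs, [-1, 2, -3])
    finally:
        pool.close()
        pool.join()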
|
{
"content_hash": "0789c0458e7d099b564af87f7d665ebe",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 45,
"avg_line_length": 15.6,
"alnum_prop": 0.7435897435897436,
"repo_name": "kalail/queen",
"id": "f2a0e3d764055e6387e482fccdf374c9d12de90d",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "queen/helpers/process.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27344"
},
{
"name": "Shell",
"bytes": "5094"
}
],
"symlink_target": ""
}
|
"""The Exponential distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import gamma
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
class Exponential(gamma.Gamma):
"""The Exponential distribution with rate parameter lam.
The PDF of this distribution is:
```pdf(x) = (lam * e^(-lam * x)), x > 0```
Note that the Exponential distribution is a special case of the Gamma
distribution, with Exponential(lam) = Gamma(1, lam).
"""
def __init__(self, lam, name="Exponential"):
with ops.op_scope([lam], name, "init"):
lam = ops.convert_to_tensor(lam)
self._lam = lam
super(Exponential, self).__init__(
alpha=math_ops.cast(1.0, dtype=lam.dtype),
beta=lam)
@property
def lam(self):
return self._lam
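# Minimal usage sketch (rates are illustrative only): the constructor converts
# plain Python values to tensors, and Exponential(lam) behaves as
# Gamma(alpha=1, beta=lam), so methods inherited from the Gamma base class
# apply unchanged.
def _example_exponential():
  dist = Exponential(lam=[1.0, 2.5])
  return dist.lam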
|
{
"content_hash": "71bd8802784b40fd5a4637968b6e01ff",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 71,
"avg_line_length": 28.151515151515152,
"alnum_prop": 0.6803013993541442,
"repo_name": "petewarden/tensorflow_makefile",
"id": "4652e6b3ec7da57c4a00e877af8f905ebd8acb22",
"size": "1606",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/python/ops/exponential.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156010"
},
{
"name": "C++",
"bytes": "9202688"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "773228"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773330"
},
{
"name": "Makefile",
"bytes": "7908"
},
{
"name": "Protocol Buffer",
"bytes": "111893"
},
{
"name": "Python",
"bytes": "6457132"
},
{
"name": "Shell",
"bytes": "167245"
},
{
"name": "TypeScript",
"bytes": "409165"
}
],
"symlink_target": ""
}
|
default_app_config = 'django_yubikey_admin.apps.YubiKeyConfig'
|
{
"content_hash": "d63db32a8816327f54c3da2356019621",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 62,
"avg_line_length": 63,
"alnum_prop": 0.8095238095238095,
"repo_name": "LyricalSecurity/django-yubikey-admin",
"id": "4597817453ff821ac999e3c12d58cf45cd046e40",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_yubikey_admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5746"
},
{
"name": "Python",
"bytes": "6617"
}
],
"symlink_target": ""
}
|
import abc
import inspect
import importlib_metadata
from stevedore import extension
from cliff import _argparse
_dists_by_mods = None
def _get_distributions_by_modules():
"""Return dict mapping module name to distribution names.
The python package name (the name used for importing) and the
distribution name (the name used with pip and PyPI) do not
always match. We want to report which distribution caused the
command to be installed, so we need to look up the values.
"""
global _dists_by_mods
if _dists_by_mods is None:
        # There can be multiple distributions in the case of namespace packages
# so we'll just grab the first one
_dists_by_mods = {
k: v[0] for k, v in
importlib_metadata.packages_distributions().items()
}
return _dists_by_mods
def _get_distribution_for_module(module):
"Return the distribution containing the module."
dist_name = None
if module:
pkg_name = module.__name__.partition('.')[0]
dist_name = _get_distributions_by_modules().get(pkg_name)
return dist_name
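# Minimal lookup sketch (the module picked is for illustration only): PyYAML is
# a distribution whose import name ("yaml") differs from its project name, so
# the helpers above would report "PyYAML" as the provider.
def _example_distribution_lookup():
    import yaml  # assumed to be installed for this sketch
    return _get_distribution_for_module(yaml)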
class Command(object, metaclass=abc.ABCMeta):
"""Base class for command plugins.
When the command is instantiated, it loads extensions from a
namespace based on the parent application namespace and the
command name::
app.namespace + '.' + cmd_name.replace(' ', '_')
:param app: Application instance invoking the command.
:paramtype app: cliff.app.App
"""
deprecated = False
conflict_handler = 'ignore'
_description = ''
_epilog = None
def __init__(self, app, app_args, cmd_name=None):
self.app = app
self.app_args = app_args
self.cmd_name = cmd_name
self._load_hooks()
def _load_hooks(self):
# Look for command extensions
if self.app and self.cmd_name:
namespace = '{}.{}'.format(
self.app.command_manager.namespace,
self.cmd_name.replace(' ', '_')
)
self._hooks = extension.ExtensionManager(
namespace=namespace,
invoke_on_load=True,
invoke_kwds={
'command': self,
},
)
else:
# Setting _hooks to an empty list allows iteration without
# checking if there are hooks every time.
self._hooks = []
return
def get_description(self):
"""Return the command description.
The default is to use the first line of the class' docstring
as the description. Set the ``_description`` class attribute
to a one-line description of a command to use a different
value. This is useful for enabling translations, for example,
with ``_description`` set to a string wrapped with a gettext
translation marker.
"""
# NOTE(dhellmann): We need the trailing "or ''" because under
# Python 2.7 the default for the docstring is None instead of
# an empty string, and we always want this method to return a
# string.
desc = self._description or inspect.getdoc(self.__class__) or ''
# The base class command description isn't useful for any
# real commands, so ignore that value.
if desc == inspect.getdoc(Command):
desc = ''
return desc
def get_epilog(self):
"""Return the command epilog."""
# replace a None in self._epilog with an empty string
parts = [self._epilog or '']
hook_epilogs = filter(
None,
(h.obj.get_epilog() for h in self._hooks),
)
parts.extend(hook_epilogs)
app_dist_name = getattr(
self, 'app_dist_name', _get_distribution_for_module(
inspect.getmodule(self.app)
)
)
dist_name = _get_distribution_for_module(inspect.getmodule(self))
if dist_name and dist_name != app_dist_name:
parts.append(
'This command is provided by the %s plugin.' %
(dist_name,)
)
return '\n\n'.join(parts)
def get_parser(self, prog_name):
"""Return an :class:`argparse.ArgumentParser`.
"""
parser = _argparse.ArgumentParser(
description=self.get_description(),
epilog=self.get_epilog(),
prog=prog_name,
formatter_class=_argparse.SmartHelpFormatter,
conflict_handler=self.conflict_handler,
)
for hook in self._hooks:
hook.obj.get_parser(parser)
return parser
@abc.abstractmethod
def take_action(self, parsed_args):
"""Override to do something useful.
The returned value will be returned by the program.
"""
def run(self, parsed_args):
"""Invoked by the application when the command is run.
Developers implementing commands should override
:meth:`take_action`.
Developers creating new command base classes (such as
:class:`Lister` and :class:`ShowOne`) should override this
method to wrap :meth:`take_action`.
Return the value returned by :meth:`take_action` or 0.
"""
parsed_args = self._run_before_hooks(parsed_args)
return_code = self.take_action(parsed_args) or 0
return_code = self._run_after_hooks(parsed_args, return_code)
return return_code
def _run_before_hooks(self, parsed_args):
"""Calls before() method of the hooks.
This method is intended to be called from the run() method before
take_action() is called.
This method should only be overridden by developers creating new
command base classes and only if it is necessary to have different
hook processing behavior.
"""
for hook in self._hooks:
ret = hook.obj.before(parsed_args)
# If the return is None do not change parsed_args, otherwise
# set up to pass it to the next hook
if ret is not None:
parsed_args = ret
return parsed_args
def _run_after_hooks(self, parsed_args, return_code):
"""Calls after() method of the hooks.
This method is intended to be called from the run() method after
take_action() is called.
This method should only be overridden by developers creating new
command base classes and only if it is necessary to have different
hook processing behavior.
"""
for hook in self._hooks:
ret = hook.obj.after(parsed_args, return_code)
# If the return is None do not change return_code, otherwise
# set up to pass it to the next hook
if ret is not None:
return_code = ret
return return_code
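# Minimal subclassing sketch (command and option names are illustrative, not
# part of cliff): concrete commands typically extend the parser returned by
# get_parser and implement take_action; writing to self.app.stdout assumes the
# usual cliff.app.App output stream.
class _HelloWorld(Command):
    """Say hello (illustrative example command)."""
    def get_parser(self, prog_name):
        parser = super(_HelloWorld, self).get_parser(prog_name)
        parser.add_argument('--name', default='world')
        return parser
    def take_action(self, parsed_args):
        self.app.stdout.write('hello, %s\n' % parsed_args.name)
        return 0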
|
{
"content_hash": "6a664b63f1f51ee1fadb0814b6caf674",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 78,
"avg_line_length": 34.14356435643565,
"alnum_prop": 0.6011309264897782,
"repo_name": "openstack/cliff",
"id": "f8e38ad5bb6cd03f261ab95d6775551a6f502d10",
"size": "7452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cliff/command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249070"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from datetime import datetime
from mock import patch
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from proccer.database import Job, JobResult
from proccer.t.testing import setup_module
from proccer.t.test_mail import ok_result
from proccer.app import app
session = None
def test_index():
job = Job.create(session, 'foo', 'bar', 'baz')
job.state = 'ok'
job.last_stamp = job.last_seen = datetime(1979, 7, 9)
client = Client(app, BaseResponse)
resp = client.get('/')
assert resp.status_code == 200
def test_index_reason():
job = Job.create(session, 'foo', 'bar', 'baz')
job.state = 'error'
job.reason = 'signal'
job.last_stamp = job.last_seen = datetime(1979, 7, 9)
job = Job.create(session, 'narf', 'flag', 'bo')
job.state = 'ok'
job.last_stamp = job.last_seen = datetime(1979, 7, 10)
client = Client(app, BaseResponse)
resp = client.get('/')
assert resp.status_code == 200
def test_get_job_ok():
job = get_job(session)
JobResult.create(
job=job, clock_ms=117, result={'ok': True}, rusage={},
output='Hello, World!')
def test_get_job_signal():
job = get_job(session, state='error')
inner_result = {
'ok': False,
'reason': 'signal',
'signo': 12345,
}
r = JobResult.create(
job=job, clock_ms=118, result=inner_result, rusage={},
output='Foo, Bar!')
r.state = 'error'
session.flush()
client = Client(app, BaseResponse)
resp = client.get('/job/%d/' % job.id)
assert resp.status_code == 200
assert 'Foo, Bar!' in resp.data
assert 'signal(12345)' in resp.data
def test_get_job_exit():
job = get_job(session, state='error')
inner_result = {
'ok': False,
'reason': 'exit',
'code': 2,
}
r = JobResult.create(
job=job, clock_ms=118, result=inner_result, rusage={},
output='Foo, Bir!')
r.state = 'error'
session.flush()
client = Client(app, BaseResponse)
resp = client.get('/job/%d/' % job.id)
assert resp.status_code == 200
assert 'Foo, Bir!' in resp.data
assert 'exit(2)' in resp.data
def get_job(session, state='ok'):
job = Job.create(session, 'foo', 'bar', 'baz')
    job.state = state
job.last_stamp = job.last_seen = datetime(1979, 7, 9)
session.flush()
return job
|
{
"content_hash": "ff0699b04f1a5894fca2b7b75e8412f4",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 62,
"avg_line_length": 26,
"alnum_prop": 0.6095947063688999,
"repo_name": "CSIS/proccer",
"id": "d6a2361e3c90c40a2d510046978b50a0da9e1f56",
"size": "2418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/proccer/t/test_ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1783"
},
{
"name": "GCC Machine Description",
"bytes": "62"
},
{
"name": "HTML",
"bytes": "4911"
},
{
"name": "JavaScript",
"bytes": "1840"
},
{
"name": "PLpgSQL",
"bytes": "328"
},
{
"name": "Python",
"bytes": "60956"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.application_configuration."""
import collections
from contextlib import contextmanager
import os.path
import shutil
import tempfile
import unittest
import google
import mox
from google.appengine.api import appinfo
from google.appengine.api import backendinfo
from google.appengine.api import dispatchinfo
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import errors
@contextmanager
def _java_temporarily_supported():
"""Make the java_supported() function return True temporarily.
Use as:
with _java_temporarily_supported():
...test that relies on Java being supported...
"""
old_java_supported = application_configuration.java_supported
application_configuration.java_supported = lambda: True
yield
application_configuration.java_supported = old_java_supported
class TestModuleConfiguration(unittest.TestCase):
"""Tests for application_configuration.ModuleConfiguration."""
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(
application_configuration.ModuleConfiguration,
'_parse_configuration')
self.mox.StubOutWithMock(os.path, 'getmtime')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_app_yaml_configuration(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
error_handlers = [appinfo.ErrorHandlers(file='error.html')]
handlers = [appinfo.URLMap()]
env_variables = appinfo.EnvironmentVariables()
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling,
skip_files=r'\*.gif',
error_handlers=error_handlers,
handlers=handlers,
inbound_services=['warmup'],
env_variables=env_variables,
)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual(os.path.realpath('/appdir/app.yaml'), config.config_path)
self.assertEqual('dev~app', config.application)
self.assertEqual('app', config.application_external_name)
self.assertEqual('dev', config.partition)
self.assertEqual('module1', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'module1:1\.\d+')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(automatic_scaling, config.automatic_scaling)
self.assertEqual(info.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(r'\*.gif', config.skip_files)
self.assertEqual(error_handlers, config.error_handlers)
self.assertEqual(handlers, config.handlers)
self.assertEqual(['warmup'], config.inbound_services)
self.assertEqual(env_variables, config.env_variables)
self.assertEqual({'/appdir/app.yaml': 10}, config._mtimes)
def test_vm_app_yaml_configuration(self):
manual_scaling = appinfo.ManualScaling()
vm_settings = appinfo.VmSettings()
vm_settings['vm_runtime'] = 'myawesomeruntime'
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='vm',
vm_settings=vm_settings,
threadsafe=False,
manual_scaling=manual_scaling,
)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual(os.path.realpath('/appdir/app.yaml'), config.config_path)
self.assertEqual('dev~app', config.application)
self.assertEqual('app', config.application_external_name)
self.assertEqual('dev', config.partition)
self.assertEqual('module1', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'module1:1\.\d+')
self.assertEqual('vm', config.runtime)
self.assertEqual(vm_settings['vm_runtime'], config.effective_runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(manual_scaling, config.manual_scaling)
self.assertEqual({'/appdir/app.yaml': 10}, config._mtimes)
def test_check_for_updates_unchanged_mtime(self):
info = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
def test_check_for_updates_with_includes(self):
info = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
includes=['/appdir/include.yaml'],
threadsafe=False)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn(
(info, ['/appdir/app.yaml', '/appdir/include.yaml']))
os.path.getmtime('/appdir/app.yaml').InAnyOrder().AndReturn(10)
os.path.getmtime('/appdir/include.yaml').InAnyOrder().AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/include.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn(
(info, ['/appdir/app.yaml', '/appdir/include.yaml']))
os.path.getmtime('/appdir/app.yaml').InAnyOrder().AndReturn(10)
os.path.getmtime('/appdir/include.yaml').InAnyOrder().AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertEqual({'/appdir/app.yaml': 10, '/appdir/include.yaml': 10},
config._mtimes)
config._mtimes = collections.OrderedDict([('/appdir/app.yaml', 10),
('/appdir/include.yaml', 10)])
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual({'/appdir/app.yaml': 10, '/appdir/include.yaml': 11},
config._mtimes)
def test_check_for_updates_no_changes(self):
info = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual({'/appdir/app.yaml': 11}, config._mtimes)
def test_check_for_updates_immutable_changes(self):
automatic_scaling1 = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='1.0s',
min_idle_instances=1,
max_idle_instances=2)
info1 = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling1)
info2 = appinfo.AppInfoExternal(
application='app2',
module='default2',
version='version2',
runtime='python',
threadsafe=True,
automatic_scaling=appinfo.AutomaticScaling(
min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2))
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info1, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info2, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(set(), config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual('dev~app', config.application)
self.assertEqual('default', config.module_name)
self.assertEqual('version', config.major_version)
self.assertRegexpMatches(config.version_id, r'^version\.\d+$')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(automatic_scaling1, config.automatic_scaling)
def test_check_for_mutable_changes(self):
info1 = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False,
libraries=[appinfo.Library(name='django', version='latest')],
skip_files='.*',
handlers=[],
inbound_services=['warmup'],
env_variables=appinfo.EnvironmentVariables(),
error_handlers=[appinfo.ErrorHandlers(file='error.html')],
)
info2 = appinfo.AppInfoExternal(
application='app',
module='default',
version='version',
runtime='python27',
threadsafe=False,
libraries=[appinfo.Library(name='jinja2', version='latest')],
skip_files=r'.*\.py',
handlers=[appinfo.URLMap()],
inbound_services=[],
)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info1, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info2, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(11)
self.mox.ReplayAll()
config = application_configuration.ModuleConfiguration('/appdir/app.yaml')
self.assertSequenceEqual(
set([application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.HANDLERS_CHANGED,
application_configuration.INBOUND_SERVICES_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED,
application_configuration.ERROR_HANDLERS_CHANGED]),
config.check_for_updates())
self.mox.VerifyAll()
self.assertEqual(info2.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(info2.skip_files, config.skip_files)
self.assertEqual(info2.error_handlers, config.error_handlers)
self.assertEqual(info2.handlers, config.handlers)
self.assertEqual(info2.inbound_services, config.inbound_services)
self.assertEqual(info2.env_variables, config.env_variables)
class TestBackendsConfiguration(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(
application_configuration.BackendsConfiguration,
'_parse_configuration')
self.mox.StubOutWithMock(application_configuration, 'BackendConfiguration')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_configuration(self):
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
static_backend_entry = backendinfo.BackendEntry(name='static')
dynamic_backend_entry = backendinfo.BackendEntry(name='dynamic')
backend_info = backendinfo.BackendInfoExternal(
backends=[static_backend_entry, dynamic_backend_entry])
module_config = object()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
application_configuration.BackendsConfiguration._parse_configuration(
'/appdir/backends.yaml').AndReturn(backend_info)
static_configuration = object()
dynamic_configuration = object()
application_configuration.BackendConfiguration(
module_config,
mox.IgnoreArg(),
static_backend_entry).InAnyOrder().AndReturn(static_configuration)
application_configuration.BackendConfiguration(
module_config,
mox.IgnoreArg(),
dynamic_backend_entry).InAnyOrder().AndReturn(dynamic_configuration)
self.mox.ReplayAll()
config = application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml')
self.assertItemsEqual([static_configuration, dynamic_configuration],
config.get_backend_configurations())
self.mox.VerifyAll()
def test_no_backends(self):
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
backend_info = backendinfo.BackendInfoExternal()
module_config = object()
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
application_configuration.BackendsConfiguration._parse_configuration(
'/appdir/backends.yaml').AndReturn(backend_info)
self.mox.ReplayAll()
config = application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml')
self.assertEqual([], config.get_backend_configurations())
self.mox.VerifyAll()
def test_check_for_changes(self):
static_backend_entry = backendinfo.BackendEntry(name='static')
dynamic_backend_entry = backendinfo.BackendEntry(name='dynamic')
backend_info = backendinfo.BackendInfoExternal(
backends=[static_backend_entry, dynamic_backend_entry])
module_config = self.mox.CreateMock(
application_configuration.ModuleConfiguration)
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
application_configuration.ModuleConfiguration(
'/appdir/app.yaml').AndReturn(module_config)
application_configuration.BackendsConfiguration._parse_configuration(
'/appdir/backends.yaml').AndReturn(backend_info)
module_config.check_for_updates().AndReturn(set())
module_config.check_for_updates().AndReturn(set([1]))
module_config.check_for_updates().AndReturn(set([2]))
module_config.check_for_updates().AndReturn(set())
self.mox.ReplayAll()
config = application_configuration.BackendsConfiguration(
'/appdir/app.yaml',
'/appdir/backends.yaml')
self.assertEqual(set(), config.check_for_updates('dynamic'))
self.assertEqual(set([1]), config.check_for_updates('static'))
self.assertEqual(set([1, 2]), config.check_for_updates('dynamic'))
self.assertEqual(set([2]), config.check_for_updates('static'))
self.mox.VerifyAll()
class TestDispatchConfiguration(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(os.path, 'getmtime')
self.mox.StubOutWithMock(
application_configuration.DispatchConfiguration,
'_parse_configuration')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_configuration(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='foo'),
dispatchinfo.DispatchEntry(url='domain.com/path', module='bar'),
dispatchinfo.DispatchEntry(url='*/path/*', module='baz'),
dispatchinfo.DispatchEntry(url='*.domain.com/path/*', module='foo'),
])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
self.mox.VerifyAll()
self.assertEqual(123.456, config._mtime)
self.assertEqual(2, len(config.dispatch))
self.assertEqual(vars(dispatchinfo.ParsedURL('*/path')),
vars(config.dispatch[0][0]))
self.assertEqual('foo', config.dispatch[0][1])
self.assertEqual(vars(dispatchinfo.ParsedURL('*/path/*')),
vars(config.dispatch[1][0]))
self.assertEqual('baz', config.dispatch[1][1])
def test_check_for_updates_no_modification(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
config.check_for_updates()
self.mox.VerifyAll()
def test_check_for_updates_with_invalid_modification(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='bar'),
])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(124.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndRaise(Exception)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
self.assertEqual('bar', config.dispatch[0][1])
config.check_for_updates()
self.mox.VerifyAll()
self.assertEqual('bar', config.dispatch[0][1])
def test_check_for_updates_with_modification(self):
info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='bar'),
])
new_info = dispatchinfo.DispatchInfoExternal(
application='appid',
dispatch=[
dispatchinfo.DispatchEntry(url='*/path', module='foo'),
])
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(123.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(info)
os.path.getmtime('/appdir/dispatch.yaml').AndReturn(124.456)
application_configuration.DispatchConfiguration._parse_configuration(
'/appdir/dispatch.yaml').AndReturn(new_info)
self.mox.ReplayAll()
config = application_configuration.DispatchConfiguration(
'/appdir/dispatch.yaml')
self.assertEqual('bar', config.dispatch[0][1])
config.check_for_updates()
self.mox.VerifyAll()
self.assertEqual('foo', config.dispatch[0][1])
class TestBackendConfiguration(unittest.TestCase):
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(
application_configuration.ModuleConfiguration,
'_parse_configuration')
self.mox.StubOutWithMock(os.path, 'getmtime')
def tearDown(self):
self.mox.UnsetStubs()
def test_good_configuration(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
error_handlers = [appinfo.ErrorHandlers(file='error.html')]
handlers = [appinfo.URLMap()]
env_variables = appinfo.EnvironmentVariables()
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling,
skip_files=r'\*.gif',
error_handlers=error_handlers,
handlers=handlers,
inbound_services=['warmup'],
env_variables=env_variables,
)
backend_entry = backendinfo.BackendEntry(
name='static',
instances='3',
options='public')
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
module_config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
config = application_configuration.BackendConfiguration(
module_config, None, backend_entry)
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual('dev~app', config.application)
self.assertEqual('app', config.application_external_name)
self.assertEqual('dev', config.partition)
self.assertEqual('static', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'static:1\.\d+')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(None, config.automatic_scaling)
self.assertEqual(None, config.basic_scaling)
self.assertEqual(appinfo.ManualScaling(instances='3'),
config.manual_scaling)
self.assertEqual(info.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(r'\*.gif', config.skip_files)
self.assertEqual(error_handlers, config.error_handlers)
self.assertEqual(handlers, config.handlers)
self.assertEqual(['warmup'], config.inbound_services)
self.assertEqual(env_variables, config.env_variables)
whitelist_fields = ['module_name', 'version_id', 'automatic_scaling',
'manual_scaling', 'basic_scaling', 'is_backend',
'minor_version']
# Check that all public attributes and methods in a ModuleConfiguration
# exist in a BackendConfiguration.
for field in dir(module_config):
if not field.startswith('_'):
self.assertTrue(hasattr(config, field), 'Missing field: %s' % field)
value = getattr(module_config, field)
if field not in whitelist_fields and not callable(value):
# Check that the attributes other than those in the whitelist have
# equal values in the BackendConfiguration to the ModuleConfiguration
# from which it inherits.
self.assertEqual(value, getattr(config, field))
def test_vm_app_yaml_configuration(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
vm_settings = appinfo.VmSettings()
vm_settings['vm_runtime'] = 'myawesomeruntime'
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='vm',
vm_settings=vm_settings,
threadsafe=False,
automatic_scaling=automatic_scaling,
)
backend_entry = backendinfo.BackendEntry(
name='static',
instances='3',
options='public')
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
module_config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
config = application_configuration.BackendConfiguration(
module_config, None, backend_entry)
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual('dev~app', config.application)
self.assertEqual('app', config.application_external_name)
self.assertEqual('dev', config.partition)
self.assertEqual('static', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'static:1\.\d+')
self.assertEqual('vm', config.runtime)
self.assertEqual(vm_settings['vm_runtime'], config.effective_runtime)
self.assertFalse(config.threadsafe)
# Resident backends are assigned manual scaling.
self.assertEqual(None, config.automatic_scaling)
self.assertEqual(None, config.basic_scaling)
self.assertEqual(appinfo.ManualScaling(instances='3'),
config.manual_scaling)
def test_good_configuration_dynamic_scaling(self):
automatic_scaling = appinfo.AutomaticScaling(min_pending_latency='1.0s',
max_pending_latency='2.0s',
min_idle_instances=1,
max_idle_instances=2)
error_handlers = [appinfo.ErrorHandlers(file='error.html')]
handlers = [appinfo.URLMap()]
env_variables = appinfo.EnvironmentVariables()
info = appinfo.AppInfoExternal(
application='app',
module='module1',
version='1',
runtime='python27',
threadsafe=False,
automatic_scaling=automatic_scaling,
skip_files=r'\*.gif',
error_handlers=error_handlers,
handlers=handlers,
inbound_services=['warmup'],
env_variables=env_variables,
)
backend_entry = backendinfo.BackendEntry(
name='dynamic',
instances='3',
options='public, dynamic',
start='handler')
application_configuration.ModuleConfiguration._parse_configuration(
'/appdir/app.yaml').AndReturn((info, ['/appdir/app.yaml']))
os.path.getmtime('/appdir/app.yaml').AndReturn(10)
self.mox.ReplayAll()
module_config = application_configuration.ModuleConfiguration(
'/appdir/app.yaml')
config = application_configuration.BackendConfiguration(
module_config, None, backend_entry)
self.mox.VerifyAll()
self.assertEqual(os.path.realpath('/appdir'), config.application_root)
self.assertEqual('dev~app', config.application)
self.assertEqual('dynamic', config.module_name)
self.assertEqual('1', config.major_version)
self.assertRegexpMatches(config.version_id, r'dynamic:1\.\d+')
self.assertEqual('python27', config.runtime)
self.assertFalse(config.threadsafe)
self.assertEqual(None, config.automatic_scaling)
self.assertEqual(None, config.manual_scaling)
self.assertEqual(appinfo.BasicScaling(max_instances='3'),
config.basic_scaling)
self.assertEqual(info.GetNormalizedLibraries(),
config.normalized_libraries)
self.assertEqual(r'\*.gif', config.skip_files)
self.assertEqual(error_handlers, config.error_handlers)
start_handler = appinfo.URLMap(url='/_ah/start',
script=backend_entry.start,
login='admin')
self.assertEqual([start_handler] + handlers, config.handlers)
self.assertEqual(['warmup'], config.inbound_services)
self.assertEqual(env_variables, config.env_variables)
def test_check_for_changes(self):
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
config = application_configuration.BackendConfiguration(
None, backends_config, backendinfo.BackendEntry(name='backend'))
changes = object()
backends_config.check_for_updates('backend').AndReturn([])
backends_config.check_for_updates('backend').AndReturn(changes)
minor_version = config.minor_version
self.mox.ReplayAll()
self.assertEqual([], config.check_for_updates())
self.assertEqual(minor_version, config.minor_version)
self.assertEqual(changes, config.check_for_updates())
self.assertNotEqual(minor_version, config.minor_version)
self.mox.VerifyAll()
class ModuleConfigurationStub(object):
def __init__(self, application='myapp', module_name='module'):
self.application = application
self.module_name = module_name
class DispatchConfigurationStub(object):
def __init__(self, dispatch):
self.dispatch = dispatch
class TestApplicationConfiguration(unittest.TestCase):
"""Tests for application_configuration.ApplicationConfiguration."""
def setUp(self):
self.mox = mox.Mox()
self.mox.StubOutWithMock(application_configuration, 'ModuleConfiguration')
self.mox.StubOutWithMock(application_configuration, 'BackendsConfiguration')
self.mox.StubOutWithMock(application_configuration, 'DispatchConfiguration')
self.tmpdir = tempfile.mkdtemp(dir=os.getenv('TEST_TMPDIR'))
def tearDown(self):
self.mox.UnsetStubs()
shutil.rmtree(self.tmpdir)
def _make_file_hierarchy(self, filenames):
absnames = []
for filename in filenames:
absname = os.path.normpath(self.tmpdir + '/' + filename)
absnames += [absname]
dirname = os.path.dirname(absname)
if not os.path.exists(dirname):
os.makedirs(dirname)
open(absname, 'w').close()
return absnames
def test_yaml_files(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/other.yaml'])
module_config1 = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config1)
module_config2 = ModuleConfigurationStub(module_name='other')
application_configuration.ModuleConfiguration(
absnames[1]).AndReturn(module_config2)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
absnames)
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config1, module_config2], config.modules)
def test_yaml_files_with_different_app_ids(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/other.yaml'])
module_config1 = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config1)
module_config2 = ModuleConfigurationStub(application='other_app',
module_name='other')
application_configuration.ModuleConfiguration(
absnames[1]).AndReturn(module_config2)
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
absnames)
self.mox.VerifyAll()
def test_yaml_files_with_duplicate_module_names(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/other.yaml'])
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(ModuleConfigurationStub())
application_configuration.ModuleConfiguration(
absnames[1]).AndReturn(ModuleConfigurationStub())
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
absnames)
self.mox.VerifyAll()
def test_directory(self):
absnames = self._make_file_hierarchy(['appdir/app.yaml'])
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
[os.path.dirname(absnames[0])])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_and_module(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'otherdir/mymodule.yaml'])
app_yaml_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(app_yaml_config)
my_module_config = ModuleConfigurationStub(module_name='my_module')
application_configuration.ModuleConfiguration(
absnames[1]).AndReturn(my_module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
[os.path.dirname(absnames[0]), absnames[1]])
self.mox.VerifyAll()
self.assertSequenceEqual(
[app_yaml_config, my_module_config], config.modules)
def test_directory_app_yml_only(self):
absnames = self._make_file_hierarchy(['appdir/app.yml'])
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
[os.path.dirname(absnames[0])])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_app_yaml_and_app_yml(self):
absnames = self._make_file_hierarchy(['appdir/app.yaml', 'appdir/app.yml'])
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
[os.path.dirname(absnames[0])])
self.mox.VerifyAll()
def test_directory_no_app_yamls(self):
absnames = self._make_file_hierarchy(['appdir/somethingelse.yaml'])
self.mox.ReplayAll()
self.assertRaises(errors.AppConfigNotFoundError,
application_configuration.ApplicationConfiguration,
[os.path.dirname(absnames[0])])
self.mox.VerifyAll()
def test_directory_no_app_yamls_or_web_inf(self):
absnames = self._make_file_hierarchy(['appdir/somethingelse.yaml'])
self.mox.ReplayAll()
with _java_temporarily_supported():
self.assertRaises(errors.AppConfigNotFoundError,
application_configuration.ApplicationConfiguration,
[os.path.dirname(absnames[0])])
self.mox.VerifyAll()
def test_app_yaml(self):
absnames = self._make_file_hierarchy(['appdir/app.yaml'])
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(absnames)
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_with_backends_yaml(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/backends.yaml'])
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
backend_config = ModuleConfigurationStub(module_name='backend')
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
backends_config.get_backend_configurations().AndReturn([backend_config])
application_configuration.BackendsConfiguration(
absnames[0], absnames[1]).AndReturn(backends_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(
[os.path.dirname(absnames[0])])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config, backend_config], config.modules)
def test_yaml_files_with_backends_yaml(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/backends.yaml'])
module_config = ModuleConfigurationStub()
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
backend_config = ModuleConfigurationStub(module_name='backend')
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
backends_config.get_backend_configurations().AndReturn([backend_config])
application_configuration.BackendsConfiguration(
absnames[0], absnames[1]).AndReturn(backends_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(absnames)
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config, backend_config], config.modules)
def test_yaml_files_with_backends_and_dispatch_yaml(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/backends.yaml', 'appdir/dispatch.yaml'])
module_config = ModuleConfigurationStub(module_name='default')
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
backend_config = ModuleConfigurationStub(module_name='backend')
backends_config = self.mox.CreateMock(
application_configuration.BackendsConfiguration)
backends_config.get_backend_configurations().AndReturn([backend_config])
application_configuration.BackendsConfiguration(
absnames[0], absnames[1]).AndReturn(backends_config)
dispatch_config = DispatchConfigurationStub(
[(None, 'default'), (None, 'backend')])
application_configuration.DispatchConfiguration(
absnames[2]).AndReturn(dispatch_config)
self.mox.ReplayAll()
config = application_configuration.ApplicationConfiguration(absnames)
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config, backend_config], config.modules)
self.assertEqual(dispatch_config, config.dispatch)
def test_yaml_files_dispatch_yaml_and_no_default_module(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/dispatch.yaml'])
module_config = ModuleConfigurationStub(module_name='not-default')
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
dispatch_config = DispatchConfigurationStub([(None, 'default')])
application_configuration.DispatchConfiguration(
absnames[1]).AndReturn(dispatch_config)
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
absnames)
self.mox.VerifyAll()
def test_yaml_files_dispatch_yaml_and_missing_dispatch_target(self):
absnames = self._make_file_hierarchy(
['appdir/app.yaml', 'appdir/dispatch.yaml'])
module_config = ModuleConfigurationStub(module_name='default')
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
dispatch_config = DispatchConfigurationStub(
[(None, 'default'), (None, 'fake-module')])
application_configuration.DispatchConfiguration(
absnames[1]).AndReturn(dispatch_config)
self.mox.ReplayAll()
self.assertRaises(errors.InvalidAppConfigError,
application_configuration.ApplicationConfiguration,
absnames)
self.mox.VerifyAll()
def test_directory_web_inf(self):
absnames = self._make_file_hierarchy(
['appdir/WEB-INF/appengine-web.xml', 'appdir/WEB-INF/web.xml'])
appdir = os.path.dirname(os.path.dirname(absnames[0]))
module_config = ModuleConfigurationStub(module_name='default')
application_configuration.ModuleConfiguration(
absnames[0]).AndReturn(module_config)
self.mox.ReplayAll()
with _java_temporarily_supported():
config = application_configuration.ApplicationConfiguration([appdir])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual([module_config], config.modules)
def test_directory_web_inf_missing_appengine_xml(self):
absnames = self._make_file_hierarchy(['appdir/WEB-INF/web.xml'])
appdir = os.path.dirname(os.path.dirname(absnames[0]))
self.mox.ReplayAll()
with _java_temporarily_supported():
self.assertRaises(errors.AppConfigNotFoundError,
application_configuration.ApplicationConfiguration,
[appdir])
self.mox.VerifyAll()
def test_directory_web_inf_missing_web_xml(self):
absnames = self._make_file_hierarchy(['appdir/WEB-INF/appengine-web.xml'])
appdir = os.path.dirname(os.path.dirname(absnames[0]))
self.mox.ReplayAll()
with _java_temporarily_supported():
self.assertRaises(errors.AppConfigNotFoundError,
application_configuration.ApplicationConfiguration,
[appdir])
self.mox.VerifyAll()
def test_config_with_yaml_and_xml(self):
absnames = self._make_file_hierarchy(
['module1/app.yaml', 'module1/dispatch.yaml',
'module2/WEB-INF/appengine-web.xml', 'module2/WEB-INF/web.xml'])
app_yaml = absnames[0]
dispatch_yaml = absnames[1]
appengine_web_xml = absnames[2]
module2 = os.path.dirname(os.path.dirname(appengine_web_xml))
module1_config = ModuleConfigurationStub(module_name='default')
application_configuration.ModuleConfiguration(
app_yaml).AndReturn(module1_config)
dispatch_config = DispatchConfigurationStub(
[(None, 'default'), (None, 'module2')])
application_configuration.DispatchConfiguration(
dispatch_yaml).AndReturn(dispatch_config)
module2_config = ModuleConfigurationStub(module_name='module2')
application_configuration.ModuleConfiguration(
appengine_web_xml).AndReturn(module2_config)
self.mox.ReplayAll()
with _java_temporarily_supported():
config = application_configuration.ApplicationConfiguration(
[app_yaml, dispatch_yaml, module2])
self.mox.VerifyAll()
self.assertEqual('myapp', config.app_id)
self.assertSequenceEqual(
[module1_config, module2_config], config.modules)
self.assertEqual(dispatch_config, config.dispatch)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "704946f14ab1dc3c09950cee27e0bffe",
"timestamp": "",
"source": "github",
"line_count": 1073,
"max_line_length": 80,
"avg_line_length": 40.769804287045666,
"alnum_prop": 0.6855026745302427,
"repo_name": "levibostian/myBlanky",
"id": "5c041bc0212ea25584de0307c165f20a86f2bb8a",
"size": "43746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "googleAppEngine/google/appengine/tools/devappserver2/application_configuration_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29352"
},
{
"name": "JavaScript",
"bytes": "305206"
},
{
"name": "PHP",
"bytes": "4350"
},
{
"name": "Python",
"bytes": "11679977"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from django.contrib.auth.models import User, Group
from ..models import Configuration
from .configuration import ConfigurationAdmin
admin.site.register(Configuration, ConfigurationAdmin)
|
{
"content_hash": "5fca4a00f4aa36ef4a0b41e4da2ab857",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 28.1,
"alnum_prop": 0.8291814946619217,
"repo_name": "globocom/database-as-a-service",
"id": "d7d9ecb1def4307585029f99958f23daa25e765b",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/system/admin/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
}
|
"""
Compute the Gacs-Korner common information
"""
from ...algorithms import insert_meet
from ...helpers import normalize_rvs, parse_rvs
from ...npdist import Distribution
from ...shannon import conditional_entropy as H
from ...utils import unitful
@unitful
def gk_common_information(dist, rvs=None, crvs=None, rv_mode=None):
"""
Calculates the Gacs-Korner common information K[X1:X2...] over the random
variables in `rvs`.
Parameters
----------
dist : Distribution
The distribution from which the common information is calculated.
rvs : list, None
The indexes of the random variables for which the Gacs-Korner common
information is to be computed. If None, then the common information is
calculated over all random variables.
crvs : list, None
The indexes of the random variables to condition the common information
        by. If None, then there is no conditioning.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If equal
        to 'names', then the elements are interpreted as random variable names.
If `None`, then the value of `dist._rv_mode` is consulted, which
defaults to 'indices'.
Returns
-------
K : float
The Gacs-Korner common information of the distribution.
Raises
------
ditException
        Raised if `rvs` or `crvs` contain non-existent random variables.
"""
rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)
crvs = parse_rvs(dist, crvs, rv_mode)[1]
outcomes, pmf = zip(*dist.zipped(mode='patoms'))
# The GK-common information is sensitive to zeros in the sample space.
# Here, we make sure to remove them.
d = Distribution(outcomes, pmf, sample_space=outcomes)
d.set_rv_names(dist.get_rv_names())
d2 = insert_meet(d, -1, rvs, rv_mode=rv_mode)
common = [d2.outcome_length() - 1]
K = H(d2, common, crvs)
return K
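# A minimal usage sketch (assuming the public package exposes this function as
# ``dit.multivariate.gk_common_information``; the distribution below is
# illustrative): two perfectly correlated bits share one full bit of
# Gacs-Korner common information.
#
#     >>> from dit import Distribution
#     >>> from dit.multivariate import gk_common_information
#     >>> d = Distribution(['00', '11'], [0.5, 0.5])
#     >>> gk_common_information(d)
#     1.0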
|
{
"content_hash": "f9f40b1a3ca996e3c139b53b197f063c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 33.55555555555556,
"alnum_prop": 0.6617786187322611,
"repo_name": "Autoplectic/dit",
"id": "2a654f6739482b65f3329891facfc64a1f647a57",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dit/multivariate/common_informations/gk_common_information.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5938"
},
{
"name": "HTML",
"bytes": "265"
},
{
"name": "PHP",
"bytes": "614"
},
{
"name": "Python",
"bytes": "1272731"
},
{
"name": "Shell",
"bytes": "180"
},
{
"name": "TeX",
"bytes": "6951"
}
],
"symlink_target": ""
}
|
from Exscript.util import crypt
from Exscript.stdlib.util import secure_function
@secure_function
def otp(scope, password, seed, seqs):
"""
Calculates a one-time password hash using the given password, seed, and
sequence number and returns it.
Uses the md4/sixword algorithm as supported by TACACS+ servers.
:type password: string
:param password: A password.
:type seed: string
:param seed: A username.
:type seqs: int
:param seqs: A sequence number, or a list of sequence numbers.
:rtype: string
:return: A hash, or a list of hashes.
"""
return [crypt.otp(password[0], seed[0], int(seq)) for seq in seqs]
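# A usage sketch of the underlying helper this stdlib wrapper delegates to
# (the argument values are illustrative):
#
#     >>> from Exscript.util import crypt
#     >>> crypt.otp('s3cret', 'jsmith', 99)   # returns one six-word OTP hash (a str)
#
# Inside an Exscript template the wrapper above is called instead; each argument
# arrives wrapped in a list, hence the ``password[0]`` / ``seed[0]`` indexing.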
|
{
"content_hash": "052e7c17a67a535fda23d2c662feb0cc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 31.904761904761905,
"alnum_prop": 0.6865671641791045,
"repo_name": "knipknap/exscript",
"id": "970f3ed367260f3ae0f1b982bcde506cce2d1ebe",
"size": "1794",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Exscript/stdlib/crypt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1844"
},
{
"name": "Python",
"bytes": "848571"
},
{
"name": "Roff",
"bytes": "10849"
},
{
"name": "Shell",
"bytes": "1418"
}
],
"symlink_target": ""
}
|
"""
Copyright 2022, the CVXPY authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from cvxpy.atoms.affine.bmat import bmat
from cvxpy.expressions.variable import Variable
def tr_inv_canon(expr, args):
"""Reduces the atom to an affine expression and list of constraints.
Creates the equivalent problem::
        minimize    sum(u[i])
subject to: [A ei; ei.T u[i]] is positive semidefinite
where ei is the n dimensional column vector whose i-th entry is 1 and other entries are 0
This follows from the inequality:
.. math::
        u_i \geq R_{ii} \quad \text{for all } i, \text{ where } R = A^{-1}
Parameters
----------
expr : tr_inv
args : list
The arguments for the expression
Returns
-------
tuple
(Variable for objective, list of constraints)
"""
A = args[0]
n, _ = A.shape
su = None
constraints = []
for i in range(n):
ei = np.zeros((n, 1))
ei[i] = 1.0
ui = Variable((1, 1))
R = bmat([[A, ei],
[ei.T, ui]])
constraints += [R >> 0]
if su is None:
su = ui
else:
su += ui
return su, constraints
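# Why the Schur-complement constraint yields tr(inv(A)) (a sketch): for
# positive definite A, the block matrix [[A, ei], [ei.T, u_i]] is positive
# semidefinite exactly when u_i >= ei.T @ inv(A) @ ei = inv(A)[i, i], so at
# the optimum the returned expression u_1 + ... + u_n equals tr(inv(A)).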
|
{
"content_hash": "0a57238a5e9727cf82c09b3883ce8ac7",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 96,
"avg_line_length": 25.892307692307693,
"alnum_prop": 0.6256684491978609,
"repo_name": "merraksh/cvxpy",
"id": "4e0d5d6caecc43b4aa2e9ee2af62d5421475da5c",
"size": "1683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cvxpy/reductions/dcp2cone/atom_canonicalizers/tr_inv_canon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "120010"
},
{
"name": "C++",
"bytes": "5687983"
},
{
"name": "CMake",
"bytes": "694"
},
{
"name": "Makefile",
"bytes": "6320"
},
{
"name": "Python",
"bytes": "2149670"
},
{
"name": "SWIG",
"bytes": "2403"
},
{
"name": "Shell",
"bytes": "3117"
}
],
"symlink_target": ""
}
|
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - app -c '%s;%s'" % (XW_ENV, cmd)
return cmd
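# For example, updateCMD('pkgcmd -l') expands to "su - app -c '<XW_ENV>;pkgcmd -l'",
# so package commands run as the "app" user with the session bus configured.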
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 4:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith("%s.wgt" % PKG_NAME):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
{
"content_hash": "20b555f291c7eb3a5fd7665829d3fef4",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 89,
"avg_line_length": 29.565420560747665,
"alnum_prop": 0.5376955903271693,
"repo_name": "yugang/crosswalk-test-suite",
"id": "3b073d5e0cebfe86cfa26120c6e7a57d6665e42d",
"size": "6350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stability/wrt-stabilitymanu-tizen-tests/inst.wgt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3495"
},
{
"name": "CSS",
"bytes": "1694855"
},
{
"name": "Erlang",
"bytes": "2850"
},
{
"name": "Java",
"bytes": "155590"
},
{
"name": "JavaScript",
"bytes": "32256550"
},
{
"name": "PHP",
"bytes": "43783"
},
{
"name": "Perl",
"bytes": "1696"
},
{
"name": "Python",
"bytes": "4215706"
},
{
"name": "Shell",
"bytes": "638387"
},
{
"name": "XSLT",
"bytes": "2143471"
}
],
"symlink_target": ""
}
|
import doctest
from custom.abt.reports import late_pmt_2020
def test_doctests():
results = doctest.testmod(late_pmt_2020)
assert results.failed == 0
|
{
"content_hash": "4696da9ffb446463eb0dc356f928659a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 44,
"avg_line_length": 20,
"alnum_prop": 0.73125,
"repo_name": "dimagi/commcare-hq",
"id": "a79f83ca75d7659e4874a2e793e540a47a2fe2e4",
"size": "160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/abt/reports/tests/test_late_pmt_2020.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.views.poam
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Pritam D. Gautam <pritam.gautam@nuagedm.com> @nuagedm
"""
from sqlalchemy.orm import joinedload, aliased, load_only, defer
from security_monkey import db, rbac
from security_monkey.views import AuthenticatedService
from security_monkey.datastore import Item, ItemAudit, Account, Technology, ItemRevision
from sqlalchemy import func, text, null as sqlnull, false, between
def sev2score(score):
if score < 5:
return "Low"
elif score > 10:
return "High"
else:
return "Medium"
# Get a List of POA&M Items
class POAMItemList(AuthenticatedService):
decorators = [rbac.allow(['View'], ["GET"])]
def get(self):
"""
.. http:get:: /api/1/poamitems
Get a List of POA&M Items by account.
**Example Request**:
.. sourcecode:: http
GET /api/1/poamitems HTTP/1.1
Host: example.com
Accept: application/json
**Example Response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: application/json
{
"items": [
{
"control": "policy",
"create_date": "2017-11-01 19:29:52.329638",
"poam_comments": null,
"poam_id": "sa_poam-12868",
"item_id": "",
"account": "DEV",
"score": 10,
"weakness_description": "Service [iam] Category: [Permissions] Resources: [\"*\"], universal, ServiceCatalogAdmin-SupplementalPermissions",
"weakness_name": "Sensitive Permissions"
}
],
"total": 1,
"page": 1,
"count" 1,
"auth": {
"authenticated": true,
"user": "user@example.com"
}
}
:statuscode 200: no error
:statuscode 401: Authentication Error. Please Login.
"""
# SQL Query base for implementation
# select
# distinct concat('sa_poam-', ia.id) as "poam_id",
# i.id as "item_id",
# acc.name as "account",
# t.name as "control",
# ia.issue as "weakness_name",
# concat(
# ia.notes, ', ', i.region, ', ', i.name
# ) as "weakness_description",
# ia.score,
# ir.create_date,
# ia.action_instructions as "poam_comments"
# from
# item i
# inner join itemaudit ia ON i.id = ia.item_id
# and (
# (i.account_id in (select a.id from account a where a."name" in (p_account_id)) )
# or (p_account_id is null)
# )
# inner join technology t ON i.tech_id = t.id
# inner join (
# select
# item_id,
# min(date_created) as "create_date"
# from
# itemrevision
# group by
# item_id
# ) ir on i.id = ir.item_id
# inner join account acc ON i.account_id = acc.id
# where
# ia.justified = FALSE
# and ia.fixed = FALSE
# and i.arn is not null
# and ia.score > 1
# order by
# ir.create_date asc,
# ia.score desc
self.reqparse.add_argument('accounts', type=str, default=None, location='args')
self.reqparse.add_argument('count', type=int, default=10, location='args')
self.reqparse.add_argument('page', type=int, default=1, location='args')
self.reqparse.add_argument('sev', type=str, default=None, location='args')
self.reqparse.add_argument('tech', type=str, default=None, location='args')
args = self.reqparse.parse_args()
page = args.pop('page', None)
count = args.pop('count', None)
for k, v in args.items():
if not v:
del args[k]
# Read more about filtering:
# https://docs.sqlalchemy.org/en/latest/orm/query.html
query = Item.query.join((ItemAudit, Item.id == ItemAudit.item_id)) \
.options(load_only(Item.id)) \
.distinct()
query = query.join((Technology, Technology.id == Item.tech_id))
# Subquery on ItemRevision Table
itemrevision_subquery = db.session \
.query(ItemRevision, func.min(ItemRevision.date_created).label('create_date')) \
.options(load_only("item_id")) \
.group_by(ItemRevision.item_id) \
.subquery()
query = query.join(itemrevision_subquery, Item.id == itemrevision_subquery.c.item_id)
query = query.join((Account, Account.id == Item.account_id))
# Add Select Columns
query = query \
.add_column(func.concat('sa_poam-', ItemAudit.id).label('poam_id')) \
.add_column(Account.name.label('account')) \
.add_column(Technology.name.label('control')) \
.add_column(ItemAudit.issue.label('weakness_name')) \
.add_column(func.concat(ItemAudit.notes, ',', Item.region, ',', Item.name).label('weakness_description')) \
.add_column(ItemAudit.score.label('score')) \
.add_column(itemrevision_subquery.c.create_date.label('create_date')) \
.add_column(ItemAudit.action_instructions.label('poam_comments'))
# Filters
query = query.filter(ItemAudit.justified == false())
query = query.filter(ItemAudit.fixed == false())
query = query.filter(ItemAudit.score > 1)
query = query.filter(Item.arn != sqlnull())
if 'accounts' in args:
accounts = args['accounts'].split(',')
query = query.filter(Account.name.in_(accounts))
if 'sev' in args:
sev = args['sev'].lower()
if sev == 'low':
query = query.filter(ItemAudit.score < 5)
elif sev == 'medium':
query = query.filter(between(ItemAudit.score, 5, 10))
elif sev == 'high':
query = query.filter(ItemAudit.score > 10)
        if 'tech' in args:
            tech = args['tech'].split(',')
            # Technology is already joined above, so only the name filter is needed here.
            query = query.filter(Technology.name.in_(tech))
# Order By
query = query.order_by(itemrevision_subquery.c.create_date)
query = query.order_by(ItemAudit.score.desc())
# Eager load the joins
query = query.options(joinedload('account'))
query = query.options(joinedload('technology'))
# Paginate
items = query.paginate(page, count)
marshaled_dict = {
'page': items.page,
'total': items.total,
'auth': self.auth_dict
}
marshaled_items = []
for row in items.items:
row_dict = dict(row.__dict__)
marshaled_items.append({
'poam_id': row_dict['poam_id'],
'item_id': row_dict['Item'].id,
'account': row_dict['account'],
'control': row_dict['control'],
'weakness_name': row_dict['weakness_name'],
'weakness_description': row_dict['weakness_description'],
'score': row_dict['score'],
'sev': sev2score(row_dict['score']),
'create_date': str(row_dict['create_date']),
'poam_comments': row_dict['poam_comments']
})
marshaled_dict['items'] = marshaled_items
marshaled_dict['count'] = len(marshaled_items)
return marshaled_dict, 200
|
{
"content_hash": "2e178bc9493c8ac72a091fc3a4bdcd40",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 171,
"avg_line_length": 37.143497757847534,
"alnum_prop": 0.49837015574067367,
"repo_name": "stackArmor/security_monkey",
"id": "9f1131870eb28b8fae50c0d28e79f82695f02030",
"size": "8283",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "security_monkey/views/poam.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33462"
},
{
"name": "Dart",
"bytes": "137774"
},
{
"name": "Dockerfile",
"bytes": "3798"
},
{
"name": "HTML",
"bytes": "165572"
},
{
"name": "JavaScript",
"bytes": "984069"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1682110"
},
{
"name": "Shell",
"bytes": "29978"
}
],
"symlink_target": ""
}
|
"""Module that pre-processes the notebook for export to HTML.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import io
from pygments.formatters import HtmlFormatter
from IPython.utils import path
from .base import Transformer
from IPython.utils.traitlets import Unicode
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class CSSHTMLHeaderTransformer(Transformer):
"""
Transformer used to pre-process notebook for HTML output. Adds IPython notebook
front-end CSS and Pygments CSS to HTML output.
"""
header = []
highlight_class = Unicode('.highlight', config=True,
help="CSS highlight class identifier")
def __init__(self, config=None, **kw):
"""
Public constructor
Parameters
----------
config : Config
Configuration file structure
**kw : misc
Additional arguments
"""
super(CSSHTMLHeaderTransformer, self).__init__(config=config, **kw)
if self.enabled :
self._regen_header()
def call(self, nb, resources):
"""Fetch and add CSS to the resource dictionary
Fetch CSS from IPython and Pygments to add at the beginning
        of the html files. Store this CSS in the resources dictionary under the
        "inlining.css" key.
Parameters
----------
nb : NotebookNode
Notebook being converted
resources : dictionary
Additional resources used in the conversion process. Allows
transformers to pass variables into the Jinja engine.
"""
resources['inlining'] = {}
resources['inlining']['css'] = self.header
return nb, resources
def _regen_header(self):
"""
Fills self.header with lines of CSS extracted from IPython
and Pygments.
"""
#Clear existing header.
header = []
#Construct path to IPy CSS
sheet_filename = os.path.join(path.get_ipython_package_dir(),
'html', 'static', 'style', 'style.min.css')
#Load style CSS file.
with io.open(sheet_filename, encoding='utf-8') as file:
file_text = file.read()
header.append(file_text)
#Add pygments CSS
formatter = HtmlFormatter()
pygments_css = formatter.get_style_defs(self.highlight_class)
header.append(pygments_css)
#Set header
self.header = header
|
{
"content_hash": "47172fdd6c67e1ce1faad3f2ecdd5a6c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 84,
"avg_line_length": 29.72641509433962,
"alnum_prop": 0.5052364328784513,
"repo_name": "marcoantoniooliveira/labweb",
"id": "7f33fa9e780b8bbeffc111353976fcb698fdc9ec",
"size": "3151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/IPython/nbconvert/transformers/csshtmlheader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "1534157"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "JavaScript",
"bytes": "2968822"
},
{
"name": "LiveScript",
"bytes": "6103"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "30402832"
},
{
"name": "Shell",
"bytes": "10782"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
}
|
from tasks import request
url = 'https://en.wikipedia.org/w/api.php?action=query&titles=Main%20Page&prop=revisions&rvprop=content&format=json&formatversion=2'
result = request.delay(url)
response = result.get()
print(response.json())
|
{
"content_hash": "5d5999c36335565dca6a5b75ec60b09d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 132,
"avg_line_length": 33.57142857142857,
"alnum_prop": 0.7787234042553192,
"repo_name": "sblancov/hello_world",
"id": "8807fb62cc97fe1f7bbab01c8c543a79394ccf1d",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/celery/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "174"
},
{
"name": "Java",
"bytes": "120"
},
{
"name": "JavaScript",
"bytes": "11365"
},
{
"name": "Makefile",
"bytes": "706"
},
{
"name": "Python",
"bytes": "4916"
}
],
"symlink_target": ""
}
|
r"""
NLTK Tokenizer Package
Tokenizers divide strings into lists of substrings. For example,
tokenizers can be used to find the words and punctuation in a string:
>>> from nltk.tokenize import word_tokenize
>>> s = '''Good muffins cost $3.88\nin New York. Please buy me
... two of them.\n\nThanks.'''
>>> word_tokenize(s)
['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
This particular tokenizer requires the Punkt sentence tokenization
models to be installed. NLTK also provides a simpler,
regular-expression based tokenizer, which splits text on whitespace
and punctuation:
>>> from nltk.tokenize import wordpunct_tokenize
>>> wordpunct_tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
We can also operate at the level of sentences, using the sentence
tokenizer directly as follows:
>>> from nltk.tokenize import sent_tokenize, word_tokenize
>>> sent_tokenize(s)
['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.']
>>> [word_tokenize(t) for t in sent_tokenize(s)]
[['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'],
['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']]
Caution: when tokenizing a Unicode string, make sure you are not
using an encoded version of the string (it may be necessary to
decode it first, e.g. with ``s.decode("utf8")``).
NLTK tokenizers can produce token-spans, represented as tuples of integers
having the same semantics as string slices, to support efficient comparison
of tokenizers. (These methods are implemented as generators.)
>>> from nltk.tokenize import WhitespaceTokenizer
>>> list(WhitespaceTokenizer().span_tokenize(s))
[(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44),
(45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)]
There are numerous ways to tokenize text. If you need more control over
tokenization, see the other methods provided in this package.
For further information, please see Chapter 3 of the NLTK book.
"""
import re
from nltk.data import load
from nltk.tokenize.casual import (TweetTokenizer, casual_tokenize)
from nltk.tokenize.mwe import MWETokenizer
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.tokenize.regexp import (RegexpTokenizer, WhitespaceTokenizer,
BlanklineTokenizer, WordPunctTokenizer,
wordpunct_tokenize, regexp_tokenize,
blankline_tokenize)
from nltk.tokenize.repp import ReppTokenizer
from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
from nltk.tokenize.simple import (SpaceTokenizer, TabTokenizer, LineTokenizer,
line_tokenize)
from nltk.tokenize.stanford import StanfordTokenizer
from nltk.tokenize.texttiling import TextTilingTokenizer
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.tokenize.treebank import TreebankWordTokenizer
from nltk.tokenize.util import string_span_tokenize, regexp_span_tokenize
from nltk.tokenize.stanford_segmenter import StanfordSegmenter
# Standard sentence tokenizer.
def sent_tokenize(text, language='english'):
"""
Return a sentence-tokenized copy of *text*,
using NLTK's recommended sentence tokenizer
(currently :class:`.PunktSentenceTokenizer`
for the specified language).
:param text: text to split into sentences
:param language: the model name in the Punkt corpus
"""
tokenizer = load('tokenizers/punkt/{0}.pickle'.format(language))
return tokenizer.tokenize(text)
# Standard word tokenizer.
_treebank_word_tokenizer = TreebankWordTokenizer()
# See discussion on https://github.com/nltk/nltk/pull/1437
# Adding to TreebankWordTokenizer, the splits on
# - chevron quotes u'\xab' and u'\xbb'.
# - unicode quotes u'\u2018', u'\u2019', u'\u201c' and u'\u201d'
improved_open_quote_regex = re.compile(u'([«“‘])', re.U)
improved_close_quote_regex = re.compile(u'([»”’])', re.U)
improved_punct_regex = re.compile(r'([^\.])(\.)([\]\)}>"\'' u'»”’ ' r']*)\s*$', re.U)
_treebank_word_tokenizer.STARTING_QUOTES.insert(0, (improved_open_quote_regex, r' \1 '))
_treebank_word_tokenizer.ENDING_QUOTES.insert(0, (improved_close_quote_regex, r' \1 '))
_treebank_word_tokenizer.PUNCTUATION.insert(0, (improved_punct_regex, r'\1 \2 \3 '))
def word_tokenize(text, language='english', preserve_line=False):
"""
Return a tokenized copy of *text*,
using NLTK's recommended word tokenizer
(currently an improved :class:`.TreebankWordTokenizer`
along with :class:`.PunktSentenceTokenizer`
for the specified language).
    :param text: text to split into words
    :type text: str
    :param language: the model name in the Punkt corpus
    :type language: str
    :param preserve_line: An option to keep the sentence as-is and not sentence-tokenize it.
    :type preserve_line: bool
"""
sentences = [text] if preserve_line else sent_tokenize(text, language)
return [token for sent in sentences
for token in _treebank_word_tokenizer.tokenize(sent)]
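# A short usage sketch of the ``preserve_line`` flag (output shown as expected
# from the improved Treebank tokenizer defined above):
#
#     >>> word_tokenize("Good muffins cost $3.88", preserve_line=True)
#     ['Good', 'muffins', 'cost', '$', '3.88']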
|
{
"content_hash": "d9b8511b0e4ec1f04a875df86a649d3a",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 99,
"avg_line_length": 43.90163934426229,
"alnum_prop": 0.6786781179985063,
"repo_name": "arju88nair/projectCulminate",
"id": "b4b6dd7105ec544c20f6423baf87d210cb614a18",
"size": "5688",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python3.5/site-packages/nltk/tokenize/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "365921"
},
{
"name": "C++",
"bytes": "237910"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Makefile",
"bytes": "90112"
},
{
"name": "Python",
"bytes": "15199371"
},
{
"name": "Shell",
"bytes": "17795"
}
],
"symlink_target": ""
}
|
import hashlib
import json
import time
import urllib
import requests
from requests import RequestException, ConnectionError, Timeout
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from .api import ApiSpec
from .exceptions import ElongException, ElongAPIError, \
RetryableException, RetryableAPIError
from .response import RequestsResponse, TornadoResponse, logger
from .util.retry import retry_on_error, is_retryable
class Request(object):
def __init__(self, client,
host=ApiSpec.host,
version=ApiSpec.version,
local=ApiSpec.local):
self.client = client
self.verify_ssl = self.client.cert is not None
self.host = host
self.version = version
self.local = local
def do(self, api, params, https, raw=False):
raise NotImplementedError()
def prepare(self, api, params, https, raw):
timestamp = str(int(time.time()))
data = self.build_data(params, raw)
scheme = 'https' if https else 'http'
url = "%s://%s" % (scheme, self.host)
params = {
'method': api,
'user': self.client.user,
'timestamp': timestamp,
'data': data,
'signature': self.signature(data, timestamp),
'format': 'json'
}
return url, params
def build_data(self, params, raw=False):
if not raw:
data = {
'Version': self.version,
'Local': self.local,
'Request': params
}
else:
data = params
return json.dumps(data, separators=(',', ':'))
def signature(self, data, timestamp):
s = self._md5(data + self.client.app_key)
return self._md5("%s%s%s" % (timestamp, s, self.client.secret_key))
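    # Signing scheme implemented above: signature = md5(timestamp +
    # md5(data + app_key) + secret_key), where ``data`` is the compact JSON
    # body sent as the ``data`` request parameter.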
@staticmethod
def _md5(data):
return hashlib.md5(data.encode('utf-8')).hexdigest()
def check_response(self, resp):
if not resp.ok and self.client.raise_api_error:
# logger.error('pyelong calling api failed, url: %s', resp.url)
if is_retryable(resp.code):
raise RetryableAPIError(resp.code, resp.error)
raise ElongAPIError(resp.code, resp.error)
return resp
def timing(self, api, delta):
if self.client.statsd_client and \
hasattr(self.client.statsd_client, 'timing'):
self.client.statsd_client.timing(api, delta)
class SyncRequest(Request):
@property
def session(self):
if not hasattr(self, '_session') or not self._session:
self._session = requests.Session()
if self.client.proxy_host and self.client.proxy_port:
p = '%s:%s' % (self.client.proxy_host, self.client.proxy_port)
self._session.proxies = {'http': p, 'https': p}
return self._session
@retry_on_error(retry_api_error=True)
def do(self, api, params, https, raw=False):
url, params = self.prepare(api, params, https, raw)
try:
result = self.session.get(url=url,
params=params,
verify=self.verify_ssl,
cert=self.client.cert)
except (ConnectionError, Timeout) as e:
logger.exception('pyelong catches ConnectionError or Timeout, '
'url: %s, params: %s', url, params)
raise RetryableException('ConnectionError or Timeout: %s' % e)
except RequestException as e:
logger.exception('pyelong catches RequestException, url: %s,'
' params: %s', url, params)
raise ElongException('RequestException: %s' % e)
except Exception as e:
logger.exception('pyelong catches unknown exception, url: %s, '
'params: %s', url, params)
raise ElongException('unknown exception: %s' % e)
resp = RequestsResponse(result)
self.timing(api, resp.request_time)
return self.check_response(resp)
class AsyncRequest(Request):
@property
def proxy_config(self):
if not getattr(self, '_proxy_config', None):
if self.client.proxy_host and self.client.proxy_port:
self._proxy_config = {
'proxy_host': self.client.proxy_host,
'proxy_port': self.client.proxy_port
}
else:
self._proxy_config = {}
return self._proxy_config
@staticmethod
def _encode_params(data):
"""
:param dict data: params
Taken from requests.models.RequestEncodingMixin._encode_params
"""
result = []
for k, vs in data.iteritems():
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urllib.urlencode(result, doseq=True)
def _prepare_url(self, url, params):
if url.endswith('/'):
url = url.strip('/')
return '%s?%s' % (url, self._encode_params(params))
@gen.coroutine
def do(self, api, params, https, raw=False):
url, params = self.prepare(api, params, https, raw)
# use the default SimpleAsyncHTTPClient
resp = yield AsyncHTTPClient().fetch(self._prepare_url(url, params),
validate_cert=self.verify_ssl,
ca_certs=self.client.cert,
**self.proxy_config)
resp = TornadoResponse(resp)
self.timing(api, resp.request_time)
raise gen.Return(self.check_response(resp))
|
{
"content_hash": "a3af305b21937599808d12fbe1168737",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 36.760736196319016,
"alnum_prop": 0.5539052069425902,
"repo_name": "DeanThompson/pyelong",
"id": "eb85f1e9e920e45a9cda0d9703f335d5b7a1e3bd",
"size": "6017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyelong/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39833"
}
],
"symlink_target": ""
}
|
"""
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import random
import re
import urllib
from urlresolver import common
from lib import helpers
from urlresolver.resolver import UrlResolver, ResolverError
class MovshareResolver(UrlResolver):
name = "movshare"
domains = ["movshare.net", 'wholecloud.net', 'vidgg.to']
pattern = '(?://|\.)(movshare.net|wholecloud.net|vidgg.to)/(?:video/|embed(?:/|\.php)\?(?:v|id)=)([A-Za-z0-9]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.FF_USER_AGENT}
html = self.net.http_GET(web_url, headers=headers).content
stream_url = ''
match = re.search('<video.*?</video>', html, re.DOTALL)
if match:
links = re.findall('<source[^>]+src="([^"]+)', match.group(0), re.DOTALL)
if links:
stream_url = random.choice(links)
if not stream_url:
match = re.search('fkzd="([^"]+)', html)
if match:
query = {'pass': 'undefined', 'key': match.group(1), 'cid3': 'undefined', 'cid': 0, 'numOfErrors': 0, 'file': media_id, 'cid2': 'undefined', 'user': 'undefined'}
api_url = 'http://www.wholecloud.net//api/player.api.php?' + urllib.urlencode(query)
html = self.net.http_GET(api_url, headers=headers).content
match = re.search('url=([^&]+)', html)
if match:
stream_url = match.group(1)
if stream_url:
headers.update({'Referer': web_url, })
return stream_url + helpers.append_headers(headers)
else:
raise ResolverError('File Not Found or removed')
def get_url(self, host, media_id):
if 'vidgg' in host:
template = 'http://{host}/embed/?id={media_id}'
else:
template = 'http://{host}/embed/?v={media_id}'
return self._default_get_url(host, media_id, template)
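# A small illustration of the URL templates above (the media id is made up;
# the exact formatting is delegated to UrlResolver._default_get_url):
#
#     MovshareResolver().get_url('vidgg.to', 'abc123')
#     -> 'http://vidgg.to/embed/?id=abc123'
#     MovshareResolver().get_url('wholecloud.net', 'abc123')
#     -> 'http://wholecloud.net/embed/?v=abc123'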
|
{
"content_hash": "ac33226baf57a6b0416f3ac2e99f2f32",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 177,
"avg_line_length": 41.784615384615385,
"alnum_prop": 0.6049337260677466,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "e5faa6d725bc643da423dda3328a02d0cf025692",
"size": "2716",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "script.module.urlresolver/lib/urlresolver/plugins/movshare.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
}
|
import select
# From python select module and Tornado source
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
MASK_ALL = 0xFFFF
class PollerImpl(object):
def poll(self, timeout):
raise NotImplementedError()
def register(self, fd, eventmask):
raise NotImplementedError()
def unregister(self, fd):
raise NotImplementedError()
def modify(self, fd, eventmask):
raise NotImplementedError()
class EpollImpl(PollerImpl):
"""
epoll wrapper. Only usable on Linux.
"""
def __init__(self):
super(EpollImpl, self).__init__()
self._epoll = select.epoll()
def __del__(self):
try:
self._epoll.close()
except:
# Doesn't matter
pass
def register(self, fd, eventmask):
self._epoll.register(fd, eventmask)
def unregister(self, fd):
self._epoll.unregister(fd)
def poll(self, timeout):
return self._epoll.poll(timeout)
def modify(self, fd, eventmask):
return self._epoll.modify(fd, eventmask)
class KQueueImpl(PollerImpl):
"""
kqueue wrapper. Only usable on BSD-like systems.
"""
def __init__(self):
super(KQueueImpl, self).__init__()
self._kqueue = select.kqueue()
self._events = {}
def __del__(self):
try:
self._kqueue.close()
except:
# Doesn't matter
pass
def _control(self, fd, eventmask, flags):
events = []
if eventmask & READ:
events.append(select.kevent(fd, filter=select.KQ_FILTER_READ, flags=flags))
if eventmask & WRITE:
events.append(select.kevent(fd, filter=select.KQ_FILTER_WRITE, flags=flags))
for ev in events:
self._kqueue.control([ev], 0)
def register(self, fd, eventmask):
assert fd not in self._events, 'File already registered'
self._events[fd] = eventmask
if eventmask != 0:
self._control(fd, eventmask, select.KQ_EV_ADD)
def unregister(self, fd):
assert fd in self._events, 'File not registered'
event = self._events.pop(fd)
if event != 0:
self._control(fd, event, select.KQ_EV_DELETE)
def poll(self, timeout):
retval = {}
kevents = self._kqueue.control(None, 1000, timeout)
for kevent in kevents:
ident = kevent.ident
if kevent.filter == select.KQ_FILTER_READ:
retval[ident] = retval.get(ident, 0) | READ
if kevent.filter == select.KQ_FILTER_WRITE:
if kevent.flags & select.KQ_EV_EOF:
retval[ident] = ERROR
else:
retval[ident] = retval.get(ident, 0) | WRITE
if kevent.flags & select.KQ_EV_ERROR:
retval[ident] = retval.get(ident, 0) | ERROR
return retval.items()
def modify(self, fd, eventmask):
self.unregister(fd)
self.register(fd, eventmask)
class PollImpl(PollerImpl):
def __init__(self):
self._poll = select.poll()
def __del__(self):
try:
self._poll.close()
except:
# Doesn't matter
pass
def register(self, fd, eventmask):
self._poll.register(fd, eventmask)
def unregister(self, fd):
self._poll.unregister(fd)
def poll(self, timeout):
return self._poll.poll(timeout)
def modify(self, fd, eventmask):
return self._poll.modify(fd, eventmask)
class SelectImpl(PollerImpl):
def __init__(self):
self._reading = set()
self._writing = set()
self._error = set()
def register(self, fd, eventmask):
if eventmask & READ:
self._reading.add(fd)
if eventmask & WRITE:
self._writing.add(fd)
if eventmask & ERROR:
self._error.add(fd)
def modify(self, fd, eventmask):
self.unregister(fd)
self.register(fd, eventmask)
def poll(self, timeout):
read, write, err = select.select(self._reading, self._writing, self._error, timeout)
events = {}
for fd in read:
events[fd] = events.get(fd, 0) | READ
for fd in write:
events[fd] = events.get(fd, 0) | WRITE
for fd in err:
events[fd] = events.get(fd, 0) | ERROR
return events.items()
def unregister(self, fd):
self._reading.discard(fd)
self._writing.discard(fd)
self._error.discard(fd)
def get_poller():
if hasattr(select, 'epoll'):
# Linux
return EpollImpl()
elif hasattr(select, 'kqueue'):
# BSD
return KQueueImpl()
elif hasattr(select, 'poll'):
# UNIX
return PollImpl()
elif hasattr(select, 'select'):
# Windows et al.
return SelectImpl()
else:
raise OSError('System not supported')
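# A minimal usage sketch (the loopback socket is illustrative):
#
#     >>> import socket
#     >>> server = socket.socket()
#     >>> server.bind(('127.0.0.1', 0)); server.listen(1)
#     >>> poller = get_poller()
#     >>> poller.register(server.fileno(), READ)
#     >>> poller.poll(0.1)   # list of (fd, eventmask) pairs; empty while idle
#     >>> poller.unregister(server.fileno())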
|
{
"content_hash": "2087e2bdc808e09f52d31cbdb16d541f",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 92,
"avg_line_length": 25.928934010152282,
"alnum_prop": 0.5644087705559906,
"repo_name": "mar29th/ring",
"id": "cde3ed8983b949f19922261750b28b1cb41a2424",
"size": "5712",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ring/poller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "127640"
}
],
"symlink_target": ""
}
|
import fiona
# This module contains examples of opening files to get feature collections in
# different ways.
#
# It is meant to be run from the distribution root, the directory containing
# setup.py.
#
# A ``path`` is always the ``open()`` function's first argument. It can be
# absolute or relative to the working directory. It is the only positional
# argument, though it is conventional to use the mode as a 2nd positional
# argument.
# 1. Opening a file with a single data layer (shapefiles, etc).
#
# args: path, mode
# kwds: none
#
# The relative path to a file on the filesystem is given and its single layer
# is selected implicitly (a shapefile has a single layer). The file is opened
# for reading (mode 'r'), but since this is the default, we'll omit it in
# following examples.
with fiona.open('docs/data/test_uk.shp', 'r') as c:
assert len(c) == 48
# 2. Opening a file with explicit layer selection (FileGDB, etc).
#
# args: path
# kwds: layer
#
# Same as above but layer specified explicitly by name..
with fiona.open('docs/data/test_uk.shp', layer='test_uk') as c:
assert len(c) == 48
# 3. Opening a directory for access to a single file.
#
# args: path
# kwds: layer
#
# Same as above but using the path to the directory containing the shapefile,
# specified explicitly by name.
with fiona.open('docs/data', layer='test_uk') as c:
assert len(c) == 48
# 4. Opening a single file within a zip archive.
#
# args: path
# kwds: vfs
#
# Open a file given its absolute path within a virtual filesystem. The VFS
# is given an Apache Commons VFS identifier. It may contain either an absolute
# path or a path relative to the working directory.
#
# Example archive:
#
# $ unzip -l docs/data/test_uk.zip
# Archive: docs/data/test_uk.zip
# Length Date Time Name
# -------- ---- ---- ----
# 10129 04-08-13 20:49 test_uk.dbf
# 143 04-08-13 20:49 test_uk.prj
# 65156 04-08-13 20:49 test_uk.shp
# 484 04-08-13 20:49 test_uk.shx
# -------- -------
# 75912 4 files
with fiona.open('/test_uk.shp', vfs='zip://docs/data/test_uk.zip') as c:
assert len(c) == 48
# 5. Opening a directory within a zip archive to select a layer.
#
# args: path
# kwds: layer, vfs
#
# The most complicated case. As above, but specifying the root directory within
# the virtual filesystem as the path and the layer by name (combination of
# 4 and 3). It ought to be possible to open a file geodatabase within a zip
# file like this.
with fiona.open('/', layer='test_uk', vfs='zip://docs/data/test_uk.zip') as c:
assert len(c) == 48
|
{
"content_hash": "fd69da7783d9e30202210ca900ae917c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 30.96470588235294,
"alnum_prop": 0.6713525835866262,
"repo_name": "perrygeo/Fiona",
"id": "82e522264437a035da99d20e9031f0a35fab3257",
"size": "2633",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/open.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "333612"
},
{
"name": "Shell",
"bytes": "2574"
}
],
"symlink_target": ""
}
|
"""
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
N-dimensional cantilever beam problem.
"""
import numpy as np
from smt.utils.options_dictionary import OptionsDictionary
from smt.problems.problem import Problem
from smt.problems.reduced_problem import ReducedProblem
from smt.problems.cantilever_beam import CantileverBeam
class NdimCantileverBeam(Problem):
def __init__(self, ndim=1, w=0.2):
self.problem = ReducedProblem(
CantileverBeam(ndim=3 * ndim), np.arange(1, 3 * ndim, 3), w=w
)
self.options = OptionsDictionary()
self.options.declare("ndim", ndim, types=int)
self.options.declare("return_complex", False, types=bool)
self.options.declare("name", "NdimCantileverBeam", types=str)
self.xlimits = self.problem.xlimits
def _evaluate(self, x, kx):
return self.problem._evaluate(x, kx)
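# A minimal usage sketch (assuming the package re-exports this class as
# ``smt.problems.NdimCantileverBeam``; SMT problem instances are callable):
#
#     >>> import numpy as np
#     >>> from smt.problems import NdimCantileverBeam
#     >>> prob = NdimCantileverBeam(ndim=2)
#     >>> x = np.atleast_2d(prob.xlimits[:, 0])  # evaluate at the lower bounds
#     >>> y = prob(x)                            # ndarray of shape (1, 1)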
|
{
"content_hash": "0094257eb7273bbb4b3a410b3efa7eb9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 73,
"avg_line_length": 31.133333333333333,
"alnum_prop": 0.6980728051391863,
"repo_name": "relf/smt",
"id": "30ad31616a417178b175b83d258207c111fad456",
"size": "934",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "smt/problems/ndim_cantilever_beam.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "22649"
},
{
"name": "Cython",
"bytes": "5481"
},
{
"name": "Jupyter Notebook",
"bytes": "14431913"
},
{
"name": "Python",
"bytes": "799500"
}
],
"symlink_target": ""
}
|
import argparse
import collections
from maas_common import get_openstack_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
# The actual stat names from novaclient are nasty, so this mapping is used to
# translate them to something more consistent and usable, as well as set the
# units for each metric
stats_mapping = {
'hypervisor_count': {
'stat_name': 'count', 'unit': 'hypervisors', 'type': 'uint32'
},
'total_disk_space': {
'stat_name': 'local_disk_size', 'unit': 'Gigabytes', 'type': 'uint32'
},
'used_disk_space': {
'stat_name': 'local_disk_used', 'unit': 'Gigabytes', 'type': 'uint32'
},
'free_disk_space': {
'stat_name': 'local_disk_free', 'unit': 'Gigabytes', 'type': 'uint32'
},
'total_memory': {
'stat_name': 'memory_size', 'unit': 'Megabytes', 'type': 'uint32'
},
'used_memory': {
'stat_name': 'memory_used', 'unit': 'Megabytes', 'type': 'uint32'
},
'free_memory': {
'stat_name': 'memory_free', 'unit': 'Megabytes', 'type': 'uint32'
},
'total_vcpus': {
'stat_name': 'vcpus', 'unit': 'vcpu', 'type': 'uint32'
},
'used_vcpus': {
'stat_name': 'vcpus_used', 'unit': 'vcpu', 'type': 'uint32'
}
}
def check(args):
try:
nova = get_openstack_client('compute')
except Exception as e:
metric_bool('client_success', False, m_name='maas_nova')
status_err(str(e), m_name='maas_nova')
else:
metric_bool('client_success', True, m_name='maas_nova')
# get some cloud stats
stats = [nova.get_hypervisor(i.id) for i in nova.hypervisors()]
cloud_stats = collections.defaultdict(dict)
count = 0
for stat in stats:
count += 1
setattr(stat, 'count', count)
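            # 'count' is attached to each hypervisor object so that the
            # generic getattr() lookup driven by stats_mapping below can read
            # it just like the real SDK attributes; after the last iteration
            # it holds the total number of hypervisors.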
for metric_name, vals in stats_mapping.iteritems():
multiplier = 1
if metric_name == 'total_vcpus':
multiplier = args.cpu_allocation_ratio
elif metric_name == 'total_memory':
multiplier = args.mem_allocation_ratio
cloud_stats[metric_name]['value'] = \
(getattr(stat, vals['stat_name']) * multiplier)
cloud_stats[metric_name]['unit'] = \
vals['unit']
cloud_stats[metric_name]['type'] = \
vals['type']
status_ok(m_name='maas_nova')
for metric_name in cloud_stats.iterkeys():
metric('cloud_resource_%s' % metric_name,
cloud_stats[metric_name]['type'],
cloud_stats[metric_name]['value'],
cloud_stats[metric_name]['unit'])
def main(args):
check(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Check Nova hypervisor stats')
parser.add_argument('--cpu',
type=float,
default=1.0,
required=False,
action='store',
dest='cpu_allocation_ratio',
help='cpu allocation ratio')
parser.add_argument('--mem',
type=float,
default=1.0,
required=False,
action='store',
dest='mem_allocation_ratio',
help='mem allocation ratio')
parser.add_argument('ip', nargs='?',
type=str,
help='Nova API hostname or IP address')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
parser.add_argument('--protocol',
type=str,
help='Protocol to use for contacting nova',
default='http')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
|
{
"content_hash": "1666e3cf0dea9cb8f53507c878d4cf28",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 77,
"avg_line_length": 36.05172413793103,
"alnum_prop": 0.5248684839789575,
"repo_name": "npawelek/rpc-maas",
"id": "256c2a70f45d836914d83177f3ae383ec718d9a3",
"size": "4789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playbooks/files/rax-maas/plugins/nova_cloud_stats.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3215"
},
{
"name": "Python",
"bytes": "368284"
},
{
"name": "Shell",
"bytes": "50085"
}
],
"symlink_target": ""
}
|
from scipy.io import loadmat
import matplotlib.pyplot as plt
import numpy as np
def normalize_message(m):
return m / np.sum(m)
def belief_propagation(transition_prob, visible_to_hidden_prob, observations):
"""
My implementation
Used reference notes from
https://dl.dropboxusercontent.com/u/14115372/sum_product_algorithm/sum_product_note.pdf
"""
n_observations = len(observations)
# Forward pass
forward = []
    # Message u_{f -> s} from the previous hidden state's factor
hfac_message = np.array([0.8, 0.2])
for i in range(n_observations):
obs = observations[i]
        # Multiply the two factor messages, then marginalize over the state,
        # i.e. multiply and sum (a dot product).
ofac_message = normalize_message(np.array([0.5, 0.5])
* visible_to_hidden_prob[:, obs])
forward_update = normalize_message(
np.dot(transition_prob, ofac_message
* hfac_message))
hfac_message = forward_update
forward.append(forward_update)
forward = np.array(forward)
# Backward pass
backward = []
hfac_message = np.array([0.5, 0.5])
for i in range(n_observations)[::-1]:
obs = observations[i]
ofac_message = normalize_message(np.array([0.5, 0.5])
* visible_to_hidden_prob[:, obs])
        # Multiply the two factor messages, then marginalize over the state,
        # i.e. multiply and sum (a dot product).
backward_update = normalize_message(
np.dot(transition_prob, visible_to_hidden_prob[:, obs] * hfac_message))
hfac_message = backward_update
backward.insert(0, backward_update)
backward = np.array(backward)
marginal_prob = forward * backward
marginal_prob /= np.sum(marginal_prob, axis=1)[:, None]
return marginal_prob, forward, backward
# zt, zt+1
# Bad, Bad | Bad, Good
# Good, Bad | Good, Good
transition_prob = np.array([[0.8, 0.2],
[0.2, 0.8]])
# xt, zt
# -1, Bad | +1, Bad
# -1, Good | +1, Good
q = 0.7
visible_to_hidden_prob = np.array([[q, 1. - q],
[1. - q, q]])
# Hardcoded in the function, but listed here as well for reference
# x1
# Bad | Good
init_prob = np.array([[0.8, 0.2],])
# X is observed, Z is hidden state
X = loadmat('sp500.mat')['price_move']
X[ X > 0] = 1
X[ X < 0] = 0
pl, f, b = belief_propagation(transition_prob, visible_to_hidden_prob, X.ravel())
# xt, zt
# -1, Bad | +1, Bad
# -1, Good | +1, Good
q = 0.9
visible_to_hidden_prob = np.array([[q, 1. - q],
[1. - q, q]])
ph, f, b = belief_propagation(transition_prob, visible_to_hidden_prob, X.ravel())
plt.plot(pl[:, 1], label="q=0.7", color="steelblue")
plt.plot(ph[:, 1], label="q=0.9", color="darkred")
plt.title("Discrete HMM Belief Propagation")
plt.xlabel("Time (Weeks)")
plt.ylabel("Marginal probability of 'good' state")
plt.legend()
plt.show()
|
{
"content_hash": "7182fe68e9eb9ad54afa17dad2b86af1",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 91,
"avg_line_length": 32.747252747252745,
"alnum_prop": 0.5902684563758389,
"repo_name": "kastnerkyle/ift6085",
"id": "9aa70f9f1b72cb4921b518f6e4656403451e10b9",
"size": "2980",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markov_sum_product.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13545"
},
{
"name": "Shell",
"bytes": "648"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import fnmatch
import glob
import io
import os
import re
import sys
from functools import total_ordering
from itertools import dropwhile
import django
from django.conf import settings
from django.core.files.temp import NamedTemporaryFile
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import (
find_command, handle_extensions, popen_wrapper,
)
from django.utils._os import upath
from django.utils.encoding import DEFAULT_LOCALE_ENCODING, force_str
from django.utils.functional import cached_property
from django.utils.jslex import prepare_js_for_gettext
from django.utils.text import get_text_list
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
STATUS_OK = 0
NO_LOCALE_DIR = object()
def check_programs(*programs):
for program in programs:
if find_command(program) is None:
raise CommandError("Can't find %s. Make sure you have GNU "
"gettext tools 0.15 or newer installed." % program)
@total_ordering
class TranslatableFile(object):
def __init__(self, dirpath, file_name, locale_dir):
self.file = file_name
self.dirpath = dirpath
self.locale_dir = locale_dir
def __repr__(self):
return "<TranslatableFile: %s>" % os.sep.join([self.dirpath, self.file])
def __eq__(self, other):
return self.path == other.path
def __lt__(self, other):
return self.path < other.path
@property
def path(self):
return os.path.join(self.dirpath, self.file)
class BuildFile(object):
"""
Represents the state of a translatable file during the build process.
"""
def __init__(self, command, domain, translatable):
self.command = command
self.domain = domain
self.translatable = translatable
@cached_property
def is_templatized(self):
if self.domain == 'djangojs':
return self.command.gettext_version < (0, 18, 3)
elif self.domain == 'django':
file_ext = os.path.splitext(self.translatable.file)[1]
return file_ext != '.py'
return False
@cached_property
def path(self):
return self.translatable.path
@cached_property
def work_path(self):
"""
Path to a file which is being fed into GNU gettext pipeline. This may
be either a translatable or its preprocessed version.
"""
if not self.is_templatized:
return self.path
extension = {
'djangojs': 'c',
'django': 'py',
}.get(self.domain)
filename = '%s.%s' % (self.translatable.file, extension)
return os.path.join(self.translatable.dirpath, filename)
def preprocess(self):
"""
Preprocess (if necessary) a translatable file before passing it to
xgettext GNU gettext utility.
"""
from django.utils.translation import templatize
if not self.is_templatized:
return
with io.open(self.path, 'r', encoding=settings.FILE_CHARSET) as fp:
src_data = fp.read()
if self.domain == 'djangojs':
content = prepare_js_for_gettext(src_data)
elif self.domain == 'django':
content = templatize(src_data, self.path[2:])
with io.open(self.work_path, 'w', encoding='utf-8') as fp:
fp.write(content)
def postprocess_messages(self, msgs):
"""
Postprocess messages generated by xgettext GNU gettext utility.
Transform paths as if these messages were generated from original
translatable files rather than from preprocessed versions.
"""
if not self.is_templatized:
return msgs
# Remove '.py' suffix
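        # e.g. a location comment produced from the preprocessed copy, such as
        #     #: templates/base.html.py:5
        # is rewritten to point at the original translatable file,
        #     #: templates/base.html:5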
if os.name == 'nt':
# Preserve '.\' prefix on Windows to respect gettext behavior
old = '#: ' + self.work_path
new = '#: ' + self.path
else:
old = '#: ' + self.work_path[2:]
new = '#: ' + self.path[2:]
return msgs.replace(old, new)
def cleanup(self):
"""
Remove a preprocessed copy of a translatable file (if any).
"""
if self.is_templatized:
# This check is needed for the case of a symlinked file and its
# source being processed inside a single group (locale dir);
# removing either of those two removes both.
if os.path.exists(self.work_path):
os.unlink(self.work_path)
def write_pot_file(potfile, msgs):
"""
Write the :param potfile: POT file with the :param msgs: contents,
    making sure beforehand that its format is valid.
"""
if os.path.exists(potfile):
# Strip the header
msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
else:
msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
with io.open(potfile, 'a', encoding='utf-8') as fp:
fp.write(msgs)
class Command(BaseCommand):
help = ("Runs over the entire source tree of the current directory and "
"pulls out all strings marked for translation. It creates (or updates) a message "
"file in the conf/locale (in the django tree) or locale (for projects and "
"applications) directory.\n\nYou must run this command with one of either the "
"--locale, --exclude or --all options.")
translatable_file_class = TranslatableFile
build_file_class = BuildFile
requires_system_checks = False
leave_locale_alone = True
msgmerge_options = ['-q', '--previous']
msguniq_options = ['--to-code=utf-8']
msgattrib_options = ['--no-obsolete']
xgettext_options = ['--from-code=UTF-8', '--add-comments=Translators']
def add_arguments(self, parser):
parser.add_argument('--locale', '-l', default=[], dest='locale', action='append',
help='Creates or updates the message files for the given locale(s) (e.g. pt_BR). '
'Can be used multiple times.')
parser.add_argument('--exclude', '-x', default=[], dest='exclude', action='append',
help='Locales to exclude. Default is none. Can be used multiple times.')
parser.add_argument('--domain', '-d', default='django', dest='domain',
help='The domain of the message files (default: "django").')
parser.add_argument('--all', '-a', action='store_true', dest='all',
default=False, help='Updates the message files for all existing locales.')
parser.add_argument('--extension', '-e', dest='extensions',
help='The file extension(s) to examine (default: "html,txt,py", or "js" '
'if the domain is "djangojs"). Separate multiple extensions with '
'commas, or use -e multiple times.',
action='append')
parser.add_argument('--symlinks', '-s', action='store_true', dest='symlinks',
default=False, help='Follows symlinks to directories when examining '
'source code and templates for translation strings.')
parser.add_argument('--ignore', '-i', action='append', dest='ignore_patterns',
default=[], metavar='PATTERN',
help='Ignore files or directories matching this glob-style pattern. '
'Use multiple times to ignore more.')
parser.add_argument('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*', '*~' and '*.pyc'.")
parser.add_argument('--no-wrap', action='store_true', dest='no_wrap',
default=False, help="Don't break long message lines into several lines.")
parser.add_argument('--no-location', action='store_true', dest='no_location',
default=False, help="Don't write '#: filename:line' lines.")
parser.add_argument('--no-obsolete', action='store_true', dest='no_obsolete',
default=False, help="Remove obsolete message strings.")
parser.add_argument('--keep-pot', action='store_true', dest='keep_pot',
default=False, help="Keep .pot file after making messages. Useful when debugging.")
def handle(self, *args, **options):
locale = options['locale']
exclude = options['exclude']
self.domain = options['domain']
self.verbosity = options['verbosity']
process_all = options['all']
extensions = options['extensions']
self.symlinks = options['symlinks']
# Need to ensure that the i18n framework is enabled
if settings.configured:
settings.USE_I18N = True
else:
settings.configure(USE_I18N=True)
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~', '*.pyc']
self.ignore_patterns = list(set(ignore_patterns))
# Avoid messing with mutable class variables
if options['no_wrap']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-wrap']
self.msguniq_options = self.msguniq_options[:] + ['--no-wrap']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-wrap']
self.xgettext_options = self.xgettext_options[:] + ['--no-wrap']
if options['no_location']:
self.msgmerge_options = self.msgmerge_options[:] + ['--no-location']
self.msguniq_options = self.msguniq_options[:] + ['--no-location']
self.msgattrib_options = self.msgattrib_options[:] + ['--no-location']
self.xgettext_options = self.xgettext_options[:] + ['--no-location']
self.no_obsolete = options['no_obsolete']
self.keep_pot = options['keep_pot']
if self.domain not in ('django', 'djangojs'):
raise CommandError("currently makemessages only supports domains "
"'django' and 'djangojs'")
if self.domain == 'djangojs':
exts = extensions if extensions else ['js']
else:
exts = extensions if extensions else ['html', 'txt', 'py']
self.extensions = handle_extensions(exts)
if (locale is None and not exclude and not process_all) or self.domain is None:
raise CommandError("Type '%s help %s' for usage information." % (
os.path.basename(sys.argv[0]), sys.argv[1]))
if self.verbosity > 1:
self.stdout.write('examining files with the extensions: %s\n'
% get_text_list(list(self.extensions), 'and'))
self.invoked_for_django = False
self.locale_paths = []
self.default_locale_path = None
if os.path.isdir(os.path.join('conf', 'locale')):
self.locale_paths = [os.path.abspath(os.path.join('conf', 'locale'))]
self.default_locale_path = self.locale_paths[0]
self.invoked_for_django = True
else:
self.locale_paths.extend(settings.LOCALE_PATHS)
# Allow to run makemessages inside an app dir
if os.path.isdir('locale'):
self.locale_paths.append(os.path.abspath('locale'))
if self.locale_paths:
self.default_locale_path = self.locale_paths[0]
if not os.path.exists(self.default_locale_path):
os.makedirs(self.default_locale_path)
# Build locale list
locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % self.default_locale_path))
all_locales = map(os.path.basename, locale_dirs)
# Account for excluded locales
if process_all:
locales = all_locales
else:
locales = locale or all_locales
locales = set(locales) - set(exclude)
if locales:
check_programs('msguniq', 'msgmerge', 'msgattrib')
check_programs('xgettext')
try:
potfiles = self.build_potfiles()
# Build po files for each selected locale
for locale in locales:
if self.verbosity > 0:
self.stdout.write("processing locale %s\n" % locale)
for potfile in potfiles:
self.write_po_file(potfile, locale)
finally:
if not self.keep_pot:
self.remove_potfiles()
@cached_property
def gettext_version(self):
# Gettext tools will output system-encoded bytestrings instead of UTF-8,
# when looking up the version. It's especially a problem on Windows.
out, err, status = popen_wrapper(
['xgettext', '--version'],
stdout_encoding=DEFAULT_LOCALE_ENCODING,
)
m = re.search(r'(\d+)\.(\d+)\.?(\d+)?', out)
if m:
return tuple(int(d) for d in m.groups() if d is not None)
else:
raise CommandError("Unable to get gettext version. Is it installed?")
def build_potfiles(self):
"""
Build pot files and apply msguniq to them.
"""
file_list = self.find_files(".")
self.remove_potfiles()
self.process_files(file_list)
potfiles = []
for path in self.locale_paths:
potfile = os.path.join(path, '%s.pot' % str(self.domain))
if not os.path.exists(potfile):
continue
args = ['msguniq'] + self.msguniq_options + [potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msguniq\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
with io.open(potfile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
potfiles.append(potfile)
return potfiles
def remove_potfiles(self):
for path in self.locale_paths:
pot_path = os.path.join(path, '%s.pot' % str(self.domain))
if os.path.exists(pot_path):
os.unlink(pot_path)
def find_files(self, root):
"""
Helper method to get all files in the given root. Also check that there
is a matching locale dir for each file.
"""
def is_ignored(path, ignore_patterns):
"""
Check if the given path should be ignored or not.
"""
filename = os.path.basename(path)
def ignore(pattern):
return fnmatch.fnmatchcase(filename, pattern) or fnmatch.fnmatchcase(path, pattern)
return any(ignore(pattern) for pattern in ignore_patterns)
ignore_patterns = [os.path.normcase(p) for p in self.ignore_patterns]
dir_suffixes = {'%s*' % path_sep for path_sep in {'/', os.sep}}
norm_patterns = []
for p in ignore_patterns:
for dir_suffix in dir_suffixes:
if p.endswith(dir_suffix):
norm_patterns.append(p[:-len(dir_suffix)])
break
else:
norm_patterns.append(p)
all_files = []
ignored_roots = [os.path.normpath(p) for p in (settings.MEDIA_ROOT, settings.STATIC_ROOT) if p]
for dirpath, dirnames, filenames in os.walk(root, topdown=True, followlinks=self.symlinks):
for dirname in dirnames[:]:
if (is_ignored(os.path.normpath(os.path.join(dirpath, dirname)), norm_patterns) or
os.path.join(os.path.abspath(dirpath), dirname) in ignored_roots):
dirnames.remove(dirname)
if self.verbosity > 1:
self.stdout.write('ignoring directory %s\n' % dirname)
elif dirname == 'locale':
dirnames.remove(dirname)
self.locale_paths.insert(0, os.path.join(os.path.abspath(dirpath), dirname))
for filename in filenames:
file_path = os.path.normpath(os.path.join(dirpath, filename))
file_ext = os.path.splitext(filename)[1]
if file_ext not in self.extensions or is_ignored(file_path, self.ignore_patterns):
if self.verbosity > 1:
self.stdout.write('ignoring file %s in %s\n' % (filename, dirpath))
else:
locale_dir = None
for path in self.locale_paths:
if os.path.abspath(dirpath).startswith(os.path.dirname(path)):
locale_dir = path
break
if not locale_dir:
locale_dir = self.default_locale_path
if not locale_dir:
locale_dir = NO_LOCALE_DIR
all_files.append(self.translatable_file_class(dirpath, filename, locale_dir))
return sorted(all_files)
def process_files(self, file_list):
"""
Group translatable files by locale directory and run pot file build
process for each group.
"""
file_groups = {}
for translatable in file_list:
file_group = file_groups.setdefault(translatable.locale_dir, [])
file_group.append(translatable)
for locale_dir, files in file_groups.items():
self.process_locale_dir(locale_dir, files)
def process_locale_dir(self, locale_dir, files):
"""
Extract translatable literals from the specified files, creating or
updating the POT file for a given locale directory.
Uses the xgettext GNU gettext utility.
"""
build_files = []
for translatable in files:
if self.verbosity > 1:
self.stdout.write('processing file %s in %s\n' % (
translatable.file, translatable.dirpath
))
if self.domain not in ('djangojs', 'django'):
continue
build_file = self.build_file_class(self, self.domain, translatable)
try:
build_file.preprocess()
except UnicodeDecodeError as e:
self.stdout.write(
'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
translatable.file, translatable.dirpath, e,
)
)
continue
build_files.append(build_file)
if self.domain == 'djangojs':
is_templatized = build_file.is_templatized
args = [
'xgettext',
'-d', self.domain,
'--language=%s' % ('C' if is_templatized else 'JavaScript',),
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--output=-',
]
elif self.domain == 'django':
args = [
'xgettext',
'-d', self.domain,
'--language=Python',
'--keyword=gettext_noop',
'--keyword=gettext_lazy',
'--keyword=ngettext_lazy:1,2',
'--keyword=ugettext_noop',
'--keyword=ugettext_lazy',
'--keyword=ungettext_lazy:1,2',
'--keyword=pgettext:1c,2',
'--keyword=npgettext:1c,2,3',
'--keyword=pgettext_lazy:1c,2',
'--keyword=npgettext_lazy:1c,2,3',
'--output=-',
]
else:
return
input_files = [bf.work_path for bf in build_files]
with NamedTemporaryFile(mode='w+') as input_files_list:
input_files_list.write('\n'.join(input_files))
input_files_list.flush()
args.extend(['--files-from', input_files_list.name])
args.extend(self.xgettext_options)
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
for build_file in build_files:
build_file.cleanup()
raise CommandError(
'errors happened while running xgettext on %s\n%s' %
('\n'.join(input_files), errors)
)
elif self.verbosity > 0:
# Print warnings
self.stdout.write(errors)
if msgs:
if locale_dir is NO_LOCALE_DIR:
file_path = os.path.normpath(build_files[0].path)
raise CommandError(
'Unable to find a locale path to store translations for '
'file %s' % file_path
)
for build_file in build_files:
msgs = build_file.postprocess_messages(msgs)
potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))
write_pot_file(potfile, msgs)
for build_file in build_files:
build_file.cleanup()
def write_po_file(self, potfile, locale):
"""
Creates or updates the PO file for self.domain and :param locale:.
Uses contents of the existing :param potfile:.
        Uses the msgmerge and msgattrib GNU gettext utilities.
"""
basedir = os.path.join(os.path.dirname(potfile), locale, 'LC_MESSAGES')
if not os.path.isdir(basedir):
os.makedirs(basedir)
pofile = os.path.join(basedir, '%s.po' % str(self.domain))
if os.path.exists(pofile):
args = ['msgmerge'] + self.msgmerge_options + [pofile, potfile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgmerge\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
else:
with io.open(potfile, 'r', encoding='utf-8') as fp:
msgs = fp.read()
if not self.invoked_for_django:
msgs = self.copy_plural_forms(msgs, locale)
msgs = msgs.replace(
"#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % self.domain, "")
with io.open(pofile, 'w', encoding='utf-8') as fp:
fp.write(msgs)
if self.no_obsolete:
args = ['msgattrib'] + self.msgattrib_options + ['-o', pofile, pofile]
msgs, errors, status = popen_wrapper(args)
if errors:
if status != STATUS_OK:
raise CommandError(
"errors happened while running msgattrib\n%s" % errors)
elif self.verbosity > 0:
self.stdout.write(errors)
def copy_plural_forms(self, msgs, locale):
"""
Copies plural forms header contents from a Django catalog of locale to
the msgs string, inserting it at the right place. msgs should be the
contents of a newly created .po file.
"""
django_dir = os.path.normpath(os.path.join(os.path.dirname(upath(django.__file__))))
if self.domain == 'djangojs':
domains = ('djangojs', 'django')
else:
domains = ('django',)
for domain in domains:
django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
if os.path.exists(django_po):
with io.open(django_po, 'r', encoding='utf-8') as fp:
m = plural_forms_re.search(fp.read())
if m:
plural_form_line = force_str(m.group('value'))
if self.verbosity > 1:
self.stdout.write("copying plural forms: %s\n" % plural_form_line)
lines = []
found = False
for line in msgs.split('\n'):
if not found and (not line or plural_forms_re.search(line)):
line = '%s\n' % plural_form_line
found = True
lines.append(line)
msgs = '\n'.join(lines)
break
return msgs
|
{
"content_hash": "951299290b64c51469234497c163a4c6",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 108,
"avg_line_length": 42.22428330522766,
"alnum_prop": 0.5457486321338711,
"repo_name": "yephper/django",
"id": "c9550fcab3af646146f79f55c92ad52173021f87",
"size": "25039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/core/management/commands/makemessages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
__all__ = ['StatesSet']
class StatesSet:
STATE, CAPTURED = range(2)
def __init__(self):
self._list = []
self._set = set()
def __len__(self):
return len(self._list)
def __bool__(self):
return bool(self._list)
def __iter__(self):
yield from self._list
def __contains__(self, item):
return item in self._set
def __getitem__(self, item):
return self._list[item]
def extend(self, items):
items = tuple(
item
for item in items
if item[self.STATE] not in self._set)
self._list.extend(items)
self._set.update(
item[self.STATE]
for item in items)
def clear(self):
self._list.clear()
self._set.clear()
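if __name__ == '__main__':
    # Tiny usage sketch (added for illustration): items are (state, captured)
    # pairs. extend() skips pairs whose state was already added by an earlier
    # extend() call, and membership tests look at the state alone.
    s = StatesSet()
    s.extend([('q0', ()), ('q1', ('a',))])
    s.extend([('q0', ('b',)), ('q2', ())])  # 'q0' already present, only 'q2' is added
    assert len(s) == 3
    assert 'q0' in s and 'q2' in s
    assert s[0] == ('q0', ())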
|
{
"content_hash": "1bbbabf0688cc85807e5583115ea04ed",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 49,
"avg_line_length": 20.384615384615383,
"alnum_prop": 0.5044025157232704,
"repo_name": "nitely/regexy",
"id": "549c9501282f3d09812cd57b127b6bef81fee83c",
"size": "821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regexy/shared/collections/states_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "235"
},
{
"name": "Python",
"bytes": "75351"
}
],
"symlink_target": ""
}
|
from whoosh.qparser import QueryParser
import whoosh.index as index
from whoosh.fields import *
from whoosh import qparser
import argparse
import os
import re
import sys
import time
import logging
class Timer(object):
def __init__(self, name=None, logger=None):
self.logger = logger
self.name = name
def __enter__(self):
self.tstart = time.time()
def __exit__(self, type, value, traceback):
if self.logger is None:
if self.name:
print '[%s]' % self.name,
print 'Elapsed: %s' % (time.time() - self.tstart)
else:
if self.name:
self.logger.info("[%s] Elapsed: %s" % (self.name, (time.time() - self.tstart)))
else:
self.logger.info('Elapsed: %s' % (time.time() - self.tstart))
def get_section(rawtxt, tag1, tag2):
rexp1 = "%s([\\S\\s]*?)%s" % (tag1, tag2)
re_exp = re.compile(rexp1, re.DOTALL)
section = []
for m in re_exp.finditer(rawtxt):
section.append(m.group(1))
return section
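# Added note: get_section() returns the text strictly between tag1 and tag2,
# while get_section2() keeps the tags themselves. For example, on the TREC
# style topic markup used below,
#     get_section("<num> Number: 301 <title> foo", "<num> Number:", "<title>")
# returns [" 301 "].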
def get_section2(rawtxt, tag1, tag2):
rexp1 = "(%s[\\S\\s]*?%s)" % (tag1, tag2)
re_exp = re.compile(rexp1, re.DOTALL)
section = []
for m in re_exp.finditer(rawtxt):
section.append(m.group(1))
return section
def query(indexpath):
ix = index.open_dir(indexpath)
with ix.searcher() as searcher:
query = QueryParser("content", ix.schema).parse("test")
results = searcher.search(query)
print results[0]
def batch_query(querypath, indexpath):
ix = index.open_dir(indexpath)
with open(querypath, 'r') as fh:
with open('output.txt', 'w') as out_file:
rawdata = fh.read()
document = get_section2(rawdata, "<top>", "</top>")
for d in document:
topicnum = get_section(d, "<num> Number:", "<title>")[0].strip(" ").strip("\n")
title = get_section(d, "<title>", "<desc>")[0].strip(" ").strip("\n")
desc = get_section(d, "<desc>", "<narr>")[0].replace("Description:","").strip(" ")
narr = get_section(d, "<narr>", "</top>")[0].replace("Narrative:","").strip(" ")
print topicnum, title, desc, narr
with ix.searcher() as searcher:
parser = qparser.QueryParser("content", schema=ix.schema,
group=qparser.OrGroup)
query = parser.parse(desc+" "+title)
# query = QueryParser("content", ix.schema)
results = searcher.search(query, limit=1000)
print results[0]
print results[1]
# return
for i in range(1000):
out_file.write("%s\tQ0\t%s\t%s\t%s\t jack\n" % (topicnum, results[i].values()[1], results[i].rank+1, results[i].score))
# return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("querypath")
parser.add_argument("indexpath")
args = parser.parse_args()
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s')
logging.root.setLevel(level=logging.INFO)
with Timer("indexing", logger):
batch_query(args.querypath, args.indexpath)
# query(args.indexpath)
|
{
"content_hash": "66b0ace280ff55d2a2403a5a193ac501",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 143,
"avg_line_length": 31.897196261682243,
"alnum_prop": 0.5493700556694989,
"repo_name": "bgshin/irqa",
"id": "d504e0066e9adf1224c95b916aeb5ff320c8ddbd",
"size": "3413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bquery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8969"
}
],
"symlink_target": ""
}
|
import socket
import time
import threading
import json
import signal
import socketserver
socketserver.ThreadingTCPServer.allow_reuse_address = True
UUID_LEN = 32
monitor_ins_id = 'ab8bd7be5102545ea70d092b333d6631ab8bd7be5102545ea70d092b333d66314ec7c545c4504d3b89de49f067ce76c0'
class TCPHandler(socketserver.BaseRequestHandler):
"""
TCP listen
"""
def handle(self):
# TODO: zmq
data = self.request.recv(1024).strip().decode('utf-8')
print(data)
self.request.sendall(bytes(str(True), 'utf-8'))
class ThreadingTCPServer(socketserver.ThreadingTCPServer):
pass
def signal_handler(signal, frame):
global interrupted
interrupted = True
def send_instance_info():
try:
HOST, PORT = "localhost", 10006
f = open('./test.json', 'r')
data = str(json.load(f))
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Connect to server and send data
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
# Receive data from the server and shut down
received = str(sock.recv(1024), "utf-8")
# print("Sent: {}".format(data))
# print("Received: {}".format(received))
except:
pass
def send_check_instance():
try:
HOST, PORT = "localhost", 10007
data = monitor_ins_id
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
# Connect to server and send data
sock.connect((HOST, PORT))
sock.sendall(bytes(data, "utf-8"))
# Receive data from the server and shut down
received = str(sock.recv(1024), "utf-8")
# print("Sent: {}".format(data))
# print("Received: {}".format(received))
except:
pass
# Create a socket (SOCK_STREAM means a TCP socket)
interrupted = False
signal.signal(signal.SIGINT, signal_handler)
DB_HOST, DB_PORT = 'localhost', 10009
db_server = ThreadingTCPServer((DB_HOST, DB_PORT), TCPHandler)
t = threading.Thread(target=db_server.serve_forever)
t.setDaemon(True)
t.start()
while True:
t = threading.Thread(target=send_instance_info)
t.setDaemon(True)
t.start()
t = threading.Thread(target=send_check_instance)
t.setDaemon(True)
t.start()
if interrupted:
break
|
{
"content_hash": "b0446c991b307a50891bd414c33877c3",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 115,
"avg_line_length": 26.670454545454547,
"alnum_prop": 0.6374094588836813,
"repo_name": "utam0k/c3os",
"id": "c4512d87a9cebc7a3602c2a1cd9ee01351c7024a",
"size": "2347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c3os/tests/test_sender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7747"
},
{
"name": "Makefile",
"bytes": "7904"
},
{
"name": "Python",
"bytes": "43618"
}
],
"symlink_target": ""
}
|
import numpy as np
from statsmodels.stats.correlation_tools import (
kernel_covariance, GaussianMultivariateKernel)
from numpy.testing import assert_allclose
def test_kernel_covariance():
np.random.seed(342)
# Number of independent observations
ng = 1000
# Dimension of the process
p = 3
    # Each component of the process is an AR(1) process with coefficient r,
    # with 10 values observed on a grid
r = 0.5
ii = np.arange(10)
qm = r**np.abs(np.subtract.outer(ii, ii))
qm = np.linalg.cholesky(qm)
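    # By construction each 3-vector block has covariance r**|i-j| between grid
    # positions i and j (an AR(1) structure), so the true cross-covariance at
    # lag d is r**d * I_p -- which is what the cv(0, 1), cv(0, 2) and cv(1, 2)
    # assertions below check (0.5, 0.25 and 0.5 times the identity).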
exog, groups, pos = [], [], []
for j in range(ng):
pos1 = np.arange(10)[:, None]
groups1 = j * np.ones(10)
# The components are independent AR processes
ex1 = np.random.normal(size=(10, 3))
ex1 = np.dot(qm, ex1)
pos.append(pos1)
groups.append(groups1)
exog.append(ex1)
groups = np.concatenate(groups)
pos = np.concatenate(pos, axis=0)
exog = np.concatenate(exog, axis=0)
for j in range(4):
if j == 0:
kernel = None
bw = None
elif j == 1:
kernel = GaussianMultivariateKernel()
bw = None
elif j == 2:
kernel = GaussianMultivariateKernel()
bw = 1
elif j == 3:
kernel = GaussianMultivariateKernel()
bw = kernel.set_default_bw(pos)
cv = kernel_covariance(exog, pos, groups, kernel=kernel, bw=bw)
assert_allclose(cv(0, 0), np.eye(p), atol=0.1, rtol=0.01)
assert_allclose(cv(0, 1), 0.5*np.eye(p), atol=0.1, rtol=0.01)
assert_allclose(cv(0, 2), 0.25*np.eye(p), atol=0.1, rtol=0.01)
assert_allclose(cv(1, 2), 0.5*np.eye(p), atol=0.1, rtol=0.01)
|
{
"content_hash": "fa2511cd3ceb1b6a7bb6da4ea9b0c3dc",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 71,
"avg_line_length": 28.7,
"alnum_prop": 0.5743321718931476,
"repo_name": "statsmodels/statsmodels",
"id": "9ee1b5531f958ff9cda3c84d41a62d8c70282c0c",
"size": "1722",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "statsmodels/stats/tests/test_correlation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14445661"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
}
|
from test_managers import TestManager
|
{
"content_hash": "a0047a7b848258ddd30d7760d18bf7fd",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 37,
"avg_line_length": 37,
"alnum_prop": 0.8918918918918919,
"repo_name": "drtyrsa/django-cached-manager",
"id": "1e7b1bc2db51b29c0d0a562c857ccd12a138b8c1",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cached_manager/tests/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12789"
}
],
"symlink_target": ""
}
|
"""
Deep clustering
---------------
.. autoclass:: nussl.separation.deep.DeepClustering
:autosummary:
Deep mask estimation
--------------------
.. autoclass:: nussl.separation.deep.DeepMaskEstimation
:autosummary:
Deep audio estimation
---------------------
.. autoclass:: nussl.separation.deep.DeepAudioEstimation
:autosummary:
"""
from .deep_clustering import DeepClustering
from .deep_mask_estimation import DeepMaskEstimation
from .deep_audio_estimation import DeepAudioEstimation
|
{
"content_hash": "ca89cacfa51fd4871a4e03afe3944b5a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 56,
"avg_line_length": 21,
"alnum_prop": 0.6964285714285714,
"repo_name": "interactiveaudiolab/nussl",
"id": "cd2037224d8c56c0cff1d748a1bf097f7e6a16ad",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nussl/separation/deep/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
from botocore.exceptions import ClientError
import six
from c7n.manager import resources
from c7n.filters import FilterRegistry
from c7n.query import QueryResourceManager
from c7n.utils import local_session, type_schema
from c7n.actions import BaseAction
from c7n.tags import RemoveTag, Tag, TagActionFilter, TagDelayedAction
@resources.register('sagemaker-notebook')
class NotebookInstance(QueryResourceManager):
class resource_type(object):
service = 'sagemaker'
enum_spec = ('list_notebook_instances', 'NotebookInstances', None)
detail_spec = (
'describe_notebook_instance', 'NotebookInstanceName',
'NotebookInstanceName', None)
id = 'NotebookInstanceArn'
name = 'NotebookInstanceName'
date = 'CreationTime'
dimension = None
filter_name = None
filters = FilterRegistry('sagemaker-notebook.filters')
filters.register('marked-for-op', TagActionFilter)
filter_registry = filters
permissions = ('sagemaker:ListTags',)
def augment(self, resources):
client = local_session(self.session_factory).client('sagemaker')
def _augment(r):
# List tags for the Notebook-Instance & set as attribute
tags = client.list_tags(
ResourceArn=r['NotebookInstanceArn'])['Tags']
r['Tags'] = tags
return r
# Describe notebook-instance & then list tags
resources = super(NotebookInstance, self).augment(resources)
with self.executor_factory(max_workers=1) as w:
return list(filter(None, w.map(_augment, resources)))
@resources.register('sagemaker-job')
class SagemakerJob(QueryResourceManager):
class resource_type(object):
service = 'sagemaker'
enum_spec = ('list_training_jobs', 'TrainingJobSummaries', None)
detail_spec = (
'describe_training_job', 'TrainingJobName', 'TrainingJobName', None)
id = 'TrainingJobArn'
name = 'TrainingJobName'
date = 'CreationTime'
dimension = None
filter_name = None
permissions = (
'sagemaker:ListTrainingJobs', 'sagemaker:DescribeTrainingJobs',
'sagemaker:ListTags')
def __init__(self, ctx, data):
super(SagemakerJob, self).__init__(ctx, data)
self.queries = QueryFilter.parse(
self.data.get('query', [
{'StatusEquals': 'InProgress'}]))
def resources(self, query=None):
for q in self.queries:
if q is None:
continue
query = query or {}
for k, v in q.items():
query[k] = v
return super(SagemakerJob, self).resources(query=query)
def augment(self, jobs):
client = local_session(self.session_factory).client('sagemaker')
def _augment(j):
tags = client.list_tags(ResourceArn=j['TrainingJobArn'])['Tags']
j['Tags'] = tags
return j
jobs = super(SagemakerJob, self).augment(jobs)
with self.executor_factory(max_workers=1) as w:
return list(filter(None, w.map(_augment, jobs)))
JOB_FILTERS = ('StatusEquals', 'NameContains',)
class QueryFilter(object):
@classmethod
def parse(cls, data):
results = []
names = set()
for d in data:
if not isinstance(d, dict):
raise ValueError(
"Training-Job Query Filter Invalid structure %s" % d)
for k, v in d.items():
if isinstance(v, list):
raise ValueError(
'Training-job query filter invalid structure %s' % v)
query = cls(d).validate().query()
if query['Name'] in names:
# Cannot filter multiple times on the same key
continue
names.add(query['Name'])
if isinstance(query['Value'], list):
results.append({query['Name']: query['Value'][0]})
continue
results.append({query['Name']: query['Value']})
if 'StatusEquals' not in names:
# add default StatusEquals if not included
results.append({'Name': 'StatusEquals', 'Value': 'InProgress'})
return results
def __init__(self, data):
self.data = data
self.key = None
self.value = None
def validate(self):
if not len(list(self.data.keys())) == 1:
raise ValueError(
"Training-Job Query Filter Invalid %s" % self.data)
self.key = list(self.data.keys())[0]
self.value = list(self.data.values())[0]
if self.key not in JOB_FILTERS and not self.key.startswith('tag:'):
raise ValueError(
"Training-Job Query Filter invalid filter name %s" % (
self.data))
if self.value is None:
raise ValueError(
"Training-Job Query Filters must have a value, use tag-key"
" w/ tag name as value for tag present checks"
" %s" % self.data)
return self
def query(self):
value = self.value
if isinstance(self.value, six.string_types):
value = [self.value]
return {'Name': self.key, 'Value': value}
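    # For example (illustrative input), QueryFilter({'NameContains': 'xgb'})
    # validates and then yields {'Name': 'NameContains', 'Value': ['xgb']},
    # which parse() flattens back to {'NameContains': 'xgb'} before appending
    # the default {'Name': 'StatusEquals', 'Value': 'InProgress'} entry when
    # no status filter was given.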
@resources.register('sagemaker-endpoint')
class SagemakerEndpoint(QueryResourceManager):
class resource_type(object):
service = 'sagemaker'
enum_spec = ('list_endpoints', 'Endpoints', None)
detail_spec = (
'describe_endpoint', 'EndpointName',
'EndpointName', None)
id = 'EndpointArn'
name = 'EndpointName'
date = 'CreationTime'
dimension = None
filter_name = None
filters = FilterRegistry('sagemaker-endpoint.filters')
filters.register('marked-for-op', TagActionFilter)
filter_registry = filters
permissions = ('sagemaker:ListTags',)
def augment(self, endpoints):
client = local_session(self.session_factory).client('sagemaker')
def _augment(e):
tags = client.list_tags(
ResourceArn=e['EndpointArn'])['Tags']
e['Tags'] = tags
return e
# Describe endpoints & then list tags
endpoints = super(SagemakerEndpoint, self).augment(endpoints)
with self.executor_factory(max_workers=1) as w:
return list(filter(None, w.map(_augment, endpoints)))
@resources.register('sagemaker-endpoint-config')
class SagemakerEndpointConfig(QueryResourceManager):
class resource_type(object):
service = 'sagemaker'
enum_spec = ('list_endpoint_configs', 'EndpointConfigs', None)
detail_spec = (
'describe_endpoint_config', 'EndpointConfigName',
'EndpointConfigName', None)
id = 'EndpointConfigArn'
name = 'EndpointConfigName'
date = 'CreationTime'
dimension = None
filter_name = None
filters = FilterRegistry('sagemaker-endpoint-config.filters')
filters.register('marked-for-op', TagActionFilter)
filter_registry = filters
permissions = ('sagemaker:ListTags',)
def augment(self, endpoints):
client = local_session(self.session_factory).client('sagemaker')
def _augment(e):
tags = client.list_tags(
ResourceArn=e['EndpointConfigArn'])['Tags']
e['Tags'] = tags
return e
endpoints = super(SagemakerEndpointConfig, self).augment(endpoints)
with self.executor_factory(max_workers=1) as w:
return list(filter(None, w.map(_augment, endpoints)))
@resources.register('sagemaker-model')
class Model(QueryResourceManager):
class resource_type(object):
service = 'sagemaker'
enum_spec = ('list_models', 'Models', None)
detail_spec = (
'describe_model', 'ModelName',
'ModelName', None)
id = 'ModelArn'
name = 'ModelName'
date = 'CreationTime'
dimension = None
filter_name = None
filters = FilterRegistry('sagemaker-model.filters')
filters.register('marked-for-op', TagActionFilter)
filter_registry = filters
permissions = ('sagemaker:ListTags',)
def augment(self, resources):
client = local_session(self.session_factory).client('sagemaker')
def _augment(r):
tags = client.list_tags(
ResourceArn=r['ModelArn'])['Tags']
r.setdefault('Tags', []).extend(tags)
return r
with self.executor_factory(max_workers=1) as w:
return list(filter(None, w.map(_augment, resources)))
class StateTransitionFilter(object):
"""Filter instances by state.
Try to simplify construction for policy authors by automatically
    filtering elements (filters or actions) to the instance states
they are valid for.
"""
valid_origin_states = ()
def filter_instance_state(self, instances, states=None):
states = states or self.valid_origin_states
orig_length = len(instances)
results = [i for i in instances
if i['NotebookInstanceStatus'] in states]
self.log.info("%s %d of %d notebook instances" % (
self.__class__.__name__, len(results), orig_length))
return results
@SagemakerEndpoint.action_registry.register('tag')
@SagemakerEndpointConfig.action_registry.register('tag')
@NotebookInstance.action_registry.register('tag')
@SagemakerJob.action_registry.register('tag')
@Model.action_registry.register('tag')
class TagNotebookInstance(Tag):
"""Action to create tag(s) on a SageMaker resource
(notebook-instance, endpoint, endpoint-config)
:example:
.. code-block:: yaml
policies:
- name: tag-sagemaker-notebook
resource: sagemaker-notebook
filters:
- "tag:target-tag": absent
actions:
- type: tag
key: target-tag
value: target-value
- name: tag-sagemaker-endpoint
resource: sagemaker-endpoint
filters:
- "tag:required-tag": absent
actions:
- type: tag
key: required-tag
value: required-value
- name: tag-sagemaker-endpoint-config
resource: sagemaker-endpoint-config
filters:
- "tag:required-tag": absent
actions:
- type: tag
key: required-tag
value: required-value
- name: tag-sagemaker-job
resource: sagemaker-job
filters:
- "tag:required-tag": absent
actions:
- type: tag
key: required-tag
value: required-value
"""
permissions = ('sagemaker:AddTags',)
def process_resource_set(self, resources, tags):
client = local_session(
self.manager.session_factory).client('sagemaker')
tag_list = []
for t in tags:
tag_list.append({'Key': t['Key'], 'Value': t['Value']})
for r in resources:
client.add_tags(ResourceArn=r[self.id_key], Tags=tag_list)
@SagemakerEndpoint.action_registry.register('remove-tag')
@SagemakerEndpointConfig.action_registry.register('remove-tag')
@NotebookInstance.action_registry.register('remove-tag')
@SagemakerJob.action_registry.register('remove-tag')
@Model.action_registry.register('remove-tag')
class RemoveTagNotebookInstance(RemoveTag):
"""Remove tag(s) from SageMaker resources
(notebook-instance, endpoint, endpoint-config)
:example:
.. code-block:: yaml
policies:
- name: sagemaker-notebook-remove-tag
resource: sagemaker-notebook
filters:
- "tag:BadTag": present
actions:
- type: remove-tag
tags: ["BadTag"]
- name: sagemaker-endpoint-remove-tag
resource: sagemaker-endpoint
filters:
- "tag:expired-tag": present
actions:
- type: remove-tag
tags: ["expired-tag"]
- name: sagemaker-endpoint-config-remove-tag
resource: sagemaker-endpoint-config
filters:
- "tag:expired-tag": present
actions:
- type: remove-tag
tags: ["expired-tag"]
- name: sagemaker-job-remove-tag
resource: sagemaker-job
filters:
- "tag:expired-tag": present
actions:
- type: remove-tag
tags: ["expired-tag"]
"""
permissions = ('sagemaker:DeleteTags',)
def process_resource_set(self, resources, keys):
client = local_session(
self.manager.session_factory).client('sagemaker')
for r in resources:
client.delete_tags(ResourceArn=r[self.id_key], TagKeys=keys)
@SagemakerEndpoint.action_registry.register('mark-for-op')
@SagemakerEndpointConfig.action_registry.register('mark-for-op')
@NotebookInstance.action_registry.register('mark-for-op')
@Model.action_registry.register('mark-for-op')
class MarkNotebookInstanceForOp(TagDelayedAction):
"""Mark SageMaker resources for deferred action
(notebook-instance, endpoint, endpoint-config)
:example:
.. code-block:: yaml
policies:
- name: sagemaker-notebook-invalid-tag-stop
resource: sagemaker-notebook
filters:
- "tag:InvalidTag": present
actions:
- type: mark-for-op
op: stop
days: 1
- name: sagemaker-endpoint-failure-delete
resource: sagemaker-endpoint
filters:
- 'EndpointStatus': 'Failed'
actions:
- type: mark-for-op
op: delete
days: 1
- name: sagemaker-endpoint-config-invalid-size-delete
              resource: sagemaker-endpoint-config
              filters:
                - type: value
                  key: ProductionVariants[].InstanceType
                  value: 'ml.m4.10xlarge'
                  op: contains
actions:
- type: mark-for-op
op: delete
days: 1
"""
permissions = ('sagemaker:AddTags',)
def process_resource_set(self, resources, tags):
client = local_session(
self.manager.session_factory).client('sagemaker')
tag_list = []
for t in tags:
tag_list.append({'Key': t['Key'], 'Value': t['Value']})
for r in resources:
client.add_tags(ResourceArn=r[self.id_key], Tags=tag_list)
@NotebookInstance.action_registry.register('start')
class StartNotebookInstance(BaseAction, StateTransitionFilter):
"""Start sagemaker-notebook(s)
:example:
    .. code-block:: yaml
policies:
- name: start-sagemaker-notebook
resource: sagemaker-notebook
actions:
- start
"""
schema = type_schema('start')
permissions = ('sagemaker:StartNotebookInstance',)
valid_origin_states = ('Stopped',)
def process_instance(self, resource):
client = local_session(
self.manager.session_factory).client('sagemaker')
client.start_notebook_instance(
NotebookInstanceName=resource['NotebookInstanceName'])
def process(self, resources):
resources = self.filter_instance_state(resources)
if not len(resources):
return
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_instance, resources))
@NotebookInstance.action_registry.register('stop')
class StopNotebookInstance(BaseAction, StateTransitionFilter):
"""Stop sagemaker-notebook(s)
:example:
    .. code-block:: yaml
policies:
- name: stop-sagemaker-notebook
resource: sagemaker-notebook
filters:
- "tag:DeleteMe": present
actions:
- stop
"""
schema = type_schema('stop')
permissions = ('sagemaker:StopNotebookInstance',)
valid_origin_states = ('InService',)
def process_instance(self, resource):
client = local_session(
self.manager.session_factory).client('sagemaker')
client.stop_notebook_instance(
NotebookInstanceName=resource['NotebookInstanceName'])
def process(self, resources):
resources = self.filter_instance_state(resources)
if not len(resources):
return
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_instance, resources))
@NotebookInstance.action_registry.register('delete')
class DeleteNotebookInstance(BaseAction, StateTransitionFilter):
"""Deletes sagemaker-notebook(s)
:example:
    .. code-block:: yaml
policies:
- name: delete-sagemaker-notebook
resource: sagemaker-notebook
filters:
- "tag:DeleteMe": present
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('sagemaker:DeleteNotebookInstance',)
valid_origin_states = ('Stopped', 'Failed',)
def process_instance(self, resource):
client = local_session(
self.manager.session_factory).client('sagemaker')
client.delete_notebook_instance(
NotebookInstanceName=resource['NotebookInstanceName'])
def process(self, resources):
resources = self.filter_instance_state(resources)
if not len(resources):
return
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_instance, resources))
@Model.action_registry.register('delete')
class DeleteModel(BaseAction, StateTransitionFilter):
"""Deletes sagemaker-model(s)
:example:
    .. code-block:: yaml
policies:
- name: delete-sagemaker-model
resource: sagemaker-model
filters:
- "tag:DeleteMe": present
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('sagemaker:DeleteModel',)
def process_instance(self, resource):
client = local_session(
self.manager.session_factory).client('sagemaker')
client.delete_model(
ModelName=resource['ModelName'])
def process(self, resources):
if not len(resources):
return
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_instance, resources))
@SagemakerJob.action_registry.register('stop')
class SagemakerJobStop(BaseAction):
"""Stops a SageMaker job
:example:
.. code-block:: yaml
policies:
- name: stop-ml-job
resource: sagemaker-job
filters:
- TrainingJobName: ml-job-10
actions:
- stop
"""
schema = type_schema('stop')
permissions = ('sagemaker:StopTrainingJob',)
def process_job(self, job):
client = local_session(
self.manager.session_factory).client('sagemaker')
try:
client.stop_training_job(
TrainingJobName=job['TrainingJobName'])
except ClientError as e:
if e.response['Error']['Code'] == 'ResourceNotFound':
self.log.exception(
"Exception stopping sagemaker job %s:\n %s" % (
job['TrainingJobName'], e))
else:
raise
def process(self, jobs):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_job, jobs))
@SagemakerEndpoint.action_registry.register('delete')
class SagemakerEndpointDelete(BaseAction):
"""Delete a SageMaker endpoint
:example:
    .. code-block:: yaml
policies:
- name: delete-sagemaker-endpoint
resource: sagemaker-endpoint
filters:
- EndpointName: sagemaker-ep--2018-01-01-00-00-00
actions:
- type: delete
"""
permissions = (
'sagemaker:DeleteEndpoint', 'sagemaker:DeleteEndpointConfig')
schema = type_schema('delete')
def process_endpoint(self, endpoint):
client = local_session(
self.manager.session_factory).client('sagemaker')
client.delete_endpoint(EndpointName=endpoint['EndpointName'])
def process(self, endpoints):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_endpoint, endpoints))
@SagemakerEndpointConfig.action_registry.register('delete')
class SagemakerEndpointConfigDelete(BaseAction):
"""Delete a SageMaker endpoint
:example:
    .. code-block:: yaml
policies:
- name: delete-sagemaker-endpoint-config
resource: sagemaker-endpoint-config
filters:
- EndpointConfigName: sagemaker-2018-01-01-00-00-00-T00
actions:
- delete
"""
schema = type_schema('delete')
permissions = ('sagemaker:DeleteEndpointConfig',)
def process_endpoint_config(self, endpoint):
client = local_session(
self.manager.session_factory).client('sagemaker')
client.delete_endpoint_config(
EndpointConfigName=endpoint['EndpointConfigName'])
def process(self, endpoints):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_endpoint_config, endpoints))
|
{
"content_hash": "de180438a39c3805017337921b8db0bb",
"timestamp": "",
"source": "github",
"line_count": 686,
"max_line_length": 82,
"avg_line_length": 32.09475218658892,
"alnum_prop": 0.587318889948676,
"repo_name": "scotwk/cloud-custodian",
"id": "f5cf5d6aa96ed21b614faaabebf7b32c63630e28",
"size": "22607",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/resources/sagemaker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "6346"
},
{
"name": "Python",
"bytes": "2664681"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lotnisko', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='passenger',
name='id',
),
migrations.AddField(
model_name='passenger',
name='email',
field=models.EmailField(default='aa@aa.com', max_length=254),
preserve_default=False,
),
migrations.AlterField(
model_name='passenger',
name='nickname',
field=models.CharField(max_length=254, serialize=False, primary_key=True),
),
]
|
{
"content_hash": "b91fa75cef1a315a8d666459458a2c64",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 25.821428571428573,
"alnum_prop": 0.5615491009681881,
"repo_name": "edek437/Zastosowanie-informatyki-w-gospodarce-projekt",
"id": "ed85c727876e41982888376123aafc7324e6ee30",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lotnisko/migrations/0002_auto_20160113_2246.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17701"
},
{
"name": "HTML",
"bytes": "56151"
},
{
"name": "JavaScript",
"bytes": "82799"
},
{
"name": "Python",
"bytes": "94318"
}
],
"symlink_target": ""
}
|
from Chip import OpCodeDefinitions
from Tests.OpCodeTests.OpCodeTestBase import OpCodeTestBase
class TestDeyOpCode(OpCodeTestBase):
def test_dey_implied_command_calls_dey_method(self):
self.assert_opcode_execution(OpCodeDefinitions.dey_implied_command, self.target.get_dey_command_executed)
|
{
"content_hash": "2f3633b64a4c6ed3a076445baffc0c85",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 113,
"avg_line_length": 43.57142857142857,
"alnum_prop": 0.8163934426229508,
"repo_name": "jeroanan/Nes2",
"id": "0617b53f0161ad8448d043c3e032c2de3e6e7042",
"size": "305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tests/OpCodeTests/TestDeyOpcode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "83392"
}
],
"symlink_target": ""
}
|
import collections
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import importutils
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import utils as linux_utils
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
class DhcpAgent(manager.Manager):
"""DHCP agent service manager.
Note that the public methods of this class are exposed as the server side
of an rpc interface. The neutron server uses
neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
client side to execute the methods here. For more information about
changing rpc interfaces, see doc/source/devref/rpc_api.rst.
"""
target = oslo_messaging.Target(version='1.0')
def __init__(self, host=None):
super(DhcpAgent, self).__init__(host=host)
self.needs_resync_reasons = collections.defaultdict(list)
self.conf = cfg.CONF
self.cache = NetworkCache()
self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
ctx = context.get_admin_context_without_session()
self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
ctx, self.conf.use_namespaces)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
linux_utils.ensure_dir(dhcp_dir)
self.dhcp_version = self.dhcp_driver_cls.check_version()
self._populate_networks_cache()
self._process_monitor = external_process.ProcessMonitor(
config=self.conf,
resource_type='dhcp')
def _populate_networks_cache(self):
"""Populate the networks cache when the DHCP-agent starts."""
try:
existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
self.conf
)
for net_id in existing_networks:
net = dhcp.NetModel(self.conf.use_namespaces,
{"id": net_id,
"subnets": [],
"ports": []})
self.cache.put(net)
except NotImplementedError:
# just go ahead with an empty networks cache
LOG.debug("The '%s' DHCP-driver does not support retrieving of a "
"list of existing networks",
self.conf.dhcp_driver)
def after_start(self):
self.run()
LOG.info(_LI("DHCP agent started"))
def run(self):
"""Activate the DHCP agent."""
self.sync_state()
self.periodic_resync()
def call_driver(self, action, network, **action_kwargs):
"""Invoke an action on a DHCP driver instance."""
LOG.debug('Calling driver for network: %(net)s action: %(action)s',
{'net': network.id, 'action': action})
try:
# the Driver expects something that is duck typed similar to
# the base models.
driver = self.dhcp_driver_cls(self.conf,
network,
self._process_monitor,
self.dhcp_version,
self.plugin_rpc)
getattr(driver, action)(**action_kwargs)
return True
except exceptions.Conflict:
# No need to resync here, the agent will receive the event related
# to a status update for the network
LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
'is a conflict with its current state; please '
'check that the network and/or its subnet(s) '
'still exist.'),
{'net_id': network.id, 'action': action})
except Exception as e:
self.schedule_resync(e, network.id)
if (isinstance(e, oslo_messaging.RemoteError)
and e.exc_type == 'NetworkNotFound'
or isinstance(e, exceptions.NetworkNotFound)):
LOG.warning(_LW("Network %s has been deleted."), network.id)
else:
LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
{'net_id': network.id, 'action': action})
def schedule_resync(self, reason, network=None):
"""Schedule a resync for a given network and reason. If no network is
specified, resync all networks.
"""
self.needs_resync_reasons[network].append(reason)
@utils.synchronized('dhcp-agent')
def sync_state(self, networks=None):
"""Sync the local DHCP state with Neutron. If no networks are passed,
or 'None' is one of the networks, sync all of the networks.
"""
only_nets = set([] if (not networks or None in networks) else networks)
LOG.info(_LI('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids())
try:
active_networks = self.plugin_rpc.get_active_networks_info()
active_network_ids = set(network.id for network in active_networks)
for deleted_id in known_network_ids - active_network_ids:
try:
self.disable_dhcp_helper(deleted_id)
except Exception as e:
self.schedule_resync(e, deleted_id)
LOG.exception(_LE('Unable to sync network state on '
'deleted network %s'), deleted_id)
for network in active_networks:
if (not only_nets or # specifically resync all
network.id not in known_network_ids or # missing net
network.id in only_nets): # specific network to sync
pool.spawn(self.safe_configure_dhcp_for_network, network)
pool.waitall()
LOG.info(_LI('Synchronizing state complete'))
except Exception as e:
self.schedule_resync(e)
LOG.exception(_LE('Unable to sync network state.'))
@utils.exception_logger()
def _periodic_resync_helper(self):
"""Resync the dhcp state at the configured interval."""
while True:
eventlet.sleep(self.conf.resync_interval)
if self.needs_resync_reasons:
# be careful to avoid a race with additions to list
# from other threads
reasons = self.needs_resync_reasons
self.needs_resync_reasons = collections.defaultdict(list)
for net, r in reasons.items():
if not net:
net = "*"
LOG.debug("resync (%(network)s): %(reason)s",
{"reason": r, "network": net})
self.sync_state(reasons.keys())
def periodic_resync(self):
"""Spawn a thread to periodically resync the dhcp state."""
eventlet.spawn(self._periodic_resync_helper)
def safe_get_network_info(self, network_id):
try:
network = self.plugin_rpc.get_network_info(network_id)
if not network:
LOG.warn(_LW('Network %s has been deleted.'), network_id)
return network
except Exception as e:
self.schedule_resync(e, network_id)
LOG.exception(_LE('Network %s info call failed.'), network_id)
def enable_dhcp_helper(self, network_id):
"""Enable DHCP for a network that meets enabling criteria."""
network = self.safe_get_network_info(network_id)
if network:
self.configure_dhcp_for_network(network)
@utils.exception_logger()
def safe_configure_dhcp_for_network(self, network):
try:
self.configure_dhcp_for_network(network)
except (exceptions.NetworkNotFound, RuntimeError):
LOG.warn(_LW('Network %s may have been deleted and its resources '
'may have already been disposed.'), network.id)
def configure_dhcp_for_network(self, network):
if not network.admin_state_up:
return
enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
self.conf, network)
dhcp_network_enabled = False
for subnet in network.subnets:
if subnet.enable_dhcp:
if self.call_driver('enable', network):
dhcp_network_enabled = True
self.cache.put(network)
break
if enable_metadata and dhcp_network_enabled:
for subnet in network.subnets:
if subnet.ip_version == 4 and subnet.enable_dhcp:
self.enable_isolated_metadata_proxy(network)
break
def disable_dhcp_helper(self, network_id):
"""Disable DHCP for a network known to the agent."""
network = self.cache.get_network_by_id(network_id)
if network:
if (self.conf.use_namespaces and
self.conf.enable_isolated_metadata):
# NOTE(jschwarz): In the case where a network is deleted, all
# the subnets and ports are deleted before this function is
# called, so checking if 'should_enable_metadata' is True
# for any subnet is false logic here.
self.disable_isolated_metadata_proxy(network)
if self.call_driver('disable', network):
self.cache.remove(network)
def refresh_dhcp_helper(self, network_id):
"""Refresh or disable DHCP for a network depending on the current state
of the network.
"""
old_network = self.cache.get_network_by_id(network_id)
if not old_network:
            # DHCP is not currently running for this network.
return self.enable_dhcp_helper(network_id)
network = self.safe_get_network_info(network_id)
if not network:
return
old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
if new_cidrs and old_cidrs == new_cidrs:
self.call_driver('reload_allocations', network)
self.cache.put(network)
elif new_cidrs:
if self.call_driver('restart', network):
self.cache.put(network)
else:
self.disable_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def network_create_end(self, context, payload):
"""Handle the network.create.end notification event."""
network_id = payload['network']['id']
self.enable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_update_end(self, context, payload):
"""Handle the network.update.end notification event."""
network_id = payload['network']['id']
if payload['network']['admin_state_up']:
self.enable_dhcp_helper(network_id)
else:
self.disable_dhcp_helper(network_id)
@utils.synchronized('dhcp-agent')
def network_delete_end(self, context, payload):
"""Handle the network.delete.end notification event."""
self.disable_dhcp_helper(payload['network_id'])
@utils.synchronized('dhcp-agent')
def subnet_update_end(self, context, payload):
"""Handle the subnet.update.end notification event."""
network_id = payload['subnet']['network_id']
self.refresh_dhcp_helper(network_id)
# Use the update handler for the subnet create event.
subnet_create_end = subnet_update_end
@utils.synchronized('dhcp-agent')
def subnet_delete_end(self, context, payload):
"""Handle the subnet.delete.end notification event."""
subnet_id = payload['subnet_id']
network = self.cache.get_network_by_subnet_id(subnet_id)
if network:
self.refresh_dhcp_helper(network.id)
@utils.synchronized('dhcp-agent')
def port_update_end(self, context, payload):
"""Handle the port.update.end notification event."""
updated_port = dhcp.DictModel(payload['port'])
network = self.cache.get_network_by_id(updated_port.network_id)
if network:
driver_action = 'reload_allocations'
if self._is_port_on_this_agent(updated_port):
orig = self.cache.get_port_by_id(updated_port['id'])
# assume IP change if not in cache
old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
new_ips = {i['ip_address'] for i in updated_port['fixed_ips']}
if old_ips != new_ips:
driver_action = 'restart'
self.cache.put_port(updated_port)
self.call_driver(driver_action, network)
def _is_port_on_this_agent(self, port):
thishost = utils.get_dhcp_agent_device_id(
port['network_id'], self.conf.host)
return port['device_id'] == thishost
# Use the update handler for the port create event.
port_create_end = port_update_end
@utils.synchronized('dhcp-agent')
def port_delete_end(self, context, payload):
"""Handle the port.delete.end notification event."""
port = self.cache.get_port_by_id(payload['port_id'])
if port:
network = self.cache.get_network_by_id(port.network_id)
self.cache.remove_port(port)
self.call_driver('reload_allocations', network)
def enable_isolated_metadata_proxy(self, network):
# The proxy might work for either a single network
# or all the networks connected via a router
# to the one passed as a parameter
kwargs = {'network_id': network.id}
# When the metadata network is enabled, the proxy might
# be started for the router attached to the network
if self.conf.enable_metadata_network:
router_ports = [port for port in network.ports
if (port.device_owner ==
constants.DEVICE_OWNER_ROUTER_INTF)]
if router_ports:
# Multiple router ports should not be allowed
if len(router_ports) > 1:
LOG.warning(_LW("%(port_num)d router ports found on the "
"metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s "
"will be considered"),
{'port_num': len(router_ports),
'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id})
kwargs = {'router_id': router_ports[0].device_id}
metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
self._process_monitor, network.namespace, dhcp.METADATA_PORT,
self.conf, **kwargs)
def disable_isolated_metadata_proxy(self, network):
metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
self._process_monitor, network.id, network.namespace, self.conf)
class DhcpPluginApi(object):
"""Agent side of the dhcp rpc API.
This class implements the client side of an rpc interface. The server side
of this interface can be found in
neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. For more information
about changing rpc interfaces, see doc/source/devref/rpc_api.rst.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
and update_dhcp_port methods.
"""
def __init__(self, topic, context, use_namespaces):
self.context = context
self.host = cfg.CONF.host
self.use_namespaces = use_namespaces
target = oslo_messaging.Target(
topic=topic,
namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
version='1.0')
self.client = n_rpc.get_client(target)
def get_active_networks_info(self):
"""Make a remote process call to retrieve all network info."""
cctxt = self.client.prepare(version='1.1')
networks = cctxt.call(self.context, 'get_active_networks_info',
host=self.host)
return [dhcp.NetModel(self.use_namespaces, n) for n in networks]
def get_network_info(self, network_id):
"""Make a remote process call to retrieve network info."""
cctxt = self.client.prepare()
network = cctxt.call(self.context, 'get_network_info',
network_id=network_id, host=self.host)
if network:
return dhcp.NetModel(self.use_namespaces, network)
def get_dhcp_port(self, network_id, device_id):
"""Make a remote process call to get the dhcp port."""
cctxt = self.client.prepare()
port = cctxt.call(self.context, 'get_dhcp_port',
network_id=network_id, device_id=device_id,
host=self.host)
if port:
return dhcp.DictModel(port)
def create_dhcp_port(self, port):
"""Make a remote process call to create the dhcp port."""
cctxt = self.client.prepare(version='1.1')
port = cctxt.call(self.context, 'create_dhcp_port',
port=port, host=self.host)
if port:
return dhcp.DictModel(port)
def update_dhcp_port(self, port_id, port):
"""Make a remote process call to update the dhcp port."""
cctxt = self.client.prepare(version='1.1')
port = cctxt.call(self.context, 'update_dhcp_port',
port_id=port_id, port=port, host=self.host)
if port:
return dhcp.DictModel(port)
def release_dhcp_port(self, network_id, device_id):
"""Make a remote process call to release the dhcp port."""
cctxt = self.client.prepare()
return cctxt.call(self.context, 'release_dhcp_port',
network_id=network_id, device_id=device_id,
host=self.host)
def release_port_fixed_ip(self, network_id, device_id, subnet_id):
"""Make a remote process call to release a fixed_ip on the port."""
cctxt = self.client.prepare()
return cctxt.call(self.context, 'release_port_fixed_ip',
network_id=network_id, subnet_id=subnet_id,
device_id=device_id, host=self.host)
class NetworkCache(object):
"""Agent cache of the current network state."""
def __init__(self):
self.cache = {}
self.subnet_lookup = {}
self.port_lookup = {}
def get_network_ids(self):
return self.cache.keys()
def get_network_by_id(self, network_id):
return self.cache.get(network_id)
def get_network_by_subnet_id(self, subnet_id):
return self.cache.get(self.subnet_lookup.get(subnet_id))
def get_network_by_port_id(self, port_id):
return self.cache.get(self.port_lookup.get(port_id))
def put(self, network):
if network.id in self.cache:
self.remove(self.cache[network.id])
self.cache[network.id] = network
for subnet in network.subnets:
self.subnet_lookup[subnet.id] = network.id
for port in network.ports:
self.port_lookup[port.id] = network.id
def remove(self, network):
del self.cache[network.id]
for subnet in network.subnets:
del self.subnet_lookup[subnet.id]
for port in network.ports:
del self.port_lookup[port.id]
def put_port(self, port):
network = self.get_network_by_id(port.network_id)
for index in range(len(network.ports)):
if network.ports[index].id == port.id:
network.ports[index] = port
break
else:
network.ports.append(port)
self.port_lookup[port.id] = network.id
def remove_port(self, port):
network = self.get_network_by_port_id(port.id)
for index in range(len(network.ports)):
if network.ports[index] == port:
del network.ports[index]
del self.port_lookup[port.id]
break
def get_port_by_id(self, port_id):
network = self.get_network_by_port_id(port_id)
if network:
for port in network.ports:
if port.id == port_id:
return port
def get_state(self):
net_ids = self.get_network_ids()
num_nets = len(net_ids)
num_subnets = 0
num_ports = 0
for net_id in net_ids:
network = self.get_network_by_id(net_id)
num_subnets += len(network.subnets)
num_ports += len(network.ports)
return {'networks': num_nets,
'subnets': num_subnets,
'ports': num_ports}
class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None):
super(DhcpAgentWithStateReport, self).__init__(host=host)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
self.agent_state = {
'binary': 'neutron-dhcp-agent',
'host': host,
'topic': topics.DHCP_AGENT,
'configurations': {
'dhcp_driver': cfg.CONF.dhcp_driver,
'use_namespaces': cfg.CONF.use_namespaces,
'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration},
'start_flag': True,
'agent_type': constants.AGENT_TYPE_DHCP}
report_interval = cfg.CONF.AGENT.report_interval
self.use_call = True
if report_interval:
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
self.heartbeat.start(interval=report_interval)
def _report_state(self):
try:
self.agent_state.get('configurations').update(
self.cache.get_state())
ctx = context.get_admin_context_without_session()
self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
self.use_call = False
except AttributeError:
# This means the server does not support report_state
LOG.warn(_LW("Neutron server does not support state report."
" State report for this agent will be disabled."))
self.heartbeat.stop()
self.run()
return
except Exception:
LOG.exception(_LE("Failed reporting state!"))
return
if self.agent_state.pop('start_flag', None):
self.run()
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
self.schedule_resync(_("Agent updated: %(payload)s") %
{"payload": payload})
LOG.info(_LI("agent_updated by server side %s!"), payload)
def after_start(self):
LOG.info(_LI("DHCP agent started"))
|
{
"content_hash": "5e6e98bddfe55eff79d757e956c5a87d",
"timestamp": "",
"source": "github",
"line_count": 574,
"max_line_length": 79,
"avg_line_length": 41.37282229965157,
"alnum_prop": 0.5835438773791477,
"repo_name": "pnavarro/neutron",
"id": "214bfdff14de3c55c2335d15866ed52301e02344",
"size": "24384",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/agent/dhcp/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7148188"
},
{
"name": "Shell",
"bytes": "12319"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('leaflets', '0014_leaflet_publisher_person'),
]
operations = [
migrations.CreateModel(
name='LeafletProperties',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),
('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),
('key', models.CharField(db_index=True, max_length=100, blank=True)),
('value', models.CharField(db_index=True, max_length=255, blank=True)),
('leaflet', models.ForeignKey(to='leaflets.Leaflet')),
],
options={
'ordering': ('-modified', '-created'),
'abstract': False,
'get_latest_by': 'modified',
},
bases=(models.Model,),
),
]
|
{
"content_hash": "b8f2bd1a968747468cc1f0e3e6e8b323",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 172,
"avg_line_length": 41.25,
"alnum_prop": 0.6045454545454545,
"repo_name": "JustinWingChungHui/electionleaflets",
"id": "40b3bde1fd95265bf77784c59f4ed53a06727c25",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electionleaflets/apps/analysis/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23074"
},
{
"name": "Cucumber",
"bytes": "7808"
},
{
"name": "HTML",
"bytes": "121455"
},
{
"name": "Handlebars",
"bytes": "446"
},
{
"name": "JavaScript",
"bytes": "69039"
},
{
"name": "Python",
"bytes": "160654"
},
{
"name": "Ruby",
"bytes": "165"
}
],
"symlink_target": ""
}
|
"""
Current Fabric version constant plus version pretty-print method.
This functionality is contained in its own module to prevent circular import
problems with ``__init__.py`` (which is loaded by setup.py during installation,
which in turn needs access to this version information.)
"""
from subprocess import Popen, PIPE
from os.path import abspath, dirname
def git_sha():
loc = abspath(dirname(__file__))
p = Popen(
"cd \"%s\" && git log -1 --format=format:%%h" % loc,
shell=True,
stdout=PIPE,
stderr=PIPE
)
return p.communicate()[0]
VERSION = (1, 0, 0, 'alpha', 0)
def get_version(form='short'):
"""
Return a version string for this package, based on `VERSION`.
Takes a single argument, ``form``, which should be one of the following
strings:
* ``branch``: just the major + minor, e.g. "0.9", "1.0".
* ``short`` (default): compact, e.g. "0.9rc1", "0.9.0". For package
filenames or SCM tag identifiers.
* ``normal``: human readable, e.g. "0.9", "0.9.1", "0.9 beta 1". For e.g.
documentation site headers.
* ``verbose``: like ``normal`` but fully explicit, e.g. "0.9 final". For
tag commit messages, or anywhere that it's important to remove ambiguity
between a branch and the first final release within that branch.
"""
# Setup
versions = {}
branch = "%s.%s" % (VERSION[0], VERSION[1])
tertiary = VERSION[2]
type_ = VERSION[3]
final = (type_ == "final")
type_num = VERSION[4]
firsts = "".join([x[0] for x in type_.split()])
sha = git_sha()
sha1 = (" (%s)" % sha) if sha else ""
# Branch
versions['branch'] = branch
# Short
v = branch
if (tertiary or final):
v += "." + str(tertiary)
if not final:
v += firsts
if type_num:
v += str(type_num)
else:
v += sha1
versions['short'] = v
# Normal
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
versions['normal'] = v
# Verbose
v = branch
if tertiary:
v += "." + str(tertiary)
if not final:
if type_num:
v += " " + type_ + " " + str(type_num)
else:
v += " pre-" + type_ + sha1
else:
v += " final"
versions['verbose'] = v
try:
return versions[form]
except KeyError:
raise TypeError, '"%s" is not a valid form specifier.' % form
__version__ = get_version('short')
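# --- Worked example (illustrative only; VERSION tuples below are made up) ---
# For a hypothetical release candidate, VERSION = (0, 9, 1, 'release candidate', 2),
# get_version() would produce:
#   branch  -> "0.9"
#   short   -> "0.9.1rc2"
#   normal  -> "0.9.1 release candidate 2"
#   verbose -> "0.9.1 release candidate 2"
# and for a final release, VERSION = (1, 0, 0, 'final', 0):
#   branch  -> "1.0"
#   short   -> "1.0.0"
#   normal  -> "1.0"
#   verbose -> "1.0 final"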
|
{
"content_hash": "fb43115f83c7fabec4082c53f47d70ad",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 27.072164948453608,
"alnum_prop": 0.5495049504950495,
"repo_name": "ericholscher/fabric",
"id": "4cbcde0b367e93166057d669f3a6239f97ce3f73",
"size": "2626",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fabric/version.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "205859"
}
],
"symlink_target": ""
}
|
def send_some_data(socket, data):
    # Prefix the payload with an ASCII header of the form b'SIZE:<length>'
    # so the receiving side knows how many bytes of data to expect.
    length = len(data)
    header = b'SIZE:' + bytes(str(length), encoding='latin-1')
    socket.send(header)
    socket.send(data)
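# --- Illustrative sketch (not part of testix) --------------------------------
# What send_some_data() puts on the wire, using a stand-in object that simply
# records the send() calls (the payload below is made up):
if __name__ == '__main__':
    class _FakeSocket:
        def __init__(self):
            self.sent = []

        def send(self, data):
            self.sent.append(data)

    sock = _FakeSocket()
    send_some_data(sock, b'hello world')
    print(sock.sent)   # [b'SIZE:11', b'hello world']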
|
{
"content_hash": "6eee4397d841d8170dbea0924d3140ef",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 33.2,
"alnum_prop": 0.6445783132530121,
"repo_name": "haarcuba/testix",
"id": "f5cdabff251f7efdd24e5bc3010e8ce2aaba4940",
"size": "167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/tutorial/basics/other_tests/data_sender_example/prefix_1/data_sender.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46054"
},
{
"name": "Ruby",
"bytes": "2247"
},
{
"name": "Shell",
"bytes": "450"
},
{
"name": "Vim Script",
"bytes": "76189"
}
],
"symlink_target": ""
}
|
import netaddr
import six
from tempest.api.network import base_routers as base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class RoutersTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(RoutersTest, cls).skip_checks()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@classmethod
def setup_clients(cls):
super(RoutersTest, cls).setup_clients()
cls.identity_admin_client = cls.os_adm.identity_client
@classmethod
def resource_setup(cls):
super(RoutersTest, cls).resource_setup()
cls.tenant_cidr = (CONF.network.tenant_network_cidr
if cls._ip_version == 4 else
CONF.network.tenant_network_v6_cidr)
@test.attr(type='smoke')
@test.idempotent_id('f64403e2-8483-4b34-8ccd-b09a87bcc68c')
def test_create_show_list_update_delete_router(self):
# Create a router
# NOTE(salv-orlando): Do not invoke self.create_router
# as we need to check the response code
name = data_utils.rand_name('router-')
create_body = self.client.create_router(
name, external_gateway_info={
"network_id": CONF.network.public_network_id},
admin_state_up=False)
self.addCleanup(self._delete_router, create_body['router']['id'])
self.assertEqual(create_body['router']['name'], name)
self.assertEqual(
create_body['router']['external_gateway_info']['network_id'],
CONF.network.public_network_id)
self.assertEqual(create_body['router']['admin_state_up'], False)
# Show details of the created router
show_body = self.client.show_router(create_body['router']['id'])
self.assertEqual(show_body['router']['name'], name)
self.assertEqual(
show_body['router']['external_gateway_info']['network_id'],
CONF.network.public_network_id)
self.assertEqual(show_body['router']['admin_state_up'], False)
# List routers and verify if created router is there in response
list_body = self.client.list_routers()
routers_list = list()
for router in list_body['routers']:
routers_list.append(router['id'])
self.assertIn(create_body['router']['id'], routers_list)
# Update the name of router and verify if it is updated
updated_name = 'updated ' + name
update_body = self.client.update_router(create_body['router']['id'],
name=updated_name)
self.assertEqual(update_body['router']['name'], updated_name)
show_body = self.client.show_router(
create_body['router']['id'])
self.assertEqual(show_body['router']['name'], updated_name)
@test.idempotent_id('e54dd3a3-4352-4921-b09d-44369ae17397')
def test_create_router_setting_tenant_id(self):
# Test creating router from admin user setting tenant_id.
test_tenant = data_utils.rand_name('test_tenant_')
test_description = data_utils.rand_name('desc_')
tenant = self.identity_admin_client.create_tenant(
name=test_tenant, description=test_description)
tenant_id = tenant['id']
self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
name = data_utils.rand_name('router-')
create_body = self.admin_client.create_router(name,
tenant_id=tenant_id)
self.addCleanup(self.admin_client.delete_router,
create_body['router']['id'])
self.assertEqual(tenant_id, create_body['router']['tenant_id'])
@test.idempotent_id('847257cc-6afd-4154-b8fb-af49f5670ce8')
@test.requires_ext(extension='ext-gw-mode', service='network')
def test_create_router_with_default_snat_value(self):
# Create a router with default snat rule
name = data_utils.rand_name('router')
router = self._create_router(
name, external_network_id=CONF.network.public_network_id)
self._verify_router_gateway(
router['id'], {'network_id': CONF.network.public_network_id,
'enable_snat': True})
@test.idempotent_id('ea74068d-09e9-4fd7-8995-9b6a1ace920f')
@test.requires_ext(extension='ext-gw-mode', service='network')
def test_create_router_with_snat_explicit(self):
name = data_utils.rand_name('snat-router')
# Create a router enabling snat attributes
enable_snat_states = [False, True]
for enable_snat in enable_snat_states:
external_gateway_info = {
'network_id': CONF.network.public_network_id,
'enable_snat': enable_snat}
create_body = self.admin_client.create_router(
name, external_gateway_info=external_gateway_info)
self.addCleanup(self.admin_client.delete_router,
create_body['router']['id'])
# Verify snat attributes after router creation
self._verify_router_gateway(create_body['router']['id'],
exp_ext_gw_info=external_gateway_info)
@test.attr(type='smoke')
@test.idempotent_id('b42e6e39-2e37-49cc-a6f4-8467e940900a')
def test_add_remove_router_interface_with_subnet_id(self):
network = self.create_network()
subnet = self.create_subnet(network)
router = self._create_router(data_utils.rand_name('router-'))
# Add router interface with subnet id
interface = self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
self.addCleanup(self._remove_router_interface_with_subnet_id,
router['id'], subnet['id'])
self.assertIn('subnet_id', interface.keys())
self.assertIn('port_id', interface.keys())
# Verify router id is equal to device id in port details
show_port_body = self.client.show_port(
interface['port_id'])
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
@test.attr(type='smoke')
@test.idempotent_id('2b7d2f37-6748-4d78-92e5-1d590234f0d5')
def test_add_remove_router_interface_with_port_id(self):
network = self.create_network()
self.create_subnet(network)
router = self._create_router(data_utils.rand_name('router-'))
port_body = self.client.create_port(
network_id=network['id'])
# add router interface to port created above
interface = self.client.add_router_interface_with_port_id(
router['id'], port_body['port']['id'])
self.addCleanup(self._remove_router_interface_with_port_id,
router['id'], port_body['port']['id'])
self.assertIn('subnet_id', interface.keys())
self.assertIn('port_id', interface.keys())
# Verify router id is equal to device id in port details
show_port_body = self.client.show_port(
interface['port_id'])
self.assertEqual(show_port_body['port']['device_id'],
router['id'])
def _verify_router_gateway(self, router_id, exp_ext_gw_info=None):
show_body = self.admin_client.show_router(router_id)
actual_ext_gw_info = show_body['router']['external_gateway_info']
if exp_ext_gw_info is None:
self.assertIsNone(actual_ext_gw_info)
return
# Verify only keys passed in exp_ext_gw_info
for k, v in six.iteritems(exp_ext_gw_info):
self.assertEqual(v, actual_ext_gw_info[k])
def _verify_gateway_port(self, router_id):
list_body = self.admin_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router_id)
self.assertEqual(len(list_body['ports']), 1)
gw_port = list_body['ports'][0]
fixed_ips = gw_port['fixed_ips']
self.assertGreaterEqual(len(fixed_ips), 1)
public_net_body = self.admin_client.show_network(
CONF.network.public_network_id)
public_subnet_id = public_net_body['network']['subnets'][0]
self.assertIn(public_subnet_id,
map(lambda x: x['subnet_id'], fixed_ips))
@test.idempotent_id('6cc285d8-46bf-4f36-9b1a-783e3008ba79')
def test_update_router_set_gateway(self):
router = self._create_router(data_utils.rand_name('router-'))
self.client.update_router(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id})
# Verify operation - router
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id})
self._verify_gateway_port(router['id'])
@test.idempotent_id('b386c111-3b21-466d-880c-5e72b01e1a33')
@test.requires_ext(extension='ext-gw-mode', service='network')
def test_update_router_set_gateway_with_snat_explicit(self):
router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': True})
self._verify_gateway_port(router['id'])
@test.idempotent_id('96536bc7-8262-4fb2-9967-5c46940fa279')
@test.requires_ext(extension='ext-gw-mode', service='network')
def test_update_router_set_gateway_without_snat(self):
router = self._create_router(data_utils.rand_name('router-'))
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@test.idempotent_id('ad81b7ee-4f81-407b-a19c-17e623f763e8')
def test_update_router_unset_gateway(self):
router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.client.update_router(router['id'], external_gateway_info={})
self._verify_router_gateway(router['id'])
# No gateway port expected
list_body = self.admin_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router['id'])
self.assertFalse(list_body['ports'])
@test.idempotent_id('f2faf994-97f4-410b-a831-9bc977b64374')
@test.requires_ext(extension='ext-gw-mode', service='network')
def test_update_router_reset_gateway_without_snat(self):
router = self._create_router(
data_utils.rand_name('router-'),
external_network_id=CONF.network.public_network_id)
self.admin_client.update_router_with_snat_gw_info(
router['id'],
external_gateway_info={
'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_router_gateway(
router['id'],
{'network_id': CONF.network.public_network_id,
'enable_snat': False})
self._verify_gateway_port(router['id'])
@test.idempotent_id('c86ac3a8-50bd-4b00-a6b8-62af84a0765c')
@test.requires_ext(extension='extraroute', service='network')
def test_update_extra_route(self):
        # Create a different cidr for each subnet to avoid duplicates
# The cidr starts from tenant_cidr
next_cidr = netaddr.IPNetwork(self.tenant_cidr)
# Prepare to build several routes
test_routes = []
routes_num = 5
# Create a router
self.router = self._create_router(
data_utils.rand_name('router-'), True)
self.addCleanup(
self._delete_extra_routes,
self.router['id'])
# Update router extra route, second ip of the range is
# used as next hop
for i in range(routes_num):
network = self.create_network()
subnet = self.create_subnet(network, cidr=next_cidr)
next_cidr = next_cidr.next()
# Add router interface with subnet id
self.create_router_interface(self.router['id'], subnet['id'])
cidr = netaddr.IPNetwork(subnet['cidr'])
next_hop = str(cidr[2])
destination = str(subnet['cidr'])
test_routes.append(
{'nexthop': next_hop, 'destination': destination}
)
test_routes.sort(key=lambda x: x['destination'])
extra_route = self.client.update_extra_routes(self.router['id'],
test_routes)
show_body = self.client.show_router(self.router['id'])
# Assert the number of routes
self.assertEqual(routes_num, len(extra_route['router']['routes']))
self.assertEqual(routes_num, len(show_body['router']['routes']))
routes = extra_route['router']['routes']
routes.sort(key=lambda x: x['destination'])
# Assert the nexthops & destination
for i in range(routes_num):
self.assertEqual(test_routes[i]['destination'],
routes[i]['destination'])
self.assertEqual(test_routes[i]['nexthop'], routes[i]['nexthop'])
routes = show_body['router']['routes']
routes.sort(key=lambda x: x['destination'])
for i in range(routes_num):
self.assertEqual(test_routes[i]['destination'],
routes[i]['destination'])
self.assertEqual(test_routes[i]['nexthop'], routes[i]['nexthop'])
def _delete_extra_routes(self, router_id):
self.client.delete_extra_routes(router_id)
@test.idempotent_id('a8902683-c788-4246-95c7-ad9c6d63a4d9')
def test_update_router_admin_state(self):
self.router = self._create_router(data_utils.rand_name('router-'))
self.assertFalse(self.router['admin_state_up'])
# Update router admin state
update_body = self.client.update_router(self.router['id'],
admin_state_up=True)
self.assertTrue(update_body['router']['admin_state_up'])
show_body = self.client.show_router(self.router['id'])
self.assertTrue(show_body['router']['admin_state_up'])
@test.attr(type='smoke')
@test.idempotent_id('802c73c9-c937-4cef-824b-2191e24a6aab')
def test_add_multiple_router_interfaces(self):
network01 = self.create_network(
network_name=data_utils.rand_name('router-network01-'))
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
sub02_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
subnet02 = self.create_subnet(network02, cidr=sub02_cidr)
router = self._create_router(data_utils.rand_name('router-'))
interface01 = self._add_router_interface_with_subnet_id(router['id'],
subnet01['id'])
self._verify_router_interface(router['id'], subnet01['id'],
interface01['port_id'])
interface02 = self._add_router_interface_with_subnet_id(router['id'],
subnet02['id'])
self._verify_router_interface(router['id'], subnet02['id'],
interface02['port_id'])
def _verify_router_interface(self, router_id, subnet_id, port_id):
show_port_body = self.client.show_port(port_id)
interface_port = show_port_body['port']
self.assertEqual(router_id, interface_port['device_id'])
self.assertEqual(subnet_id,
interface_port['fixed_ips'][0]['subnet_id'])
class RoutersIpV6Test(RoutersTest):
_ip_version = 6
class DvrRoutersTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(DvrRoutersTest, cls).skip_checks()
if not test.is_extension_enabled('dvr', 'network'):
msg = "DVR extension not enabled."
raise cls.skipException(msg)
@test.idempotent_id('141297aa-3424-455d-aa8d-f2d95731e00a')
def test_create_distributed_router(self):
name = data_utils.rand_name('router')
create_body = self.admin_client.create_router(
name, distributed=True)
self.addCleanup(self._delete_router,
create_body['router']['id'],
self.admin_client)
self.assertTrue(create_body['router']['distributed'])
@test.idempotent_id('644d7a4a-01a1-4b68-bb8d-0c0042cb1729')
def test_convert_centralized_router(self):
router = self._create_router(data_utils.rand_name('router'))
self.assertNotIn('distributed', router)
update_body = self.admin_client.update_router(router['id'],
distributed=True)
self.assertTrue(update_body['router']['distributed'])
show_body = self.admin_client.show_router(router['id'])
self.assertTrue(show_body['router']['distributed'])
show_body = self.client.show_router(router['id'])
self.assertNotIn('distributed', show_body['router'])
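# --- Illustrative sketch (not part of tempest; the CIDR below is made up) ----
# test_update_extra_route above derives each extra route from the subnet's
# CIDR with netaddr: the address at index 2 of the range (the second usable
# host) becomes the next hop, the CIDR itself the destination, and the CIDR
# is then advanced for the next subnet.
if __name__ == '__main__':
    import netaddr

    cidr = netaddr.IPNetwork('10.100.0.0/28')
    next_hop = str(cidr[2])        # '10.100.0.2'
    destination = str(cidr)        # '10.100.0.0/28'
    next_cidr = cidr.next()        # IPNetwork('10.100.0.16/28')
    print('%s -> %s (next cidr %s)' % (next_hop, destination, next_cidr))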
|
{
"content_hash": "ed35226c0df97c74ded82f3b76cdec53",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 79,
"avg_line_length": 46.57402597402597,
"alnum_prop": 0.6019184652278178,
"repo_name": "Juraci/tempest",
"id": "78b51c8bcd8493aabbb5f637c51756bb012e8a1e",
"size": "18567",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tempest/api/network/test_routers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2701511"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
}
|
import logging
import re
from urllib.parse import urljoin
from streamlink.plugin import Plugin
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
class SSH101(Plugin):
url_re = re.compile(r'https?://(?:www\.)?ssh101\.com/(?:secure)?live/')
src_re = re.compile(r'sources.*?src:\s"(?P<url>.*?)"')
iframe_re = re.compile(r'iframe.*?src="(?P<url>.*?)"')
@classmethod
def can_handle_url(cls, url):
return cls.url_re.match(url)
def _get_streams(self):
res = self.session.http.get(self.url)
# some pages have embedded players
iframe_m = self.iframe_re.search(res.text)
if iframe_m:
url = urljoin(self.url, iframe_m.group("url"))
res = self.session.http.get(url)
video = self.src_re.search(res.text)
stream_src = video and video.group("url")
if stream_src and stream_src.endswith("m3u8"):
# do not open empty m3u8 files
if len(self.session.http.get(stream_src).text) <= 10:
log.error("This stream is currently offline")
return
log.debug("URL={0}".format(stream_src))
streams = HLSStream.parse_variant_playlist(self.session, stream_src)
if not streams:
return {"live": HLSStream(self.session, stream_src)}
else:
return streams
__plugin__ = SSH101
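# --- Illustrative check (not part of streamlink; URLs below are made up) -----
# can_handle_url() above is a plain regex match on the page URL:
if __name__ == '__main__':
    print(bool(SSH101.can_handle_url('https://www.ssh101.com/live/somechannel')))    # True
    print(bool(SSH101.can_handle_url('https://ssh101.com/securelive/somechannel')))  # True
    print(bool(SSH101.can_handle_url('https://example.com/live/somechannel')))       # False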
|
{
"content_hash": "3fced482a5db4d4c7f8722260667b7f5",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 30.891304347826086,
"alnum_prop": 0.5918367346938775,
"repo_name": "beardypig/streamlink",
"id": "5bd83ef84f01f8d943142956cd50d4464bdf65c1",
"size": "1421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/ssh101.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
}
|
"""Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
        An array of bool values indicating which nodes belong to the
        largest connected component containing the given query node
"""
connected_components_matrix = np.zeros(
shape=(graph.shape[0]), dtype=np.bool)
connected_components_matrix[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components_matrix.sum()
_, node_to_add = np.where(graph[connected_components_matrix] != 0)
connected_components_matrix[node_to_add] = True
if last_num_component >= connected_components_matrix.sum():
break
return connected_components_matrix
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
def _set_diag(laplacian, value):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
Returns
-------
laplacian : array or sparse matrix
        An array or sparse matrix in a form that is well suited to fast
        eigenvalue decomposition, depending on the bandwidth of the
        matrix.
"""
n_nodes = laplacian.shape[0]
    # We need all entries in the diagonal to equal the given value
if not sparse.isspmatrix(laplacian):
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigen vectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigen vectors associated to the
smallest eigen values) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.
Parameters
----------
adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : integer, optional, default 8
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
eigen_tol : float, optional, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be a constant vector for
        a connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
norm_laplacian : bool, optional, default=True
If True, then compute normalized Laplacian.
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* http://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_laplacian(adjacency,
normed=norm_laplacian, return_diag=True)
if (eigen_solver == 'arpack'
or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian)
or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
lambdas, diffusion_map = eigsh(laplacian, k=n_components,
sigma=1.0, which='LM',
tol=eigen_tol)
embedding = diffusion_map.T[n_components::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
if eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
laplacian = _set_diag(laplacian, 1)
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif eigen_solver == "lobpcg":
laplacian = laplacian.astype(np.float) # lobpcg needs native floats
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
lambdas, diffusion_map = eigh(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
# lobpcg needs native floats
laplacian = laplacian.astype(np.float)
laplacian = _set_diag(laplacian, 1)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Parameters
-----------
n_components : integer, default: 2
The dimension of the projected subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
affinity : string or callable, default : "nearest_neighbors"
How to construct the affinity matrix.
- 'nearest_neighbors' : construct affinity matrix by knn graph
- 'rbf' : construct affinity matrix by rbf kernel
- 'precomputed' : interpret X as precomputed affinity matrix
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, optional, default : 1/n_features
Kernel coefficient for rbf kernel.
n_neighbors : int, default : max(n_samples/10 , 1)
Number of nearest neighbors for nearest_neighbors graph building.
Attributes
----------
embedding_ : array, shape = (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : array, shape = (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2011
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(self, n_components=2, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _get_affinity_matrix(self, X, Y=None):
"""Caclulate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if self.affinity not in set(("nearest_neighbors", "rbf",
"precomputed")):
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
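# --- Illustrative usage sketch (not part of scikit-learn; data is made up) ---
# A minimal fit_transform of the SpectralEmbedding estimator defined above on
# a small random matrix, reducing 5 features to a 2-dimensional embedding:
if __name__ == '__main__':
    X = np.random.RandomState(0).rand(100, 5)           # 100 samples, 5 features
    se = SpectralEmbedding(n_components=2,
                           affinity='nearest_neighbors',
                           random_state=0)
    X_2d = se.fit_transform(X)
    print(X_2d.shape)                                    # (100, 2)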
|
{
"content_hash": "7855f7b8b608c3d0b605bd7dbdecdb62",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 79,
"avg_line_length": 40.43801652892562,
"alnum_prop": 0.6178724708767627,
"repo_name": "Barmaley-exe/scikit-learn",
"id": "277ce0c48c9f37a1c873223c4b0a0bcaacbce0f6",
"size": "19572",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/manifold/spectral_embedding_.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18609258"
},
{
"name": "C++",
"bytes": "1810938"
},
{
"name": "Makefile",
"bytes": "1364"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5778311"
},
{
"name": "Shell",
"bytes": "5968"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "manatsum.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "cb2aa7e663371cb0889fb77f899d45e9",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.7117903930131004,
"repo_name": "mashabow/manatsum",
"id": "2a83262df5560a607d1cc405120fc75765b56bc1",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2053"
},
{
"name": "HTML",
"bytes": "10626"
},
{
"name": "JavaScript",
"bytes": "12979"
},
{
"name": "Python",
"bytes": "16773"
}
],
"symlink_target": ""
}
|
import unittest
import openmesh
import numpy as np
class BoundaryTriangleMesh(unittest.TestCase):
def setUp(self):
self.mesh = openmesh.TriMesh()
# Add some vertices
self.vhandle = []
self.vhandle.append(self.mesh.add_vertex(np.array([0, 1, 0])))
self.vhandle.append(self.mesh.add_vertex(np.array([1, 0, 0])))
self.vhandle.append(self.mesh.add_vertex(np.array([2, 1, 0])))
self.vhandle.append(self.mesh.add_vertex(np.array([0,-1, 0])))
self.vhandle.append(self.mesh.add_vertex(np.array([2,-1, 0])))
self.vhandle.append(self.mesh.add_vertex(np.array([3, 0, 0])))
# Single point
self.vhandle.append(self.mesh.add_vertex(np.array([0,-2, 0])))
# Add five faces
self.fhandle = []
face_vhandles = []
face_vhandles.append(self.vhandle[0])
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[2])
self.fhandle.append(self.mesh.add_face(face_vhandles))
face_vhandles = []
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[3])
face_vhandles.append(self.vhandle[4])
self.fhandle.append(self.mesh.add_face(face_vhandles))
face_vhandles = []
face_vhandles.append(self.vhandle[0])
face_vhandles.append(self.vhandle[3])
face_vhandles.append(self.vhandle[1])
self.fhandle.append(self.mesh.add_face(face_vhandles))
face_vhandles = []
face_vhandles.append(self.vhandle[2])
face_vhandles.append(self.vhandle[1])
face_vhandles.append(self.vhandle[4])
self.fhandle.append(self.mesh.add_face(face_vhandles))
face_vhandles = []
face_vhandles.append(self.vhandle[5])
face_vhandles.append(self.vhandle[2])
face_vhandles.append(self.vhandle[4])
self.fhandle.append(self.mesh.add_face(face_vhandles))
# Test setup:
# 0 ==== 2
# |\ 0 /|\
# | \ / | \
# |2 1 3|4 5
# | / \ | /
# |/ 1 \|/
# 3 ==== 4
#
# Vertex 6 single
def test_boundary_vertex(self):
self.assertTrue (self.mesh.is_boundary(self.vhandle[0]))
self.assertFalse(self.mesh.is_boundary(self.vhandle[1]))
self.assertTrue (self.mesh.is_boundary(self.vhandle[2]))
self.assertTrue (self.mesh.is_boundary(self.vhandle[3]))
self.assertTrue (self.mesh.is_boundary(self.vhandle[4]))
self.assertTrue (self.mesh.is_boundary(self.vhandle[5]))
self.assertTrue (self.mesh.is_boundary(self.vhandle[6]))
def test_boundary_face(self):
self.assertTrue (self.mesh.is_boundary(self.fhandle[0]))
self.assertTrue (self.mesh.is_boundary(self.fhandle[1]))
self.assertTrue (self.mesh.is_boundary(self.fhandle[2]))
self.assertFalse(self.mesh.is_boundary(self.fhandle[3]))
self.assertTrue (self.mesh.is_boundary(self.fhandle[4]))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(BoundaryTriangleMesh)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "62f5e34b4f22157339cabde17b0a0292",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 77,
"avg_line_length": 36.76136363636363,
"alnum_prop": 0.6003091190108192,
"repo_name": "TinyTinni/OF_Plugin-PyMesh",
"id": "6c6acb675bfad759247c7a9ebf0673cb1700f121",
"size": "3235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openmesh-python/tests/test_boundary.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7764"
},
{
"name": "C++",
"bytes": "153358"
},
{
"name": "CMake",
"bytes": "2333"
},
{
"name": "Makefile",
"bytes": "7670"
},
{
"name": "Python",
"bytes": "237811"
},
{
"name": "Shell",
"bytes": "665"
}
],
"symlink_target": ""
}
|
import os
from collections import OrderedDict
FILE_EXTENSIONS = ['.xlsx', '.xls', '.csv']
databases_folder_path = os.path.dirname(os.path.abspath(__file__))
def get_regions():
return [folder for folder in os.listdir(databases_folder_path) if folder != "weather"
and os.path.isdir(os.path.join(databases_folder_path, folder))]
def get_categories(db_path):
return [folder for folder in os.listdir(db_path) if os.path.isdir(os.path.join(db_path, folder))]
def get_database_template_tree():
"""
Assumes that folders in `databases_folder_path` are `categories` and items (file/folder) in `categories` are `databases`.
    Uses the first region database as template (e.g. CH)
:return: dict containing `categories` and `databases`
e.g.
{
'categories': {
'$category_name': {
'databases': [
{
'name': '$database_name',
'extension': '$database_extension'
},
...
]
},
...
}
}
"""
out = {'categories': OrderedDict()}
template_path = os.path.join(databases_folder_path, get_regions()[0])
for category in get_categories(template_path):
category_path = os.path.join(template_path, category)
category_databases = []
for database in os.listdir(category_path):
database_name, ext = os.path.splitext(database)
if ext in FILE_EXTENSIONS or os.path.isdir(os.path.join(category_path, database_name)):
database_name = database_name.upper()
category_databases.append({'name': database_name, 'extension': ext})
if category_databases:
out['categories'][category] = {'databases': category_databases}
return out
def get_database_tree(db_path):
"""
Look for database files in `db_path` based on `get_database_template_tree`
:param db_path: path of databases
:return: dict containing `categories` and `databases` found in `db_path`
e.g.
{
'categories': {
'$category_name': {
'databases': [...]
},
...
},
'databases': {
'$database_name': {
'files': [
{
'extension': '$file_extension',
'name': '$file_name',
'path': '$path'
},
...
]
},
...
}
}
"""
database_categories_tree = get_database_template_tree()['categories']
out = {'categories': OrderedDict(), 'databases': OrderedDict()}
for category, databases in database_categories_tree.items():
out['categories'][category] = {'databases': []}
for database in databases['databases']:
database_name = database['name']
database_path = os.path.join(db_path, category, database_name)
out['databases'][database_name] = {'files': path_crawler(database_path + database['extension'])}
out['categories'][category]['databases'].append(database_name)
return out
def path_crawler(parent_path):
"""
Looks for database files in `parent_path`
:param parent_path:
:return: list of files with its properties (i.e. name, extension, path) contained in `parent_path`
"""
out = list()
if os.path.isfile(parent_path):
name, ext = os.path.splitext(os.path.basename(parent_path))
out.append({'path': parent_path, 'name': name, 'extension': ext})
else:
for (dir_path, _, filenames) in os.walk(parent_path):
for f in filenames:
file_path = os.path.join(dir_path, f)
name, ext = os.path.splitext(os.path.basename(file_path))
out.append({'path': file_path, 'name': name, 'extension': ext})
return out
if __name__ == '__main__':
import json
import cea.config
import cea.inputlocator
config = cea.config.Configuration()
locator = cea.inputlocator.InputLocator(config.scenario)
print(json.dumps(get_database_template_tree(), indent=2))
print(json.dumps(get_database_tree(locator.get_databases_folder()), indent=2))
|
{
"content_hash": "c83bf407cfb49de26580df37f8cd7ac4",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 125,
"avg_line_length": 35.33606557377049,
"alnum_prop": 0.562514497796335,
"repo_name": "architecture-building-systems/CEAforArcGIS",
"id": "eb23f2dbf68ff7f3b9c8fd724ddcca532843375c",
"size": "4314",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cea/databases/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2776"
},
{
"name": "Jupyter Notebook",
"bytes": "135743"
},
{
"name": "Makefile",
"bytes": "675"
},
{
"name": "NSIS",
"bytes": "3948"
},
{
"name": "Python",
"bytes": "1217645"
},
{
"name": "Shell",
"bytes": "7194"
}
],
"symlink_target": ""
}
|
import os
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DEBUG = DEBUG = True
MANAGERS = ADMINS = ()
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(ROOT_PATH, 'testdb.sqlite')
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(ROOT_PATH, 'testdb.sqlite'),
}
}
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = ''
MEDIA_URL = ''
ADMIN_MEDIA_PREFIX = '/media/'
SECRET_KEY = '2+@4vnr#v8e273^+a)g$8%dre^dwcn#d&n#8+l6jk7r#$p&3zk'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.request",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (os.path.join(ROOT_PATH, '../../templates'),)
INSTALLED_APPS = (
'django_roa',
'django_roa.remoteauth',
'django_roa_client',
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
)
AUTHENTICATION_BACKENDS = (
'django_roa.remoteauth.backends.RemoteUserModelBackend',
)
SESSION_ENGINE = "django.contrib.sessions.backends.file"
SERIALIZATION_MODULES = {
'django': 'examples.django_roa_client.serializers',
}
## ROA custom settings
ROA_MODELS = True # set to False if you'd like to develop/test locally
ROA_FORMAT = 'django' # json or xml
# specify the headers sent to the ws from restkit
ROA_HEADERS = {
'Content-Type': 'application/x-www-form-urlencoded',
}
ROA_DJANGO_ERRORS = True # useful to ease debugging if you use test server
ROA_URL_OVERRIDES_LIST = {
'django_roa_client.remotepagewithoverriddenurls': u'http://127.0.0.1:8081/django_roa_server/remotepagewithoverriddenurls/',
}
ROA_URL_OVERRIDES_DETAIL = {
'django_roa_client.remotepagewithoverriddenurls': lambda o: u"%s%s-%s/" % (o.get_resource_url_list(), o.id, o.slug),
}
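# Illustrative note (not part of the original settings): with the override
# above, and assuming get_resource_url_list() returns the list URL configured
# in ROA_URL_OVERRIDES_LIST, an object with id=12 and slug='example' (both
# hypothetical values) would resolve to
# http://127.0.0.1:8081/django_roa_server/remotepagewithoverriddenurls/12-example/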
ROA_MODEL_NAME_MAPPING = (
# local name: remote name
('django_roa_client.', 'django_roa_server.'),
('remoteauth.', 'auth.'),
)
ROA_ARGS_NAMES_MAPPING = {
'ORDER_BY': 'order',
}
ROA_MODEL_CREATE_MAPPING = {
'package.somemodel': ['attribute1', 'attribute2', 'attribute3', 'attribute4'],
}
ROA_MODEL_UPDATE_MAPPING = {
'package.somemodel': ['attribute2', 'attribute3'],
}
# Enable HTTP authentication through django-piston
from restkit import BasicAuth
ROA_FILTERS = [ BasicAuth('django-roa', 'roa'), ]
# Disable authentication through django-piston
#ROA_FILTERS = []
## Logging settings
import logging
logging.basicConfig(level=logging.DEBUG, format="%(name)s - %(message)s")
|
{
"content_hash": "3aaa82ee9d0e92f5831a5b69f1037c8c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 127,
"avg_line_length": 31.347368421052632,
"alnum_prop": 0.6991269308260577,
"repo_name": "charles-vdulac/django-roa",
"id": "b0e1b6b91f12e74860e9cbf7d6deb40f8c891f46",
"size": "2978",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/django_roa_client/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "181765"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
}
|
import m3u8
import sys, getopt, urllib2, httplib, signal
def signal_handler(signal, frame):
print('Cancelat per Ctrl+C!')
sys.exit(0)
stats_text=''
# Analysis of the sub-manifests (the playlists that contain the .ts segments and keep changing)
def analitzaSubManifestNagios(m, test_ts,test_target_duration,expected_target_duration):
err=''
warn=''
status=0
global stats_text
u=None
try:
if m.uri.startswith('http'):
url = m.uri
else:
url = m.base_uri + "/" + m.uri
u = m3u8.load(url)
except urllib2.URLError, e:
stats_text+=" ERR"
status=2
else:
        # Generate a warning for version 6 or later
if m.stream_info.program_id and int(u.version) > 5:
stats_text+=" WARN"
status=1
        if len(u.segments) == 0:
stats_text+=" ERR"
status=2
else:
if test_ts:
for s in u.segments:
try:
url = m.base_uri + "/" + s.uri
h = urllib2.Request(url)
h.get_method = lambda : 'HEAD'
resp = urllib2.urlopen(h)
stats_text+=" OK"
except:
status=2
stats_text+=" KO"
if test_target_duration:
if u.target_duration > int(expected_target_duration):
if status < 2:
status = 1
stats_text+=" WARN (targetduration)"
if status == 0:
stats_text+=" OK"
return status
# Analysis of the sub-manifests (the playlists that contain the .ts segments and keep changing)
def analitzaSubManifestSilent(m, test_ts):
u=None
try:
if m.uri.startswith('http'):
url = m.uri
else:
url = m.base_uri + "/" + m.uri
u = m3u8.load(url)
except urllib2.HTTPError, e:
print " ERROR: Codi HTTP " + str(e.code) + ": " + url
except urllib2.URLError, e:
print " ERROR: " + url
else:
        if len(u.segments) == 0:
print " ERROR: " + url
else:
if test_ts:
print " OK: " + url + " : ",
for s in u.segments:
try:
url = m.base_uri + "/" + s.uri
h = urllib2.Request(url)
h.get_method = lambda : 'HEAD'
resp = urllib2.urlopen(h)
print " OK",
except:
print " KO",
print " "
else:
print " OK: " + url
# Analysis of the sub-manifests (the playlists that contain the .ts segments and keep changing)
def analitzaSubManifest(m,test_ts):
u=None
print " URL base : " + m.base_uri
print " URL : " + m.uri
print " Informacio (EXT-X-STREAM-INF)"
print " Program ID (PROGRAM-ID): " + str(m.stream_info.program_id)
if m.stream_info.program_id:
print " (deprecated; no hauria d'estar present)"
print " Bandwidth (BANDWIDTH) : " + m.stream_info.bandwidth
print " Codecs (CODECS) : " + m.stream_info.codecs
if m.stream_info.resolution:
print " Resolution (RESOLUTION): " + str(m.stream_info.resolution[0])+ "x" + str(m.stream_info.resolution[1])
else:
print " Resolution (RESOLUTION): None"
print " Contingut"
try:
if m.uri.startswith('http'):
url = m.uri
else:
url = m.base_uri + "/" + m.uri
u = m3u8.load(url)
except urllib2.HTTPError, e:
print " ERROR: Codi HTTP " + str(e.code) + ": " + url
except urllib2.URLError, e:
print " ERROR: No s'ha pogut connectar"
else:
        if len(u.segments) == 0:
print " ERROR: playlist buida"
else:
print " Playlist complerta (EXT-X-ENDLIST): " + str (u.is_endlist)
if u.is_endlist:
print " ATENCIO: Si es un HLS live, NO hauria d'apareixer"
print " Versio (EXT-X-VERSION): " + str(u.version)
print " Permet cache (EXT-X-ALLOW-CACHE): " + str(u.allow_cache)
print " Durada objectiu playlist (EXT-X-TARGETDURATION): " + str(u.target_duration)
print " Sequencia (EXT-X-MEDIA-SEQUENCE): " + str(u.media_sequence)
if test_ts:
print " Segments: : " + str(len (u.segments)) + ": ",
for s in u.segments:
try:
url = m.base_uri + "/" + s.uri
h = urllib2.Request(url)
h.get_method = lambda : 'HEAD'
resp = urllib2.urlopen(h)
print " OK",
except:
print " KO",
print " "
else:
print " Segments: : " + str(len (u.segments)),
def analitzaManifest(m):
u=None
    if len(m.segments) == 0:
print " ERROR: playlist buida"
else:
print " Playlist complerta (EXT-X-ENDLIST): " + str (m.is_endlist)
if m.is_endlist:
print " ATENCIO: Si es un HLS live, NO hauria d'apareixer"
print " Versio (EXT-X-VERSION): " + str(m.version)
print " Permet cache (EXT-X-ALLOW-CACHE): " + str(m.allow_cache)
print " Durada objectiu playlist (EXT-X-TARGETDURATION): " + str(m.target_duration)
print " Sequencia (EXT-X-MEDIA-SEQUENCE): " + str(m.media_sequence)
print " Segments: : " + str(len (m.segments))
def analitzaManifestSilent(m, url):
    if len(m.segments) == 0:
print "ERROR: playlist buida" + url
else:
print "OK: " + url
def analitzaManifestNagios(m, url):
    if len(m.segments) == 0:
print "ERROR: playlist buida" + url
sys.exit(2)
else:
print "OK: " + url
sys.exit(0)
def us():
print 'validaHLS.py [-h] [-n] [-s] [-t] -u <url>'
print " -u, --url : URL a testejar. Unic parametre obligatori"
print " -h, --help : Aquesta informacio"
print " -s, --silent : Resultats mes concisos"
print " -n, --nagios : Resultats compatibles amb un check de nagios"
print " -t, --test-ts : Comprova el segments de la playlist. No els descarrega, nomes fa un HEAD"
print " -d <n>, --target-duration <n> : Comprova que el valor de EXT-X-TARGET-DURATION estigui per sota de n. Nomes en mode Nagios"
def main(argv):
url = ''
silent = False
nagios = False
status = 0
test_ts = False
test_target_duration = False
expected_target_duration = 0
global stats_text
try:
opts, args = getopt.getopt(argv,"hsnu:td:",["url=", "help", "silent", "nagios","test-ts","target-duration="])
except getopt.GetoptError:
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
us()
sys.exit()
elif opt in ("-u", "--url"):
url = arg
elif opt in ("-s", "--silent"):
silent = True
elif opt in ("-n", "--nagios"):
nagios = True
elif opt in ("-t", "--test-ts"):
test_ts = True
elif opt in ("-d", "--target-duration"):
test_target_duration = True
expected_target_duration = arg
    # Make sure a URL was provided
if url == '':
print 'ERROR: Cal indicar URL a testejar'
us()
sys.exit(2)
    # Load the URL
try:
variant_m3u8 = m3u8.load(url)
except urllib2.HTTPError, e:
print "CRITICAL: Error HTTP " + str(e.code) + ": " + url
sys.exit(2)
except urllib2.URLError, e:
print "CRITICAL: No s'ha pogut connectar a " + url
sys.exit(2)
except:
print "CRITICAL: Error desconegut al connectar a " + url
sys.exit(2)
    # Analyse the playlist
    # If the main playlist is empty or corrupt, stop here
if not variant_m3u8.is_variant and (len(variant_m3u8.segments) == 0):
print "CRITICAL: playlist buida o erronia: " + url
sys.exit(2)
if nagios:
if variant_m3u8.is_variant:
for playlist in variant_m3u8.playlists:
status=max(status,analitzaSubManifestNagios(playlist, test_ts,test_target_duration,expected_target_duration))
else:
status=max(status,analitzaManifestNagios(variant_m3u8, url))
        # Results
if status == 2:
print "CRITICAL: (" + stats_text + ") " + url
sys.exit(2)
elif status == 1:
print "WARNING: (" + stats_text + ") " + url
sys.exit(1)
print "OK: (" + stats_text + ") " + url
sys.exit(0)
elif silent:
if variant_m3u8.is_variant:
print "OK: " + url
for playlist in variant_m3u8.playlists:
analitzaSubManifestSilent(playlist,test_ts)
else:
analitzaManifestSilent(variant_m3u8, url)
else:
print "Manifest principal"
print "---------------------------------------------------------------"
print "URL: " + url
print "Variant (multiples bitrates): " + str(variant_m3u8.is_variant)
if variant_m3u8.is_variant:
print "Versio: " + str(variant_m3u8.version)
for playlist in variant_m3u8.playlists:
print "Sub Manifest"
analitzaSubManifest(playlist,test_ts)
else:
analitzaManifest(variant_m3u8)
####################################################
# Call the main program
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
main(sys.argv[1:])
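# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original script).
# Assumes Python 2 with the m3u8 package installed; the URL is a placeholder.
#
#   python validaHLS.py -u http://example.com/live/master.m3u8
#   python validaHLS.py -n -t -d 10 -u http://example.com/live/master.m3u8
#
# The second form suits a Nagios check: it exits 0 for OK, 1 for WARNING and
# 2 for CRITICAL, matching the status values used in main().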
|
{
"content_hash": "c50a1d9e0161d412272837a3dc70d23d",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 139,
"avg_line_length": 34.082142857142856,
"alnum_prop": 0.5242586188829509,
"repo_name": "tcomerma/validaHLS",
"id": "f190a0da8965273d8d24055ee66b914b648b7f6a",
"size": "9925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validaHLS.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26316"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_serialization import jsonutils
import six
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import utils
from neutron import context as neutron_context
from neutron.db import api as db_api
from neutron.extensions import l3
from neutron.extensions import portbindings
from neutron import manager
from neutron.plugins.common import constants as plugin_constants
LOG = logging.getLogger(__name__)
class L3RpcCallback(object):
"""L3 agent RPC callback in plugin implementations."""
# 1.0 L3PluginApi BASE_RPC_API_VERSION
# 1.1 Support update_floatingip_statuses
# 1.2 Added methods for DVR support
# 1.3 Added a method that returns the list of activated services
# 1.4 Added L3 HA update_router_state. This method was later removed,
# since it was unused. The RPC version was not changed
# 1.5 Added update_ha_routers_states
# 1.6 Added process_prefix_update to support IPv6 Prefix Delegation
# 1.7 Added method delete_agent_gateway_port for DVR Routers
# 1.8 Added address scope information
# 1.9 Added get_router_ids
target = oslo_messaging.Target(version='1.9')
@property
def plugin(self):
if not hasattr(self, '_plugin'):
self._plugin = manager.NeutronManager.get_plugin()
return self._plugin
@property
def l3plugin(self):
if not hasattr(self, '_l3plugin'):
self._l3plugin = manager.NeutronManager.get_service_plugins()[
plugin_constants.L3_ROUTER_NAT]
return self._l3plugin
def get_router_ids(self, context, host):
"""Returns IDs of routers scheduled to l3 agent on <host>
This will autoschedule unhosted routers to l3 agent on <host> and then
return all ids of routers scheduled to it.
"""
if utils.is_extension_supported(
self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
if cfg.CONF.router_auto_schedule:
self.l3plugin.auto_schedule_routers(context, host,
router_ids=None)
return self.l3plugin.list_router_ids_on_host(context, host)
@db_api.retry_db_errors
def sync_routers(self, context, **kwargs):
"""Sync routers according to filters to a specific agent.
@param context: contain user information
@param kwargs: host, router_ids
@return: a list of routers
with their interfaces and floating_ips
"""
router_ids = kwargs.get('router_ids')
host = kwargs.get('host')
context = neutron_context.get_admin_context()
if utils.is_extension_supported(
self.l3plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
# only auto schedule routers that were specifically requested;
# on agent full sync routers will be auto scheduled in
# get_router_ids()
if cfg.CONF.router_auto_schedule and router_ids:
self.l3plugin.auto_schedule_routers(context, host, router_ids)
routers = (
self.l3plugin.list_active_sync_routers_on_active_l3_agent(
context, host, router_ids))
else:
routers = self.l3plugin.get_sync_data(context, router_ids)
if utils.is_extension_supported(
self.plugin, constants.PORT_BINDING_EXT_ALIAS):
self._ensure_host_set_on_ports(context, host, routers)
LOG.debug("Routers returned to l3 agent:\n %s",
utils.DelayedStringRenderer(jsonutils.dumps,
routers, indent=5))
return routers
def _ensure_host_set_on_ports(self, context, host, routers):
for router in routers:
LOG.debug("Checking router: %(id)s for host: %(host)s",
{'id': router['id'], 'host': host})
if router.get('gw_port') and router.get('distributed'):
# '' is used to effectively clear binding of a gw port if not
# bound (snat is not hosted on any l3 agent)
gw_port_host = router.get('gw_port_host') or ''
self._ensure_host_set_on_port(context,
gw_port_host,
router.get('gw_port'),
router['id'])
for p in router.get(constants.SNAT_ROUTER_INTF_KEY, []):
self._ensure_host_set_on_port(context,
gw_port_host,
p, router['id'])
else:
self._ensure_host_set_on_port(
context, host,
router.get('gw_port'),
router['id'],
ha_router_port=router.get('ha'))
for interface in router.get(constants.INTERFACE_KEY, []):
self._ensure_host_set_on_port(
context,
host,
interface,
router['id'],
ha_router_port=router.get('ha'))
interface = router.get(constants.HA_INTERFACE_KEY)
if interface:
self._ensure_host_set_on_port(context, host, interface,
router['id'])
def _ensure_host_set_on_port(self, context, host, port, router_id=None,
ha_router_port=False):
if (port and host is not None and
(port.get('device_owner') !=
constants.DEVICE_OWNER_DVR_INTERFACE and
port.get(portbindings.HOST_ID) != host or
port.get(portbindings.VIF_TYPE) ==
portbindings.VIF_TYPE_BINDING_FAILED)):
# Ports owned by non-HA routers are bound again if they're
# already bound but the router moved to another host.
if not ha_router_port:
# All ports, including ports created for SNAT'ing for
# DVR are handled here
try:
self.plugin.update_port(
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
# updating port's host to pass actual info to l3 agent
port[portbindings.HOST_ID] = host
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})
# Ports owned by HA routers should only be bound once, if
# they are unbound. These ports are moved when an agent reports
# that one of its routers moved to the active state.
else:
if not port.get(portbindings.HOST_ID):
active_host = (
self.l3plugin.get_active_host_for_ha_router(
context, router_id))
if active_host:
host = active_host
# If there is currently no active router instance (For
# example it's a new router), the host that requested
# the routers (Essentially a random host) will do. The
# port binding will be corrected when an active is
# elected.
try:
self.plugin.update_port(
context,
port['id'],
{'port': {portbindings.HOST_ID: host}})
except exceptions.PortNotFound:
LOG.debug("Port %(port)s not found while updating "
"agent binding for router %(router)s.",
{"port": port['id'], "router": router_id})
elif (port and
port.get('device_owner') ==
constants.DEVICE_OWNER_DVR_INTERFACE):
# Ports that are DVR interfaces have multiple bindings (based on
# of hosts on which DVR router interfaces are spawned). Such
# bindings are created/updated here by invoking
# update_dvr_port_binding
self.plugin.update_dvr_port_binding(context, port['id'],
{'port':
{portbindings.HOST_ID: host,
'device_id': router_id}
})
def get_external_network_id(self, context, **kwargs):
"""Get one external network id for l3 agent.
l3 agent expects only one external network when it performs
this query.
"""
context = neutron_context.get_admin_context()
net_id = self.plugin.get_external_network_id(context)
LOG.debug("External network ID returned to l3 agent: %s",
net_id)
return net_id
def get_service_plugin_list(self, context, **kwargs):
plugins = manager.NeutronManager.get_service_plugins()
return plugins.keys()
@db_api.retry_db_errors
def update_floatingip_statuses(self, context, router_id, fip_statuses):
"""Update operational status for a floating IP."""
with context.session.begin(subtransactions=True):
for (floatingip_id, status) in six.iteritems(fip_statuses):
LOG.debug("New status for floating IP %(floatingip_id)s: "
"%(status)s", {'floatingip_id': floatingip_id,
'status': status})
try:
self.l3plugin.update_floatingip_status(context,
floatingip_id,
status)
except l3.FloatingIPNotFound:
LOG.debug("Floating IP: %s no longer present.",
floatingip_id)
            # Find all floating IPs known to have been associated with the
            # given router for which an update was not received. Set them
            # DOWN mercilessly.
# This situation might occur for some asynchronous backends if
# notifications were missed
known_router_fips = self.l3plugin.get_floatingips(
context, {'last_known_router_id': [router_id]})
# Consider only floating ips which were disassociated in the API
# FIXME(salv-orlando): Filtering in code should be avoided.
# the plugin should offer a way to specify a null filter
fips_to_disable = (fip['id'] for fip in known_router_fips
if not fip['router_id'])
for fip_id in fips_to_disable:
self.l3plugin.update_floatingip_status(
context, fip_id, constants.FLOATINGIP_STATUS_DOWN)
def get_ports_by_subnet(self, context, **kwargs):
"""DVR: RPC called by dvr-agent to get all ports for subnet."""
subnet_id = kwargs.get('subnet_id')
LOG.debug("DVR: subnet_id: %s", subnet_id)
filters = {'fixed_ips': {'subnet_id': [subnet_id]}}
return self.plugin.get_ports(context, filters=filters)
@db_api.retry_db_errors
def get_agent_gateway_port(self, context, **kwargs):
"""Get Agent Gateway port for FIP.
l3 agent expects an Agent Gateway Port to be returned
for this query.
"""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
admin_ctx = neutron_context.get_admin_context()
agent_port = self.l3plugin.create_fip_agent_gw_port_if_not_exists(
admin_ctx, network_id, host)
self._ensure_host_set_on_port(admin_ctx, host, agent_port)
LOG.debug('Agent Gateway port returned : %(agent_port)s with '
'host %(host)s', {'agent_port': agent_port,
'host': host})
return agent_port
@db_api.retry_db_errors
def update_ha_routers_states(self, context, **kwargs):
"""Update states for HA routers.
Get a map of router_id to its HA state on a host and update the DB.
State must be in: ('active', 'standby').
"""
states = kwargs.get('states')
host = kwargs.get('host')
LOG.debug('Updating HA routers states on host %s: %s', host, states)
self.l3plugin.update_routers_states(context, states, host)
def process_prefix_update(self, context, **kwargs):
subnets = kwargs.get('subnets')
updated_subnets = []
for subnet_id, prefix in subnets.items():
updated_subnets.append(self.plugin.update_subnet(
context,
subnet_id,
{'subnet': {'cidr': prefix}}))
return updated_subnets
@db_api.retry_db_errors
def delete_agent_gateway_port(self, context, **kwargs):
"""Delete Floatingip agent gateway port."""
network_id = kwargs.get('network_id')
host = kwargs.get('host')
admin_ctx = neutron_context.get_admin_context()
self.l3plugin.delete_floatingip_agent_gateway_port(
admin_ctx, host, network_id)
|
{
"content_hash": "7d1c915be53b426b1113fe2db8c9d40d",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 78,
"avg_line_length": 46.55972696245734,
"alnum_prop": 0.5459610027855153,
"repo_name": "klmitch/neutron",
"id": "7e5bd94fbdbfb49f686a3e80b2ec650d906356e8",
"size": "14233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/api/rpc/handlers/l3_rpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8467992"
},
{
"name": "Shell",
"bytes": "14648"
}
],
"symlink_target": ""
}
|
import codecs
import hashlib
from sqlalchemy import create_engine
from sqlalchemy.exc import IntegrityError, DataError
from sqlalchemy.orm import sessionmaker
from pbnh.db import models
from pbnh.db.connect import DBConnect
class Paster():
def __init__(self, dialect='sqlite', driver=None, username=None, password=None,
host=None, port=None, dbname='pastedb'):
"""Grab connection information to pass to DBConnect"""
self.dialect = dialect
self.dbname = dbname
self.driver = driver
self.username = username
self.password = password
self.host = host
self.port = port
def __enter__(self):
connection = DBConnect(dialect=self.dialect, driver=self.driver, username=self.username,
password=self.password, host=self.host, port=self.port,
dbname=self.dbname).connect
if self.dialect == 'postgresql':
self.engine = create_engine(connection, pool_size=1)
else:
self.engine = create_engine(connection)
Session = sessionmaker(bind=self.engine)
self.session = Session()
return self
def __exit__(self, exception_type, exception_value, traceback):
self.session.close()
self.engine.dispose()
def create(self, data, ip=None, mime=None, sunset=None,
timestamp=None):
sha1 = hashlib.sha1(data).hexdigest()
collision = self.query(hashid=sha1)
if collision:
pasteid = collision.get('id')
else:
paste = models.Paste(
hashid = sha1,
ip = ip,
mime = mime,
sunset = sunset,
timestamp = timestamp,
data = data
)
try:
self.session.add(paste)
self.session.commit()
except IntegrityError:
pasteid = 'HASH COLLISION'
self.session.rollback()
else:
pasteid = paste.id
return {'id': pasteid, 'hashid': sha1}
def query(self, id=None, hashid=None):
result = None
if id:
try:
result = (self.session.query(models.Paste)
.filter(models.Paste.id == id).first())
except DataError:
self.session.rollback()
raise ValueError
elif hashid:
try:
result = (self.session.query(models.Paste)
.filter(models.Paste.hashid == hashid).first())
except DataError:
self.session.rollback()
raise ValueError
else:
return None
if result:
result = {
'id': result.id,
'hashid': result.hashid,
'ip': result.ip,
'mime': result.mime,
'timestamp': result.timestamp,
'sunset': result.sunset,
'data': result.data
}
return result
def delete(self, id=None, hashid=None):
if id:
result = (self.session.query(models.Paste)
.filter(models.Paste.id == id).first())
elif hashid:
result = (self.session.query(models.Paste)
.filter(models.Paste.hashid == hashid).first())
else:
return None
self.session.delete(result)
self.session.commit()
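# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module).
# Assumes the module is importable as pbnh.db.paste and that the paste table
# already exists in the target database; the sqlite dbname is hypothetical.
if __name__ == '__main__':
    with Paster(dialect='sqlite', dbname='demo_paste.db') as paster:
        created = paster.create(b'hello world', mime='text/plain')
        print(created)                        # {'id': ..., 'hashid': '...'}
        fetched = paster.query(hashid=created['hashid'])
        print(fetched['mime'])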
|
{
"content_hash": "a7adb9af05c3e175f32ff6b3718ba9ef",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 96,
"avg_line_length": 34.71153846153846,
"alnum_prop": 0.5138504155124654,
"repo_name": "bhanderson/pbnh",
"id": "29dd7893ec8338ae0251d4c2109f93b501432ea5",
"size": "3610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pbnh/db/paste.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "60416"
},
{
"name": "Dockerfile",
"bytes": "221"
},
{
"name": "HTML",
"bytes": "14511"
},
{
"name": "JavaScript",
"bytes": "1479016"
},
{
"name": "Python",
"bytes": "31840"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
import people as app
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
dependency_links = [
# needs this dev version for django 1.6 fixes
'https://github.com/KristianOellegaard/django-hvad/tarball/0e2101f15404eaf9611cd6cf843bfc424117b227', # NOQA
]
setup(
name="django-people",
version=app.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, cms, plugin, people, person, profile',
author='Martin Brochhaus',
author_email='mbrochh@gmail.com',
url="https://github.com/bitmazk/django-people",
packages=find_packages(),
include_package_data=True,
install_requires=[
'Django',
'South',
'django-libs',
'django-filer',
'Pillow',
'django-localized-names',
],
dependency_links=dependency_links,
tests_require=[
'fabric',
'factory_boy',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='people.tests.runtests.runtests',
)
|
{
"content_hash": "c74f6b436c7394ed0d4fbded9f238f02",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 113,
"avg_line_length": 25.26,
"alnum_prop": 0.6247030878859857,
"repo_name": "chayapan/django-people",
"id": "fcf21b768ce1bb5294f1052d2d7032637a2646f6",
"size": "1263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "421"
},
{
"name": "Python",
"bytes": "266037"
}
],
"symlink_target": ""
}
|
"""
将持仓数据合并
"""
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
def process1(input_path, output_path, member_name, folder_name):
    # Too many files make a full reload slow, so process incrementally
for dirpath, dirnames, filenames in os.walk(input_path):
for dirname in dirnames:
            # read_csv does not handle Chinese names well, so English names are used instead
path2 = os.path.join(output_path, folder_name, '%s.csv' % dirname)
try:
df_old = pd.read_csv(path2, encoding='utf-8-sig', parse_dates=True, index_col=['date'])
last_date_csv = df_old.index[-1].strftime('%Y-%m-%d.csv')
dfs = df_old
except:
last_date_csv = '1900-01-01.csv'
dfs = None
sub_dirpath = os.path.join(dirpath, dirname)
print('开始处理', sub_dirpath)
for _dirpath, _dirnames, _filenames in os.walk(sub_dirpath):
for _filename in _filenames:
if _filename <= last_date_csv:
continue
path = os.path.join(_dirpath, _filename)
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True, index_col=['date'])
row = df[df['member_name'] == member_name]
dfs = pd.concat([dfs, row])
# dfs.set_index('date')
if dfs is None:
continue
dfs.to_csv(path2, encoding='utf-8-sig', date_format='%Y-%m-%d')
print("处理完成", path2)
def process2(input_path, output_path, folder_name):
for dirpath, dirnames, filenames in os.walk(input_path):
dfs_long = None
dfs_short = None
for filename in filenames:
path = os.path.join(dirpath, filename)
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=['date'])
df.index = df['date']
col_name = filename[:-4]
col_long = df['long_position_increase']
col_long.name = col_name
dfs_long = pd.concat([dfs_long, col_long], axis=1)
col_short = df['short_position_increase']
col_short.name = col_name
dfs_short = pd.concat([dfs_short, col_short], axis=1)
path2 = os.path.join(output_path, '%s_long_position_increase.csv' % folder_name)
dfs_long.to_csv(path2, encoding='utf-8-sig', date_format='%Y-%m-%d')
path2 = os.path.join(output_path, '%s_short_position_increase.csv' % folder_name)
dfs_short.to_csv(path2, encoding='utf-8-sig', date_format='%Y-%m-%d')
if __name__ == '__main__':
member_name = '前二十名合计'
folder_name = 'top20'
input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir")
output_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed")
process1(input_path, output_path, member_name, folder_name)
input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed", folder_name)
output_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed")
process2(input_path, output_path, folder_name)
member_name = '前十名合计'
folder_name = 'top10'
input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir")
output_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed")
process1(input_path, output_path, member_name, folder_name)
input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed", folder_name)
output_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed")
process2(input_path, output_path, folder_name)
member_name = '前五名合计'
folder_name = 'top5'
input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir")
output_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed")
process1(input_path, output_path, member_name, folder_name)
input_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed", folder_name)
output_path = os.path.join(__CONFIG_H5_FUT_DATA_DIR__, "futureoir_processed")
process2(input_path, output_path, folder_name)
|
{
"content_hash": "b84b5631cdf87fd53cf1912107498a8f",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 103,
"avg_line_length": 42.873684210526314,
"alnum_prop": 0.5926835256567641,
"repo_name": "wukan1986/kquant_data",
"id": "786ab0d7c3831264a23917427a5baa2c2f3c8a84",
"size": "4253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo_future/A03_merge_futureoir.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "AutoIt",
"bytes": "6036"
},
{
"name": "Batchfile",
"bytes": "3417"
},
{
"name": "M",
"bytes": "166"
},
{
"name": "MATLAB",
"bytes": "966"
},
{
"name": "Python",
"bytes": "238130"
}
],
"symlink_target": ""
}
|
'''
Gateway integration for vn.zb (ZB exchange)
'''
import os
import json
from datetime import datetime
from time import sleep
from copy import copy
from threading import Condition
from Queue import Queue
from threading import Thread
from time import sleep
from vnpy.trader.vtGateway import *
from vnpy.api.zb import ZB_Sub_Spot_Api , zb_all_symbol_pairs , zb_all_symbols , zb_all_real_pair
from vnpy.trader.vtFunction import getJsonPath
# Price type mapping
# Order types: limit orders (buy/sell), market orders (buy_market/sell_market)
zb_priceTypeMap = {}
zb_priceTypeMap["1"] = (DIRECTION_LONG, PRICETYPE_LIMITPRICE)
zb_priceTypeMap['buy_market'] = (DIRECTION_LONG, PRICETYPE_MARKETPRICE)
zb_priceTypeMap["0"] = (DIRECTION_SHORT, PRICETYPE_LIMITPRICE)
zb_priceTypeMap['sell_market'] = (DIRECTION_SHORT, PRICETYPE_MARKETPRICE)
zb_priceTypeMapReverse = {v: k for k, v in zb_priceTypeMap.items()}
# Order status mapping
zb_statusMap = {}
zb_statusMap[0] = STATUS_NOTTRADED
zb_statusMap[1] = STATUS_CANCELLED
zb_statusMap[2] = STATUS_ALLTRADED
zb_statusMap[3] = STATUS_PARTTRADED
class zbGateway(VtGateway):
#----------------------------------------------------------------------
def __init__(self, eventEngine, gatewayName='ZB'):
"""Constructor"""
super(zbGateway, self).__init__(eventEngine, gatewayName)
self.api_spot = ZB_API_Spot(self)
self.connected = False
self.fileName = self.gatewayName + '_connect.json'
self.filePath = getJsonPath(self.fileName, __file__)
self.qryEnabled = True
self.countTimer = 0
self.localTimeDelay = 3
        # Start the polling queries
self.initQuery()
self.startQuery()
#----------------------------------------------------------------------
def connect(self):
"""连接"""
# 载入json文件
try:
f = file(self.filePath)
except IOError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'读取连接配置出错,请检查'
self.onLog(log)
return
        # Parse the json file
setting = json.load(f)
try:
apiKey = str(setting['apiKey'])
secretKey = str(setting['secretKey'])
trace = setting["trace"]
except KeyError:
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'连接配置缺少字段,请检查'
self.onLog(log)
return
        # Initialise the API
self.api_spot.active = True
self.api_spot.connect_Subpot( apiKey, secretKey, trace)
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = u'接口初始化成功'
self.onLog(log)
        # Start queries
#----------------------------------------------------------------------
def subscribe(self, subscribeReq):
"""订阅行情"""
self.api_spot.subscribe(subscribeReq)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
return self.api_spot.spotSendOrder(orderReq)
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.api_spot.spotCancel(cancelOrderReq)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户资金"""
self.api_spot.spotUserInfo()
#----------------------------------------------------------------------
def qryOrderInfo(self):
self.api_spot.spotAllOrders()
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
pass
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.api_spot.active = False
self.api_spot.close()
#----------------------------------------------------------------------
def initQuery(self):
"""初始化连续查询"""
if self.qryEnabled:
            # List of query functions to run in rotation
#self.qryFunctionList = [self.qryAccount , self.qryOrderInfo]
self.qryFunctionList = [ self.qryOrderInfo]
#self.qryFunctionList = []
            self.qryCount = 0            # query trigger countdown
            self.qryTrigger = 2          # query trigger threshold
            self.qryNextFunction = 0     # index of the query function run last time
#----------------------------------------------------------------------
def query(self, event):
"""注册到事件处理引擎上的查询函数"""
self.qryCount += 1
self.countTimer += 1
if self.countTimer % self.localTimeDelay == 0:
if self.qryCount > self.qryTrigger:
                # Reset the countdown
self.qryCount = 0
                # Run the query function
function = self.qryFunctionList[self.qryNextFunction]
function()
                # Compute the index of the next query function; wrap back to 0 past the end of the list
self.qryNextFunction += 1
if self.qryNextFunction == len(self.qryFunctionList):
self.qryNextFunction = 0
#----------------------------------------------------------------------
def startQuery(self):
"""启动连续查询"""
self.eventEngine.register(EVENT_TIMER, self.query)
#----------------------------------------------------------------------
def setQryEnabled(self, qryEnabled):
"""设置是否要启动循环查询"""
self.qryEnabled = qryEnabled
class ZB_API_Spot(ZB_Sub_Spot_Api):
""" zb 的 API实现 """
#----------------------------------------------------------------------
def __init__(self, gateway):
"""Constructor"""
super(ZB_API_Spot, self).__init__()
        self.gateway = gateway                  # gateway object
        self.gatewayName = gateway.gatewayName  # name of the gateway object
        self.active = False                     # if True, reconnect automatically after a disconnect
self.cbDict = {}
self.tickDict = {}
self.orderDict = {}
self.channelSymbolMap = {}
        self.localNo = 0                        # local order number
        self.localNoQueue = Queue()             # queue of local order numbers not yet matched to an exchange order id
        self.localNoDict = {}                   # key: local order number, value: exchange order id
        self.orderIdDict = {}                   # key: exchange order id, value: local order number
        self.cancelDict = {}                    # key: local order number, value: cancel request
        self.recordOrderId_BefVolume = {}       # previously processed traded volume per order id
self.tradeID = 0
self.local_status_dict = {}
self.registerSymbolPairArray = set([])
self.initCallback()
#----------------------------------------------------------------------
def subscribe(self ,subscribeReq):
"""订阅行情"""
symbol_pair_gateway = subscribeReq.symbol
arr = symbol_pair_gateway.split('.')
symbol_pair = arr[0]
if symbol_pair not in self.registerSymbolPairArray:
self.registerSymbolPairArray.add(symbol_pair)
self.subscirbeSinglePair( symbol_pair)
#----------------------------------------------------------------------
def onMessage(self, ws, evt):
"""信息推送"""
# print evt
data = self.readData(evt)
try:
channel = data['channel']
except Exception,ex:
channel = None
if channel == None:
return
callback = self.cbDict[channel]
callback(data)
#----------------------------------------------------------------------
def onError(self, ws, evt):
"""错误推送"""
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorMsg = str(evt)
self.gateway.onError(error)
#----------------------------------------------------------------------
def onError(self, data):
error = VtErrorData()
error.gatewayName = self.gatewayName
error.errorMsg = str(data["data"]["error_code"])
self.gateway.onError(error)
#----------------------------------------------------------------------
def onClose(self, ws):
"""接口断开"""
# 如果尚未连上,则忽略该次断开提示
if not self.gateway.connected:
return
self.gateway.connected = False
self.writeLog(u'服务器连接断开')
        # Reconnect
if self.active:
def reconnect():
while not self.gateway.connected:
self.writeLog(u'等待10秒后重新连接')
sleep(10)
if not self.gateway.connected:
self.reconnect()
t = Thread(target=reconnect)
t.start()
#----------------------------------------------------------------------
def subscirbeSinglePair(self ,symbol_pair):
if symbol_pair in zb_all_symbol_pairs:
self.subscribeSpotTicker(symbol_pair)
self.subscribeSpotDepth(symbol_pair)
#self.self.subscribeSpotTrades(symbol_pair)
#----------------------------------------------------------------------
def onOpen(self, ws):
"""连接成功"""
self.gateway.connected = True
self.writeLog(u'服务器连接成功')
self.spotUserInfo()
self.subscirbeSinglePair("btc_qc")
for symbol in zb_all_symbol_pairs:
#self.subscirbeSinglePair(symbol)
use_symbol = symbol.replace('_','')
            # Ticker data
self.channelSymbolMap["%s_ticker" % use_symbol] = symbol
            # Order book depth
self.channelSymbolMap["%s_depth" % use_symbol] = symbol
            # Public trade data
self.channelSymbolMap["%s_trades" % use_symbol] = symbol
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = symbol
contract.exchange = EXCHANGE_ZB
contract.vtSymbol = '.'.join([contract.symbol, contract.exchange])
contract.name = u'ZB现货%s' % symbol
contract.size = 0.00001
contract.priceTick = 0.00001
contract.productClass = PRODUCT_SPOT
self.gateway.onContract(contract)
#----------------------------------------------------------------------
def initCallback(self):
for symbol_pair in zb_all_symbol_pairs:
use_symbol = symbol_pair.replace('_','')
self.cbDict["%s_ticker" % use_symbol] = self.onTicker
self.cbDict["%s_depth" % use_symbol] = self.onDepth
self.cbDict["%s_trades" % use_symbol] = self.onTrades
self.cbDict["%s_order" % use_symbol] = self.onSpotOrder
self.cbDict["%s_cancelorder" % use_symbol] = self.onSpotCancelOrder
self.cbDict["%s_getorder" % use_symbol] = self.onSpotGetOrder
self.cbDict["%s_getorders" % use_symbol] = self.onSpotGetOrders
####self.cbDict["%s_getordersignoretradetype" % use_symbol] = self.onSpotGetOrdersignoretradetype
self.cbDict["%s_getordersignoretradetype" % use_symbol] = self.onSpotGetOrders
# self.cbDict["%s_ticker" % symbol_pair] = self.onTicker
# self.cbDict["%s_depth" % symbol_pair] = self.onDepth
# self.cbDict["%s_trades" % symbol_pair] = self.onTrades
# self.cbDict["%s_order" % symbol_pair] = self.onSpotOrder
# self.cbDict["%s_cancelorder" % symbol_pair] = self.onSpotCancelOrder
# self.cbDict["%s_getorder" % symbol_pair] = self.onSpotGetOrder
# self.cbDict["%s_getorders" % symbol_pair] = self.onSpotGetOrders
# ####self.cbDict["%s_getordersignoretradetype" % use_symbol] = self.onSpotGetOrdersignoretradetype
# self.cbDict["%s_getordersignoretradetype" % use_symbol] = self.onSpotGetOrders
self.cbDict["getaccountinfo"] = self.onSpotUserInfo
#----------------------------------------------------------------------
def writeLog(self, content):
"""快速记录日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
#----------------------------------------------------------------------
def onTicker(self, data):
""""""
if 'ticker' not in data:
return
channel = data['channel']
if channel == 'addChannel':
return
try:
symbol = self.channelSymbolMap[channel]
if symbol not in self.tickDict:
tick = VtTickData()
tick.exchange = EXCHANGE_ZB
tick.symbol = '.'.join([symbol, tick.exchange])
tick.vtSymbol = '.'.join([symbol, tick.exchange])
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
rawData = data['ticker']
tick.highPrice = float(rawData['high'])
tick.lowPrice = float(rawData['low'])
tick.lastPrice = float(rawData['last'])
tick.volume = float(rawData['vol'])
tick.date, tick.time = self.generateDateTime(data['date'])
# print "ticker", tick.date , tick.time
# newtick = copy(tick)
# self.gateway.onTick(newtick)
except Exception,ex:
print "Error in onTicker " , channel
#----------------------------------------------------------------------
def onDepth(self, data):
""""""
try:
channel = data['channel']
symbol = self.channelSymbolMap[channel]
except Exception,ex:
symbol = None
if symbol == None:
return
if symbol not in self.tickDict:
tick = VtTickData()
tick.symbol = symbol
tick.vtSymbol = symbol
tick.gatewayName = self.gatewayName
self.tickDict[symbol] = tick
else:
tick = self.tickDict[symbol]
if 'asks' not in data:
return
asks = data["asks"]
bids = data["bids"]
tick.bidPrice1, tick.bidVolume1 = bids[0]
tick.bidPrice2, tick.bidVolume2 = bids[1]
tick.bidPrice3, tick.bidVolume3 = bids[2]
tick.bidPrice4, tick.bidVolume4 = bids[3]
tick.bidPrice5, tick.bidVolume5 = bids[4]
tick.askPrice1, tick.askVolume1 = asks[-1]
tick.askPrice2, tick.askVolume2 = asks[-2]
tick.askPrice3, tick.askVolume3 = asks[-3]
tick.askPrice4, tick.askVolume4 = asks[-4]
tick.askPrice5, tick.askVolume5 = asks[-5]
tick.date, tick.time = self.generateDateTimeAccordingLocalTime()
# print "Depth", tick.date , tick.time
newtick = copy(tick)
self.gateway.onTick(newtick)
'''
//# Request
{
'event':'addChannel',
'channel':'ltcbtc_trades',
}
//# Response
{
"data": [
{
"date":"1443428902",
"price":"1565.91",
"amount":"0.553",
"tid":"37594617",
"type":"sell",
"trade_type":"ask"
}...
],
"no": 1031995,
"channel": "ltcbtc_trades"
}
'''
#----------------------------------------------------------------------
def onTrades(self, data):
pass
# try:
# channel = data['channel']
# symbol = self.channelSymbolMap[channel]
# except Exception,ex:
# symbol = None
# if symbol == None:
# return
#----------------------------------------------------------------------
def spotAllOrders(self):
for symbol_pair in self.registerSymbolPairArray:
self.spotGetOrderSignOrderTradeType(symbol_pair , 1 , 50 , 1)
#----------------------------------------------------------------------
def onSpotOrder(self, data):
code = data["code"]
if str(code) != "1000":
errorData = {"data":{"error_code":str(code)}}
self.onError(errorData)
return
rawData = json.loads(data['data'].replace("entrustId",'"entrustId"'))
orderId = str(rawData["entrustId"])
        # Although the websocket interface returns order ids asynchronously,
        # testing shows they come back in the order the requests were sent,
        # so the previously queued local order number is popped here and
        # mapped to the pushed exchange order id
localNo = self.localNoQueue.get_nowait()
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
t_symbol = (data["channel"].split('_'))[0]
symbol = zb_all_real_pair[t_symbol]
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = '.'.join([symbol , EXCHANGE_ZB])
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
self.orderDict[orderId] = order
order.status = STATUS_UNKNOWN
local_status = self.local_status_dict.get( str(localNo) , None)
if local_status != None:
order.direction, priceType = zb_priceTypeMap[str(local_status)]
self.gateway.onOrder(order)
        # Check whether a cancel request was issued before the exchange order
        # id came back; if so, perform the cancellation now
if localNo in self.cancelDict:
req = self.cancelDict[localNo]
self.spotCancel(req)
del self.cancelDict[localNo]
'''
{
"success": true,
"code": 1000,
"channel": "ltcbtc_cancelorder",
"message": "操作成功。",
"no": "1472814987517496849777"
}
'''
#----------------------------------------------------------------------
def onSpotCancelOrder(self, data):
code = data["code"]
if str(code) != "1000":
errorData = {"data":{"error_code":str(code)}}
self.onError(errorData)
return
symbol , orderId = data["no"].split('.')
orderId = str(orderId)
localNo = self.orderIdDict[orderId]
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = '.'.join([symbol , EXCHANGE_ZB])
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.status = STATUS_CANCELLED
self.gateway.onOrder(order)
del self.orderDict[orderId]
del self.orderIdDict[orderId]
del self.localNoDict[localNo]
#----------------------------------------------------------------------
def spotSendOrder(self, req):
"""发单"""
symbol = (req.symbol.split('.'))[0]
type_ = zb_priceTypeMapReverse[(req.direction, req.priceType)]
self.spotTrade(symbol, type_, str(req.price), str(req.volume))
        # Increment the local order number, push it onto the queue, and
        # return a vtOrderID based on the local order number
self.localNo += 1
self.localNoQueue.put(str(self.localNo))
self.local_status_dict[str(self.localNo)] = str(type_)
vtOrderID = '.'.join([self.gatewayName, str(self.localNo)])
return vtOrderID
#----------------------------------------------------------------------
def spotCancel(self, req):
"""撤单"""
symbol = (req.symbol.split('.'))[0]
localNo = req.orderID
if localNo in self.localNoDict:
orderID = self.localNoDict[localNo]
self.spotCancelOrder(symbol, orderID)
else:
            # If the client sends a cancel request before the exchange order
            # id has been returned, store it in cancelDict and cancel once
            # the id arrives
self.cancelDict[localNo] = req
#----------------------------------------------------------------------
def onSpotGetOrder(self , data):
"""生成时间"""
code = data["code"]
if str(code) != "1000":
errorData = {"data":{"error_code":str(code)}}
self.onError(errorData)
return
        rawData = data['data'].replace('{','{"').replace('}','"}').replace(":",'":"').replace(',','","').replace('}","{','}{').\
            replace(':"[',':[').replace('}{','},{').replace(']",','],').replace('}"}','}}').replace(':"{"',':{"')
        rawData = json.loads(rawData)   # parse the cleaned-up string, as onSpotGetOrders does
        orderId = str(rawData["id"])
        # A None here means the order was already sent but no local index has
        # been built for it yet: right after sending, the order update can be
        # pushed before the local id is known. For now just return; the idea
        # is to cache this information and process it once the local id appears
localNo = self.orderIdDict.get(orderId , None)
if localNo == None:
print "Error , localNo is none !" + str(localNo)
return
        # Order information
if orderId not in self.orderDict:
order = VtOrderData()
order.gatewayName = self.gatewayName
symbol = zb_all_real_pair[rawData["currency"].replace('_',"")]
order.symbol = '.'.join([symbol, EXCHANGE_ZB])
#order.symbol = spotSymbolMap[rawData['symbol']]
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(rawData['price'])
order.totalVolume = float(rawData['total_amount'])
order.direction, priceType = zb_priceTypeMap[str(rawData['type'])]
self.orderDict[orderId] = order
else:
order = self.orderDict[orderId]
order.tradedVolume = float(rawData['trade_amount'])
order.status = zb_statusMap[int(rawData['status'])]
self.gateway.onOrder(copy(order))
bef_volume = self.recordOrderId_BefVolume.get( orderId , 0.0 )
now_volume = float(rawData['trade_amount']) - bef_volume
if now_volume > 0.00001:
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = order.symbol
trade.vtSymbol = order.symbol
self.tradeID += 1
trade.tradeID = str(self.tradeID)
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = localNo
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.price = float(rawData['price'])
trade.volume = float(now_volume)
trade.direction, priceType = zb_priceTypeMap[str(rawData['type'])]
trade.tradeTime = datetime.now().strftime('%H:%M:%S')
self.gateway.onTrade(trade)
if order.status in [STATUS_CANCELLED , STATUS_ALLTRADED]:
del self.orderIdDict[orderId]
del self.orderDict[orderId]
#----------------------------------------------------------------------
def onSpotGetOrders(self, data):
code = data["code"]
if str(code) != "1000":
errorData = {"data":{"error_code":str(code)}}
self.onError(errorData)
return
rawData = data['data'].replace('{','{"').replace('}','"}').replace(":",'":"').replace(',','","').replace('}","{','}{').\
replace(':"[',':[').replace('}{','},{').replace(']",','],').replace('}"}','}}').replace(':"{"',':{"')
rawData = json.loads(rawData)
orderDictKeys = self.orderDict.keys()
system_has_orderID_list = self.orderIdDict.keys()
for d in rawData:
orderId = str(d["id"])
if orderId in system_has_orderID_list:
localNo = self.orderIdDict[orderId]
order = self.orderDict[orderId]
order.price = float(d["price"])
order.totalVolume = float(d["total_amount"])
order.tradedVolume = float(d['trade_amount'])
order.status = zb_statusMap[int(d['status'])]
self.gateway.onOrder(copy(order))
bef_volume = self.recordOrderId_BefVolume.get( orderId , 0.0 )
now_volume = float(d['trade_amount']) - bef_volume
if now_volume > 0.00001:
trade = VtTradeData()
trade.gatewayName = self.gatewayName
trade.symbol = order.symbol
trade.vtSymbol = order.symbol
self.tradeID += 1
trade.tradeID = str(self.tradeID)
trade.vtTradeID = '.'.join([self.gatewayName, trade.tradeID])
trade.orderID = localNo
trade.vtOrderID = '.'.join([self.gatewayName, trade.orderID])
trade.price = float(d['price'])
trade.volume = float(now_volume)
trade.direction, priceType = zb_priceTypeMap[str(d['type'])]
trade.tradeTime = datetime.now().strftime('%H:%M:%S')
self.gateway.onTrade(trade)
if order.status in [STATUS_CANCELLED , STATUS_ALLTRADED]:
del self.orderIdDict[orderId]
del self.orderDict[orderId]
else:
if zb_statusMap[int(d['status'])] not in [STATUS_CANCELLED , STATUS_ALLTRADED]:
self.localNo += 1
localNo = str(self.localNo)
orderId = str(d['id'])
self.localNoDict[localNo] = orderId
self.orderIdDict[orderId] = localNo
order = VtOrderData()
order.gatewayName = self.gatewayName
symbol = zb_all_real_pair[d["currency"].replace('_',"")]
order.symbol = '.'.join([symbol , EXCHANGE_ZB])
order.vtSymbol = order.symbol
order.orderID = localNo
order.vtOrderID = '.'.join([self.gatewayName, order.orderID])
order.price = float(d['price'])
order.totalVolume = float(d['total_amount'])
order.direction, priceType = zb_priceTypeMap[str(d['type'])]
self.orderDict[orderId] = order
order.tradedVolume = float(d['trade_amount'])
order.status = zb_statusMap[int(d['status'])]
self.gateway.onOrder(copy(order))
#----------------------------------------------------------------------
# def onSpotGetOrdersignoretradetype(self, data):
"""取消tradeType字段过滤,可同时获取买单和卖单,每次请求返回pageSize<100条记录"""
"""因为跟 getOrders 功能重复,统一处理了"""
'''
{
"message": "操作成功",
"no": "15207605119",
"data": {
"coins": [
{
"freez": "1.35828369",
"enName": "BTC",
"unitDecimal": 8,
"cnName": "BTC",
"unitTag": "฿",
"available": "0.72771906",
"key": "btc"
},
{
"freez": "0.011",
"enName": "LTC",
"unitDecimal": 8,
"cnName": "LTC",
"unitTag": "Ł",
"available": "3.51859814",
"key": "ltc"
},
...
],
"base": {
"username": "15207605119",
"trade_password_enabled": true,
"auth_google_enabled": true,
"auth_mobile_enabled": true
}
},
"code": 1000,
"channel": "getaccountinfo",
"success": true
}
'''
#----------------------------------------------------------------------
def onSpotUserInfo(self, data):
code = data["code"]
if str(code) != "1000":
errorData = {"data":{"error_code":str(code)}}
self.onError(errorData)
return
try:
rawData = data['data'].replace('{','{"').replace('}','"}').replace(":",'":"').replace(',','","').replace('}","{','}{').\
replace(':"[',':[').replace('}{','},{').replace(']",','],').replace('}"}','}}').replace(':"{"',':{"')
rawData = json.loads(rawData)
coins = rawData["coins"]
        except Exception, ex:
            print ex
            return
for coin in coins:
symbol = coin["cnName"].lower()
if symbol in zb_all_symbols:
pos = VtPositionData()
pos.gatewayName = self.gatewayName
pos.symbol = symbol + "." + EXCHANGE_ZB
pos.vtSymbol = symbol + "." + EXCHANGE_ZB
pos.vtPositionName = symbol
pos.direction = DIRECTION_NET
pos.frozen = float(coin["freez"])
pos.position = pos.frozen + float(coin["available"])
self.gateway.onPosition(pos)
        # Account funds
account = VtAccountData()
account.gatewayName = self.gatewayName
account.accountID = self.gatewayName
account.vtAccountID = account.accountID
account.balance = 0.0
#account.balance = float(funds['asset']['net'])
self.gateway.onAccount(account)
#----------------------------------------------------------------------
def generateDateTime(self, s):
"""生成时间"""
dt = datetime.fromtimestamp(float(s)/1e3)
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
#----------------------------------------------------------------------
def generateDateTimeAccordingLocalTime(self):
dt = datetime.now()
time = dt.strftime("%H:%M:%S.%f")
date = dt.strftime("%Y%m%d")
return date, time
|
{
"content_hash": "333fbe90852c6098b4aaf1f4f156cf86",
"timestamp": "",
"source": "github",
"line_count": 855,
"max_line_length": 132,
"avg_line_length": 35.23859649122807,
"alnum_prop": 0.4736964386471506,
"repo_name": "wisfern/vnpy",
"id": "db7f7f16c6da439a6fb90705a4a06e5bf55a5e9a",
"size": "31587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beta/gateway/zbGateway/zbGateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "341"
},
{
"name": "C",
"bytes": "3151559"
},
{
"name": "C++",
"bytes": "8866606"
},
{
"name": "CMake",
"bytes": "44564"
},
{
"name": "HTML",
"bytes": "807"
},
{
"name": "Makefile",
"bytes": "99693"
},
{
"name": "Objective-C",
"bytes": "22505"
},
{
"name": "PHP",
"bytes": "4107"
},
{
"name": "Python",
"bytes": "5367161"
},
{
"name": "Shell",
"bytes": "3722"
}
],
"symlink_target": ""
}
|
import socket
import vstruct
from vstruct.primitives import *
import vstruct.defs.inet as vs_inet
# only the currently parsed record types
DNS_TYPE_A = 1
DNS_TYPE_NS = 2
DNS_TYPE_CNAME = 5
DNS_TYPE_SOA = 6
DNS_TYPE_PTR = 12
DNS_TYPE_MX = 15
DNS_TYPE_TXT = 16
dns_type_names = {
DNS_TYPE_A: 'A',
DNS_TYPE_NS: 'NS',
DNS_TYPE_CNAME: 'CNAME',
DNS_TYPE_SOA: 'SOA',
DNS_TYPE_PTR: 'PTR',
DNS_TYPE_MX: 'MX',
DNS_TYPE_TXT: 'TXT',
}
DNS_CLASS_IN = 0x0001
DNS_NAMETYPE_LABEL = 0
DNS_NAMETYPE_RESERVED = 1
DNS_NAMETYPE_EXTENDED = 2
DNS_NAMETYPE_POINTER = 3
DNS_NAMETYPE_LABELPOINTER = 4
DNS_FLAG_RESP = 1 << 15
DNS_FLAG_TRUNC = 1 << 9
DNS_FLAG_RECUR = 1 << 8
#DNS_FLAG_AA = 1 << 5 # authoritative answer
#DNS_FLAG_TC = 1 << 6 # truncated response
#DNS_FLAG_RD = 1 << 7 # recursion desired
#DNS_FLAG_RA = 1 << 8 # recursion allowed
#DNS_FLAG_AD = 1 << 10 # authentic data
#DNS_FLAG_CD = 1 << 11 # checking disabled
#totally arbitrary count value to abort parsing dns records
DNS_SUSPICIOUS_COUNT = 0x20
class DnsParseError(Exception):
pass
def dnsFlagsOp(val):
return (val >> 11) & 0xf
class DnsNameLabel(vstruct.VStruct):
'''
A DNS Name component.
'''
def __init__(self, label=''):
vstruct.VStruct.__init__(self)
self._nametype = 0
self._pointer = 0
self.length = v_uint8( len(label) )
self.label = v_bytes( vbytes=label )
def pcb_length(self):
size = self.length
labeltype = size >> 6
# ordered by use
if labeltype == 0b11:
self._nametype = DNS_NAMETYPE_POINTER
size = 1
elif labeltype == 0b00:
self._nametype = DNS_NAMETYPE_LABEL
elif labeltype == 0b10:
raise DnsParseError('Extended labeltype is not supported.')
elif labeltype == 0b01:
raise DnsParseError('Unrecognized labeltype (reserved).')
self.vsGetField('label').vsSetLength(size)
def pcb_label(self):
if self.length == 0:
return
if self._nametype == DNS_NAMETYPE_POINTER:
ordlabel = 0
# some broken(?) packets have a pointer with an empty label
if self.label:
ordlabel = ord(self.label)
# the length field's lower 6 bits + the 8 bits from the label form the pointer
self._pointer = ((self.length ^ 0xc0) << 8) + ordlabel
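            # e.g. a compressed name encoded as bytes 0xC0 0x0C yields
            # ((0xC0 ^ 0xC0) << 8) + 0x0C == 12, the classic pointer to the
            # first name at offset 12 of a DNS message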
def getNameType(self):
return self._nametype
def getNamePointer(self):
return self._pointer
def isNamePointer(self):
if self._nametype == DNS_NAMETYPE_POINTER:
return True
return False
def isNameTerm(self):
if self.length == 0 or self._nametype == DNS_NAMETYPE_POINTER:
return True
return False
class DnsName(vstruct.VArray):
'''
The contiguous labels (DnsNameLabel()) in a DNS Name field. Note that the
last label may simply be a pointer to an offset earlier in the DNS message.
'''
def __init__(self, name=None):
vstruct.VArray.__init__(self)
if name != None:
for part in name.split('.'):
self.vsAddElement( DnsNameLabel( part ) )
self.vsAddElement( DnsNameLabel('') )
def getTypeVal(self):
'''
Return a (nametype, nameval) tuple based on walking the labels.
'''
nametype = None
namepointer = None
labels = []
for fname,fobj in self.vsGetFields():
nametype = fobj.getNameType()
if nametype == DNS_NAMETYPE_LABEL and fobj.length != 0:
labels.append(fobj.label)
if nametype == DNS_NAMETYPE_POINTER:
namepointer = fobj.getNamePointer()
if labels:
nametype = DNS_NAMETYPE_LABELPOINTER
joinedlabels = '.'.join(labels)
if nametype == DNS_NAMETYPE_LABEL:
return nametype,joinedlabels
elif nametype == DNS_NAMETYPE_POINTER:
return nametype,namepointer
elif nametype == DNS_NAMETYPE_LABELPOINTER:
return nametype,(joinedlabels,namepointer)
raise DnsParseError('Unrecognized label.')
def vsParse(self, bytez, offset=0):
while offset < len(bytez):
nl = DnsNameLabel()
labelofs = offset
offset = nl.vsParse(bytez, offset=offset)
self.vsAddElement(nl)
if nl.isNamePointer() and nl.getNamePointer() >= labelofs:
raise DnsParseError('Label points forward (or to self).')
if nl.isNameTerm():
break
return offset
class DnsMailboxAsName(DnsName):
'''
A DNS Name used to encode a mailbox address.
'''
pass
class DnsQuestion(vstruct.VStruct):
'''
A DNS Question Record (the query).
'''
def __init__(self, name=None, qtype=0, qclass=0):
vstruct.VStruct.__init__(self)
self.qname = DnsName(name=name)
self.qtype = v_uint16(qtype, bigend=True)
self.qclass = v_uint16(qclass, bigend=True)
class DnsQuestionArray(vstruct.VArray):
'''
A DNS Question Section.
'''
def __init__(self, reccnt):
vstruct.VArray.__init__(self)
for i in xrange(reccnt):
self.vsAddElement(DnsQuestion())
class DnsResourceRecord(vstruct.VStruct):
'''
A DNS Resource Record. Used in the Answer, Authority, and Additional Sections.
'''
def __init__(self):
vstruct.VStruct.__init__(self)
self.dnsname = DnsName()
self.rrtype = v_uint16(bigend=True)
self.dnsclass = v_uint16(bigend=True)
self.ttl = v_uint32(bigend=True)
self.rdlength = v_uint16(bigend=True)
self.rdata = vstruct.VStruct()
def pcb_rrtype(self):
if self.rrtype == DNS_TYPE_A:
self.rdata.address = vs_inet.IPv4Address()
elif self.rrtype == DNS_TYPE_NS:
self.rdata.nsdname = DnsName()
elif self.rrtype == DNS_TYPE_CNAME:
self.rdata.cname = DnsName()
elif self.rrtype == DNS_TYPE_SOA:
self.rdata.mname = DnsName()
# this is an encoded email address
self.rdata.rname = DnsMailboxAsName()
self.rdata.serial = v_uint32(bigend=True)
self.rdata.refresh = v_uint32(bigend=True)
self.rdata.retry = v_uint32(bigend=True)
self.rdata.expire = v_uint32(bigend=True)
self.rdata.minimum = v_uint32(bigend=True)
elif self.rrtype == DNS_TYPE_PTR:
self.rdata.ptrdname = DnsName()
elif self.rrtype == DNS_TYPE_MX:
self.rdata.preference = v_uint16(bigend=True)
self.rdata.exchange = DnsName()
elif self.rrtype == DNS_TYPE_TXT:
self.rdata.txtdata = v_str()
else:
self.rdata.bytez = v_bytes()
def pcb_rdlength(self):
size = self.rdlength
if self.rdata.vsHasField('bytez'):
self.rdata.vsGetField('bytez').vsSetLength(size)
elif self.rdata.vsHasField('txtdata'):
self.rdata.vsGetField('txtdata').vsSetLength(size)
class DnsResourceRecordArray(vstruct.VArray):
'''
A DNS RR Section (Answer, Authority, or Additional).
'''
def __init__(self, reccnt):
vstruct.VArray.__init__(self)
for i in xrange(reccnt):
self.vsAddElement(DnsResourceRecord())
class DnsMessage(vstruct.VStruct):
'''
A DNS Message.
'''
def __init__(self, tcpdns=False):
vstruct.VStruct.__init__(self)
self._tcpdns = tcpdns
if tcpdns:
self.length = v_uint16(bigend=True)
self.transid = v_uint16(bigend=True)
self.flags = v_uint16(bigend=True)
self.qdcount = v_uint16(bigend=True)
self.ancount = v_uint16(bigend=True)
self.nscount = v_uint16(bigend=True)
self.arcount = v_uint16(bigend=True)
self.section = vstruct.VStruct()
self.section.question = DnsQuestionArray(0)
self.section.answer = DnsResourceRecordArray(0)
self.section.authority = DnsResourceRecordArray(0)
self.section.additional = DnsResourceRecordArray(0)
self._nptr = {} # name pointer cache
def pcb_qdcount(self):
if self.qdcount > DNS_SUSPICIOUS_COUNT:
raise RuntimeError('DNS suspicious count threshold hit')
self.section.question = DnsQuestionArray(self.qdcount)
def pcb_ancount(self):
if self.ancount > DNS_SUSPICIOUS_COUNT:
raise RuntimeError('DNS suspicious count threshold hit')
self.section.answer = DnsResourceRecordArray(self.ancount)
def pcb_nscount(self):
if self.nscount > DNS_SUSPICIOUS_COUNT:
raise RuntimeError('DNS suspicious count threshold hit')
self.section.authority = DnsResourceRecordArray(self.nscount)
def pcb_arcount(self):
if self.arcount > DNS_SUSPICIOUS_COUNT:
raise RuntimeError('DNS suspicious count threshold hit')
self.section.additional = DnsResourceRecordArray(self.arcount)
def vsParse(self, bytez, offset=0):
self._dns_bytes = bytez
self._dns_offset = offset
return vstruct.VStruct.vsParse(self, bytez, offset=offset)
def _getLabelPointerRef(self, msgofs):
'''
Given an offset relative to the beginning of a message, create a
DnsName() structure based on the data there, and return the results
of its getTypeVal() method (a (nametype, nameval) tuple).
'''
# msgofs is relative to the beginning of the message, not necessarily the stream
if not self._nptr.has_key(msgofs):
# these are often repeated within a message, so we cache them
self._nptr[msgofs] = DnsName()
self._nptr[msgofs].vsParse(self._dns_bytes, self._dns_offset + msgofs)
return self._nptr[msgofs].getTypeVal()
def getDnsName(self, nametype, nameval):
'''
Given a nametype (one of the DNS_NAMETYPE_* constants) and nameval
(depending on the type, either an fqdn, pointer, or partial fqdn and
a pointer), return an fqdn. This is meant to be called with the
results from a DnsName() instance's getTypeVal() method.
'''
if nametype == DNS_NAMETYPE_LABEL:
fqdn = nameval
elif nametype == DNS_NAMETYPE_POINTER:
offset = nameval
if self._tcpdns:
# if we're a TCP packet, the 'length' field is not included in the pointer offset
offset += 2
fqdn = self.getDnsName(*self._getLabelPointerRef(offset))
elif nametype == DNS_NAMETYPE_LABELPOINTER:
beginlabels,offset = nameval
if self._tcpdns:
# if we're a TCP packet, the 'length' field is not included in the pointer offset
offset += 2
ptrtype,ptrval = self._getLabelPointerRef(offset)
endlabels = self.getDnsName(ptrtype, ptrval)
fqdn = '.'.join((beginlabels, endlabels))
return fqdn
def getQuestionRecords(self):
'''
Return a list of Question records as (dnstype, dnsclass, fqdn) tuples.
'''
ret = []
for fname,q in self.section.question.vsGetFields():
fqdn = self.getDnsName(*q.qname.getTypeVal())
ret.append((q.qtype, q.qclass, fqdn))
return ret
def _getResourceRecords(self, structure):
'''
Given a DnsResourceRecordArray() structure, return a list of Resource
Records as (dnstype, dnsclass, ttl, fqdn, adata) tuples. If a parser
is available for the dnsclass, the 'rdata' field will be further parsed
into its components (as a tuple if necessary).
'''
ret = []
for fname,rr in structure.vsGetFields():
fqdn = self.getDnsName(*rr.dnsname.getTypeVal())
rdata = None
if rr.rrtype == DNS_TYPE_A:
rdata = vs_inet.reprIPv4Addr(rr.rdata.address)
elif rr.rrtype == DNS_TYPE_NS:
rdata = self.getDnsName(*rr.rdata.nsdname.getTypeVal())
elif rr.rrtype == DNS_TYPE_CNAME:
rdata = self.getDnsName(*rr.rdata.cname.getTypeVal())
elif rr.rrtype == DNS_TYPE_SOA:
rdata = (self.getDnsName(*rr.rdata.mname.getTypeVal()),
self.getDnsName(*rr.rdata.rname.getTypeVal()),
rr.rdata.serial,
rr.rdata.refresh,
rr.rdata.retry,
rr.rdata.expire,
rr.rdata.minimum)
elif rr.rrtype == DNS_TYPE_PTR:
rdata = self.getDnsName(*rr.rdata.ptrdname.getTypeVal())
elif rr.rrtype == DNS_TYPE_MX:
rdata = (rr.rdata.preference,
self.getDnsName(*rr.rdata.exchange.getTypeVal()))
elif rr.rrtype == DNS_TYPE_TXT:
rdata = rr.rdata.txtdata
else:
rdata = rr.rdata.bytez
ret.append((rr.rrtype, rr.dnsclass, rr.ttl, fqdn, rdata))
return ret
def getAnswerRecords(self):
'''
Return a list of Answer records as (rrtype, dnsclass, ttl, fqdn,
rdata) tuples. If a parser is available for the dnsclass, the
'rdata' field will be further parsed into its components (as a
tuple if necessary).
'''
return self._getResourceRecords(structure=self.section.answer)
def getAuthorityRecords(self):
'''
Return a list of Authority records as (rrtype, dnsclass, ttl,
fqdn, rdata) tuples. If a parser is available for the dnsclass,
the 'rdata' field will be further parsed into its components
(as a tuple if necessary).
'''
return self._getResourceRecords(structure=self.section.authority)
def getAdditionalRecords(self):
'''
Return a list of Additional records as (rrtype, dnsclass, ttl,
fqdn, rdata) tuples. If a parser is available for the dnsclass,
the 'rdata' field will be further parsed into its components (as
a tuple if necessary).
'''
return self._getResourceRecords(structure=self.section.additional)
def getDnsNames(self):
'''
Return a list of the DNS names in the message.
'''
fqdns = set()
for ofs,indent,fname,fobj in self.vsGetPrintInfo():
if fobj.vsGetTypeName() == 'DnsName':
fqdns.add(self.getDnsName(*fobj.getTypeVal()))
return list(fqdns)
def getIPv4Integers(self):
'''
Return a list of the IPv4 addresses in the message.
'''
ips = set()
for ofs,indent,fname,fobj in self.vsGetPrintInfo():
if fobj.vsGetTypeName() == 'IPv4Address':
ips.add(fobj._vs_value)
return list(ips)
def getEmailAddresses(self):
'''
Return a list of the email addresses which are encoded as DNS names
in the message (they are decoded back to email addresses here).
'''
emails = set()
for ofs,indent,fname,fobj in self.vsGetPrintInfo():
if fobj.vsGetTypeName() == 'DnsMailboxAsName':
mailbox = self.getDnsName(*fobj.getTypeVal())
parts = mailbox.split('.', 1)
emails.add('@'.join(parts))
return list(emails)
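# A minimal usage sketch (added for illustration; assumes `payload` holds the
# raw bytes of a single UDP DNS response obtained elsewhere, e.g. from a pcap):
#
#     msg = DnsMessage()
#     msg.vsParse(payload)
#     for qtype, qclass, fqdn in msg.getQuestionRecords():
#         print dns_type_names.get(qtype, qtype), fqdn
#     for rrtype, rrclass, ttl, fqdn, rdata in msg.getAnswerRecords():
#         print dns_type_names.get(rrtype, rrtype), fqdn, rdata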
|
{
"content_hash": "79e2741ede4b05c10947914eca4ecbb5",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 97,
"avg_line_length": 36.24768518518518,
"alnum_prop": 0.5889903569832046,
"repo_name": "HackerTool/vivisect",
"id": "13810cfeb03266b593ae8d268bd27fda994db68b",
"size": "15659",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vstruct/defs/dns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11384786"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
import os
from skimage._build import cython
base_path = os.path.abspath(os.path.dirname(__file__))
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('filters', parent_package, top_path)
config.add_data_dir('tests')
config.add_data_dir('rank/tests')
cython(['_ctmf.pyx'], working_path=base_path)
cython(['rank/core_cy.pyx'], working_path=base_path)
cython(['rank/generic_cy.pyx'], working_path=base_path)
cython(['rank/percentile_cy.pyx'], working_path=base_path)
cython(['rank/bilateral_cy.pyx'], working_path=base_path)
config.add_extension('_ctmf', sources=['_ctmf.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('rank.core_cy', sources=['rank/core_cy.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension('rank.generic_cy', sources=['rank/generic_cy.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension(
'rank.percentile_cy', sources=['rank/percentile_cy.c'],
include_dirs=[get_numpy_include_dirs()])
config.add_extension(
'rank.bilateral_cy', sources=['rank/bilateral_cy.c'],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='scikit-image Developers',
author='scikit-image Developers',
maintainer_email='scikit-image@python.org',
description='Filters',
url='https://github.com/scikit-image/scikit-image',
license='SciPy License (BSD Style)',
**(configuration(top_path='').todict())
)
|
{
"content_hash": "6be5e5262a3e7589870e135409de75c9",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 38.77777777777778,
"alnum_prop": 0.6458452722063037,
"repo_name": "paalge/scikit-image",
"id": "0a21856bfd7bec9d9013c1c375ff46b92ade351f",
"size": "1768",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "skimage/filters/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "235642"
},
{
"name": "C++",
"bytes": "44817"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "2532932"
}
],
"symlink_target": ""
}
|
import sys
import os
if len(sys.argv) > 1:
# Get path to cross_sections.out file from command line argument
filename = sys.argv[-1]
else:
# Set default path for cross_sections.out
filename = 'cross_sections.out'
if not os.path.exists(filename):
raise OSError('Could not find cross_sections.out file!')
# Open file handle for cross_sections.out file
f = open(filename, 'r')
# Initialize memory size arrays
memory_xs = []
memory_angle = []
memory_energy = []
memory_urr = []
memory_total = []
memory_sab = []
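# The parsing below assumes (inferred from this script, not from the code that
# writes the file) that cross_sections.out contains, per nuclide, a block like:
#
#   Memory Requirements
#     Reaction Cross Sections = 123456 bytes
#     Secondary Angle Distributions = 12345 bytes
#     Secondary Energy Distributions = 12345 bytes
#     Probability Tables = 1234 bytes
#     Total = 150000 bytes
#
# plus, for each S(a,b) table, a single line such as "Memory Used = 1234 bytes".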
while True:
# Read next line in file
line = f.readline()
# Check for EOF
if line == '':
break
# Look for block listing memory usage for a nuclide
words = line.split()
if len(words) == 2 and words[0] == 'Memory':
memory_xs.append(int(f.readline().split()[-2]))
memory_angle.append(int(f.readline().split()[-2]))
memory_energy.append(int(f.readline().split()[-2]))
memory_urr.append(int(f.readline().split()[-2]))
memory_total.append(int(f.readline().split()[-2]))
# Look for memory usage for S(a,b) table
if len(words) == 5 and words[1] == 'Used':
memory_sab.append(int(words[-2]))
# Write out summary memory usage
print('Memory Requirements')
print(' Reaction Cross Sections = ' + str(sum(memory_xs)))
print(' Secondary Angle Distributions = ' + str(sum(memory_angle)))
print(' Secondary Energy Distributions = ' + str(sum(memory_energy)))
print(' Probability Tables = ' + str(sum(memory_urr)))
print(' S(a,b) Tables = ' + str(sum(memory_sab)))
print(' Total = ' + str(sum(memory_total)))
|
{
"content_hash": "fc678725d9f975a1ab9780fcf3579f94",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 70,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.6134751773049646,
"repo_name": "bhermanmit/cdash",
"id": "bf3cc1efd76a75611369270b37f9b7f8bd33a563",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/ctests",
"path": "src/utils/memory_usage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "FORTRAN",
"bytes": "2289329"
},
{
"name": "Python",
"bytes": "311545"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
import time
from mopidy import backend
logger = logging.getLogger(__name__)
class GMusicPlaybackProvider(backend.PlaybackProvider):
def __init__(self, audio, backend):
super(GMusicPlaybackProvider, self).__init__(audio, backend)
self._track_start = 0
self._track = None
def change_track(self, track):
logger.debug('play(): %r', track)
url = self.backend.session.get_stream_url(track.uri.split(':')[2])
if url is None:
return False
self.audio.set_uri(url).get()
self._track_start = time.time()
self._track = track
return True
def stop(self):
super(GMusicPlaybackProvider, self).stop()
if not self._track:
logger.debug('Current track is unset')
elif 0 < self._track.length * 2/3 \
< (time.time() - self._track_start) * 1000:
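            # Report the play only if at least two thirds of the track has
            # elapsed since change_track(); track.length is in milliseconds,
            # hence the * 1000 on the wall-clock delta.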
track_id = self._track.uri.split(':')[2]
logger.debug('Broadcast play to google music: %r', track_id)
self.backend.session.increment_song_playcount(track_id)
self._track = None # prevent additional calls
else:
logger.debug('Track got skipped')
|
{
"content_hash": "71d0393f60e076cdcd7a10ab02de3d47",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 74,
"avg_line_length": 32.15384615384615,
"alnum_prop": 0.5988835725677831,
"repo_name": "elrosti/mopidy-gmusic",
"id": "e8e9f8923bc007c355cf569e54696db349bdee15",
"size": "1254",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mopidy_gmusic/playback.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55755"
}
],
"symlink_target": ""
}
|
import random
class Dictannabinol(dict):
"""
    Dictionary that forgets things based on a trust ratio.
    The trust ratio is the reliability of the object (as a percentage).
    All overridden functions have a (small) chance to lose some data;
    the chance to remove data is 1 minus the threshold, so it is stated here
    once rather than repeated in each function's comments.
    A trust of 1 corresponds to an object that never forgets on its own.
"""
#######################
# Dictannabinol basis #
#######################
def __init__(self, trust_percent = 0.75):
"""
Default constructor for a Dictannabinol
_trust percentage of reliability of a dictannabinol (default: 0.75)
"""
super().__init__(self)
if 0 <= trust_percent <= 1:
self._trust = trust_percent # _trust is protected (convention)
else:
raise ValueError('trust ratio must be within 0-1')
@property
def trust(self):
return self._trust
@trust.setter # This need the previous @property to work
def trust(self, trust_percent):
"""
Change trust ratio
Using decorators to perform a simple check on values
"""
if 0 <= trust_percent <= 1:
self._trust = trust_percent
else:
raise ValueError('trust ratio must be within 0-1')
##################
# Misc functions #
##################
def __forgot(self, threshold, key=False):
"""
        Forget something based on a threshold; if above the threshold -> removed.
        If no key is specified the removal will be random.
        threshold: from 0 to 1, the chance to not be removed.
        If the object is fully trusted, it won't forget.
"""
# If trusty, stay trusty and don't forget
if self._trust < 1:
n = random.random()
if key and key in super().keys() and n > threshold:
super().__delitem__(key)
elif n > threshold:
key = random.choice(list(super().keys()))
super().__delitem__(key)
else:
pass
###########################
# Override dict functions #
###########################
def __contains__(self, query):
""" Give unreliable informations about the existence of keys in dict """
# 1 - % chance of random removal
self.__forgot(threshold = 0.98, key = query)
# actual method
if random.random() > self._trust:
return False
else:
return super().__contains__(query)
def __setitem__(self, key, value):
"""
Add (or not) the key:value to the dictannabinol based
on a trust value
N.B. Here we use the __forgot function to simulate the "not adding
the value"
"""
super().__setitem__(key, value)
self.__forgot(threshold = self._trust, key = key)
def __getitem__(self, key):
"""
Get value from key (or not) based on the trust value
"""
# 1 - % chance of random removal
self.__forgot(threshold = 0.95)
# actual method
if random.random() <= self._trust:
return super().__getitem__(key)
else:
raise KeyError(key)
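# A small usage sketch (added for illustration; results vary between runs
# because forgetting is random):
#
#     d = Dictannabinol(trust_percent=0.9)
#     for i in range(100):
#         d[i] = i * i        # each insert may silently be forgotten
#     print(len(d))           # usually a bit below 100
#     d.trust = 0.5           # lowering trust makes lookups unreliable too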
|
{
"content_hash": "a093ee4b705fa7388934f7e827f6cf0f",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 80,
"avg_line_length": 29.17241379310345,
"alnum_prop": 0.5233451536643026,
"repo_name": "Piplopp/misc",
"id": "96b8e1c5e9e782d9c09fec61dd20403f6cab0a93",
"size": "3427",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dictannabinol.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "LiveScript",
"bytes": "4228"
},
{
"name": "Python",
"bytes": "22084"
}
],
"symlink_target": ""
}
|
"""mBuild library of atoms."""
from mbuild.lib.atoms.c3 import C3
from mbuild.lib.atoms.h import H
from mbuild.lib.atoms.n4 import N4
|
{
"content_hash": "8b32120989e9115ae1422acce85bfe4d",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 34,
"avg_line_length": 33.5,
"alnum_prop": 0.7611940298507462,
"repo_name": "iModels/mbuild",
"id": "0f8bda67834318966874c8ecfa98d5fd2cf96fc6",
"size": "134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mbuild/lib/atoms/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "43251"
},
{
"name": "Python",
"bytes": "218822"
},
{
"name": "Shell",
"bytes": "1124"
}
],
"symlink_target": ""
}
|
import sys
import os
sys.path.append(os.getcwd())
from pypaper import log
from pypaper.google import Archive
if __name__ == '__main__':
import argparse
p = argparse.ArgumentParser()
p.add_argument('bibtex', help='bibtex file')
p.add_argument('-c', '--cache', help='cache directory', default='cache')
p.add_argument('-d', '--database', help='database file', default='google.db')
p.add_argument('-w', '--wait', help='interval between two query', default=30)
args = p.parse_args()
if not os.path.exists(args.cache):
os.makedirs(args.cache)
log.init(os.path.join(args.cache, 'google.log'), stdout=True)
arc = Archive(args.database, interval=float(args.wait))
arc.import_bibtex(args.bibtex)
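# Hedged usage sketch (hypothetical file names):
#
#     python scripts/google_fetch.py refs.bib -c cache -d google.db -w 30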
|
{
"content_hash": "6265e2ebd6ce7915866c904b866f082b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 81,
"avg_line_length": 31,
"alnum_prop": 0.6612903225806451,
"repo_name": "Answeror/pypaper",
"id": "8592f814d543eaf8326bdbb42a3567757e78ee68",
"size": "791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/google_fetch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66844"
},
{
"name": "Shell",
"bytes": "181"
}
],
"symlink_target": ""
}
|
from BaseScouting.views.standard_views.base_team import BaseSingleTeamView
from Scouting2017.model.reusable_models import Team, TeamPictures, TeamComments, \
TeamCompetesIn
from Scouting2017.model.models2017 import TeamPitScouting
from Scouting2017.model.get_team_metrics import get_team_metrics
class SingleTeamView2017(BaseSingleTeamView):
def __init__(self):
BaseSingleTeamView.__init__(self, Team, TeamPictures, TeamComments, TeamPitScouting, TeamCompetesIn, 'Scouting2017/team.html')
def _get_metrics(self, team, regional_code):
return get_team_metrics(team, regional_code)
|
{
"content_hash": "3c06290c7e8af9e83a2cf2f77db2231e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 134,
"avg_line_length": 43.642857142857146,
"alnum_prop": 0.7855973813420621,
"repo_name": "ArcticWarriors/scouting-app-2016",
"id": "14e5ff8da909c517bde555b5c2d5333c2296c8e3",
"size": "611",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ScoutingWebsite/Scouting2017/view/standard_views/team.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2636"
},
{
"name": "HTML",
"bytes": "75765"
},
{
"name": "JavaScript",
"bytes": "16877"
},
{
"name": "Python",
"bytes": "94669"
}
],
"symlink_target": ""
}
|
"""
Different kind of fuzzy sets.
For any of these you can call C{set(x)} to get the membership value of x.
See L{Set<fuzzy.set.Set>} for more.
Examples can be found here U{http://pyfuzzy.sourceforge.net/demo/set/}
"""
__revision__ = "$Id: __init__.py,v 1.11 2010-03-28 18:44:46 rliebscher Exp $"
|
{
"content_hash": "60305735e00e4ee1a464f114298f7524",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 77,
"avg_line_length": 27.363636363636363,
"alnum_prop": 0.6810631229235881,
"repo_name": "avatar29A/pyfuzzy",
"id": "bd26b4c731463d2815702adaeca3f2d94b13649f",
"size": "1025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuzzy/set/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GAP",
"bytes": "13981"
},
{
"name": "Python",
"bytes": "712985"
}
],
"symlink_target": ""
}
|
"""An apiproxy stub that calls a remote handler via HTTP.
This is a special version of the remote_api_stub which sends all traffic
to the local backends *except* for datastore.put calls where the key
contains a remote app_id.
It re-implements parts of the remote_api_stub so as to replace dependencies on
the (SDK only) appengine_rpc with urlfetch.
"""
import logging
import pickle
import random
import yaml
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import urlfetch
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.runtime import apiproxy_errors
class Error(Exception):
"""Base class for exceptions in this module."""
class ConfigurationError(Error):
"""Exception for configuration errors."""
class FetchFailed(Exception):
"""Remote fetch failed."""
class UnknownJavaServerError(Error):
"""Exception for exceptions returned from a Java remote_api handler."""
class RemoteTransactionsUnimplemented(Error):
"""Remote Put requests do not support transactions."""
class DatastorePutStub(object):
"""A specialised stub for sending "puts" to a remote App Engine datastore.
This stub passes through all requests to the normal stub except for
datastore put. It will check those to see if the put is for the local app
or a remote app, and if remote will send traffic remotely.
"""
def __init__(self, remote_url, target_appid, extra_headers, normal_stub):
"""Constructor.
Args:
remote_url: The URL of the remote_api handler.
target_appid: The appid to intercept calls for.
extra_headers: Headers to send (for authentication).
normal_stub: The standard stub to delegate most calls to.
"""
self.remote_url = remote_url
self.target_appid = target_appid
self.extra_headers = extra_headers or {}
if 'X-appcfg-api-version' not in self.extra_headers:
self.extra_headers['X-appcfg-api-version'] = '1'
self.normal_stub = normal_stub
def CreateRPC(self):
"""Creates RPC object instance.
Returns:
      an instance of RPC.
"""
return apiproxy_rpc.RPC(stub=self)
def MakeSyncCall(self, service, call, request, response):
"""Handle all calls to this stub; delegate as appropriate."""
assert service == 'datastore_v3'
explanation = []
assert request.IsInitialized(explanation), explanation
handler = getattr(self, '_Dynamic_' + call, None)
if handler:
handler(request, response)
else:
self.normal_stub.MakeSyncCall(service, call, request, response)
assert response.IsInitialized(explanation), explanation
def _MakeRemoteSyncCall(self, service, call, request, response):
"""Send an RPC to a remote_api endpoint."""
request_pb = remote_api_pb.Request()
request_pb.set_service_name(service)
request_pb.set_method(call)
request_pb.mutable_request().set_contents(request.Encode())
response_pb = remote_api_pb.Response()
encoded_request = request_pb.Encode()
try:
urlfetch_response = urlfetch.fetch(self.remote_url, encoded_request,
urlfetch.POST, self.extra_headers,
follow_redirects=False,
deadline=10)
except Exception, e:
logging.exception('Fetch failed to %s', self.remote_url)
raise FetchFailed(e)
if urlfetch_response.status_code != 200:
logging.error('Fetch failed to %s; Status %s; body %s',
self.remote_url,
urlfetch_response.status_code,
urlfetch_response.content)
raise FetchFailed(urlfetch_response.status_code)
response_pb.ParseFromString(urlfetch_response.content)
if response_pb.has_application_error():
error_pb = response_pb.application_error()
raise apiproxy_errors.ApplicationError(error_pb.code(),
error_pb.detail())
elif response_pb.has_exception():
raise pickle.loads(response_pb.exception().contents())
elif response_pb.has_java_exception():
      raise UnknownJavaServerError('An unknown error has occurred in the '
'Java remote_api handler for this call.')
else:
response.ParseFromString(response_pb.response().contents())
def _Dynamic_Put(self, request, response):
"""Handle a Put request and route remotely if it matches the target app.
Args:
request: A datastore_pb.PutRequest
response: A datastore_pb.PutResponse
Raises:
RemoteTransactionsUnimplemented: Remote transactions are unimplemented.
"""
if request.entity_list():
entity = request.entity(0)
if entity.has_key() and entity.key().app() == self.target_appid:
if request.has_transaction():
raise RemoteTransactionsUnimplemented()
self._MakeRemoteSyncCall('datastore_v3', 'Put', request, response)
return
self.normal_stub.MakeSyncCall('datastore_v3', 'Put', request, response)
def _Dynamic_AllocateIds(self, request, response):
"""Handle AllocateIds and route remotely if it matches the target app.
Args:
request: A datastore_pb.AllocateIdsRequest
response: A datastore_pb.AllocateIdsResponse
"""
if request.model_key().app() == self.target_appid:
self._MakeRemoteSyncCall('datastore_v3', 'AllocateIds', request, response)
else:
self.normal_stub.MakeSyncCall('datastore_v3', 'AllocateIds', request,
response)
def get_remote_appid(remote_url, extra_headers=None):
"""Get the appid from the remote_api endpoint.
This also has the side effect of verifying that it is a remote_api endpoint.
Args:
remote_url: The url to the remote_api handler.
extra_headers: Headers to send (for authentication).
Returns:
app_id: The app_id of the target app.
Raises:
FetchFailed: Urlfetch call failed.
    ConfigurationError: URLfetch succeeded but results were invalid.
"""
rtok = str(random.random())[2:]
url = remote_url + '?rtok=' + rtok
if not extra_headers:
extra_headers = {}
if 'X-appcfg-api-version' not in extra_headers:
extra_headers['X-appcfg-api-version'] = '1'
try:
urlfetch_response = urlfetch.fetch(url, None, urlfetch.GET,
extra_headers, follow_redirects=False)
except Exception, e:
logging.exception('Fetch failed to %s', remote_url)
raise FetchFailed('Fetch to %s failed: %r' % (remote_url, e))
if urlfetch_response.status_code != 200:
logging.error('Fetch failed to %s; Status %s; body %s',
remote_url,
urlfetch_response.status_code,
urlfetch_response.content)
raise FetchFailed('Fetch to %s failed with status %s' %
(remote_url, urlfetch_response.status_code))
response = urlfetch_response.content
if not response.startswith('{'):
    logging.info('Response unparsable: %s', response)
raise ConfigurationError(
        'Invalid response received from server: %s' % response)
app_info = yaml.load(response)
if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
logging.info('Response unparsable: %s', response)
raise ConfigurationError('Error parsing app_id lookup response')
if app_info['rtok'] != rtok:
logging.info('Response invalid token (expected %s): %s', rtok, response)
raise ConfigurationError('Token validation failed during app_id lookup. '
'(sent %s, got %s)' % (repr(rtok),
repr(app_info['rtok'])))
return app_info['app_id']
def configure_remote_put(remote_url, app_id, extra_headers=None):
"""Does necessary setup to intercept PUT.
Args:
remote_url: The url to the remote_api handler.
app_id: The app_id of the target app.
extra_headers: Headers to send (for authentication).
Raises:
ConfigurationError: if there is a error configuring the stub.
"""
if not app_id or not remote_url:
raise ConfigurationError('app_id and remote_url required')
original_datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
if isinstance(original_datastore_stub, DatastorePutStub):
logging.info('Stub is already configured. Hopefully in a matching fashion.')
return
datastore_stub = DatastorePutStub(remote_url, app_id, extra_headers,
original_datastore_stub)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
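# A minimal configuration sketch (hypothetical URL and auth header; typically
# run once at startup, before any datastore puts that may target the remote app):
#
#     REMOTE_URL = 'https://other-app.example.com/_ah/remote_api'
#     HEADERS = {'Cookie': 'auth-cookie-goes-here'}
#     remote_appid = get_remote_appid(REMOTE_URL, extra_headers=HEADERS)
#     configure_remote_put(REMOTE_URL, remote_appid, extra_headers=HEADERS)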
|
{
"content_hash": "96d302b1a67a5fcfdfe9f7747b472f1c",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 80,
"avg_line_length": 36.357142857142854,
"alnum_prop": 0.6690165260603259,
"repo_name": "SRabbelier/Melange",
"id": "cdef16115c0e3fdb08555d5f6a5adde53b30225a",
"size": "9255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/ext/datastore_admin/remote_api_put_stub.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
}
|
import abc
import fnmatch
import hashlib
import os
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from stevedore import extension
import yaml
from ceilometer.event.storage import models
from ceilometer.i18n import _, _LW
from ceilometer import publisher
from ceilometer.publisher import utils as publisher_utils
from ceilometer import sample as sample_util
OPTS = [
cfg.StrOpt('pipeline_cfg_file',
default="pipeline.yaml",
help="Configuration file for pipeline definition."
),
cfg.StrOpt('event_pipeline_cfg_file',
default="event_pipeline.yaml",
help="Configuration file for event pipeline definition."
),
cfg.BoolOpt('refresh_pipeline_cfg',
default=False,
help="Refresh Pipeline configuration on-the-fly."
),
cfg.IntOpt('pipeline_polling_interval',
default=20,
help="Polling interval for pipeline file configuration"
" in seconds."
),
]
cfg.CONF.register_opts(OPTS)
LOG = log.getLogger(__name__)
class PipelineException(Exception):
def __init__(self, message, pipeline_cfg):
self.msg = message
self.pipeline_cfg = pipeline_cfg
def __str__(self):
return 'Pipeline %s: %s' % (self.pipeline_cfg, self.msg)
@six.add_metaclass(abc.ABCMeta)
class PipelineEndpoint(object):
def __init__(self, context, pipeline):
self.publish_context = PublishContext(context, [pipeline])
@abc.abstractmethod
def sample(self, ctxt, publisher_id, event_type, payload, metadata):
pass
class SamplePipelineEndpoint(PipelineEndpoint):
def sample(self, ctxt, publisher_id, event_type, payload, metadata):
samples = [
sample_util.Sample(name=s['counter_name'],
type=s['counter_type'],
unit=s['counter_unit'],
volume=s['counter_volume'],
user_id=s['user_id'],
project_id=s['project_id'],
resource_id=s['resource_id'],
timestamp=s['timestamp'],
resource_metadata=s['resource_metadata'],
source=s.get('source'))
for s in payload if publisher_utils.verify_signature(
s, cfg.CONF.publisher.telemetry_secret)
]
with self.publish_context as p:
p(samples)
class EventPipelineEndpoint(PipelineEndpoint):
def sample(self, ctxt, publisher_id, event_type, payload, metadata):
events = [
models.Event(
message_id=ev['message_id'],
event_type=ev['event_type'],
generated=timeutils.normalize_time(
timeutils.parse_isotime(ev['generated'])),
traits=[models.Trait(name, dtype,
models.Trait.convert_value(dtype, value))
for name, dtype, value in ev['traits']],
raw=ev.get('raw', {}))
for ev in payload if publisher_utils.verify_signature(
ev, cfg.CONF.publisher.telemetry_secret)
]
with self.publish_context as p:
p(events)
class _PipelineTransportManager(object):
def __init__(self):
self.transporters = []
def add_transporter(self, transporter):
self.transporters.append(transporter)
def publisher(self, context):
serializer = self.serializer
hash_to_bucketise = self.hash_to_bucketise
transporters = self.transporters
filter_attr = self.filter_attr
event_type = self.event_type
class PipelinePublishContext(object):
def __enter__(self):
def p(data):
# TODO(gordc): cleanup so payload is always single
# datapoint. we can't correctly bucketise
# datapoints if batched.
data = [data] if not isinstance(data, list) else data
for datapoint in data:
serialized_data = serializer(datapoint)
for d_filter, notifiers in transporters:
if d_filter(serialized_data[filter_attr]):
key = (hash_to_bucketise(serialized_data) %
len(notifiers))
notifier = notifiers[key]
notifier.sample(context.to_dict(),
event_type=event_type,
payload=[serialized_data])
return p
def __exit__(self, exc_type, exc_value, traceback):
pass
return PipelinePublishContext()
class SamplePipelineTransportManager(_PipelineTransportManager):
filter_attr = 'counter_name'
event_type = 'ceilometer.pipeline'
@staticmethod
def hash_to_bucketise(datapoint):
return hash(datapoint['resource_id'])
@staticmethod
def serializer(data):
return publisher_utils.meter_message_from_counter(
data, cfg.CONF.publisher.telemetry_secret)
class EventPipelineTransportManager(_PipelineTransportManager):
filter_attr = 'event_type'
event_type = 'pipeline.event'
@staticmethod
def hash_to_bucketise(datapoint):
return hash(datapoint['event_type'])
@staticmethod
def serializer(data):
return publisher_utils.message_from_event(
data, cfg.CONF.publisher.telemetry_secret)
class PublishContext(object):
def __init__(self, context, pipelines=None):
pipelines = pipelines or []
self.pipelines = set(pipelines)
self.context = context
def add_pipelines(self, pipelines):
self.pipelines.update(pipelines)
def __enter__(self):
def p(data):
for p in self.pipelines:
p.publish_data(self.context, data)
return p
def __exit__(self, exc_type, exc_value, traceback):
for p in self.pipelines:
p.flush(self.context)
class Source(object):
"""Represents a source of samples or events."""
def __init__(self, cfg):
self.cfg = cfg
try:
self.name = cfg['name']
self.sinks = cfg.get('sinks')
except KeyError as err:
raise PipelineException(
"Required field %s not specified" % err.args[0], cfg)
def __str__(self):
return self.name
def check_sinks(self, sinks):
if not self.sinks:
raise PipelineException(
"No sink defined in source %s" % self,
self.cfg)
for sink in self.sinks:
if sink not in sinks:
raise PipelineException(
"Dangling sink %s from source %s" % (sink, self),
self.cfg)
def check_source_filtering(self, data, d_type):
"""Source data rules checking
        - At least one meaningful datapoint exists
        - Included and excluded types can't co-exist in the same pipeline
        - An included meter type and a wildcard can't co-exist in the same pipeline
"""
if not data:
raise PipelineException('No %s specified' % d_type, self.cfg)
if ([x for x in data if x[0] not in '!*'] and
[x for x in data if x[0] == '!']):
raise PipelineException(
'Both included and excluded %s specified' % d_type,
                self.cfg)
if '*' in data and [x for x in data if x[0] not in '!*']:
raise PipelineException(
'Included %s specified with wildcard' % d_type,
self.cfg)
@staticmethod
def is_supported(dataset, data_name):
# Support wildcard like storage.* and !disk.*
# Start with negation, we consider that the order is deny, allow
if any(fnmatch.fnmatch(data_name, datapoint[1:])
for datapoint in dataset if datapoint[0] == '!'):
return False
if any(fnmatch.fnmatch(data_name, datapoint)
for datapoint in dataset if datapoint[0] != '!'):
return True
# if we only have negation, we suppose the default is allow
return all(datapoint.startswith('!') for datapoint in dataset)
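        # Illustrative results of this matching scheme (added for clarity):
        #   is_supported(['disk.*'], 'disk.read.bytes')       -> True
        #   is_supported(['!disk.*'], 'disk.read.bytes')      -> False
        #   is_supported(['!disk.*'], 'cpu')                  -> True  (only negations: allow by default)
        #   is_supported(['*', '!disk.*'], 'disk.read.bytes') -> False (deny wins over the wildcard)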
class EventSource(Source):
"""Represents a source of events.
In effect it is a set of notification handlers capturing events for a set
of matching notifications.
"""
def __init__(self, cfg):
super(EventSource, self).__init__(cfg)
try:
self.events = cfg['events']
except KeyError as err:
raise PipelineException(
"Required field %s not specified" % err.args[0], cfg)
self.check_source_filtering(self.events, 'events')
def support_event(self, event_name):
return self.is_supported(self.events, event_name)
class SampleSource(Source):
"""Represents a source of samples.
In effect it is a set of pollsters and/or notification handlers emitting
samples for a set of matching meters. Each source encapsulates meter name
matching, polling interval determination, optional resource enumeration or
discovery, and mapping to one or more sinks for publication.
"""
def __init__(self, cfg):
super(SampleSource, self).__init__(cfg)
try:
# Support 'counters' for backward compatibility
self.meters = cfg.get('meters', cfg.get('counters'))
except KeyError as err:
raise PipelineException(
"Required field %s not specified" % err.args[0], cfg)
try:
self.interval = int(cfg.get('interval', 600))
except ValueError:
raise PipelineException("Invalid interval value", cfg)
if self.interval <= 0:
raise PipelineException("Interval value should > 0", cfg)
self.resources = cfg.get('resources') or []
if not isinstance(self.resources, list):
raise PipelineException("Resources should be a list", cfg)
self.discovery = cfg.get('discovery') or []
if not isinstance(self.discovery, list):
raise PipelineException("Discovery should be a list", cfg)
self.check_source_filtering(self.meters, 'meters')
def get_interval(self):
return self.interval
def support_meter(self, meter_name):
return self.is_supported(self.meters, meter_name)
class Sink(object):
"""Represents a sink for the transformation and publication of data.
Each sink config is concerned *only* with the transformation rules
and publication conduits for data.
In effect, a sink describes a chain of handlers. The chain starts
with zero or more transformers and ends with one or more publishers.
The first transformer in the chain is passed data from the
corresponding source, takes some action such as deriving rate of
change, performing unit conversion, or aggregating, before passing
the modified data to next step.
The subsequent transformers, if any, handle the data similarly.
At the end of the chain, publishers publish the data. The exact
publishing method depends on publisher type, for example, pushing
into data storage via the message bus providing guaranteed delivery,
or for loss-tolerant data UDP may be used.
If no transformers are included in the chain, the publishers are
passed data directly from the sink which are published unchanged.
"""
def __init__(self, cfg, transformer_manager):
self.cfg = cfg
try:
self.name = cfg['name']
# It's legal to have no transformer specified
self.transformer_cfg = cfg.get('transformers') or []
except KeyError as err:
raise PipelineException(
"Required field %s not specified" % err.args[0], cfg)
if not cfg.get('publishers'):
raise PipelineException("No publisher specified", cfg)
self.publishers = []
for p in cfg['publishers']:
if '://' not in p:
# Support old format without URL
p = p + "://"
try:
self.publishers.append(publisher.get_publisher(p,
self.NAMESPACE))
except Exception:
LOG.exception(_("Unable to load publisher %s"), p)
self.multi_publish = True if len(self.publishers) > 1 else False
self.transformers = self._setup_transformers(cfg, transformer_manager)
def __str__(self):
return self.name
def _setup_transformers(self, cfg, transformer_manager):
transformers = []
for transformer in self.transformer_cfg:
parameter = transformer['parameters'] or {}
try:
ext = transformer_manager[transformer['name']]
except KeyError:
raise PipelineException(
"No transformer named %s loaded" % transformer['name'],
cfg)
transformers.append(ext.plugin(**parameter))
LOG.info(_(
"Pipeline %(pipeline)s: Setup transformer instance %(name)s "
"with parameter %(param)s") % ({'pipeline': self,
'name': transformer['name'],
'param': parameter}))
return transformers
class EventSink(Sink):
NAMESPACE = 'ceilometer.event.publisher'
def publish_events(self, ctxt, events):
if events:
for p in self.publishers:
try:
p.publish_events(ctxt, events)
except Exception:
LOG.exception(_("Pipeline %(pipeline)s: %(status)s"
" after error from publisher %(pub)s") %
({'pipeline': self, 'status': 'Continue' if
self.multi_publish else 'Exit', 'pub': p}
))
if not self.multi_publish:
raise
def flush(self, ctxt):
"""Flush data after all events have been injected to pipeline."""
pass
class SampleSink(Sink):
NAMESPACE = 'ceilometer.publisher'
def _transform_sample(self, start, ctxt, sample):
try:
for transformer in self.transformers[start:]:
sample = transformer.handle_sample(ctxt, sample)
if not sample:
LOG.debug(
"Pipeline %(pipeline)s: Sample dropped by "
"transformer %(trans)s", {'pipeline': self,
'trans': transformer})
return
return sample
except Exception as err:
# TODO(gordc): only use one log level.
LOG.warning(_("Pipeline %(pipeline)s: "
"Exit after error from transformer "
"%(trans)s for %(smp)s") % ({'pipeline': self,
'trans': transformer,
'smp': sample}))
LOG.exception(err)
def _publish_samples(self, start, ctxt, samples):
"""Push samples into pipeline for publishing.
:param start: The first transformer that the sample will be injected.
This is mainly for flush() invocation that transformer
may emit samples.
:param ctxt: Execution context from the manager or service.
:param samples: Sample list.
"""
transformed_samples = []
if not self.transformers:
transformed_samples = samples
else:
for sample in samples:
LOG.debug(
"Pipeline %(pipeline)s: Transform sample "
"%(smp)s from %(trans)s transformer", {'pipeline': self,
'smp': sample,
'trans': start})
sample = self._transform_sample(start, ctxt, sample)
if sample:
transformed_samples.append(sample)
if transformed_samples:
for p in self.publishers:
try:
p.publish_samples(ctxt, transformed_samples)
except Exception:
LOG.exception(_(
"Pipeline %(pipeline)s: Continue after error "
"from publisher %(pub)s") % ({'pipeline': self,
'pub': p}))
def publish_samples(self, ctxt, samples):
self._publish_samples(0, ctxt, samples)
def flush(self, ctxt):
"""Flush data after all samples have been injected to pipeline."""
for (i, transformer) in enumerate(self.transformers):
try:
self._publish_samples(i + 1, ctxt,
list(transformer.flush(ctxt)))
except Exception as err:
LOG.warning(_(
"Pipeline %(pipeline)s: Error flushing "
"transformer %(trans)s") % ({'pipeline': self,
'trans': transformer}))
LOG.exception(err)
@six.add_metaclass(abc.ABCMeta)
class Pipeline(object):
"""Represents a coupling between a sink and a corresponding source."""
def __init__(self, source, sink):
self.source = source
self.sink = sink
self.name = str(self)
def __str__(self):
return (self.source.name if self.source.name == self.sink.name
else '%s:%s' % (self.source.name, self.sink.name))
def flush(self, ctxt):
self.sink.flush(ctxt)
@property
def publishers(self):
return self.sink.publishers
@abc.abstractmethod
def publish_data(self, ctxt, data):
"""Publish data from pipeline."""
class EventPipeline(Pipeline):
"""Represents a pipeline for Events."""
def __str__(self):
# NOTE(gordc): prepend a namespace so we ensure event and sample
# pipelines do not have the same name.
return 'event:%s' % super(EventPipeline, self).__str__()
def support_event(self, event_type):
return self.source.support_event(event_type)
def publish_data(self, ctxt, events):
if not isinstance(events, list):
events = [events]
supported = [e for e in events
if self.source.support_event(e.event_type)]
self.sink.publish_events(ctxt, supported)
class SamplePipeline(Pipeline):
"""Represents a pipeline for Samples."""
def get_interval(self):
return self.source.interval
@property
def resources(self):
return self.source.resources
@property
def discovery(self):
return self.source.discovery
def support_meter(self, meter_name):
return self.source.support_meter(meter_name)
def _validate_volume(self, s):
volume = s.volume
if volume is None:
LOG.warning(_LW(
'metering data %(counter_name)s for %(resource_id)s '
'@ %(timestamp)s has no volume (volume: None), the sample will'
' be dropped')
% {'counter_name': s.name,
'resource_id': s.resource_id,
'timestamp': s.timestamp if s.timestamp else 'NO TIMESTAMP'}
)
return False
if not isinstance(volume, (int, float)):
try:
volume = float(volume)
except ValueError:
LOG.warning(_LW(
'metering data %(counter_name)s for %(resource_id)s '
'@ %(timestamp)s has volume which is not a number '
'(volume: %(counter_volume)s), the sample will be dropped')
% {'counter_name': s.name,
'resource_id': s.resource_id,
'timestamp': (
s.timestamp if s.timestamp else 'NO TIMESTAMP'),
'counter_volume': volume}
)
return False
return True
def publish_data(self, ctxt, samples):
if not isinstance(samples, list):
samples = [samples]
supported = [s for s in samples if self.source.support_meter(s.name)
and self._validate_volume(s)]
self.sink.publish_samples(ctxt, supported)
SAMPLE_TYPE = {'pipeline': SamplePipeline,
'source': SampleSource,
'sink': SampleSink}
EVENT_TYPE = {'pipeline': EventPipeline,
'source': EventSource,
'sink': EventSink}
class PipelineManager(object):
"""Pipeline Manager
Pipeline manager sets up pipelines according to config file
Usually only one pipeline manager exists in the system.
"""
def __init__(self, cfg, transformer_manager, p_type=SAMPLE_TYPE):
"""Setup the pipelines according to config.
The configuration is supported as follows:
Decoupled: the source and sink configuration are separately
specified before being linked together. This allows source-
specific configuration, such as resource discovery, to be
kept focused only on the fine-grained source while avoiding
the necessity for wide duplication of sink-related config.
The configuration is provided in the form of separate lists
of dictionaries defining sources and sinks, for example:
{"sources": [{"name": source_1,
"interval": interval_time,
"meters" : ["meter_1", "meter_2"],
"resources": ["resource_uri1", "resource_uri2"],
"sinks" : ["sink_1", "sink_2"]
},
{"name": source_2,
"interval": interval_time,
"meters" : ["meter_3"],
"sinks" : ["sink_2"]
},
],
"sinks": [{"name": sink_1,
"transformers": [
{"name": "Transformer_1",
"parameters": {"p1": "value"}},
{"name": "Transformer_2",
"parameters": {"p1": "value"}},
],
"publishers": ["publisher_1", "publisher_2"]
},
{"name": sink_2,
"publishers": ["publisher_3"]
},
]
}
The interval determines the cadence of sample injection into
the pipeline where samples are produced under the direct control
of an agent, i.e. via a polling cycle as opposed to incoming
notifications.
Valid meter format is '*', '!meter_name', or 'meter_name'.
'*' is a wildcard symbol meaning any meter; '!meter_name' means
"meter_name" will be excluded; 'meter_name' means 'meter_name'
will be included.
The 'meter_name' is the Sample name field. For meter names with a
variable part, such as "instance:m1.tiny", use "instance:*".
A valid meters definition is either all "included meter names", all
"excluded meter names", a wildcard combined with "excluded meter
names", or only the wildcard.
The resources field is a list of URIs indicating the resources from
which the meters should be polled. It is optional and it is up to the
specific pollster to decide how to use it.
The transformer's name is its plugin name in setup.cfg.
The publisher's name is its plugin name in setup.cfg.
"""
self.pipelines = []
if not ('sources' in cfg and 'sinks' in cfg):
raise PipelineException("Both sources & sinks are required",
cfg)
LOG.info(_('detected decoupled pipeline config format'))
unique_names = set()
sources = []
for s in cfg.get('sources', []):
name = s.get('name')
if name in unique_names:
raise PipelineException("Duplicated source names: %s" %
name, self)
else:
unique_names.add(name)
sources.append(p_type['source'](s))
unique_names.clear()
sinks = {}
for s in cfg.get('sinks', []):
name = s.get('name')
if name in unique_names:
raise PipelineException("Duplicated sink names: %s" %
name, self)
else:
unique_names.add(name)
sinks[s['name']] = p_type['sink'](s, transformer_manager)
unique_names.clear()
for source in sources:
source.check_sinks(sinks)
for target in source.sinks:
pipe = p_type['pipeline'](source, sinks[target])
if pipe.name in unique_names:
raise PipelineException(
"Duplicate pipeline name: %s. Ensure pipeline"
" names are unique. (name is the source and sink"
" names combined)" % pipe.name, cfg)
else:
unique_names.add(pipe.name)
self.pipelines.append(pipe)
unique_names.clear()
def publisher(self, context):
"""Build a new Publisher for these manager pipelines.
:param context: The context.
"""
return PublishContext(context, self.pipelines)
class PollingManager(object):
"""Polling Manager
Polling manager sets up polling according to the config file.
"""
def __init__(self, cfg):
"""Setup the polling according to config.
The configuration is the sources half of the Pipeline Config.
"""
self.sources = []
if not ('sources' in cfg and 'sinks' in cfg):
raise PipelineException("Both sources & sinks are required",
cfg)
LOG.info(_('detected decoupled pipeline config format'))
unique_names = set()
for s in cfg.get('sources', []):
name = s.get('name')
if name in unique_names:
raise PipelineException("Duplicated source names: %s" %
name, self)
else:
unique_names.add(name)
self.sources.append(SampleSource(s))
unique_names.clear()
def _setup_pipeline_manager(cfg_file, transformer_manager, p_type=SAMPLE_TYPE):
if not os.path.exists(cfg_file):
cfg_file = cfg.CONF.find_file(cfg_file)
LOG.debug("Pipeline config file: %s", cfg_file)
with open(cfg_file) as fap:
data = fap.read()
pipeline_cfg = yaml.safe_load(data)
LOG.info(_("Pipeline config: %s"), pipeline_cfg)
return PipelineManager(pipeline_cfg,
transformer_manager or
extension.ExtensionManager(
'ceilometer.transformer',
), p_type)
def _setup_polling_manager(cfg_file):
if not os.path.exists(cfg_file):
cfg_file = cfg.CONF.find_file(cfg_file)
LOG.debug("Polling config file: %s", cfg_file)
with open(cfg_file) as fap:
data = fap.read()
pipeline_cfg = yaml.safe_load(data)
LOG.info(_("Pipeline config: %s"), pipeline_cfg)
return PollingManager(pipeline_cfg)
def setup_event_pipeline(transformer_manager=None):
"""Setup event pipeline manager according to yaml config file."""
cfg_file = cfg.CONF.event_pipeline_cfg_file
return _setup_pipeline_manager(cfg_file, transformer_manager, EVENT_TYPE)
def setup_pipeline(transformer_manager=None):
"""Setup pipeline manager according to yaml config file."""
cfg_file = cfg.CONF.pipeline_cfg_file
return _setup_pipeline_manager(cfg_file, transformer_manager)
def _get_pipeline_cfg_file(p_type=SAMPLE_TYPE):
if p_type == EVENT_TYPE:
cfg_file = cfg.CONF.event_pipeline_cfg_file
else:
cfg_file = cfg.CONF.pipeline_cfg_file
if not os.path.exists(cfg_file):
cfg_file = cfg.CONF.find_file(cfg_file)
return cfg_file
def get_pipeline_mtime(p_type=SAMPLE_TYPE):
cfg_file = _get_pipeline_cfg_file(p_type)
return os.path.getmtime(cfg_file)
def get_pipeline_hash(p_type=SAMPLE_TYPE):
cfg_file = _get_pipeline_cfg_file(p_type)
with open(cfg_file) as fap:
data = fap.read()
if six.PY3:
data = data.encode('utf-8')
file_hash = hashlib.md5(data).hexdigest()
return file_hash
def setup_polling():
"""Setup polling manager according to yaml config file."""
cfg_file = cfg.CONF.pipeline_cfg_file
return _setup_polling_manager(cfg_file)
|
{
"content_hash": "320d3e3b91c5e636bfad939698f3b316",
"timestamp": "",
"source": "github",
"line_count": 836,
"max_line_length": 79,
"avg_line_length": 35.32057416267943,
"alnum_prop": 0.552424817122731,
"repo_name": "mathslinux/ceilometer",
"id": "620933dda5b5e51723ca066c51c14178e9f26d66",
"size": "30234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2849027"
},
{
"name": "Shell",
"bytes": "29510"
}
],
"symlink_target": ""
}
|
from bottle import Bottle, run, request
import json
import subprocess
HOST_NAME = "localhost"
PORT = 9999
app = Bottle()
##
## Basic route that returns all nodes/tags for a given environment (passed as part of the URL)
##
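## Example (illustrative, assumed hostnames): GET /api/production would run
##   knife search "chef_environment:production" -i
## and return JSON such as {"production": {"node1.example.com": ["web", "ssl"]}}.
##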
@app.route('/api/<env>')
def knife_search_env(env):
cmd = "knife search \"chef_environment:" + env + "\" -i 2> /dev/null | sort"
(stdoutdata, stderrdata) = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()
response = {}
response[env]={}
node_list = stdoutdata.split()
for node in node_list:
tags = getTagListByNode(node)
response[env][node] = tags
return json.dumps(response)
##
## Route that takes advantage of multiple arguments that can be passed on the URL.
## Uses the arguments as filter for knife search command
##
## Supports:
## * env
## * zone
## * role
## * node (name)
##
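## Example (illustrative): GET /api?env=production&role=webserver builds a
## query such as "chef_environment:production AND role:webserver" and returns
## a node -> tag-list mapping for every match.
##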
@app.route('/api')
def knife_search_env_role():
query_items = request.query.decode()
cmd = ""
response = {}
for key in query_items:
if cmd:  # String is not empty
cmd = cmd + " AND "
if key == 'zone':
zone = query_items[key]
cmd = cmd + "zone:" + zone
elif key == 'env':
env = query_items[key]
cmd = cmd + "chef_environment:" + env
elif key == 'role':
role = query_items[key]
cmd = cmd + "role:" + role
elif key == 'node':
node = query_items[key]
cmd = cmd + "name:" + node
cmd = "knife search \"" + cmd + "\" -i 2> /dev/null | sort"
(stdoutdata, stderrdata) = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()
node_list = stdoutdata.split()
for node in node_list:
tags = getTagListByNode(node)
response[node] = tags
return json.dumps(response)
##
## Basic method that fetches all tags for a given node
##
def getTagListByNode(node):
cmd = 'knife tag list %s'%(node)
(stdoutdata, stderrdata) = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True).communicate()
return stdoutdata.strip().split("\n")
run(app, host=HOST_NAME, port=PORT)
|
{
"content_hash": "ab6dae16167a2c797c0fed40b4bdde6f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 102,
"avg_line_length": 25.848101265822784,
"alnum_prop": 0.6385896180215475,
"repo_name": "semblano/chef-tags-api",
"id": "820f72167eec2d1eeaa7f5228c7dec56eec6c421",
"size": "2114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chef-tag-api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2114"
}
],
"symlink_target": ""
}
|
__all__ = [
'TableToTimeGrid',
'ReverseImageDataAxii',
'TranslateGridOrigin',
]
__displayname__ = 'Transform'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from .. import _helpers, interface
from ..base import FilterBase
###############################################################################
class TableToTimeGrid(FilterBase):
"""A filter to convert a static (no time variance) table to a time varying
grid. This effectively reshapes a table full of data arrays into a 4D array
that is placed onto the CellData of a ``vtkImageData`` object.
"""
__displayname__ = 'Table To Time Grid'
__category__ = 'filter'
def __init__(
self,
extent=(10, 10, 10, 1),
order='C',
spacing=(1.0, 1.0, 1.0),
origin=(0.0, 0.0, 0.0),
dims=(0, 1, 2, 3),
dt=1.0,
points=False,
**kwargs
):
FilterBase.__init__(
self,
nInputPorts=1,
nOutputPorts=1,
inputType='vtkTable',
outputType='vtkImageData',
**kwargs
)
if len(extent) != 4:
raise _helpers.PVGeoError('`extent` must be of length 4.')
self.__extent = list(extent)
self.__dims = list(
dims
) # these are indexes for the filter to use on the reshape.
# NOTE: self.__dims[0] is the x axis index, etc., self.__dims[3] is the time axis
self.__spacing = list(spacing) # image data spacing
self.__origin = list(origin) # image data origin
self.__order = list(order) # unpacking order: 'C' or 'F'
self.__data = None # this is where we hold the data so the entire filter does
# not execute on every time step. Data will be a dictionary of 4D arrays;
# each 4D array will be in (nx, ny, nz, nt) shape
self.__needToRun = True
self.__timesteps = None
self.__dt = dt
# Optional parameter to switch between cell and point data
self.__usePointData = points
self.__needToUpdateOutput = True
def _set_data(self, table):
"""Internal helper to restructure the inpt table arrays"""
self.__data = dict()
dims = np.array([d for d in self.__dims])
sd = dims.argsort()
df = interface.table_to_data_frame(table)
keys = df.keys().tolist()
for k in keys:
# perform the reshape properly, using the user-given extent
arr = np.reshape(df[k].values, self.__extent, order=self.__order)
# Now order correctly for the image data spatial reference
# this uses the user specified dimension definitions
for i in range(4):
arr = np.moveaxis(arr, sd[i], dims[i])
# Now add to dictionary
self.__data[k] = arr
self.__needToRun = False
return
def _build_image_data(self, img):
"""Internal helper to consturct the output"""
if self.__needToUpdateOutput:
# Clean out the output data object
img.DeepCopy(vtk.vtkImageData())
self.__needToUpdateOutput = False
ext = self.__extent
dims = self.__dims
nx, ny, nz = ext[dims[0]], ext[dims[1]], ext[dims[2]]
if not self.__usePointData:
nx += 1
ny += 1
nz += 1
sx, sy, sz = self.__spacing[0], self.__spacing[1], self.__spacing[2]
ox, oy, oz = self.__origin[0], self.__origin[1], self.__origin[2]
img.SetDimensions(nx, ny, nz)
img.SetSpacing(sx, sy, sz)
img.SetOrigin(ox, oy, oz)
return img
def _update_time_steps(self):
"""For internal use only: appropriately sets the timesteps."""
nt = self.__extent[self.__dims[3]]
if nt > 1:
self.__timesteps = _helpers.update_time_steps(self, nt, self.__dt)
return 1
#### Algorithm Methods ####
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
table = self.GetInputData(inInfo, 0, 0)
img = self.GetOutputData(outInfo, 0)
self._build_image_data(img)
# Perform task
if self.__needToRun:
self._set_data(table)
# Get requested time index
i = _helpers.get_requested_time(self, outInfo)
for k, arr in self.__data.items():
# NOTE: Keep order='F' because of the way the grid is already reshaped
# the 3D array has XYZ structure so VTK requires F ordering
narr = interface.convert_array(arr[:, :, :, i].flatten(order='F'), name=k)
if self.__usePointData:
img.GetPointData().AddArray(narr)
else:
img.GetCellData().AddArray(narr)
return 1
def RequestInformation(self, request, inInfo, outInfo):
"""Used by pipeline to set whole output extent."""
# Setup the ImageData
ext = self.__extent
dims = self.__dims
nx, ny, nz = ext[dims[0]], ext[dims[1]], ext[dims[2]]
if self.__usePointData:
ext = [0, nx - 1, 0, ny - 1, 0, nz - 1]
else:
ext = [0, nx, 0, ny, 0, nz]
info = outInfo.GetInformationObject(0)
# Set WHOLE_EXTENT: This is absolutely necessary
info.Set(vtk.vtkStreamingDemandDrivenPipeline.WHOLE_EXTENT(), ext, 6)
# Now set the number of timesteps:
self._update_time_steps()
return 1
#### Setters / Getters ####
def Modified(self, run_again=True):
"""Call modified if the filter needs to run again"""
if run_again:
self.__needToRun = run_again
self.__needToUpdateOutput = True
FilterBase.Modified(self)
def modified(self, run_again=True):
"""Call modified if the filter needs to run again"""
return self.Modified(run_again=run_again)
def set_extent(self, nx, ny, nz, nt):
"""Set the extent of the output grid"""
if self.__extent != [nx, ny, nz, nt]:
self.__extent = [nx, ny, nz, nt]
self.Modified()
def set_dimensions(self, x, y, z, t):
"""Set the dimensions of the output grid"""
if self.__dims != [x, y, z, t]:
self.__dims = [x, y, z, t]
self.Modified()
def set_spacing(self, dx, dy, dz):
"""Set the spacing for the points along each axial direction"""
if self.__spacing != [dx, dy, dz]:
self.__spacing = [dx, dy, dz]
self.Modified()
def set_origin(self, x0, y0, z0):
"""Set the origin of the output `vtkImageData`"""
if self.__origin != [x0, y0, z0]:
self.__origin = [x0, y0, z0]
self.Modified()
def set_order(self, order):
"""Set the reshape order (`'C'` or `'F'`)"""
if self.__order != order:
self.__order = order
self.Modified(run_again=True)
def get_time_step_values(self):
"""Use this in ParaView decorator to register timesteps on the pipeline."""
return self.__timesteps.tolist() if self.__timesteps is not None else None
def set_time_delta(self, dt):
"""An advanced property to set the time step in seconds."""
if dt != self.__dt:
self.__dt = dt
self.Modified()
def set_use_points(self, flag):
"""Set whether or not to place the data on the nodes/cells of the grid.
True places data on nodes, false places data at cell centers (CellData).
In ParaView, switching can be a bit buggy: be sure to turn the visibility
of this data object OFF on the pipeline when changing between nodes/cells.
"""
if self.__usePointData != flag:
self.__usePointData = flag
self.Modified(run_again=True)
###############################################################################
class ReverseImageDataAxii(FilterBase):
"""This filter will flip ``vtkImageData`` on any of the three cartesian axii.
A checkbox is provided for each axis on which you may desire to flip the data.
"""
__displayname__ = 'Reverse Image Data Axii'
__category__ = 'filter'
def __init__(self, axes=(True, True, True)):
FilterBase.__init__(
self,
nInputPorts=1,
inputType='vtkImageData',
nOutputPorts=1,
outputType='vtkImageData',
)
self.__axes = list(axes[::-1]) # Z Y X (FORTRAN)
def _reverse_grid_axes(self, idi, ido):
"""Internal helper to reverse data along specified axii"""
# Copy over input to output to be flipped around
# Deep copy keeps us from messing with the input data
ox, oy, oz = idi.GetOrigin()
ido.SetOrigin(ox, oy, oz)
sx, sy, sz = idi.GetSpacing()
ido.SetSpacing(sx, sy, sz)
ext = idi.GetExtent()
nx, ny, nz = ext[1] + 1, ext[3] + 1, ext[5] + 1
ido.SetDimensions(nx, ny, nz)
widi = dsa.WrapDataObject(idi)
# Iterate over all array in the PointData
for j in range(idi.GetPointData().GetNumberOfArrays()):
# Go through each axis and rotate if needed
arr = widi.PointData[j]
arr = np.reshape(arr, (nz, ny, nx))
for i in range(3):
if self.__axes[i]:
arr = np.flip(arr, axis=i)
# Now add that data array to the output
data = interface.convert_array(
arr.flatten(), name=idi.GetPointData().GetArrayName(j)
)
ido.GetPointData().AddArray(data)
# Iterate over all array in the CellData
for j in range(idi.GetCellData().GetNumberOfArrays()):
# Go through each axis and rotate if needed
arr = widi.CellData[j]
arr = np.reshape(arr, (nz - 1, ny - 1, nx - 1))
for i in range(3):
if self.__axes[i]:
arr = np.flip(arr, axis=i)
# Now add that data array to the output
data = interface.convert_array(
arr.flatten(), name=idi.GetCellData().GetArrayName(j)
)
ido.GetCellData().AddArray(data)
return ido
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output."""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perform task
self._reverse_grid_axes(pdi, pdo)
return 1
#### Setters / Getters ####
def set_flip_x(self, flag):
"""Set the filter to flip th input data along the X-axis"""
if self.__axes[2] != flag:
self.__axes[2] = flag
self.Modified()
def set_flip_y(self, flag):
"""Set the filter to flip th input data along the Y-axis"""
if self.__axes[1] != flag:
self.__axes[1] = flag
self.Modified()
def set_flip_z(self, flag):
"""Set the filter to flip th input data along the Z-axis"""
if self.__axes[0] != flag:
self.__axes[0] = flag
self.Modified()
###############################################################################
# ---- Translate Grid Origin ----#
class TranslateGridOrigin(FilterBase):
"""This filter will translate the origin of `vtkImageData` to any specified
Corner of the data set assuming it is currently in the South West Bottom
Corner (will not work if Corner was moved prior).
"""
__displayname__ = 'Translate Grid Origin'
__category__ = 'filter'
def __init__(self, corner=1):
FilterBase.__init__(
self,
nInputPorts=1,
inputType='vtkImageData',
nOutputPorts=1,
outputType='vtkImageData',
)
self.__corner = corner
def _translate(self, pdi, pdo):
"""Internal helper to translate the inputs origin"""
if pdo is None:
pdo = vtk.vtkImageData()
[nx, ny, nz] = pdi.GetDimensions()
[sx, sy, sz] = pdi.GetSpacing()
[ox, oy, oz] = pdi.GetOrigin()
pdo.DeepCopy(pdi)
xx, yy, zz = 0.0, 0.0, 0.0
if self.__corner == 1:
# South East Bottom
xx = ox - (nx - 1) * sx
yy = oy
zz = oz
elif self.__corner == 2:
# North West Bottom
xx = ox
yy = oy - (ny - 1) * sy
zz = oz
elif self.__corner == 3:
# North East Bottom
xx = ox - (nx - 1) * sx
yy = oy - (ny - 1) * sy
zz = oz
elif self.__corner == 4:
# South West Top
xx = ox
yy = oy
zz = oz - (nz - 1) * sz
elif self.__corner == 5:
# South East Top
xx = ox - (nx - 1) * sx
yy = oy
zz = oz - (nz - 1) * sz
elif self.__corner == 6:
# North West Top
xx = ox
yy = oy - (ny - 1) * sy
zz = oz - (nz - 1) * sz
elif self.__corner == 7:
# North East Top
xx = ox - (nx - 1) * sx
yy = oy - (ny - 1) * sy
zz = oz - (nz - 1) * sz
pdo.SetOrigin(xx, yy, zz)
return pdo
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output."""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perform task
self._translate(pdi, pdo)
return 1
#### Setters / Getters ####
def set_corner(self, corner):
"""Set the corner to use
Args:
corner (int) : corner location; see note.
Note:
* 1: South East Bottom
* 2: North West Bottom
* 3: North East Bottom
* 4: South West Top
* 5: South East Top
* 6: North West Top
* 7: North East Top
"""
if self.__corner != corner:
self.__corner = corner
self.Modified()
###############################################################################
|
{
"content_hash": "8a314b61a6650b60ebffa6c103a47611",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 89,
"avg_line_length": 34.30310262529833,
"alnum_prop": 0.5238989772490086,
"repo_name": "banesullivan/ParaViewGeophysics",
"id": "ba15ae30fe8854920fa121bef8b419487d4219a6",
"size": "14373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PVGeo/grids/transform.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "789"
},
{
"name": "Python",
"bytes": "191998"
},
{
"name": "Shell",
"bytes": "9602"
}
],
"symlink_target": ""
}
|
"""MQTT component mixins and helpers."""
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Callable
import json
import logging
from typing import Any, Protocol
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_CONFIGURATION_URL,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_NAME,
ATTR_SUGGESTED_AREA,
ATTR_SW_VERSION,
ATTR_VIA_DEVICE,
CONF_DEVICE,
CONF_ENTITY_CATEGORY,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
entity_registry as er,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import (
DeviceInfo,
Entity,
EntityCategory,
async_generate_entity_id,
validate_entity_category,
)
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType
from . import (
DATA_MQTT,
PLATFORMS,
MqttValueTemplate,
async_publish,
debug_info,
subscription,
)
from .const import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_PAYLOAD,
ATTR_DISCOVERY_TOPIC,
CONF_AVAILABILITY,
CONF_ENCODING,
CONF_QOS,
CONF_TOPIC,
DATA_MQTT_RELOAD_NEEDED,
DEFAULT_ENCODING,
DEFAULT_PAYLOAD_AVAILABLE,
DEFAULT_PAYLOAD_NOT_AVAILABLE,
DOMAIN,
MQTT_CONNECTED,
MQTT_DISCONNECTED,
)
from .debug_info import log_message, log_messages
from .discovery import (
MQTT_DISCOVERY_DONE,
MQTT_DISCOVERY_NEW,
MQTT_DISCOVERY_UPDATED,
clear_discovery_hash,
set_discovery_hash,
)
from .models import PublishPayloadType, ReceiveMessage
from .subscription import (
async_prepare_subscribe_topics,
async_subscribe_topics,
async_unsubscribe_topics,
)
from .util import valid_subscribe_topic
_LOGGER = logging.getLogger(__name__)
AVAILABILITY_ALL = "all"
AVAILABILITY_ANY = "any"
AVAILABILITY_LATEST = "latest"
AVAILABILITY_MODES = [AVAILABILITY_ALL, AVAILABILITY_ANY, AVAILABILITY_LATEST]
CONF_AVAILABILITY_MODE = "availability_mode"
CONF_AVAILABILITY_TEMPLATE = "availability_template"
CONF_AVAILABILITY_TOPIC = "availability_topic"
CONF_ENABLED_BY_DEFAULT = "enabled_by_default"
CONF_PAYLOAD_AVAILABLE = "payload_available"
CONF_PAYLOAD_NOT_AVAILABLE = "payload_not_available"
CONF_JSON_ATTRS_TOPIC = "json_attributes_topic"
CONF_JSON_ATTRS_TEMPLATE = "json_attributes_template"
CONF_IDENTIFIERS = "identifiers"
CONF_CONNECTIONS = "connections"
CONF_MANUFACTURER = "manufacturer"
CONF_MODEL = "model"
CONF_SW_VERSION = "sw_version"
CONF_VIA_DEVICE = "via_device"
CONF_DEPRECATED_VIA_HUB = "via_hub"
CONF_SUGGESTED_AREA = "suggested_area"
CONF_CONFIGURATION_URL = "configuration_url"
CONF_OBJECT_ID = "object_id"
MQTT_ATTRIBUTES_BLOCKED = {
"assumed_state",
"available",
"context_recent_time",
"device_class",
"device_info",
"entity_category",
"entity_picture",
"entity_registry_enabled_default",
"extra_state_attributes",
"force_update",
"icon",
"name",
"should_poll",
"state",
"supported_features",
"unique_id",
"unit_of_measurement",
}
MQTT_AVAILABILITY_SINGLE_SCHEMA = vol.Schema(
{
vol.Exclusive(CONF_AVAILABILITY_TOPIC, "availability"): valid_subscribe_topic,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(
CONF_PAYLOAD_AVAILABLE, default=DEFAULT_PAYLOAD_AVAILABLE
): cv.string,
vol.Optional(
CONF_PAYLOAD_NOT_AVAILABLE, default=DEFAULT_PAYLOAD_NOT_AVAILABLE
): cv.string,
}
)
MQTT_AVAILABILITY_LIST_SCHEMA = vol.Schema(
{
vol.Optional(CONF_AVAILABILITY_MODE, default=AVAILABILITY_LATEST): vol.All(
cv.string, vol.In(AVAILABILITY_MODES)
),
vol.Exclusive(CONF_AVAILABILITY, "availability"): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_TOPIC): valid_subscribe_topic,
vol.Optional(
CONF_PAYLOAD_AVAILABLE, default=DEFAULT_PAYLOAD_AVAILABLE
): cv.string,
vol.Optional(
CONF_PAYLOAD_NOT_AVAILABLE,
default=DEFAULT_PAYLOAD_NOT_AVAILABLE,
): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
],
),
}
)
MQTT_AVAILABILITY_SCHEMA = MQTT_AVAILABILITY_SINGLE_SCHEMA.extend(
MQTT_AVAILABILITY_LIST_SCHEMA.schema
)
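# Illustrative example (assumption about the YAML shape; keys follow the schema
# above): the list form of availability combined with availability_mode "all"
# means every listed topic must have reported its payload_available:
#
#   availability_mode: all
#   availability:
#     - topic: "devices/bridge/state"
#       payload_available: "online"
#       payload_not_available: "offline"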
def validate_device_has_at_least_one_identifier(value: ConfigType) -> ConfigType:
"""Validate that a device info entry has at least one identifying value."""
if value.get(CONF_IDENTIFIERS) or value.get(CONF_CONNECTIONS):
return value
raise vol.Invalid(
"Device must have at least one identifying value in "
"'identifiers' and/or 'connections'"
)
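# Illustrative example (assumption): {"identifiers": ["abc123"]} passes this
# validator, while {"manufacturer": "Acme"} alone raises vol.Invalid.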
MQTT_ENTITY_DEVICE_INFO_SCHEMA = vol.All(
cv.deprecated(CONF_DEPRECATED_VIA_HUB, CONF_VIA_DEVICE),
vol.Schema(
{
vol.Optional(CONF_IDENTIFIERS, default=list): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_CONNECTIONS, default=list): vol.All(
cv.ensure_list, [vol.All(vol.Length(2), [cv.string])]
),
vol.Optional(CONF_MANUFACTURER): cv.string,
vol.Optional(CONF_MODEL): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_SW_VERSION): cv.string,
vol.Optional(CONF_VIA_DEVICE): cv.string,
vol.Optional(CONF_SUGGESTED_AREA): cv.string,
vol.Optional(CONF_CONFIGURATION_URL): cv.url,
}
),
validate_device_has_at_least_one_identifier,
)
MQTT_ENTITY_COMMON_SCHEMA = MQTT_AVAILABILITY_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_ENABLED_BY_DEFAULT, default=True): cv.boolean,
vol.Optional(CONF_ENTITY_CATEGORY): validate_entity_category,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_JSON_ATTRS_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_JSON_ATTRS_TEMPLATE): cv.template,
vol.Optional(CONF_OBJECT_ID): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
class SetupEntity(Protocol):
"""Protocol type for async_setup_entities."""
async def __call__(
self,
hass: HomeAssistant,
async_add_entities: AddEntitiesCallback,
config: ConfigType,
config_entry: ConfigEntry | None = None,
discovery_data: dict[str, Any] | None = None,
) -> None:
"""Define setup_entities type."""
async def async_setup_entry_helper(hass, domain, async_setup, schema):
"""Set up entity, automation or tag creation dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add an MQTT entity, automation or tag."""
discovery_data = discovery_payload.discovery_data
try:
config = schema(discovery_payload)
await async_setup(config, discovery_data=discovery_data)
except Exception:
discovery_hash = discovery_data[ATTR_DISCOVERY_HASH]
clear_discovery_hash(hass, discovery_hash)
async_dispatcher_send(
hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None
)
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(domain, "mqtt"), async_discover
)
async def async_setup_platform_helper(
hass: HomeAssistant,
platform_domain: str,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
async_setup_entities: SetupEntity,
) -> None:
"""Return true if platform setup should be aborted."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
if not bool(hass.config_entries.async_entries(DOMAIN)):
hass.data[DATA_MQTT_RELOAD_NEEDED] = None
_LOGGER.warning(
"MQTT integration is not setup, skipping setup of manually configured "
"MQTT %s",
platform_domain,
)
return
await async_setup_entities(hass, async_add_entities, config)
def init_entity_id_from_config(hass, entity, config, entity_id_format):
"""Set entity_id from object_id if defined in config."""
if CONF_OBJECT_ID in config:
entity.entity_id = async_generate_entity_id(
entity_id_format, config[CONF_OBJECT_ID], None, hass
)
class MqttAttributes(Entity):
"""Mixin used for platforms that support JSON attributes."""
_attributes_extra_blocked: frozenset[str] = frozenset()
def __init__(self, config: dict) -> None:
"""Initialize the JSON attributes mixin."""
self._attributes: dict | None = None
self._attributes_sub_state = None
self._attributes_config = config
async def async_added_to_hass(self) -> None:
"""Subscribe MQTT events."""
await super().async_added_to_hass()
self._attributes_prepare_subscribe_topics()
await self._attributes_subscribe_topics()
def attributes_prepare_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._attributes_config = config
self._attributes_prepare_subscribe_topics()
async def attributes_discovery_update(self, config: dict):
"""Handle updated discovery message."""
await self._attributes_subscribe_topics()
def _attributes_prepare_subscribe_topics(self):
"""(Re)Subscribe to topics."""
attr_tpl = MqttValueTemplate(
self._attributes_config.get(CONF_JSON_ATTRS_TEMPLATE), entity=self
).async_render_with_possible_json_value
@callback
@log_messages(self.hass, self.entity_id)
def attributes_message_received(msg: ReceiveMessage) -> None:
try:
payload = attr_tpl(msg.payload)
json_dict = json.loads(payload) if isinstance(payload, str) else None
if isinstance(json_dict, dict):
filtered_dict = {
k: v
for k, v in json_dict.items()
if k not in MQTT_ATTRIBUTES_BLOCKED
and k not in self._attributes_extra_blocked
}
self._attributes = filtered_dict
self.async_write_ha_state()
else:
_LOGGER.warning("JSON result was not a dictionary")
self._attributes = None
except ValueError:
_LOGGER.warning("Erroneous JSON: %s", payload)
self._attributes = None
self._attributes_sub_state = async_prepare_subscribe_topics(
self.hass,
self._attributes_sub_state,
{
CONF_JSON_ATTRS_TOPIC: {
"topic": self._attributes_config.get(CONF_JSON_ATTRS_TOPIC),
"msg_callback": attributes_message_received,
"qos": self._attributes_config.get(CONF_QOS),
"encoding": self._attributes_config[CONF_ENCODING] or None,
}
},
)
async def _attributes_subscribe_topics(self):
"""(Re)Subscribe to topics."""
await async_subscribe_topics(self.hass, self._attributes_sub_state)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._attributes_sub_state = async_unsubscribe_topics(
self.hass, self._attributes_sub_state
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._attributes
class MqttAvailability(Entity):
"""Mixin used for platforms that report availability."""
def __init__(self, config: dict) -> None:
"""Initialize the availability mixin."""
self._availability_sub_state = None
self._available: dict = {}
self._available_latest = False
self._availability_setup_from_config(config)
async def async_added_to_hass(self) -> None:
"""Subscribe MQTT events."""
await super().async_added_to_hass()
self._availability_prepare_subscribe_topics()
await self._availability_subscribe_topics()
self.async_on_remove(
async_dispatcher_connect(self.hass, MQTT_CONNECTED, self.async_mqtt_connect)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, MQTT_DISCONNECTED, self.async_mqtt_connect
)
)
def availability_prepare_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._availability_setup_from_config(config)
self._availability_prepare_subscribe_topics()
async def availability_discovery_update(self, config: dict):
"""Handle updated discovery message."""
await self._availability_subscribe_topics()
def _availability_setup_from_config(self, config):
"""(Re)Setup."""
self._avail_topics = {}
if CONF_AVAILABILITY_TOPIC in config:
self._avail_topics[config[CONF_AVAILABILITY_TOPIC]] = {
CONF_PAYLOAD_AVAILABLE: config[CONF_PAYLOAD_AVAILABLE],
CONF_PAYLOAD_NOT_AVAILABLE: config[CONF_PAYLOAD_NOT_AVAILABLE],
CONF_AVAILABILITY_TEMPLATE: config.get(CONF_AVAILABILITY_TEMPLATE),
}
if CONF_AVAILABILITY in config:
for avail in config[CONF_AVAILABILITY]:
self._avail_topics[avail[CONF_TOPIC]] = {
CONF_PAYLOAD_AVAILABLE: avail[CONF_PAYLOAD_AVAILABLE],
CONF_PAYLOAD_NOT_AVAILABLE: avail[CONF_PAYLOAD_NOT_AVAILABLE],
CONF_AVAILABILITY_TEMPLATE: avail.get(CONF_VALUE_TEMPLATE),
}
for avail_topic_conf in self._avail_topics.values():
avail_topic_conf[CONF_AVAILABILITY_TEMPLATE] = MqttValueTemplate(
avail_topic_conf[CONF_AVAILABILITY_TEMPLATE],
entity=self,
).async_render_with_possible_json_value
self._avail_config = config
def _availability_prepare_subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def availability_message_received(msg: ReceiveMessage) -> None:
"""Handle a new received MQTT availability message."""
topic = msg.topic
payload = self._avail_topics[topic][CONF_AVAILABILITY_TEMPLATE](msg.payload)
if payload == self._avail_topics[topic][CONF_PAYLOAD_AVAILABLE]:
self._available[topic] = True
self._available_latest = True
elif payload == self._avail_topics[topic][CONF_PAYLOAD_NOT_AVAILABLE]:
self._available[topic] = False
self._available_latest = False
self.async_write_ha_state()
self._available = {
topic: (self._available[topic] if topic in self._available else False)
for topic in self._avail_topics
}
topics = {
f"availability_{topic}": {
"topic": topic,
"msg_callback": availability_message_received,
"qos": self._avail_config[CONF_QOS],
"encoding": self._avail_config[CONF_ENCODING] or None,
}
for topic in self._avail_topics
}
self._availability_sub_state = async_prepare_subscribe_topics(
self.hass,
self._availability_sub_state,
topics,
)
async def _availability_subscribe_topics(self):
"""(Re)Subscribe to topics."""
await async_subscribe_topics(self.hass, self._availability_sub_state)
@callback
def async_mqtt_connect(self):
"""Update state on connection/disconnection to MQTT broker."""
if not self.hass.is_stopping:
self.async_write_ha_state()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._availability_sub_state = async_unsubscribe_topics(
self.hass, self._availability_sub_state
)
@property
def available(self) -> bool:
"""Return if the device is available."""
if not self.hass.data[DATA_MQTT].connected and not self.hass.is_stopping:
return False
if not self._avail_topics:
return True
if self._avail_config[CONF_AVAILABILITY_MODE] == AVAILABILITY_ALL:
return all(self._available.values())
if self._avail_config[CONF_AVAILABILITY_MODE] == AVAILABILITY_ANY:
return any(self._available.values())
return self._available_latest
async def cleanup_device_registry(hass, device_id, config_entry_id):
"""Remove device registry entry if there are no remaining entities or triggers."""
# Local import to avoid circular dependencies
# pylint: disable-next=import-outside-toplevel
from . import device_trigger, tag
device_registry = dr.async_get(hass)
entity_registry = er.async_get(hass)
if (
device_id
and not er.async_entries_for_device(
entity_registry, device_id, include_disabled_entities=False
)
and not await device_trigger.async_get_triggers(hass, device_id)
and not tag.async_has_tags(hass, device_id)
):
device_registry.async_update_device(
device_id, remove_config_entry_id=config_entry_id
)
class MqttDiscoveryUpdate(Entity):
"""Mixin used to handle updated discovery message."""
def __init__(self, discovery_data, discovery_update=None) -> None:
"""Initialize the discovery update mixin."""
self._discovery_data = discovery_data
self._discovery_update = discovery_update
self._remove_signal: Callable | None = None
self._removed_from_hass = False
async def async_added_to_hass(self) -> None:
"""Subscribe to discovery updates."""
await super().async_added_to_hass()
self._removed_from_hass = False
discovery_hash = (
self._discovery_data[ATTR_DISCOVERY_HASH] if self._discovery_data else None
)
async def _async_remove_state_and_registry_entry(self) -> None:
"""Remove entity's state and entity registry entry.
If the entity is registered, remove it from the entity registry; this also removes the state.
If the entity is not in the entity registry, just remove the state.
"""
entity_registry = er.async_get(self.hass)
if entity_entry := entity_registry.async_get(self.entity_id):
entity_registry.async_remove(self.entity_id)
await cleanup_device_registry(
self.hass, entity_entry.device_id, entity_entry.config_entry_id
)
else:
await self.async_remove(force_remove=True)
async def discovery_callback(payload):
"""Handle discovery update."""
_LOGGER.info(
"Got update for entity with hash: %s '%s'",
discovery_hash,
payload,
)
old_payload = self._discovery_data[ATTR_DISCOVERY_PAYLOAD]
debug_info.update_entity_discovery_data(self.hass, payload, self.entity_id)
if not payload:
# Empty payload: Remove component
_LOGGER.info("Removing component: %s", self.entity_id)
self._cleanup_discovery_on_remove()
await _async_remove_state_and_registry_entry(self)
elif self._discovery_update:
if old_payload != self._discovery_data[ATTR_DISCOVERY_PAYLOAD]:
# Non-empty, changed payload: Notify component
_LOGGER.info("Updating component: %s", self.entity_id)
await self._discovery_update(payload)
else:
# Non-empty, unchanged payload: Ignore to avoid changing states
_LOGGER.info("Ignoring unchanged update for: %s", self.entity_id)
async_dispatcher_send(
self.hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None
)
if discovery_hash:
debug_info.add_entity_discovery_data(
self.hass, self._discovery_data, self.entity_id
)
# Set in case the entity has been removed and is re-added, for example when changing entity_id
set_discovery_hash(self.hass, discovery_hash)
self._remove_signal = async_dispatcher_connect(
self.hass,
MQTT_DISCOVERY_UPDATED.format(discovery_hash),
discovery_callback,
)
async_dispatcher_send(
self.hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None
)
async def async_removed_from_registry(self) -> None:
"""Clear retained discovery topic in broker."""
if not self._removed_from_hass:
# Stop subscribing to discovery updates to not trigger when we clear the
# discovery topic
self._cleanup_discovery_on_remove()
# Clear the discovery topic so the entity is not rediscovered after a restart
discovery_topic = self._discovery_data[ATTR_DISCOVERY_TOPIC]
await async_publish(self.hass, discovery_topic, "", retain=True)
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
if self._discovery_data:
discovery_hash = self._discovery_data[ATTR_DISCOVERY_HASH]
clear_discovery_hash(self.hass, discovery_hash)
async_dispatcher_send(
self.hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None
)
super().add_to_platform_abort()
async def async_will_remove_from_hass(self) -> None:
"""Stop listening to signal and cleanup discovery data.."""
self._cleanup_discovery_on_remove()
def _cleanup_discovery_on_remove(self) -> None:
"""Stop listening to signal and cleanup discovery data."""
if self._discovery_data and not self._removed_from_hass:
clear_discovery_hash(self.hass, self._discovery_data[ATTR_DISCOVERY_HASH])
self._removed_from_hass = True
if self._remove_signal:
self._remove_signal()
self._remove_signal = None
def device_info_from_config(config) -> DeviceInfo | None:
"""Return a device description for device registry."""
if not config:
return None
info = DeviceInfo(
identifiers={(DOMAIN, id_) for id_ in config[CONF_IDENTIFIERS]},
connections={(conn_[0], conn_[1]) for conn_ in config[CONF_CONNECTIONS]},
)
if CONF_MANUFACTURER in config:
info[ATTR_MANUFACTURER] = config[CONF_MANUFACTURER]
if CONF_MODEL in config:
info[ATTR_MODEL] = config[CONF_MODEL]
if CONF_NAME in config:
info[ATTR_NAME] = config[CONF_NAME]
if CONF_SW_VERSION in config:
info[ATTR_SW_VERSION] = config[CONF_SW_VERSION]
if CONF_VIA_DEVICE in config:
info[ATTR_VIA_DEVICE] = (DOMAIN, config[CONF_VIA_DEVICE])
if CONF_SUGGESTED_AREA in config:
info[ATTR_SUGGESTED_AREA] = config[CONF_SUGGESTED_AREA]
if CONF_CONFIGURATION_URL in config:
info[ATTR_CONFIGURATION_URL] = config[CONF_CONFIGURATION_URL]
return info
class MqttEntityDeviceInfo(Entity):
"""Mixin used for mqtt platforms that support the device registry."""
def __init__(self, device_config: ConfigType | None, config_entry=None) -> None:
"""Initialize the device mixin."""
self._device_config = device_config
self._config_entry = config_entry
def device_info_discovery_update(self, config: dict):
"""Handle updated discovery message."""
self._device_config = config.get(CONF_DEVICE)
device_registry = dr.async_get(self.hass)
config_entry_id = self._config_entry.entry_id
device_info = self.device_info
if config_entry_id is not None and device_info is not None:
device_registry.async_get_or_create(
config_entry_id=config_entry_id, **device_info
)
@property
def device_info(self) -> DeviceInfo | None:
"""Return a device description for device registry."""
return device_info_from_config(self._device_config)
class MqttEntity(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
):
"""Representation of an MQTT entity."""
_entity_id_format: str
def __init__(self, hass, config, config_entry, discovery_data):
"""Init the MQTT Entity."""
self.hass = hass
self._config = config
self._unique_id = config.get(CONF_UNIQUE_ID)
self._sub_state = None
# Load config
self._setup_from_config(self._config)
# Initialize entity_id from config
self._init_entity_id()
# Initialize mixin classes
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, config.get(CONF_DEVICE), config_entry)
def _init_entity_id(self):
"""Set entity_id from object_id if defined in config."""
init_entity_id_from_config(
self.hass, self, self._config, self._entity_id_format
)
async def async_added_to_hass(self):
"""Subscribe mqtt events."""
await super().async_added_to_hass()
self._prepare_subscribe_topics()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = self.config_schema()(discovery_payload)
self._config = config
self._setup_from_config(self._config)
# Prepare MQTT subscriptions
self.attributes_prepare_discovery_update(config)
self.availability_prepare_discovery_update(config)
self.device_info_discovery_update(config)
self._prepare_subscribe_topics()
# Finalize MQTT subscriptions
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
debug_info.remove_entity_data(self.hass, self.entity_id)
async def async_publish(
self,
topic: str,
payload: PublishPayloadType,
qos: int = 0,
retain: bool = False,
encoding: str = DEFAULT_ENCODING,
):
"""Publish message to an MQTT topic."""
log_message(self.hass, self.entity_id, topic, payload, qos, retain)
await async_publish(
self.hass,
topic,
payload,
qos,
retain,
encoding,
)
@staticmethod
@abstractmethod
def config_schema():
"""Return the config schema."""
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
@abstractmethod
def _prepare_subscribe_topics(self):
"""(Re)Subscribe to topics."""
@abstractmethod
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._config[CONF_ENABLED_BY_DEFAULT]
@property
def entity_category(self) -> EntityCategory | str | None:
"""Return the entity category if any."""
return self._config.get(CONF_ENTITY_CATEGORY)
@property
def icon(self):
"""Return icon of the entity if any."""
return self._config.get(CONF_ICON)
@property
def name(self):
"""Return the name of the device if any."""
return self._config.get(CONF_NAME)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@callback
def async_removed_from_device(
hass: HomeAssistant, event: Event, mqtt_device_id: str, config_entry_id: str
) -> bool:
"""Check if the passed event indicates MQTT was removed from a device."""
device_id = event.data["device_id"]
if event.data["action"] not in ("remove", "update"):
return False
if device_id != mqtt_device_id:
return False
if event.data["action"] == "update":
if "config_entries" not in event.data["changes"]:
return False
device_registry = dr.async_get(hass)
if not (device_entry := device_registry.async_get(device_id)):
# The device is already removed, do cleanup when we get "remove" event
return False
if config_entry_id in device_entry.config_entries:
# Not removed from device
return False
return True
|
{
"content_hash": "76adca3c4be8c0b6dc39567a110037b6",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 106,
"avg_line_length": 35.45465253239105,
"alnum_prop": 0.6208099398691074,
"repo_name": "rohitranjan1991/home-assistant",
"id": "9f3722a8f31743a5bfc71476961685fcf79247e0",
"size": "30101",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mqtt/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
}
|
from handlers import Handler
from utils import Helper_sign
# Signup handler
class Signup(Handler):
def get(self):
self.render("signup-form.html")
def post(self):
have_error = False
self.username = self.request.get('username')
self.password = self.request.get('password')
self.verify = self.request.get('verify')
self.email = self.request.get('email')
params = dict(username=self.username,
email=self.email)
if not Helper_sign.valid_username(self.username):
params['error_username'] = "Invalid username."
have_error = True
if not Helper_sign.valid_password(self.password):
params['error_password'] = "Invalid password."
have_error = True
elif self.password != self.verify:
params['error_verify'] = "Passwords didn't match."
have_error = True
if not Helper_sign.valid_email(self.email):
params['error_email'] = "Invalid email."
have_error = True
if have_error:
self.render('signup-form.html', **params)
else:
self.done()
def done(self, *a, **kw):
raise NotImplementedError
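# Illustrative sketch (assumption; the User model and login helper are not part
# of this module): a concrete handler only needs to override done(), e.g.
#
#   class Register(Signup):
#       def done(self):
#           user = User.register(self.username, self.password, self.email)
#           user.put()
#           self.login(user)
#           self.redirect('/welcome')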
|
{
"content_hash": "69c32c0e16036162ca19efdff9519940",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 62,
"avg_line_length": 30.390243902439025,
"alnum_prop": 0.5818619582664526,
"repo_name": "YuhanLin1105/Multi-User-Blog",
"id": "853c75cd849b96812acc8caf18cb78683f1cfdd5",
"size": "1246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "handlers/signup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2824"
},
{
"name": "HTML",
"bytes": "9356"
},
{
"name": "Python",
"bytes": "18732"
}
],
"symlink_target": ""
}
|
import os
import fixtures
from oslo.config import cfg
from tuskar.common import policy as tuskar_policy
from tuskar.openstack.common import policy as common_policy
from tuskar.tests import fake_policy
CONF = cfg.CONF
class PolicyFixture(fixtures.Fixture):
def setUp(self):
super(PolicyFixture, self).setUp()
self.policy_dir = self.useFixture(fixtures.TempDir())
self.policy_file_name = os.path.join(self.policy_dir.path,
'policy.json')
with open(self.policy_file_name, 'w') as policy_file:
policy_file.write(fake_policy.policy_data)
CONF.set_override('policy_file', self.policy_file_name)
tuskar_policy.reset()
tuskar_policy.init()
self.addCleanup(tuskar_policy.reset)
def set_rules(self, rules):
common_policy.set_rules(common_policy.Rules(
dict((k, common_policy.parse_rule(v))
for k, v in rules.items())))
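# Illustrative usage (assumption): inside a tuskar test case
#   self.policy = self.useFixture(PolicyFixture())
#   self.policy.set_rules({'some_rule': ''})   # empty rule string allows all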
|
{
"content_hash": "afbb21825e583324798544fd7ff6e187",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 33,
"alnum_prop": 0.6373737373737374,
"repo_name": "ccrouch/tuskar",
"id": "5af88c3eee45a4ab8bae617576658fe18c668094",
"size": "1597",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tuskar/tests/policy_fixture.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import logging
from google.appengine.ext import ndb
import webapp2
import tasks
class DemoHandler(webapp2.RequestHandler):
"""
Basic handler that triggers some code to be executed later.
"""
def get(self):
# If you have the ndb.toplevel middleware you can just fire-and-forget:
tasks.addTask('default', doStuff, "foo")
# Otherwise it is recommended to call get_result before exiting the request handler:
tasks.addTask('default', doStuff, "bar").get_result()
self.response.write("Task enqueued")
def doStuff(what):
logging.info("Doing stuff: %s", what)
app = webapp2.WSGIApplication([
webapp2.Route('/', DemoHandler),
webapp2.Route('/_cb/deferred/<module>/<name>', tasks.DeferredHandler)
])
app = ndb.toplevel(app)
|
{
"content_hash": "1fbde77171add574e0b6e601ecae9965",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 25.84375,
"alnum_prop": 0.6541717049576784,
"repo_name": "freshplanet/AppEngine-Deferred",
"id": "26366077a3dac6fa255577e643d7f8980d3a082e",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sampleApp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14652"
}
],
"symlink_target": ""
}
|
from networkapiclient.ApiGenericClient import ApiGenericClient
from networkapiclient.utils import build_uri_with_ids
class ApiEnvironment(ApiGenericClient):
def __init__(self, networkapi_url, user, password, user_ldap=None):
"""Class constructor receives parameters to connect to the networkAPI.
:param networkapi_url: URL to access the network API.
:param user: User for authentication.
:param password: Password for authentication.
"""
super(ApiEnvironment, self).__init__(
networkapi_url,
user,
password,
user_ldap
)
def list_all_environment_related_environment_vip(self):
"""
Return the list of environments related to an environment vip.
"""
uri = 'api/v3/environment/environment-vip/'
return super(ApiEnvironment, self).get(uri)
def get_environment(self, environment_ids):
"""
Method to get environment
"""
uri = 'api/v3/environment/%s/' % environment_ids
return super(ApiEnvironment, self).get(uri)
def create_environment(self, environment):
"""
Method to create environment
"""
uri = 'api/v3/environment/'
data = dict()
data['environments'] = list()
data['environments'].append(environment)
return super(ApiEnvironment, self).post(uri, data)
def update_environment(self, environment, environment_ids):
"""
Method to update environment
:param environment_ids: Ids of Environment
"""
uri = 'api/v3/environment/%s/' % environment_ids
data = dict()
data['environments'] = list()
data['environments'].append(environment)
return super(ApiEnvironment, self).put(uri, data)
def delete_environment(self, environment_ids):
"""
Method to delete environment
:param environment_ids: Ids of Environment
"""
uri = 'api/v3/environment/%s/' % environment_ids
return super(ApiEnvironment, self).delete(uri)
def search(self, **kwargs):
"""
Method to search environments based on extends search.
:param search: Dict containing QuerySets to find environments.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing environments
"""
return super(ApiEnvironment, self).get(self.prepare_url('api/v3/environment/',
kwargs))
def get(self, ids, **kwargs):
"""
Method to get environments by their ids
:param ids: List containing identifiers of environments
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing environments
"""
url = build_uri_with_ids('api/v3/environment/%s/', ids)
return super(ApiEnvironment, self).get(self.prepare_url(url, kwargs))
def delete(self, ids):
"""
Method to delete environments by their ids
:param ids: Identifiers of environments
:return: None
"""
url = build_uri_with_ids('api/v3/environment/%s/', ids)
return super(ApiEnvironment, self).delete(url)
def update(self, environments):
"""
Method to update environments
:param environments: List containing environments desired to be updated
:return: None
"""
data = {'environments': environments}
environments_ids = [str(env.get('id')) for env in environments]
return super(ApiEnvironment, self).put('api/v3/environment/%s/' %
';'.join(environments_ids), data)
def create(self, environments):
"""
Method to create environments
:param environments: List containing environments desired to be created in the database
:return: None
"""
data = {'environments': environments}
return super(ApiEnvironment, self).post('api/v3/environment/', data)
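# Illustrative usage sketch (assumption: a reachable NetworkAPI endpoint and
# credentials; the ids are placeholders):
#   client = ApiEnvironment('http://networkapi.example.com/', 'user', 'pass')
#   envs = client.get([123, 456], kind='basic')
#   client.delete([123])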
|
{
"content_hash": "0563cd0202e5c0eb75add0ccd3797466",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 91,
"avg_line_length": 32.97841726618705,
"alnum_prop": 0.6151832460732984,
"repo_name": "globocom/GloboNetworkAPI-client-python",
"id": "c5be39339d1a718fc67345a241ba5ea2aca6df38",
"size": "5389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networkapiclient/ApiEnvironment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "186"
},
{
"name": "Makefile",
"bytes": "2840"
},
{
"name": "Python",
"bytes": "767058"
}
],
"symlink_target": ""
}
|
"""Tests for manipulating Containers via the DB API"""
import six
from magnum.common import exception
from magnum.common import utils as magnum_utils
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils
class DbContainerTestCase(base.DbTestCase):
def test_create_container(self):
utils.create_test_container()
def test_create_container_already_exists(self):
utils.create_test_container()
self.assertRaises(exception.ContainerAlreadyExists,
utils.create_test_container)
def test_get_container_by_id(self):
container = utils.create_test_container()
res = self.dbapi.get_container_by_id(self.context, container.id)
self.assertEqual(container.id, res.id)
self.assertEqual(container.uuid, res.uuid)
def test_get_container_by_uuid(self):
container = utils.create_test_container()
res = self.dbapi.get_container_by_uuid(self.context,
container.uuid)
self.assertEqual(container.id, res.id)
self.assertEqual(container.uuid, res.uuid)
def test_get_container_by_name(self):
container = utils.create_test_container()
res = self.dbapi.get_container_by_name(self.context,
container.name)
self.assertEqual(container.id, res.id)
self.assertEqual(container.uuid, res.uuid)
def test_get_container_that_does_not_exist(self):
self.assertRaises(exception.ContainerNotFound,
self.dbapi.get_container_by_id, self.context, 99)
self.assertRaises(exception.ContainerNotFound,
self.dbapi.get_container_by_uuid,
self.context,
magnum_utils.generate_uuid())
def test_get_container_list(self):
uuids = []
for i in range(1, 6):
container = utils.create_test_container(
uuid=magnum_utils.generate_uuid())
uuids.append(six.text_type(container['uuid']))
res = self.dbapi.get_container_list(self.context)
res_uuids = [r.uuid for r in res]
self.assertEqual(sorted(uuids), sorted(res_uuids))
def test_get_container_list_with_filters(self):
container1 = utils.create_test_container(
name='container-one',
uuid=magnum_utils.generate_uuid())
container2 = utils.create_test_container(
name='container-two',
uuid=magnum_utils.generate_uuid())
res = self.dbapi.get_container_list(self.context,
filters={'name': 'container-one'})
self.assertEqual([container1.id], [r.id for r in res])
res = self.dbapi.get_container_list(self.context,
filters={'name': 'container-two'})
self.assertEqual([container2.id], [r.id for r in res])
res = self.dbapi.get_container_list(self.context,
filters={'name': 'bad-container'})
self.assertEqual([], [r.id for r in res])
def test_destroy_container(self):
container = utils.create_test_container()
self.dbapi.destroy_container(container.id)
self.assertRaises(exception.ContainerNotFound,
self.dbapi.get_container_by_id,
self.context, container.id)
def test_destroy_container_by_uuid(self):
container = utils.create_test_container()
self.dbapi.destroy_container(container.uuid)
self.assertRaises(exception.ContainerNotFound,
self.dbapi.get_container_by_uuid,
self.context, container.uuid)
def test_destroy_container_that_does_not_exist(self):
self.assertRaises(exception.ContainerNotFound,
self.dbapi.destroy_container,
magnum_utils.generate_uuid())
def test_update_container(self):
container = utils.create_test_container()
old_image = container.image
new_image = 'new-image'
self.assertNotEqual(old_image, new_image)
res = self.dbapi.update_container(container.id,
{'image': new_image})
self.assertEqual(new_image, res.image)
def test_update_container_not_found(self):
container_uuid = magnum_utils.generate_uuid()
new_image = 'new-image'
self.assertRaises(exception.ContainerNotFound,
self.dbapi.update_container,
container_uuid, {'image': new_image})
def test_update_container_uuid(self):
container = utils.create_test_container()
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.update_container, container.id,
{'uuid': ''})
|
{
"content_hash": "d6629986eb264c451e069827d594bda0",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 41.67226890756302,
"alnum_prop": 0.5940713853599516,
"repo_name": "LaynePeng/magnum",
"id": "278b95988895189a24b22844b0354289c0b1894e",
"size": "5595",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "magnum/tests/unit/db/test_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "1644287"
},
{
"name": "Shell",
"bytes": "24458"
}
],
"symlink_target": ""
}
|
import json
import os
try:
import testtools as unittest
except ImportError:
import unittest
from distill.exceptions import HTTPInternalServerError
from distill.renderers import RenderFactory, renderer
from distill.response import Response
class TestRenderers(unittest.TestCase):
def test_default_renderers(self):
RenderFactory.create({})
@renderer('json')
def fake_on_get_json(request, response):
return {"Hello": "world"}
resp = Response()
rendered = fake_on_get_json(None, resp)
self.assertEqual(resp.headers['Content-Type'], 'application/json')
data = json.loads(rendered)
self.assertEqual(data['Hello'], 'world')
class json_obj(object):
def __init__(self, name):
self.name = name
            def json(self, request):
return {'name': self.name}
class not_json_obj(object):
pass
@renderer('json')
def fake_on_get_json_obj(request, response):
return json_obj('Foobar')
@renderer('json')
def fake_on_get_non_json_obj(request, response):
return not_json_obj()
@renderer('json')
def fake_on_return_response(request, response):
return Response("719 I am not a teapot")
@renderer('json', pad=True)
def test_secure_pad(request, response):
return {"foo": "bar"}
resp = Response()
rendered = fake_on_get_json_obj(None, resp)
self.assertEqual(resp.headers['Content-Type'], 'application/json')
data = json.loads(rendered)
self.assertEqual(data['name'], 'Foobar')
self.assertRaises(TypeError, fake_on_get_non_json_obj, None, Response())
self.assertIsInstance(fake_on_return_response(None, None), Response)
resp = Response()
rendered = test_secure_pad(None, resp)
self.assertEqual(resp.headers['Content-Type'], 'application/json')
self.assertEqual(rendered, ")]}',\n" + json.dumps({"foo": "bar"}))
def test_file_templates(self):
RenderFactory.create(
{'distill.document_root': os.path.abspath(os.path.join(os.path.dirname(__file__), 'res'))})
@renderer('test.mako')
def fake_on_get_json(request, response):
return {"user": "Foobar"}
@renderer('test.mako')
def fake_on_get_string(request, response):
return "Hello world!"
resp = Response()
rendered = fake_on_get_json(None, resp)
self.assertEqual(resp.headers['Content-Type'], 'text/html')
self.assertEqual(rendered, 'Hello Foobar!')
resp = Response()
rendered = fake_on_get_string(None, resp)
self.assertEqual(rendered, 'Hello world!')
def test_add_renderer(self):
RenderFactory.create({})
class TextRenderer(object):
def __init__(self):
pass
def __call__(self, data, request, response):
response.headers['Content-Type'] = 'text/plain'
return str(data)
RenderFactory.add_renderer('text2', TextRenderer())
@renderer('text2')
def fake_on_get(request, response):
return "Hello world"
resp = Response()
rendered = fake_on_get(None, resp)
self.assertEqual(resp.headers['Content-Type'], 'text/plain')
self.assertEqual(rendered, 'Hello world')
def test_no_template(self):
RenderFactory.create({})
@renderer('foobar')
def fake_on_get(request, response):
return "How did I get here?"
self.assertRaises(HTTPInternalServerError, fake_on_get, None, None)
|
{
"content_hash": "4aeee8b2c84cdd255033048d181924a6",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 103,
"avg_line_length": 32.008620689655174,
"alnum_prop": 0.5957446808510638,
"repo_name": "Dreae/Distill",
"id": "b16ec4deda1137dd4f34029acb021fbbc78f9bcf",
"size": "3713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_renderers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "66315"
}
],
"symlink_target": ""
}
|
__author__ = 'thor'
from collections import OrderedDict
from ut.util.pobj import methods_of
class Analyzer(object):
"""
An Analyzer is a class to manage a simple dashboard that takes inputs from an html form, and takes action on these
inputs.
    An Analyzer is defined by a list of dicts, each specifying a form input element in a way that is very similar to
standard html form elements. Each dict should contain a name and a type. If the type is 'button', the Analyzer
should have a method of the same name as the 'name' key of the element. This is checked by the
verify_existence_of_button_functions() method.
Attributes:
* input_element_collection: The underlying InputElementCollection
        * button_methods: A list of the names of the InputElements that have type='button'
        * input: A dict containing the {key: val} pairs of the form's input values (these are set only when
          set_inputs(**kwargs) is called).
    The set_inputs(**kwargs) method updates the input key values (only for keys that already exist).
    The call_button_function(name, **kwargs) method updates the input key values (using set_inputs(**kwargs)) and
    then calls the named method with the **kwargs input (see the illustrative sketch after this class).
"""
def __init__(self, form_elements, to_html_kwargs={}, analyzer_name=''):
to_html_kwargs = dict(
prefix='<div id="analyzer_input">\n',
suffix='<input type="hidden" name="analyzer_name" value="{}" />\n</div>'.format(
analyzer_name
),
**to_html_kwargs
)
self.input_element_collection = InputElementCollection(
form_elements, to_html_kwargs
)
self.button_methods = list()
self.input = dict()
for k, v in self.input_element_collection.items():
if v['type'] == 'button':
self.button_methods.append(v['name'])
else:
self.input.update({k: v.get('value', None)})
def to_html(self, **kwargs):
return self.input_element_collection.to_html(**kwargs)
def verify_existence_of_button_functions(self):
methods_list = self.method_list()
for method_name in self.button_methods:
assert (
method_name in methods_list
), 'This method is missing (is in button input_element): {}'.format(
method_name
)
def method_list(self):
return methods_of(self)
def set_inputs(self, **kwargs):
self.input.update(
**{k: v for k, v in kwargs.items() if k in list(self.input.keys())}
)
def call_button_function(self, name, **kwargs):
self.set_inputs(**kwargs)
return self.__getattribute__(name)(**kwargs)
def __repr__(self):
s = ''
for k, v in self.input.items():
s += '{input}: {val}\n'.format(input=k, val=v)
return s
def __str__(self):
return self.to_html()
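# --- Illustrative sketch (an assumption, not part of the original module) ---
# A minimal Analyzer subclass wired to a hypothetical 'run_report' button,
# inferred from the class docstring above.  Element names and values are made
# up for illustration; the class is defined here but never instantiated at
# import time (instantiation needs InputElementCollection, defined below).
class _ExampleReportAnalyzer(Analyzer):
    # form spec: one text input plus one button named after the method below
    example_form_elements = [
        {'name': 'start_date', 'type': 'text', 'value': '2015-01-01'},
        {'name': 'run_report', 'type': 'button'},
    ]
    def run_report(self, **kwargs):
        # reached via call_button_function('run_report', **form_values);
        # self.input already holds the merged form values at this point
        return 'report for {}'.format(self.input.get('start_date'))
# Typical use (sketch): a = _ExampleReportAnalyzer(_ExampleReportAnalyzer.example_form_elements);
# a.verify_existence_of_button_functions(); html = a.to_html();
# result = a.call_button_function('run_report', start_date='2016-06-01')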
class InputElementCollection(OrderedDict):
"""
    An InputElementCollection is a class that specifies html form elements.
It's a collection of InputElements. More precisely, it's an OrderedDict indexed by the name of the InputElements.
Constructor arguments:
* form_elements is a list of dicts each specifying an InputElement
* to_html_kwargs is a dict that specifies how to generate html for a form with the InputElements
The to_html() method returns an html form with the form elements. The way the html is created can be controlled
through the constructor's to_html_kwargs argument.
"""
def __init__(self, form_elements, to_html_kwargs={}):
self.to_html_kwargs = dict(
dict(prefix='<form>\n', suffix='</form>\n', sep='\n<br>\n'),
**to_html_kwargs
)
form_element_list = [(x['name'], InputElement(x)) for x in form_elements]
# for form_element in form_elements:
# form_element_list.append((form_element['name'], form_element))
super(InputElementCollection, self).__init__(form_element_list)
def to_html(self, **kwargs):
kwargs.update(**self.to_html_kwargs)
html = kwargs['prefix']
for k, v in self.items():
html += v.to_html(**kwargs) + kwargs['sep']
html += kwargs['suffix']
return html
class InputElement(dict):
"""
    An InputElement is a dict (but it requires 'name' and 'type' keys).
    You can enter whatever type you want, but the usage is meant to be aligned with html form attributes
(see for example http://www.w3schools.com/tags/att_input_type.asp).
There are other keys that are not standard attributes:
* the display key specifies what text will be displayed before the input box. If the display key is not
specified, it will be created and given the value of name. Therefore, if you want to display nothing, you must
      specify display="". The type='button' input elements are treated slightly differently. If the display key is not
specified, it will not be created. But if the value key is not specified, it will take on the value of the
name attribute.
    The to_html() method returns the html string for the input element (a small runnable sketch at the end of this
    module demonstrates it).
"""
def __init__(self, *args, **kwargs):
super(InputElement, self).__init__(*args, **kwargs)
self.verify_inputs()
def verify_inputs(self):
key_list = list(self.keys())
        assert 'name' in key_list, "you need to have a 'name' in an InputElement"
        assert (
            'type' in key_list
        ), "you need to have a 'type' in an InputElement (see html input tag types)"
if self['type'] == 'button':
if not self.get('value'):
self['value'] = self['name']
else:
if not self.get('display'):
self['display'] = self['name']
# if self['type'] == 'button':
# if 'function' not in key_list:
# self.update(function=self['name'])
def update(self, *args, **kwargs):
super(InputElement, self).update(*args, **kwargs)
self.verify_inputs()
def to_html(self, **kwargs):
d = self.copy()
element_type = d['type']
element_name = d['name']
if element_type == 'input':
html = ''
            # use the display text as the element's value attribute
            d['value'] = d.pop('display', '')
else:
html = d.pop('display', '')
if html == ': ':
html = ''
html += '<input type="{type}" name="{name}"'.format(
type=d.pop('type'), name=d.pop('name')
)
# d.pop('function', None) # get rid of function
for k, v in d.items():
html += ' {}="{}"'.format(k, v)
if element_type == 'button':
html += ' onclick="getResult(\'{}\')"'.format(element_name)
html += '>'
return html
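# --- Illustrative sketch (an assumption, not part of the original module) ---
# A quick demonstration of InputElement / InputElementCollection rendering,
# guarded so it only runs when this file is executed directly.  Element names
# are made up for illustration.
if __name__ == '__main__':
    # a non-button element gets a 'display' key defaulting to its name;
    # a button element gets a 'value' key defaulting to its name instead
    text_box = InputElement({'name': 'query', 'type': 'text'})
    button = InputElement({'name': 'search', 'type': 'button'})
    print(text_box.to_html())  # e.g. query<input type="text" name="query">
    print(button.to_html())    # e.g. <input type="button" name="search" value="search" ...>
    # the collection wraps every element in a <form>, joined with the sep and
    # framed by the prefix/suffix supplied via to_html_kwargs
    collection = InputElementCollection(
        [{'name': 'query', 'type': 'text'}, {'name': 'search', 'type': 'button'}]
    )
    print(collection.to_html())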
|
{
"content_hash": "bff977de599ef75eec327c05e63d65c3",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 118,
"avg_line_length": 39.23163841807909,
"alnum_prop": 0.5976382488479263,
"repo_name": "thorwhalen/ut",
"id": "b89a00823e2b25cfe82d5b6a2632899b9368d109",
"size": "6944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ut/wserv/dashboard/analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1174"
},
{
"name": "Python",
"bytes": "2258941"
}
],
"symlink_target": ""
}
|