text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import unittest
from cpuinfo import *
import helpers
class MockDataSource:
    """Fake cpuinfo ``DataSource`` describing a 32-bit ARM Linux system.

    Only the /proc/cpuinfo and cpufreq-info probes are available; every
    other probe is absent, so the matching ``_get_cpu_info_from_*``
    helpers are expected to return empty results.
    """
    bits = '32bit'
    cpu_count = 1
    is_windows = False
    arch_string_raw = 'armv7l'  # raw machine architecture string (uname -m style)
    uname_string_raw = ''

    @staticmethod
    def has_proc_cpuinfo():
        # /proc/cpuinfo is readable on this mocked system.
        return True

    @staticmethod
    def has_cpufreq_info():
        # The cpufreq-info tool is installed on this mocked system.
        return True

    @staticmethod
    def cat_proc_cpuinfo():
        # Canned `cat /proc/cpuinfo` output, returned as (returncode, text).
        returncode = 0
        output = r'''
processor : 0
model name : ARMv6-compatible processor rev 7 (v6l)
Features : swp half thumb fastmult vfp edsp java tls
CPU implementer : 0x41
CPU architecture: 7
CPU variant : 0x0
CPU part : 0xb76
CPU revision : 7
Hardware : BCM2708
Revision : 0010
Serial : 00000000be6d9ba0
'''
        return returncode, output

    @staticmethod
    def cpufreq_info():
        # Canned `cpufreq-info` output, returned as (returncode, text).
        returncode = 0
        output = r'''
cpufrequtils 008: cpufreq-info (C) Dominik Brodowski 2004-2009
Report errors and bugs to cpufreq@vger.kernel.org, please.
analyzing CPU 0:
driver: generic_cpu0
CPUs which run at the same hardware frequency: 0
CPUs which need to have their frequency coordinated by software: 0
maximum transition latency: 300 us.
hardware limits: 300 MHz - 1000 MHz
available frequency steps: 300 MHz, 600 MHz, 800 MHz, 1000 MHz
available cpufreq governors: conservative, ondemand, userspace, powersave, performance
current policy: frequency should be within 300 MHz and 1000 MHz.
The governor "performance" may decide which speed to use
within this range.
current CPU frequency is 1000 MHz.
cpufreq stats: 300 MHz:0.00%, 600 MHz:0.00%, 800 MHz:0.00%, 1000 MHz:100.00%
'''
        return returncode, output
class TestLinux_BeagleBone(unittest.TestCase):
    """Tests cpuinfo parsing against the mocked ARM Linux data source.

    NOTE(review): the class name says BeagleBone, but the fixture data
    (Hardware: BCM2708) is a Raspberry Pi board — confirm the fixture
    matches the intended platform.
    """

    def setUp(self):
        # Swap the real DataSource for the mock before each test.
        helpers.backup_data_source(cpuinfo)
        helpers.monkey_patch_data_source(cpuinfo, MockDataSource)

    def tearDown(self):
        # Restore the real DataSource so other test modules are unaffected.
        helpers.restore_data_source(cpuinfo)

    '''
    Make sure calls return the expected number of fields.
    '''
    def test_returns(self):
        # Probes without mocked data must yield empty dicts; the two
        # mocked probes yield their expected field counts.
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_registry()))
        self.assertEqual(4, len(cpuinfo._get_cpu_info_from_cpufreq_info()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_lscpu()))
        self.assertEqual(3, len(cpuinfo._get_cpu_info_from_proc_cpuinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysctl()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_kstat()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_dmesg()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cat_var_run_dmesg_boot()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_ibm_pa_features()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_sysinfo()))
        self.assertEqual(0, len(cpuinfo._get_cpu_info_from_cpuid()))
        self.assertEqual(14, len(cpuinfo._get_cpu_info_internal()))

    def test_get_cpu_info_from_cpufreq_info(self):
        # Frequencies come from the "current CPU frequency is 1000 MHz" line.
        info = cpuinfo._get_cpu_info_from_cpufreq_info()

        self.assertEqual('1.0000 GHz', info['hz_advertised_friendly'])
        self.assertEqual('1.0000 GHz', info['hz_actual_friendly'])
        self.assertEqual((1000000000, 0), info['hz_advertised'])
        self.assertEqual((1000000000, 0), info['hz_actual'])

    def test_get_cpu_info_from_proc_cpuinfo(self):
        info = cpuinfo._get_cpu_info_from_proc_cpuinfo()

        self.assertEqual('BCM2708', info['hardware_raw'])
        self.assertEqual('ARMv6-compatible processor rev 7 (v6l)', info['brand_raw'])
        # Flags are reported sorted alphabetically.
        self.assertEqual(
            ['edsp', 'fastmult', 'half', 'java', 'swp', 'thumb', 'tls', 'vfp']
            ,
            info['flags']
        )

    def test_all(self):
        # Merged result of all probes.
        info = cpuinfo._get_cpu_info_internal()

        self.assertEqual('BCM2708', info['hardware_raw'])
        self.assertEqual('ARMv6-compatible processor rev 7 (v6l)', info['brand_raw'])
        self.assertEqual('1.0000 GHz', info['hz_advertised_friendly'])
        self.assertEqual('1.0000 GHz', info['hz_actual_friendly'])
        self.assertEqual((1000000000, 0), info['hz_advertised'])
        self.assertEqual((1000000000, 0), info['hz_actual'])
        self.assertEqual('ARM_7', info['arch'])
        self.assertEqual(32, info['bits'])
        self.assertEqual(1, info['count'])
        self.assertEqual('armv7l', info['arch_string_raw'])
        self.assertEqual(
            ['edsp', 'fastmult', 'half', 'java', 'swp', 'thumb', 'tls', 'vfp']
            ,
            info['flags']
        )
|
{
"content_hash": "e99d258e07bc7bef0b12e2367a12f096",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 86,
"avg_line_length": 31.66923076923077,
"alnum_prop": 0.7070682535827059,
"repo_name": "workhorsy/py-cpuinfo",
"id": "fb9a3a192ac870675428f51e8e9420bfbd154ca9",
"size": "4119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_linux_beagle_bone_arm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1860"
},
{
"name": "Python",
"bytes": "569052"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import unittest
from unittest import mock
from unittest.mock import call, patch
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.datastore import DatastoreHook
GCP_PROJECT_ID = "test"
def mock_init(self, gcp_conn_id, delegate_to=None, impersonation_chain=None):
    """No-op replacement for ``GoogleBaseHook.__init__``.

    Lets the tests construct a ``DatastoreHook`` without reading any real
    connection configuration or credentials.
    """
class TestDatastoreHook(unittest.TestCase):
    """Unit tests for :class:`DatastoreHook`.

    ``GoogleBaseHook.__init__`` is replaced by :func:`mock_init` so the hook
    can be built without real GCP credentials.  Every Datastore API call is
    verified against the mocked ``googleapiclient`` connection chain
    (``projects() -> <method>() -> execute()``).  The ``*_no_project_id``
    tests patch the ``project_id`` property to ``None`` and assert that an
    :class:`AirflowException` mentioning ``project_id`` is raised.
    """

    def setUp(self):
        # Build the hook with the no-op base __init__ (no credential lookup).
        with patch(
            'airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__', new=mock_init
        ):
            self.datastore_hook = DatastoreHook()

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook._authorize')
    @patch('airflow.providers.google.cloud.hooks.datastore.build')
    def test_get_conn(self, mock_build, mock_authorize):
        conn = self.datastore_hook.get_conn()

        mock_build.assert_called_once_with(
            'datastore', 'v1', http=mock_authorize.return_value, cache_discovery=False
        )
        assert conn == mock_build.return_value
        # get_conn must also cache the connection on the hook.
        assert conn == self.datastore_hook.connection

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_allocate_ids(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        partial_keys = []

        keys = self.datastore_hook.allocate_ids(partial_keys=partial_keys, project_id=GCP_PROJECT_ID)

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        allocate_ids = projects.return_value.allocateIds
        allocate_ids.assert_called_once_with(projectId=GCP_PROJECT_ID, body={'keys': partial_keys})
        execute = allocate_ids.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert keys == execute.return_value['keys']

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_allocate_ids_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.connection = mock_get_conn.return_value
        partial_keys = []

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.allocate_ids(partial_keys=partial_keys)

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_begin_transaction(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value

        transaction = self.datastore_hook.begin_transaction(
            project_id=GCP_PROJECT_ID,
            transaction_options={},
        )

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        begin_transaction = projects.return_value.beginTransaction
        begin_transaction.assert_called_once_with(projectId=GCP_PROJECT_ID, body={'transactionOptions': {}})
        execute = begin_transaction.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert transaction == execute.return_value['transaction']

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_begin_transaction_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.connection = mock_get_conn.return_value

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.begin_transaction()

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_commit(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        body = {'item': 'a'}

        resp = self.datastore_hook.commit(body=body, project_id=GCP_PROJECT_ID)

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        commit = projects.return_value.commit
        commit.assert_called_once_with(projectId=GCP_PROJECT_ID, body=body)
        execute = commit.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert resp == execute.return_value

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_commit_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.connection = mock_get_conn.return_value
        body = {'item': 'a'}

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.commit(body=body)

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_lookup(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        keys = []
        read_consistency = 'ENUM'
        transaction = 'transaction'

        resp = self.datastore_hook.lookup(
            keys=keys, read_consistency=read_consistency, transaction=transaction, project_id=GCP_PROJECT_ID
        )

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        lookup = projects.return_value.lookup
        lookup.assert_called_once_with(
            projectId=GCP_PROJECT_ID,
            body={'keys': keys, 'readConsistency': read_consistency, 'transaction': transaction},
        )
        execute = lookup.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert resp == execute.return_value

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_lookup_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.connection = mock_get_conn.return_value
        keys = []
        read_consistency = 'ENUM'
        transaction = 'transaction'

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.lookup(
                keys=keys,
                read_consistency=read_consistency,
                transaction=transaction,
            )

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_rollback(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        transaction = 'transaction'

        # rollback returns nothing; only the call chain is asserted.
        self.datastore_hook.rollback(transaction=transaction, project_id=GCP_PROJECT_ID)

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        rollback = projects.return_value.rollback
        rollback.assert_called_once_with(projectId=GCP_PROJECT_ID, body={'transaction': transaction})
        execute = rollback.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_rollback_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.connection = mock_get_conn.return_value
        transaction = 'transaction'

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.rollback(transaction=transaction)

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_run_query(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        body = {'item': 'a'}

        resp = self.datastore_hook.run_query(body=body, project_id=GCP_PROJECT_ID)

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        run_query = projects.return_value.runQuery
        run_query.assert_called_once_with(projectId=GCP_PROJECT_ID, body=body)
        execute = run_query.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        # run_query unwraps the 'batch' field of the response.
        assert resp == execute.return_value['batch']

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_run_query_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.connection = mock_get_conn.return_value
        body = {'item': 'a'}

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.run_query(body=body)

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_get_operation(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        name = 'name'

        resp = self.datastore_hook.get_operation(name=name)

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        operations = projects.return_value.operations
        operations.assert_called_once_with()
        get = operations.return_value.get
        get.assert_called_once_with(name=name)
        execute = get.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert resp == execute.return_value

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_delete_operation(self, mock_get_conn):
        self.datastore_hook.connection = mock_get_conn.return_value
        name = 'name'

        resp = self.datastore_hook.delete_operation(name=name)

        projects = self.datastore_hook.connection.projects
        projects.assert_called_once_with()
        operations = projects.return_value.operations
        operations.assert_called_once_with()
        delete = operations.return_value.delete
        delete.assert_called_once_with(name=name)
        execute = delete.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert resp == execute.return_value

    @patch('airflow.providers.google.cloud.hooks.datastore.time.sleep')
    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_operation',
        side_effect=[
            {'metadata': {'common': {'state': 'PROCESSING'}}},
            {'metadata': {'common': {'state': 'NOT PROCESSING'}}},
        ],
    )
    def test_poll_operation_until_done(self, mock_get_operation, mock_time_sleep):
        # First poll reports PROCESSING, second reports done; exactly one
        # sleep should occur between the two polls.
        name = 'name'
        polling_interval_in_seconds = 10

        result = self.datastore_hook.poll_operation_until_done(name, polling_interval_in_seconds)

        mock_get_operation.assert_has_calls([call(name), call(name)])
        mock_time_sleep.assert_called_once_with(polling_interval_in_seconds)
        assert result == {'metadata': {'common': {'state': 'NOT PROCESSING'}}}

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_export_to_storage_bucket(self, mock_get_conn):
        # Export/import use the admin connection, not the regular one.
        self.datastore_hook.admin_connection = mock_get_conn.return_value
        bucket = 'bucket'
        namespace = None
        entity_filter = {}
        labels = {}

        resp = self.datastore_hook.export_to_storage_bucket(
            bucket=bucket,
            namespace=namespace,
            entity_filter=entity_filter,
            labels=labels,
            project_id=GCP_PROJECT_ID,
        )

        projects = self.datastore_hook.admin_connection.projects
        projects.assert_called_once_with()
        export = projects.return_value.export
        export.assert_called_once_with(
            projectId=GCP_PROJECT_ID,
            body={
                'outputUrlPrefix': 'gs://' + '/'.join(filter(None, [bucket, namespace])),
                'entityFilter': entity_filter,
                'labels': labels,
            },
        )
        execute = export.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert resp == execute.return_value

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_export_to_storage_bucket_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.admin_connection = mock_get_conn.return_value
        bucket = 'bucket'
        namespace = None
        entity_filter = {}
        labels = {}

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.export_to_storage_bucket(
                bucket=bucket,
                namespace=namespace,
                entity_filter=entity_filter,
                labels=labels,
            )

        assert "project_id" in str(ctx.value)

    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_import_from_storage_bucket(self, mock_get_conn):
        self.datastore_hook.admin_connection = mock_get_conn.return_value
        bucket = 'bucket'
        file = 'file'
        namespace = None
        entity_filter = {}
        labels = {}

        resp = self.datastore_hook.import_from_storage_bucket(
            bucket=bucket,
            file=file,
            namespace=namespace,
            entity_filter=entity_filter,
            labels=labels,
            project_id=GCP_PROJECT_ID,
        )

        projects = self.datastore_hook.admin_connection.projects
        projects.assert_called_once_with()
        import_ = projects.return_value.import_
        import_.assert_called_once_with(
            projectId=GCP_PROJECT_ID,
            body={
                'inputUrl': 'gs://' + '/'.join(filter(None, [bucket, namespace, file])),
                'entityFilter': entity_filter,
                'labels': labels,
            },
        )
        execute = import_.return_value.execute
        execute.assert_called_once_with(num_retries=mock.ANY)
        assert resp == execute.return_value

    @patch(
        'airflow.providers.google.cloud.hooks.datastore.DatastoreHook.project_id',
        new_callable=mock.PropertyMock,
        return_value=None,
    )
    @patch('airflow.providers.google.cloud.hooks.datastore.DatastoreHook.get_conn')
    def test_import_from_storage_bucket_no_project_id(self, mock_get_conn, mock_project_id):
        self.datastore_hook.admin_connection = mock_get_conn.return_value
        bucket = 'bucket'
        file = 'file'
        namespace = None
        entity_filter = {}
        labels = {}

        with pytest.raises(AirflowException) as ctx:
            self.datastore_hook.import_from_storage_bucket(
                bucket=bucket,
                file=file,
                namespace=namespace,
                entity_filter=entity_filter,
                labels=labels,
            )

        assert "project_id" in str(ctx.value)
|
{
"content_hash": "9cd01debdf9df2bfc2abe5ff68eba984",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 108,
"avg_line_length": 40.56958762886598,
"alnum_prop": 0.6561209580077505,
"repo_name": "cfei18/incubator-airflow",
"id": "9a90fa6845f3419be1458d0537adde7d1d33842c",
"size": "16528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/hooks/test_datastore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "72003"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "173434"
},
{
"name": "JavaScript",
"bytes": "143068"
},
{
"name": "Jinja",
"bytes": "38808"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "22660683"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "312715"
},
{
"name": "TypeScript",
"bytes": "472379"
}
],
"symlink_target": ""
}
|
"""Unit tests for converting TensorFlow debugging ops to Relay."""
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
import numpy as np
from tvm import relay
from tvm.relay.frontend.tensorflow import from_tensorflow
def run_relay(graph):
    """Convert the given TF graph to Relay and run it on the debug executor.

    Returns the evaluated output of the Relay module, with the converted
    params passed in as keyword arguments.
    """
    mod, params = from_tensorflow(graph.as_graph_def(add_shapes=True))
    return relay.create_executor("debug", mod=mod).evaluate()(**params)
def test_no_op():
    """Check how a TF no-op node is translated to Relay."""
    g = tf.Graph()
    with g.as_default():
        no_op = tf.no_op()
        with tf.Session() as sess:
            # In TF, the type of a no-op is None.
            assert sess.run(no_op) is None
    # In TVM, no-op is currently translated to 0, though it should
    # probably be none or an empty tuple.
    np.testing.assert_allclose(0, run_relay(g).numpy())
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_no_op()
|
{
"content_hash": "508383e61ea7178de1ffd8feb6251c15",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 28.03125,
"alnum_prop": 0.6443701226309922,
"repo_name": "dmlc/tvm",
"id": "4f8583f71cff4bfedd6ba6b2b80daa8df799eaed",
"size": "1682",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/python/frontend/tensorflow/test_no_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6112"
},
{
"name": "C",
"bytes": "92947"
},
{
"name": "C++",
"bytes": "5765945"
},
{
"name": "CMake",
"bytes": "74045"
},
{
"name": "Go",
"bytes": "112384"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "171101"
},
{
"name": "JavaScript",
"bytes": "49803"
},
{
"name": "Makefile",
"bytes": "55807"
},
{
"name": "Objective-C",
"bytes": "15241"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "7183810"
},
{
"name": "Rust",
"bytes": "181961"
},
{
"name": "Scala",
"bytes": "202148"
},
{
"name": "Shell",
"bytes": "97271"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import, print_function, unicode_literals
# Monkey-patch the runtime (gevent-style patching via kasaya's mpatcher)
# before any other kasaya modules are imported, then drop the name so it
# cannot be called twice.
from kasaya.core.lib.mpatcher import damonkey
damonkey()
del damonkey
# imports
from kasaya.conf import settings
from kasaya.core.lib import LOG, system
from kasaya.core.events import add_event_handler
from kasaya.core.worker.worker_base import WorkerBase
from .syncworker import SyncWorker
from .db.netstatedb import NetworkStateDB
from .broadcast import UDPBroadcast
from .dbsync import Synchronizer
import gevent
class KasayaDaemon(WorkerBase):
    """Main kasaya network daemon.

    Owns the network-state database, the UDP broadcaster, the DB
    synchronizer and the sync worker, and reacts to host join/leave
    events on the network.

    NOTE(review): original indentation was lost in this copy; block
    nesting below is a best-effort reconstruction — verify against the
    upstream repository.
    """

    def __init__(self):
        super(KasayaDaemon, self).__init__(is_host=True)
        # event handlers
        add_event_handler("host-join", self.on_remote_kasayad_start)
        add_event_handler("host-leave", self.on_remote_kasayad_stop)
        self.hostname = system.get_hostname()
        LOG.info("Starting local kasaya daemon with ID: [%s]" % self.ID)
        self.DB = NetworkStateDB()  # database
        self.BC = UDPBroadcast(self.ID)  # broadcaster
        self.SYNC = Synchronizer(self.DB, self.ID)  # synchronisation
        self.WORKER = SyncWorker(server=self, database=self.DB)
        self.BC.set_own_ip(self.WORKER.own_ip)

    def close(self):
        """
        Notifies network about shutting down, closes database
        and all used sockets.
        """
        LOG.info("Stopping local kasaya daemon")
        self.on_local_kasayad_stop(self.ID, local=True)
        self.WORKER.close()
        self.DB.close()
        self.BC.close()

    # global network changes

    def notify_kasayad_start(self, ID, hostname, ip, services, local=False):
        """
        Send information about startup of host to all other hosts in network.
        """
        isnew = self.DB.host_register(ID, hostname, ip, services)
        if local:
            # it is ourself starting, send broadcast to other kasaya daemons
            self.BC.send_host_start(ID, hostname, ip, services)
        if isnew:
            # new kasayad
            # send request to local workers to send immediately ping broadcast
            # to inform new kasaya daemon about self
            #self.WORKER.request_workers_broadcast()
            # it's remote host starting, information is from broadcast
            LOG.info("Remote kasaya daemon [%s] started, address [%s], ID [%s]" % (hostname, ip, ID) )
            # if registered new kasayad AND it's not local host, then
            # it must be new host in network, which don't know other hosts.
            # We send again registering information about self syncd instance.
            gevent.sleep(0.5)
            # NOTE(review): notify_kasayad_self_start is not defined in this
            # class as shown here — confirm it exists on WorkerBase.
            self.notify_kasayad_self_start()

    def on_remote_kasayad_start(self, host_id, addr):
        """
        Remote kasaya host started
        """
        # register remote host in database
        # NOTE(review): host_register is called elsewhere with
        # (ID, hostname, ip, services) — confirm this 2-arg call is valid.
        self.DB.host_register(host_id, addr)
        LOG.info("Remote kasaya daemon started, address: %s [id:%s]" % (addr, host_id) )

    def on_local_kasayad_start(self):
        """
        send information about self start to all hosts
        """
        # register self in database
        self.DB.host_register(self.ID, self.WORKER.own_addr)
        # register own services
        self.DB.service_update_list(self.ID, self.WORKER.local_services_list() )
        # broadcast own existence
        self.BC.broadcast_host_start(self.WORKER.own_addr)

    def on_remote_kasayad_stop(self, host_id):
        """
        received information about kasaya host leaving network
        """
        # NOTE(review): unregisters self.ID instead of host_id — this looks
        # like a bug (the log message below uses host_id).
        self.DB.host_unregister(self.ID)
        LOG.info("Remote kasaya daemon stopped, [id:%s]" % host_id)

    def on_local_kasayad_stop(self, ID, local=False):
        """
        Send information about shutdown to all hosts in network
        """
        self.DB.host_unregister(self.ID)
        #self.BC.broadcast_host_stop()

    def notify_kasayad_refresh(self, ID, services=None, local=False):
        """
        Received information on host changes
        """
        if services is not None:
            slst = ", ".join(services)
            if local:
                # local changes require broadcast new service status
                self.BC.send_host_refresh(self.ID, services=services)
                LOG.info("Local service list changed [%s]" % slst)
            else:
                # remote host services require database update
                # (local updates are entered to database
                # before notify_kasayad_refresh is called)
                # NOTE(review): updates self.ID, not the remote ID parameter —
                # verify; a remote refresh should probably update ID.
                self.DB.service_update_list(self.ID, services)
                LOG.info("Remote host service list changed [%s]" % slst)

    # main loop

    def run(self):
        # Announce ourselves, then run all worker/broadcast loops as
        # greenlets until they finish; always clean up on exit.
        self.on_local_kasayad_start()
        try:
            loops = self.WORKER.get_loops()
            loops.append(self.BC.loop)
            loops = [ gevent.spawn(loop) for loop in loops ]
            gevent.joinall(loops)
        finally:
            self.close()
{
"content_hash": "7b774f8b6f47a8048d50bed029f49814",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 102,
"avg_line_length": 34.640845070422536,
"alnum_prop": 0.6184183777190486,
"repo_name": "AYAtechnologies/Kasaya-esb",
"id": "4074a5488f59ad79ab3dd280edcd2635aeb04fdf",
"size": "4956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kasaya/workers/kasayad/kasayad.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "254516"
}
],
"symlink_target": ""
}
|
'''
Scatter
=======
.. image:: images/scatter.gif
:align: right
:class:`Scatter` is used to build interactive widgets that can be translated,
rotated and scaled with two or more fingers on a multitouch system.
Scatter has its own matrix transformation: the modelview matrix is changed
before the children are drawn and the previous matrix is restored when the
drawing is finished. That makes it possible to perform rotation, scaling and
translation over the entire children tree without changing any widget
properties. That specific behavior makes the scatter unique, but there are some
advantages / constraints that you should consider:
#. The children are positioned relative to the scatter similarly to a
:mod:`~kivy.uix.relativelayout.RelativeLayout`. So when dragging the
scatter, the position of the children don't change, only the position of
the scatter does.
#. The scatter size has no impact on the size of its children.
#. If you want to resize the scatter, use scale, not size (read #2). Scale
transforms both the scatter and its children, but does not change size.
#. The scatter is not a layout. You must manage the size of the children
yourself.
For touch events, the scatter converts from the parent matrix to the scatter
matrix automatically in on_touch_down/move/up events. If you are doing things
manually, you will need to use :meth:`~kivy.uix.widget.Widget.to_parent` and
:meth:`~kivy.uix.widget.Widget.to_local`.
Usage
-----
By default, the Scatter does not have a graphical representation: it is a
container only. The idea is to combine the Scatter with another widget, for
example an :class:`~kivy.uix.image.Image`::
scatter = Scatter()
image = Image(source='sun.jpg')
scatter.add_widget(image)
Control Interactions
--------------------
By default, all interactions are enabled. You can selectively disable
them using the do_rotation, do_translation and do_scale properties.
Disable rotation::
scatter = Scatter(do_rotation=False)
Allow only translation::
scatter = Scatter(do_rotation=False, do_scale=False)
Allow only translation on x axis::
scatter = Scatter(do_rotation=False, do_scale=False,
do_translation_y=False)
Automatic Bring to Front
------------------------
If the :attr:`Scatter.auto_bring_to_front` property is True, the scatter
widget will be removed and re-added to the parent when it is touched
(brought to front, above all other widgets in the parent). This is useful
when you are manipulating several scatter widgets and don't want the active
one to be partially hidden.
Scale Limitation
----------------
We are using a 32-bit matrix in double representation. That means we have
a limit for scaling. You cannot do infinite scaling down/up with our
implementation. Generally, you don't hit the minimum scale (because you don't
see it on the screen), but the maximum scale is 9.99506983235e+19 (2^66).
You can also limit the minimum and maximum scale allowed::
scatter = Scatter(scale_min=.5, scale_max=3.)
Behavior
--------
.. versionchanged:: 1.1.0
If no control interactions are enabled, then the touch handler will never
return True.
'''
__all__ = ('Scatter', 'ScatterPlane')
from math import radians
from kivy.properties import BooleanProperty, AliasProperty, \
NumericProperty, ObjectProperty, BoundedNumericProperty
from kivy.vector import Vector
from kivy.uix.widget import Widget
from kivy.graphics.transformation import Matrix
class Scatter(Widget):
'''Scatter class. See module documentation for more information.
:Events:
`on_transform_with_touch`:
Fired when the scatter has been transformed by user touch
or multitouch, such as panning or zooming.
`on_bring_to_front`:
Fired when the scatter is brought to the front.
.. versionchanged:: 1.9.0
Event `on_bring_to_front` added.
.. versionchanged:: 1.8.0
Event `on_transform_with_touch` added.
'''
__events__ = ('on_transform_with_touch', 'on_bring_to_front')
auto_bring_to_front = BooleanProperty(True)
'''If True, the widget will be automatically pushed on the top of parent
widget list for drawing.
:attr:`auto_bring_to_front` is a :class:`~kivy.properties.BooleanProperty`
and defaults to True.
'''
do_translation_x = BooleanProperty(True)
'''Allow translation on the X axis.
:attr:`do_translation_x` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
do_translation_y = BooleanProperty(True)
'''Allow translation on Y axis.
:attr:`do_translation_y` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
def _get_do_translation(self):
return (self.do_translation_x, self.do_translation_y)
def _set_do_translation(self, value):
if type(value) in (list, tuple):
self.do_translation_x, self.do_translation_y = value
else:
self.do_translation_x = self.do_translation_y = bool(value)
do_translation = AliasProperty(
_get_do_translation, _set_do_translation,
bind=('do_translation_x', 'do_translation_y'))
'''Allow translation on the X or Y axis.
:attr:`do_translation` is an :class:`~kivy.properties.AliasProperty` of
(:attr:`do_translation_x` + :attr:`do_translation_y`)
'''
translation_touches = BoundedNumericProperty(1, min=1)
'''Determine whether translation was triggered by a single or multiple
touches. This only has effect when :attr:`do_translation` = True.
:attr:`translation_touches` is a :class:`~kivy.properties.NumericProperty`
and defaults to 1.
.. versionadded:: 1.7.0
'''
do_rotation = BooleanProperty(True)
'''Allow rotation.
:attr:`do_rotation` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
do_scale = BooleanProperty(True)
'''Allow scaling.
:attr:`do_scale` is a :class:`~kivy.properties.BooleanProperty` and
defaults to True.
'''
do_collide_after_children = BooleanProperty(False)
'''If True, the collision detection for limiting the touch inside the
scatter will be done after dispatching the touch to the children.
You can put children outside the bounding box of the scatter and still be
able to touch them.
.. versionadded:: 1.3.0
'''
scale_min = NumericProperty(0.01)
'''Minimum scaling factor allowed.
:attr:`scale_min` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.01.
'''
scale_max = NumericProperty(1e20)
'''Maximum scaling factor allowed.
:attr:`scale_max` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1e20.
'''
transform = ObjectProperty(Matrix())
'''Transformation matrix.
:attr:`transform` is an :class:`~kivy.properties.ObjectProperty` and
defaults to the identity matrix.
.. note::
This matrix reflects the current state of the transformation matrix
but setting it directly will erase previously applied
transformations. To apply a transformation considering context,
please use the :attr:`~Scatter.apply_transform` method.
'''
transform_inv = ObjectProperty(Matrix())
'''Inverse of the transformation matrix.
:attr:`transform_inv` is an :class:`~kivy.properties.ObjectProperty` and
defaults to the identity matrix.
'''
def _get_bbox(self):
xmin, ymin = xmax, ymax = self.to_parent(0, 0)
for point in [(self.width, 0), (0, self.height), self.size]:
x, y = self.to_parent(*point)
if x < xmin:
xmin = x
if y < ymin:
ymin = y
if x > xmax:
xmax = x
if y > ymax:
ymax = y
return (xmin, ymin), (xmax - xmin, ymax - ymin)
bbox = AliasProperty(_get_bbox, None, bind=(
'transform', 'width', 'height'))
'''Bounding box of the widget in parent space::
((x, y), (w, h))
# x, y = lower left corner
:attr:`bbox` is an :class:`~kivy.properties.AliasProperty`.
'''
def _get_rotation(self):
    # Measure the angle, in parent space, between the unrotated +Y axis
    # (v1) and the image of the local +Y axis under the current transform
    # (v2), normalised into [0, 360) counterclockwise.
    v1 = Vector(0, 10)
    tp = self.to_parent
    v2 = Vector(*tp(*self.pos)) - tp(self.x, self.y + 10)
    return -1.0 * (v1.angle(v2) + 180) % 360
def _set_rotation(self, rotation):
    # Rotate by the delta between current and requested angle, anchored on
    # the widget center (converted to local coordinates).
    angle_change = self.rotation - rotation
    r = Matrix().rotate(-radians(angle_change), 0, 0, 1)
    self.apply_transform(r, post_multiply=True,
                         anchor=self.to_local(*self.center))
rotation = AliasProperty(_get_rotation, _set_rotation, bind=(
    'x', 'y', 'transform'))
'''Rotation value of the scatter in degrees moving in a counterclockwise
direction.
:attr:`rotation` is an :class:`~kivy.properties.AliasProperty` and defaults
to 0.0.
'''
def _get_scale(self):
    '''Current scale factor: parent-space distance between the images of
    the local points (0, 0) and (1, 0).
    '''
    p1 = Vector(*self.to_parent(0, 0))
    p2 = Vector(*self.to_parent(1, 0))
    scale = p1.distance(p2)
    # XXX float calculation are not accurate, and then, scale can be
    # throwed again even with only the position change. So to
    # prevent anything wrong with scale, just avoid to dispatch it
    # if the scale "visually" didn't change. #947
    # Remove this ugly hack when we'll be Python 3 only.
    if hasattr(self, '_scale_p'):
        if str(scale) == str(self._scale_p):
            return self._scale_p
    self._scale_p = scale
    return scale
def _set_scale(self, scale):
    # Apply the relative scale change, anchored on the widget center.
    rescale = scale * 1.0 / self.scale
    self.apply_transform(Matrix().scale(rescale, rescale, rescale),
                         post_multiply=True,
                         anchor=self.to_local(*self.center))
scale = AliasProperty(_get_scale, _set_scale, bind=('x', 'y', 'transform'))
'''Scale value of the scatter.
:attr:`scale` is an :class:`~kivy.properties.AliasProperty` and defaults to
1.0.
'''
def _get_center(self):
return (self.bbox[0][0] + self.bbox[1][0] / 2.0,
self.bbox[0][1] + self.bbox[1][1] / 2.0)
def _set_center(self, center):
if center == self.center:
return False
t = Vector(*center) - self.center
trans = Matrix().translate(t.x, t.y, 0)
self.apply_transform(trans)
center = AliasProperty(_get_center, _set_center, bind=('bbox', ))
def _get_pos(self):
return self.bbox[0]
def _set_pos(self, pos):
_pos = self.bbox[0]
if pos == _pos:
return
t = Vector(*pos) - _pos
trans = Matrix().translate(t.x, t.y, 0)
self.apply_transform(trans)
pos = AliasProperty(_get_pos, _set_pos, bind=('bbox', ))
def _get_x(self):
    '''X coordinate of the bbox lower-left corner, in parent space.'''
    return self.bbox[0][0]
def _set_x(self, x):
    '''Move the widget horizontally via :attr:`pos`.
    Returns True when a change was applied, so the alias dispatches.
    '''
    moved = x != self.bbox[0][0]
    if moved:
        self.pos = (x, self.y)
    return moved
x = AliasProperty(_get_x, _set_x, bind=('bbox', ))
def _get_y(self):
    '''Y coordinate of the bbox lower-left corner, in parent space.'''
    return self.bbox[0][1]
def _set_y(self, y):
    '''Move the widget vertically via :attr:`pos`.
    Returns True when a change was applied, so the alias dispatches.
    '''
    moved = y != self.bbox[0][1]
    if moved:
        self.pos = (self.x, y)
    return moved
y = AliasProperty(_get_y, _set_y, bind=('bbox', ))
def get_right(self):
    '''Right edge of the bbox (x + bbox width), in parent space.'''
    bbox_size = self.bbox[1]
    return self.x + bbox_size[0]
def set_right(self, value):
    '''Position the widget so its bbox right edge sits at *value*.'''
    bbox_size = self.bbox[1]
    self.x = value - bbox_size[0]
right = AliasProperty(get_right, set_right, bind=('x', 'width'))
def get_top(self):
    '''Top edge of the bbox (y + bbox height), in parent space.'''
    bbox_size = self.bbox[1]
    return self.y + bbox_size[1]
def set_top(self, value):
    '''Position the widget so its bbox top edge sits at *value*.'''
    bbox_size = self.bbox[1]
    self.y = value - bbox_size[1]
top = AliasProperty(get_top, set_top, bind=('y', 'height'))
def get_center_x(self):
    '''Horizontal center of the bbox, in parent space.'''
    half_width = self.bbox[1][0] / 2.
    return self.x + half_width
def set_center_x(self, value):
    '''Center the widget horizontally on *value*.'''
    half_width = self.bbox[1][0] / 2.
    self.x = value - half_width
center_x = AliasProperty(get_center_x, set_center_x, bind=('x', 'width'))
def get_center_y(self):
    '''Vertical center of the bbox, in parent space.'''
    half_height = self.bbox[1][1] / 2.
    return self.y + half_height
def set_center_y(self, value):
    '''Center the widget vertically on *value*.'''
    half_height = self.bbox[1][1] / 2.
    self.y = value - half_height
center_y = AliasProperty(get_center_y, set_center_y, bind=('y', 'height'))
def __init__(self, **kwargs):
    # Initialize touch bookkeeping before super().__init__ so the
    # attributes exist by the time any event handling can run.
    self._touches = []
    self._last_touch_pos = {}
    super(Scatter, self).__init__(**kwargs)
def on_transform(self, instance, value):
    # Keep the cached inverse matrix in sync with the forward matrix.
    self.transform_inv = value.inverse()
def collide_point(self, x, y):
    '''Return whether the parent-space point (x, y) falls inside the
    widget's local rectangle (0..width, 0..height) once untransformed.
    '''
    local_x, local_y = self.to_local(x, y)
    within_width = 0 <= local_x <= self.width
    within_height = 0 <= local_y <= self.height
    return within_width and within_height
def to_parent(self, x, y, **k):
    '''Map local widget coordinates to parent space via :attr:`transform`.'''
    p = self.transform.transform_point(x, y, 0)
    return (p[0], p[1])
def to_local(self, x, y, **k):
    '''Map parent-space coordinates to local space via :attr:`transform_inv`.'''
    p = self.transform_inv.transform_point(x, y, 0)
    return (p[0], p[1])
def _apply_transform(self, m, pos=None):
    # Compose our own matrix into the chain before delegating upwards.
    # NOTE(review): the ``pos`` argument is ignored and (0, 0) forwarded;
    # translation is presumably carried by self.transform -- confirm.
    m = self.transform.multiply(m)
    return super(Scatter, self)._apply_transform(m, (0, 0))
def apply_transform(self, trans, post_multiply=False, anchor=(0, 0)):
    '''
    Transforms the scatter by applying the "trans" transformation
    matrix (on top of its current transformation state). The resultant
    matrix can be found in the :attr:`~Scatter.transform` property.
    :Parameters:
        `trans`: :class:`~kivy.graphics.transformation.Matrix`.
            Transformation matrix to be applied to the scatter widget.
        `anchor`: tuple, defaults to (0, 0).
            The point to use as the origin of the transformation
            (uses local widget space).
        `post_multiply`: bool, defaults to False.
            If True, the transform matrix is post multiplied
            (as if applied before the current transform).
    Usage example::
        from kivy.graphics.transformation import Matrix
        mat = Matrix().scale(3, 3, 3)
        scatter_instance.apply_transform(mat)
    '''
    # Conjugate by the anchor translation so "trans" is applied about
    # 'anchor' rather than the origin: T(anchor) * trans * T(-anchor).
    t = Matrix().translate(anchor[0], anchor[1], 0)
    t = t.multiply(trans)
    t = t.multiply(Matrix().translate(-anchor[0], -anchor[1], 0))
    if post_multiply:
        self.transform = self.transform.multiply(t)
    else:
        self.transform = t.multiply(self.transform)
def transform_with_touch(self, touch):
    '''Update the transform from one touch movement.
    Returns True when the transform changed (caller dispatches the
    on_transform_with_touch event in that case).
    '''
    # just do a simple one finger drag
    changed = False
    if len(self._touches) == self.translation_touches:
        # _last_touch_pos has last pos in correct parent space,
        # just like incoming touch
        dx = (touch.x - self._last_touch_pos[touch][0]) \
            * self.do_translation_x
        dy = (touch.y - self._last_touch_pos[touch][1]) \
            * self.do_translation_y
        # Average the displacement over the number of touches required
        # for translation so the content tracks the finger centroid.
        dx = dx / self.translation_touches
        dy = dy / self.translation_touches
        self.apply_transform(Matrix().translate(dx, dy, 0))
        changed = True
    if len(self._touches) == 1:
        return changed
    # we have more than one touch... list of last known pos
    points = [Vector(self._last_touch_pos[t]) for t in self._touches
              if t is not touch]
    # add current touch last
    points.append(Vector(touch.pos))
    # we only want to transform if the touch is part of the two touches
    # farthest apart! So first we find anchor, the point to transform
    # around as another touch farthest away from current touch's pos
    anchor = max(points[:-1], key=lambda p: p.distance(touch.pos))
    # now we find the touch farthest away from anchor, if its not the
    # same as touch. Touch is not one of the two touches used to transform
    farthest = max(points, key=anchor.distance)
    if farthest is not points[-1]:
        return changed
    # ok, so we have touch, and anchor, so we can actually compute the
    # transformation
    old_line = Vector(*touch.ppos) - anchor
    new_line = Vector(*touch.pos) - anchor
    if not old_line.length():  # div by zero
        return changed
    angle = radians(new_line.angle(old_line)) * self.do_rotation
    self.apply_transform(Matrix().rotate(angle, 0, 0, 1), anchor=anchor)
    if self.do_scale:
        scale = new_line.length() / old_line.length()
        # Clamp so the resulting absolute scale stays in
        # [scale_min, scale_max].
        new_scale = scale * self.scale
        if new_scale < self.scale_min:
            scale = self.scale_min / self.scale
        elif new_scale > self.scale_max:
            scale = self.scale_max / self.scale
        self.apply_transform(Matrix().scale(scale, scale, scale),
                             anchor=anchor)
        changed = True
    # NOTE(review): a rotation applied above does not set ``changed``
    # unless the do_scale branch also runs -- confirm this is intended.
    return changed
def _bring_to_front(self, touch):
# auto bring to front
if self.auto_bring_to_front and self.parent:
parent = self.parent
if parent.children[0] is self:
return
parent.remove_widget(self)
parent.add_widget(self)
self.dispatch('on_bring_to_front', touch)
def on_touch_down(self, touch):
    '''Accept the touch for transforming, or let children consume it.
    Returns True when the touch is handled (by a child or by us).
    '''
    x, y = touch.x, touch.y
    # if the touch isn't on the widget we do nothing
    if not self.do_collide_after_children:
        if not self.collide_point(x, y):
            return False
    # let the child widgets handle the event if they want
    touch.push()
    touch.apply_transform_2d(self.to_local)
    if super(Scatter, self).on_touch_down(touch):
        # ensure children don't have to do it themselves
        if 'multitouch_sim' in touch.profile:
            touch.multitouch_sim = True
        touch.pop()
        self._bring_to_front(touch)
        return True
    touch.pop()
    # if our child didn't do anything, and if we don't have any active
    # interaction control, then don't accept the touch.
    if not self.do_translation_x and \
            not self.do_translation_y and \
            not self.do_rotation and \
            not self.do_scale:
        return False
    # Optionally run collision only after children had their chance, so
    # children outside our bounding box remain touchable.
    if self.do_collide_after_children:
        if not self.collide_point(x, y):
            return False
    if 'multitouch_sim' in touch.profile:
        touch.multitouch_sim = True
    # grab the touch so we are sure to receive all its later move events
    self._bring_to_front(touch)
    touch.grab(self)
    self._touches.append(touch)
    self._last_touch_pos[touch] = touch.pos
    return True
def on_touch_move(self, touch):
    '''Route moves to children, or use grabbed touches to transform.'''
    x, y = touch.x, touch.y
    # let the child widgets handle the event if they want
    if self.collide_point(x, y) and not touch.grab_current == self:
        touch.push()
        touch.apply_transform_2d(self.to_local)
        if super(Scatter, self).on_touch_move(touch):
            touch.pop()
            return True
        touch.pop()
    # rotate/scale/translate
    if touch in self._touches and touch.grab_current == self:
        if self.transform_with_touch(touch):
            self.dispatch('on_transform_with_touch', touch)
        # Remember the latest parent-space position for the next delta.
        self._last_touch_pos[touch] = touch.pos
    # stop propagating if its within our bounds
    if self.collide_point(x, y):
        return True
def on_transform_with_touch(self, touch):
    '''
    Called when a touch event has transformed the scatter widget.
    By default this does nothing, but can be overridden by derived
    classes that need to react to transformations caused by user
    input.
    :Parameters:
        `touch`:
            The touch object which triggered the transformation.
    .. versionadded:: 1.8.0
    '''
    pass
def on_bring_to_front(self, touch):
    '''
    Called when a touch event causes the scatter to be brought to the
    front of the parent (only if :attr:`auto_bring_to_front` is True)
    :Parameters:
        `touch`:
            The touch object which brought the scatter to front.
    .. versionadded:: 1.9.0
    '''
    pass
def on_touch_up(self, touch):
    '''Release grabbed touches and drop them from the tracking state.'''
    x, y = touch.x, touch.y
    # if the touch isn't on the widget we do nothing, just try children
    if not touch.grab_current == self:
        touch.push()
        touch.apply_transform_2d(self.to_local)
        if super(Scatter, self).on_touch_up(touch):
            touch.pop()
            return True
        touch.pop()
    # remove it from our saved touches
    if touch in self._touches and touch.grab_state:
        touch.ungrab(self)
        del self._last_touch_pos[touch]
        self._touches.remove(touch)
    # stop propagating if its within our bounds
    if self.collide_point(x, y):
        return True
class ScatterPlane(Scatter):
    '''This is essentially an unbounded Scatter widget. It's a convenience
    class to make it easier to handle infinite planes.
    '''
    def __init__(self, **kwargs):
        # Default auto_bring_to_front to False, set as a plain attribute
        # *before* super().__init__ so an explicit kwarg still wins.
        if 'auto_bring_to_front' not in kwargs:
            self.auto_bring_to_front = False
        super(ScatterPlane, self).__init__(**kwargs)
    def collide_point(self, x, y):
        # The plane is infinite: every point collides.
        return True
|
{
"content_hash": "714adc59ff55b7103651fe744634c424",
"timestamp": "",
"source": "github",
"line_count": 624,
"max_line_length": 79,
"avg_line_length": 34.12019230769231,
"alnum_prop": 0.6088018411535391,
"repo_name": "jkankiewicz/kivy",
"id": "cd92be05fa151b61e090384e09c1b1fff2149687",
"size": "21291",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kivy/uix/scatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321734"
},
{
"name": "C++",
"bytes": "3551"
},
{
"name": "Emacs Lisp",
"bytes": "9671"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Makefile",
"bytes": "4084"
},
{
"name": "Objective-C",
"bytes": "14779"
},
{
"name": "Python",
"bytes": "3894763"
},
{
"name": "Shell",
"bytes": "356"
},
{
"name": "Vim script",
"bytes": "1123"
}
],
"symlink_target": ""
}
|
import json
from kombu import Connection
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.constants.action import LIVEACTION_FAILED_STATES
from st2common.constants.action import LIVEACTION_COMPLETED_STATES
from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES
from st2common.models.api.trace import TraceContext
from st2common.models.db.liveaction import LiveActionDB
from st2common.persistence.action import Action
from st2common.persistence.policy import Policy
from st2common import policies
from st2common.models.system.common import ResourceReference
from st2common.persistence.execution import ActionExecution
from st2common.services import trace as trace_service
from st2common.transport import consumers, liveaction, publishers
from st2common.transport import utils as transport_utils
from st2common.transport.reactor import TriggerDispatcher
from st2common.util import isotime
from st2common.util import jinja as jinja_utils
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.action import ACTION_PARAMETERS_KV_PREFIX
from st2common.constants.action import ACTION_RESULTS_KV_PREFIX
from st2common.constants.system import SYSTEM_KV_PREFIX
from st2common.services.keyvalues import KeyValueLookup
__all__ = [
    'Notifier',
    'get_notifier'
]
LOG = logging.getLogger(__name__)
# Queue bound to liveaction UPDATE events; this is what Notifier consumes.
ACTIONUPDATE_WORK_Q = liveaction.get_queue('st2.notifiers.work',
                                           routing_key=publishers.UPDATE_RK)
# Snapshot of the config flag taken at import time.
ACTION_SENSOR_ENABLED = cfg.CONF.action_sensor.enable
# XXX: Fix this nasty positional dependency.
ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][0]
NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES['action'][1]
class Notifier(consumers.MessageHandler):
    """Message handler which reacts to completed liveactions.

    For every liveaction that reached a completed state this handler:
      1. applies any post-run policies registered for the action,
      2. dispatches the user-configured notify triggers
         (on_complete / on_success / on_failure),
      3. dispatches the generic action trigger consumed by the rules engine.
    """
    message_type = LiveActionDB

    def __init__(self, connection, queues, trigger_dispatcher=None):
        super(Notifier, self).__init__(connection, queues)
        if not trigger_dispatcher:
            trigger_dispatcher = TriggerDispatcher(LOG)
        self._trigger_dispatcher = trigger_dispatcher
        # Resolve the fully qualified trigger references once up front.
        self._notify_trigger = ResourceReference.to_string_reference(
            pack=NOTIFY_TRIGGER_TYPE['pack'],
            name=NOTIFY_TRIGGER_TYPE['name'])
        self._action_trigger = ResourceReference.to_string_reference(
            pack=ACTION_TRIGGER_TYPE['pack'],
            name=ACTION_TRIGGER_TYPE['name'])

    def process(self, liveaction):
        """Entry point invoked for each liveaction update message."""
        live_action_id = str(liveaction.id)
        extra = {'live_action_db': liveaction}
        LOG.debug('Processing liveaction %s', live_action_id, extra=extra)

        if liveaction.status not in LIVEACTION_COMPLETED_STATES:
            # Nothing to notify about until the execution has finished.
            LOG.debug('Skipping processing of liveaction %s since it\'s not in a completed state',
                      live_action_id, extra=extra)
            return

        execution = self._get_execution_for_liveaction(liveaction)
        if not execution:
            LOG.exception('Execution object corresponding to LiveAction %s not found.',
                          live_action_id, extra=extra)
            return None

        self._apply_post_run_policies(liveaction=liveaction)

        if liveaction.notify is not None:
            self._post_notify_triggers(liveaction=liveaction, execution=execution)

        self._post_generic_trigger(liveaction=liveaction, execution=execution)

    def _get_execution_for_liveaction(self, liveaction):
        """Return the ActionExecution linked to this liveaction, or None."""
        execution = ActionExecution.get(liveaction__id=str(liveaction.id))
        if not execution:
            return None
        return execution

    def _post_notify_triggers(self, liveaction=None, execution=None):
        """Dispatch notify triggers for every subsection matching the outcome."""
        notify = getattr(liveaction, 'notify', None)
        if not notify:
            return

        if notify.on_complete:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_complete,
                default_message_suffix='completed.')
        if liveaction.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_success,
                default_message_suffix='succeeded.')
        if liveaction.status in LIVEACTION_FAILED_STATES and notify.on_failure:
            self._post_notify_subsection_triggers(
                liveaction=liveaction, execution=execution,
                notify_subsection=notify.on_failure,
                default_message_suffix='failed.')

    def _post_notify_subsection_triggers(self, liveaction=None, execution=None,
                                         notify_subsection=None,
                                         default_message_suffix=None):
        """Dispatch one notify trigger per configured route.

        :raises Exception: listing the routes whose dispatch failed.
        """
        # Fix: pass a default so subsections without a 'routes' attribute fall
        # back to the deprecated 'channels' attribute instead of raising
        # AttributeError.
        routes = (getattr(notify_subsection, 'routes', None) or
                  getattr(notify_subsection, 'channels', None))

        if not routes:
            return

        execution_id = str(execution.id)
        payload = {}
        message = notify_subsection.message or (
            'Action ' + liveaction.action + ' ' + default_message_suffix)
        data = notify_subsection.data or {}
        jinja_context = self._build_jinja_context(liveaction=liveaction, execution=execution)

        try:
            message = self._transform_message(message=message,
                                              context=jinja_context)
        except Exception:
            # Fix: narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt / SystemExit.
            LOG.exception('Failed (Jinja) transforming `message`.')

        try:
            data = self._transform_data(data=data, context=jinja_context)
        except Exception:
            LOG.exception('Failed (Jinja) transforming `data`.')

        # At this point convert result to a string. This restricts the rulesengines
        # ability to introspect the result. On the other handle atleast a json usable
        # result is sent as part of the notification. If jinja is required to convert
        # to a string representation it uses str(...) which make it impossible to
        # parse the result as json any longer.
        # TODO: Use to_serializable_dict
        data['result'] = json.dumps(liveaction.result)

        payload['message'] = message
        payload['data'] = data
        payload['execution_id'] = execution_id
        payload['status'] = liveaction.status
        payload['start_timestamp'] = isotime.format(liveaction.start_timestamp)
        payload['end_timestamp'] = isotime.format(liveaction.end_timestamp)
        payload['action_ref'] = liveaction.action
        payload['runner_ref'] = self._get_runner_ref(liveaction.action)

        trace_context = self._get_trace_context(execution_id=execution_id)

        failed_routes = []
        for route in routes:
            try:
                payload['route'] = route
                # Deprecated. Only for backward compatibility reasons.
                payload['channel'] = route
                LOG.debug('POSTing %s for %s. Payload - %s.', NOTIFY_TRIGGER_TYPE['name'],
                          liveaction.id, payload)
                self._trigger_dispatcher.dispatch(self._notify_trigger, payload=payload,
                                                  trace_context=trace_context)
            except Exception:
                # Fix: log the underlying dispatch error instead of silently
                # collecting the route (was a bare 'except:').
                LOG.exception('Failed to dispatch notification for route %s.', route)
                failed_routes.append(route)

        if len(failed_routes) > 0:
            raise Exception('Failed notifications to routes: %s' % ', '.join(failed_routes))

    def _build_jinja_context(self, liveaction, execution):
        """Assemble the render context exposed to notify message/data templates."""
        context = {SYSTEM_KV_PREFIX: KeyValueLookup()}
        context.update({ACTION_PARAMETERS_KV_PREFIX: liveaction.parameters})
        context.update({ACTION_CONTEXT_KV_PREFIX: liveaction.context})
        context.update({ACTION_RESULTS_KV_PREFIX: execution.result})
        return context

    def _transform_message(self, message, context=None):
        """Render the notify message as a Jinja template; fall back to raw value."""
        mapping = {'message': message}
        context = context or {}
        return (jinja_utils.render_values(mapping=mapping, context=context)).get('message',
                                                                                 message)

    def _transform_data(self, data, context=None):
        """Render every value of the notify data dict as a Jinja template."""
        return jinja_utils.render_values(mapping=data, context=context)

    def _get_trace_context(self, execution_id):
        """Return the TraceContext for the execution, or None when no trace exists."""
        trace_db = trace_service.get_trace_db_by_action_execution(
            action_execution_id=execution_id)
        if trace_db:
            return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
        # If no trace_context is found then do not create a new one here. If necessary
        # it shall be created downstream. Sure this is impl leakage of some sort.
        return None

    def _post_generic_trigger(self, liveaction=None, execution=None):
        """Dispatch the generic st2 action trigger consumed by the rules engine."""
        if not ACTION_SENSOR_ENABLED:
            LOG.debug('Action trigger is disabled, skipping trigger dispatch...')
            return

        execution_id = str(execution.id)
        payload = {'execution_id': execution_id,
                   'status': liveaction.status,
                   'start_timestamp': str(liveaction.start_timestamp),
                   # deprecate 'action_name' at some point and switch to 'action_ref'
                   'action_name': liveaction.action,
                   'action_ref': liveaction.action,
                   'runner_ref': self._get_runner_ref(liveaction.action),
                   'parameters': liveaction.get_masked_parameters(),
                   'result': liveaction.result}
        # Use execution_id to extract trace rather than liveaction. execution_id
        # will look-up an exact TraceDB while liveaction depending on context
        # may not end up going to the DB.
        trace_context = self._get_trace_context(execution_id=execution_id)
        LOG.debug('POSTing %s for %s. Payload - %s. TraceContext - %s',
                  ACTION_TRIGGER_TYPE['name'], liveaction.id, payload, trace_context)
        self._trigger_dispatcher.dispatch(self._action_trigger, payload=payload,
                                          trace_context=trace_context)

    def _apply_post_run_policies(self, liveaction=None):
        """Apply every post-run policy registered for the liveaction's action."""
        policy_dbs = Policy.query(resource_ref=liveaction.action)
        LOG.debug('Applying %s post_run policies', len(policy_dbs))
        for policy_db in policy_dbs:
            driver = policies.get_driver(policy_db.ref,
                                         policy_db.policy_type,
                                         **policy_db.parameters)
            try:
                LOG.debug('Applying post_run policy "%s" (%s) for liveaction %s',
                          policy_db.ref, policy_db.policy_type, str(liveaction.id))
                liveaction = driver.apply_after(liveaction)
            except Exception:
                # Fix: narrowed from a bare 'except:' clause.
                LOG.exception('An exception occurred while applying policy "%s".', policy_db.ref)

    def _get_runner_ref(self, action_ref):
        """
        Retrieve a runner reference for the provided action.

        :rtype: ``str``
        """
        action = Action.get_by_ref(action_ref)
        return action['runner_type']['name']
def get_notifier():
    """Construct a :class:`Notifier` bound to the liveaction update queue.

    NOTE(review): the kombu connection is opened in a ``with`` block, so it
    is released when this function returns -- behavior preserved from the
    original implementation.
    """
    urls = transport_utils.get_messaging_urls()
    with Connection(urls) as connection:
        dispatcher = TriggerDispatcher(LOG)
        return Notifier(connection, [ACTIONUPDATE_WORK_Q],
                        trigger_dispatcher=dispatcher)
|
{
"content_hash": "3a3b86ee1b32c926f5553a0de623a210",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 99,
"avg_line_length": 45.35686274509804,
"alnum_prop": 0.628220646723154,
"repo_name": "dennybaa/st2",
"id": "d6249b597139c4b8fda2eaad1466a36181f20bed",
"size": "12346",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "st2actions/st2actions/notifier/notifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "37319"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "3306365"
},
{
"name": "Shell",
"bytes": "27148"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
}
|
'''
@author Jiexin Guo
Helpers that use JSON (de)serialization together with the parts database
to pick promoters/plasmid backbones and evaluate repression curves.
Copyright (C) 2013-2014 sysu-software. All Rights Reserved.
'''
import json
import jsonUtil
import string
from database import SqliteDatabase
class modeling:
    # Picks the promoter/backbone pair whose tabulated expression value is
    # closest to each requested level, resolves the named proteins and a fixed
    # RBS, then scores every repressor in the DB into ``out.txt``.
    # NOTE(review): Python 2 code (``print >> f``, ``string.atof``). SQL below
    # is built by %-interpolation (injection-prone if part names are ever
    # user-supplied) and reaches the cursor through the name-mangled attribute
    # ``_SqliteDatabase__cursor`` -- both worth refactoring, left unchanged.
    def __init__(self,database,AValue,BValue,ProteinAName,ProteinBName,isDepressing):
        # database: SqliteDatabase wrapper; AValue/BValue: target expression
        # levels for proteins A and B; Protein*Name: part numbers.
        # NOTE(review): ``isDepressing`` is accepted but never used -- confirm.
        self.db = database
        self.__ProteinAValue=AValue
        self.__ProteinBValue=BValue
        self.AExpressionValueRecord=self.__getBestExpressionValueRecord(self.__ProteinAValue)
        self.APromoter= self.__getPromoterByExpressionValueRecord(self.AExpressionValueRecord)
        self.APlasmidBackbone= self.__getPlasmidBackboneByExpressionValueRecord(self.AExpressionValueRecord)
        #print self.AExpressionValueRecord
        self.BExpressionValueRecord=self.__getBestExpressionValueRecord(self.__ProteinBValue)
        self.BPromoter= self.__getPromoterByExpressionValueRecord(self.BExpressionValueRecord)
        self.BPlasmidBackbone= self.__getPlasmidBackboneByExpressionValueRecord(self.BExpressionValueRecord)
        #print self.BExpressionValueRecord
        self.ProteinA=self.__getProteinByName(ProteinAName)
        self.ProteinB=self.__getProteinByName(ProteinBName)
        self.RBS=self.__getRBSByName('BBa_J61102')
        self.RepressorTable=self.__getRepressor()
        # NOTE(review): ``f`` is opened here and never closed -- output relies
        # on interpreter exit to flush; consider closing after the loop.
        f = open("out.txt","w")
        for item in self.RepressorTable:
            self.depressingFunction(item,f)
    def __getBestExpressionValueRecord(self,idealValue):
        # Return the expression_value row whose ExpressionValue is nearest to
        # idealValue; ranking is done in SQL (the commented-out Python scan
        # below is the previous implementation, kept for reference).
        #data= self.db.getExpressionValue()
        #minDiff=abs(data[0]['ExpressionValue']-idealValue)
        #minIndex=0
        #for i in xrange(1,len(data)):
        #    if(abs(data[i]['ExpressionValue']-idealValue)<minDiff):
        #        minIndex=i
        #        minDiff=abs(data[i]['ExpressionValue']-idealValue)
        #return data[minIndex]
        self.db._SqliteDatabase__cursor.execute('select * from expression_value order by abs(expression_value.ExpressionValue-%f) limit 0,1' %idealValue)
        jsonEncoded = jsonUtil.turnSelectionResultToJson(self.db._SqliteDatabase__cursor.description,self.db._SqliteDatabase__cursor.fetchall())
        decodejson = json.loads(jsonEncoded)
        return decodejson[0]
    def __getRepressor(self):
        # All repressor rows, as a list of dicts (via the JSON round-trip).
        self.db._SqliteDatabase__cursor.execute('SELECT [repressor].* FROM [repressor]')
        jsonEncoded = jsonUtil.turnSelectionResultToJson(self.db._SqliteDatabase__cursor.description,self.db._SqliteDatabase__cursor.fetchall())
        decodejson = json.loads(jsonEncoded)
        return decodejson
    def __getPromoterByExpressionValueRecord(self,ExpressionValueRecord):
        # Promoter row referenced by the chosen expression_value record.
        self.db._SqliteDatabase__cursor.execute('select * from promoter where Number="%s"' %(ExpressionValueRecord['Promoter']))
        jsonEncoded = jsonUtil.turnSelectionResultToJson(self.db._SqliteDatabase__cursor.description,self.db._SqliteDatabase__cursor.fetchall())
        decodejson = json.loads(jsonEncoded)
        return decodejson[0]
    def __getPlasmidBackboneByExpressionValueRecord(self,ExpressionValueRecord):
        # Plasmid backbone row referenced by the chosen expression_value record.
        self.db._SqliteDatabase__cursor.execute('select * from plasmid_backbone where Number="%s"' %(ExpressionValueRecord['PlasmidBackbone']))
        jsonEncoded = jsonUtil.turnSelectionResultToJson(self.db._SqliteDatabase__cursor.description,self.db._SqliteDatabase__cursor.fetchall())
        decodejson = json.loads(jsonEncoded)
        return decodejson[0]
    def __getProteinByName(self,proteinName):
        # Protein row looked up by its part number.
        self.db._SqliteDatabase__cursor.execute('select * from Protein where Number="%s"' %(proteinName))
        jsonEncoded = jsonUtil.turnSelectionResultToJson(self.db._SqliteDatabase__cursor.description,self.db._SqliteDatabase__cursor.fetchall())
        decodejson = json.loads(jsonEncoded)
        return decodejson[0]
    def __getRBSByName(self,rbsName):
        # RBS row looked up by its part number.
        self.db._SqliteDatabase__cursor.execute('select * from RBS where Number="%s"' %(rbsName))
        jsonEncoded = jsonUtil.turnSelectionResultToJson(self.db._SqliteDatabase__cursor.description,self.db._SqliteDatabase__cursor.fetchall())
        decodejson = json.loads(jsonEncoded)
        return decodejson[0]
    def depressingFunction(self,repressor,f):
        # Evaluate a Hill-type repression ratio for one repressor and write it
        # to ``f``. NOTE(review): mutates the shared repressor dict in place
        # (K1 scaled x10), so the change persists in self.RepressorTable.
        repressor['K1']=repressor['K1']*10
        CopyNumber1=string.atof(self.APlasmidBackbone['CopyNumber'])
        CopyNumber2=string.atof(self.BPlasmidBackbone['CopyNumber'])
        LeakageRate1=string.atof(self.APromoter['LeakageRate'])
        MPPromoter1=string.atof(self.APromoter['MPPromoter'])
        #if LeakageRate1>MPPromoter1:
        #LeakageRate1=string.atof(self.APromoter['MPPromoter'])
        #MPPromoter1=string.atof(self.APromoter['LeakageRate'])
        LeakageRate2=string.atof(self.BPromoter['LeakageRate'])
        #LeakageRate2=0.1
        MPPromoter2=string.atof(self.BPromoter['MPPromoter'])
        #if LeakageRate2>MPPromoter2:
        #LeakageRate2=string.atof(self.BPromoter['MPPromoter'])
        #MPPromoter2=string.atof(self.BPromoter['LeakageRate'])
        c1=CopyNumber1*(MPPromoter1-LeakageRate1)
        c2=CopyNumber2*(MPPromoter2-LeakageRate2)
        # Unrepressed steady-state protein level feeding the Hill term.
        proteina0=self.RBS['MPRBS']/self.ProteinB['DegRatePro']*((c2+LeakageRate2)/self.ProteinB['DegRatemRNA'])
        #print>>f,'CopyNumber1',CopyNumber1
        #print>>f,'CopyNumber2',CopyNumber2
        #print>>f,'LeakageRate1',LeakageRate1
        #print>>f,'MPPromoter1',MPPromoter1
        #print>>f,'c1',c1
        #print>>f,'c2',c2
        #print>>f,'k1',repressor['K1']
        #print>>f,'proteina0',proteina0
        RepressorResult=pow(proteina0/repressor['K1'],repressor['HillCoeff1'])+1
        #print>>f,'temp',pow(proteina0/repressor['K1'],repressor['HillCoeff1'])
        #RepressorResult=1+1.5
        RepressorResult=c2/RepressorResult#+LeakageRate2
        RepressorResult=RepressorResult/c2#+LeakageRate2)
        print>>f,'RepressorResult',RepressorResult
        return RepressorResult
def repress_rate(database, grp1, CopyNumber1, grp2, CopyNumber2):
    """Steady-state repression transfer for a two-group circuit.

    grp1/grp2 are lists of part names (promoter first, regulator at index -2
    of grp1); CopyNumber1/CopyNumber2 are plasmid copy numbers. Returns
    ``(c_p2, repress_rate)``: the unrepressed protein-2 concentration and a
    Hill-type repression ratio.
    """
    # Fixed leakage and degradation constants shared by both groups.
    LeakageRate1 = 0.1
    LeakageRate2 = 0.1
    DegRatemPro1 = 0.1
    DegRatemPro2 = 0.1
    DegRatemRNA1 = 0.1
    DegRatemRNA2 = 0.1
    repressor = database.select_with_name("repressor", grp1[-2]["name"])
    if repressor is None:
        # The regulator may be catalogued as an activator instead.
        repressor = database.select_with_name("activator", grp1[-2]["name"])
    promoter1 = database.select_with_name("promoter", grp1[0]["name"])
    promoter2 = database.select_with_name("promoter", grp2[0]["name"])
    rbs1 = database.select_with_name("RBS", grp1[3]["name"])
    # NOTE(review): rbs2 is looked up from grp1[1]; the second group's RBS
    # (grp2[1]) looks intended -- confirm against callers before changing.
    rbs2 = database.select_with_name("RBS", grp1[1]["name"])
    c1 = CopyNumber1 * (promoter1["MPPromoter"] - LeakageRate1)
    c2 = CopyNumber2 * (promoter2["MPPromoter"] - LeakageRate2)
    # Unrepressed steady-state protein concentrations.
    c_p1 = rbs1["MPRBS"] * (c1 + LeakageRate1) / (DegRatemPro1 * DegRatemRNA1)
    c_p2 = rbs2["MPRBS"] * (c2 + LeakageRate2) / (DegRatemPro2 * DegRatemRNA2)
    repress_rate = (c2 / (1 + (c_p1 / repressor["K1"]) ** repressor["HillCoeff1"])\
        + LeakageRate2) / (c2 + LeakageRate2)
    return (c_p2, repress_rate)
def concen_without_repress(database, group, CopyNumber, pro_idx):
    """Expression level for the protein at ``pro_idx`` with no repression.

    Returns ``(concentration, 1)`` so the shape matches ``repress_rate``'s
    ``(concentration, ratio)`` result.
    """
    promoter = database.select_with_name("promoter", group[0]["name"])
    rbs = database.select_with_name("RBS", group[pro_idx - 1]["name"])
    return (CopyNumber * promoter["MPPromoter"] * rbs["MPRBS"], 1)
if __name__=="__main__":
    # Smoke test: score a hard-coded two-group circuit, then run the full
    # modeling pipeline (which writes out.txt). Python 2 print statement.
    sql=SqliteDatabase()
    print repress_rate(sql, ['BBa_I712074', 'BBa_J61104', 'BBa_C0060',\
        'BBa_J61104', u'BBa_K518003', 'BBa_B0013'], 15, ['BBa_J64000',\
        'BBa_J61104', 'BBa_C0160', 'BBa_B0013'], 15)
    m=modeling(sql,0.1,0.9,'BBa_K091109','BBa_I725011',True)
|
{
"content_hash": "fc0f20720a8a78a1ae6c4d00814aa291",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 149,
"avg_line_length": 48.682758620689654,
"alnum_prop": 0.7512395523445248,
"repo_name": "igemsoftware/SYSU-Software2013",
"id": "7d9f571af44ebd7935498f6c573b1f2e7e748983",
"size": "7072",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/Python27_32/web/modeling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "4234"
},
{
"name": "C",
"bytes": "2246655"
},
{
"name": "C#",
"bytes": "30903"
},
{
"name": "C++",
"bytes": "344228"
},
{
"name": "CSS",
"bytes": "437211"
},
{
"name": "F#",
"bytes": "9222"
},
{
"name": "JavaScript",
"bytes": "7288480"
},
{
"name": "Python",
"bytes": "55202181"
},
{
"name": "Shell",
"bytes": "23510"
},
{
"name": "Tcl",
"bytes": "3329368"
},
{
"name": "Visual Basic",
"bytes": "4330"
},
{
"name": "XSLT",
"bytes": "38160"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from hc.api.models import Flip
from hc.lib.date import month_boundaries
class Command(BaseCommand):
    """Management command that deletes stale Flip records."""
    help = "Prune old Flip objects."
    def handle(self, *args, **options):
        """Delete Flip rows older than three month boundaries back and
        report how many were removed.
        """
        cutoff = min(month_boundaries(months=3))
        stale_flips = Flip.objects.filter(created__lt=cutoff)
        n_pruned, _ = stale_flips.delete()
        return "Done! Pruned %d flips." % n_pruned
|
{
"content_hash": "a9ba2cf239bd17b41b7ee8149e6d85d9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 26.625,
"alnum_prop": 0.6737089201877934,
"repo_name": "iphoting/healthchecks",
"id": "faa45963fffd65e437f0e1351decba3427eda514",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/heroku",
"path": "hc/api/management/commands/pruneflips.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64145"
},
{
"name": "Dockerfile",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "595497"
},
{
"name": "JavaScript",
"bytes": "55883"
},
{
"name": "Less",
"bytes": "14135"
},
{
"name": "Python",
"bytes": "894208"
},
{
"name": "Shell",
"bytes": "4382"
}
],
"symlink_target": ""
}
|
"""
WSGI config for depot project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Must be set before any Django machinery imports the settings module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "depot.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
{
"content_hash": "53ff9b8fa3cd2fcb45924389ca8ae586",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 40.42857142857143,
"alnum_prop": 0.799469964664311,
"repo_name": "qianyu668899/Django",
"id": "3da45261a822596fcda5009c8ed766900f7701cb",
"size": "1132",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "depot/depot/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "890"
},
{
"name": "C",
"bytes": "447223"
},
{
"name": "C++",
"bytes": "2005"
},
{
"name": "CSS",
"bytes": "24544"
},
{
"name": "HTML",
"bytes": "77630"
},
{
"name": "JavaScript",
"bytes": "374"
},
{
"name": "Nginx",
"bytes": "787"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "209145"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
# URL routes for the core app: site home and the contact page.
# NOTE(review): patterns() with dotted-string view names was deprecated in
# Django 1.8 and removed in 1.10; on upgrade, replace with a plain list of
# url()/path() entries referencing imported view callables.
urlpatterns = patterns('simplemooc.core.views',
    url(r'^$', 'home', name='home'),
    url(r'^contact/$', 'contact', name='contact'),
)
|
{
"content_hash": "8d75aa59436afeb83a155a256bb459e8",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 51,
"avg_line_length": 30.833333333333332,
"alnum_prop": 0.6756756756756757,
"repo_name": "kaiocesar/simplemooc",
"id": "5821814bde2ccfcf42564127a44350111fe5dc08",
"size": "185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplemooc/core/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4704"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "1415"
}
],
"symlink_target": ""
}
|
from libs.models.student_vgg import VGGStudent
from libs.models.student_rrdb import RRDBStudent
from libs.models.student_residual import ResidualStudent
|
{
"content_hash": "e6d36f013320e4f706ebe07808f2b538",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 56,
"avg_line_length": 51,
"alnum_prop": 0.8627450980392157,
"repo_name": "captain-pool/GSOC",
"id": "9dbcbfa8325c12ed6c2a7c263132f35f2bfb4818",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "E3_Distill_ESRGAN/libs/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "129354"
},
{
"name": "Shell",
"bytes": "4163"
},
{
"name": "Starlark",
"bytes": "2954"
}
],
"symlink_target": ""
}
|
import sys
import numpy as np
from scipy.cluster.vq import whiten
from scipy.cluster.vq import kmeans
from scipy.cluster.vq import vq
import pickle
if len(sys.argv)<3:
print "Usage: python get_clusters.py word_vecs.txt N_CLUST"
print "Perhaps necessary:\n\tcut -d ' ' -f 2- my_reps.txt > word_vecs.txt\n\tawk '{ print $1 }' my_reps.txt > word_vecs.words.txt"
sys.exit()
N_CLUST = int(sys.argv[2])
# filenames
datafile_name = sys.argv[1]
wordlist_name = datafile_name.replace('.txt','.words.txt')
clusterlist_name = datafile_name.replace('.txt','.clusters.'+str(N_CLUST)+'.txt')
# data
data = np.loadtxt(datafile_name)
# normalise the data ('whiten')
white_data = whiten(data)
words_mess = open(wordlist_name,'r').readlines()
words = np.array(map(str.strip,words_mess))
print 'Data acquired!'
# outfile
cluster_file = open(clusterlist_name,'w')
# kmeans clusering
print 'Clustering!'
clust = kmeans(white_data,N_CLUST)
clust_codebook = clust[0]
print 'Cluster centroids obtained, making assignments...'
vq_out = vq(white_data,clust_codebook)
kmeans_assignments = vq_out[0]
distances = vq_out[1]
for i in range(len(words)):
word = words[i]
cluster_assignment = kmeans_assignments[i]
dist_to_centroid = distances[i]
cluster_file.write(str(word)+'\t'+str(cluster_assignment)+'\t'+str(dist_to_centroid)+'\n')
mean_distance = np.mean(distances)
print 'Mean distance to centroid:',mean_distance
# save the word-cluster assignments as numpy array
with_assignment = np.column_stack((words,kmeans_assignments))
pickle.dump(with_assignment,open("_master_dictionary_assigned.pk","wb"))
|
{
"content_hash": "23d1c11a5f14950c50a68b2a7e94133c",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 134,
"avg_line_length": 31,
"alnum_prop": 0.7202233250620348,
"repo_name": "corcra/word_reps",
"id": "47d54096bb32856fae9f1fc62600f8697ca62977",
"size": "2761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get_clusters.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3004"
},
{
"name": "Python",
"bytes": "7075"
},
{
"name": "R",
"bytes": "1473"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the tangible template for the large dark nest-tree lair base."""
    obj = Tangible()
    obj.template = "object/tangible/lair/base/shared_lair_base_nest_tree_lg_dark.iff"
    obj.attribute_template_id = -1
    obj.stfName("lair_n","generic_nest")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
|
{
"content_hash": "e32f16796a8b8f41096e0d2a8d23b77d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.692063492063492,
"repo_name": "anhstudios/swganh",
"id": "07c85a092f966cbb5af722588b0aa45adbc3de0a",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/lair/base/shared_lair_base_nest_tree_lg_dark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
test_get_vcard.py -- Given the URI of a vcard, return values and uris
assocuiated withe vcard. Handle repeating values such as telephone
and email.
Version 0.1 MC 2014-07-24
-- Initial version for tools 2.0
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.1"
from vivopeople import get_vcard
from datetime import datetime
import json
print datetime.now(),"Start"
print "\n",json.dumps(get_vcard("http://vivo.ufl.edu/individual/n6754"),
indent=4)
print datetime.now(),"Finish"
|
{
"content_hash": "b2c4a082090362160c06155fe3fcb0a1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 73,
"avg_line_length": 28.318181818181817,
"alnum_prop": 0.6725521669341894,
"repo_name": "mconlon17/vivo-1.6-upgrade",
"id": "261490b7837277e8b9f238ff85745135f7669c7d",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/test_get_vcard.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "68639"
},
{
"name": "TeX",
"bytes": "551"
}
],
"symlink_target": ""
}
|
from yapsy.IPlugin import IPlugin
from api.motor import Motor
from helpers.diode import Diode
class GreenLed(Motor, IPlugin):
    """Motor plugin driving a green LED on pin 8.

    The LED stays solid while conditions look fine, blinks (toggles per
    trigger) when any of the next three forecast periods reports a >= 10%
    chance of rain, and is switched off on termination.
    """

    def __init__(self):
        super().__init__()
        self._diode = Diode(led_pin=8)
        self._diode.on()

    def on_trigger(self, current_state):
        # Shutting down: turn the LED off and stop.
        if current_state['termination']:
            self._diode.off()
            return
        # No weather data yet: fall back to a steady light.
        if 'weather' not in current_state:
            self._diode.on()
            return
        upcoming = current_state['weather']['forecast'][:3]
        if any(period['chance_of_rain'] >= 10 for period in upcoming):
            self._diode.toggle()
        else:
            self._diode.on()
|
{
"content_hash": "e53c4b10f22560bb1018406a8c05c0a0",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 99,
"avg_line_length": 26.642857142857142,
"alnum_prop": 0.561662198391421,
"repo_name": "sceeter89/command-center",
"id": "71a2da692b199370d9ef955bf2ae4d1e434e2a5d",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/motors/green_led/green_led.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43750"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and test one artificial series: 128 daily points, polynomial trend,
# 30-step cycle, AR(12) component, no transform, zero noise, no exogenous vars.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 30, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 12);
|
{
"content_hash": "1cfd53a2f6b0b6a89b1a844ad70f679d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 160,
"avg_line_length": 37.142857142857146,
"alnum_prop": 0.7,
"repo_name": "antoinecarme/pyaf",
"id": "b65ff2113af1dba24b3a2fa74c498e514c35b3e7",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_None/trend_PolyTrend/cycle_30/ar_12/test_artificial_128_None_PolyTrend_30_12_0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import hr_job
|
{
"content_hash": "0c14ed3219573684d396e2a1034149f9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 13,
"avg_line_length": 14,
"alnum_prop": 0.7857142857142857,
"repo_name": "jesus-gh/sumar",
"id": "113786087b71f8a01f49a714cf9403d252fd2be8",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poi_x_sumar/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31698"
}
],
"symlink_target": ""
}
|
from __future__ import division
import numpy
from chainer.training import extension
class PolynomialShift(extension.Extension):

    """Trainer extension to polynomially shift an optimizer attribute.

    This extension polynomially decreases the specified attribute of the
    optimizer. The typical use case is a polynomial decay of the
    learning rate at each iteration.

    For example, suppose that this extension is invoked at every iteration.
    Then this extension will set the corresponding attribute to
    ``init_value * (1 - i / max_iter) ^ rate`` at the ``i``-th iteration, where
    the ``max_iter`` is the number of iterations to be running.

    This extension is also called before the training loop starts by default.

    Args:
        attr (str): Name of the attribute to shift.
        rate (float): Exponent of polynomial shift.
        max_count (int): Number of this extension to be invoked.
        init (float): Initial value of the attribute. If it is ``None``, the
            extension extracts the attribute at the first call and uses it as
            the initial value.
        target (float): Target value of the attribute. If the attribute reaches
            this value, the shift stops.
        optimizer (~chainer.Optimizer): Target optimizer to adjust the
            attribute. If it is ``None``, the main optimizer of the updater is
            used.
    """

    invoke_before_training = True

    def __init__(self, attr, rate, max_count, init=None, target=None,
                 optimizer=None):
        self._attr = attr
        self._rate = rate
        self._init = init
        self._target = target
        self._optimizer = optimizer
        self._t = 0  # number of times __call__ has been invoked
        self._max_count = max_count
        self._last_value = None  # last value written; restored when resuming

    def initialize(self, trainer):
        """Set the attribute to its initial (or snapshot-resumed) value."""
        optimizer = self._get_optimizer(trainer)
        # ensure that _init is set
        if self._init is None:
            self._init = getattr(optimizer, self._attr)
        if self._last_value is not None:  # resuming from a snapshot
            self._update_value(optimizer, self._last_value)
        else:
            self._update_value(optimizer, self._init)

    def __call__(self, trainer):
        """Advance the schedule one step and write the new attribute value."""
        self._t += 1
        optimizer = self._get_optimizer(trainer)
        # decay factor (1 - t / max_count), clamped at 0 once t >= max_count
        decay = max(1 - self._t / self._max_count, 0)
        value = self._init * decay ** self._rate
        if self._target is not None:
            if self._rate > 0:
                # almost same as value = min(value, self._target), but this
                # line supports negative values, too
                if self._target / value > 1:
                    value = self._target
            else:
                # ditto
                if self._target / value < 1:
                    value = self._target
        self._update_value(optimizer, value)

    def serialize(self, serializer):
        """Save/restore the step counter and last value for snapshotting."""
        self._t = serializer('_t', self._t)
        self._last_value = serializer('_last_value', self._last_value)
        if isinstance(self._last_value, numpy.ndarray):
            # numpy.asscalar() was deprecated and removed in NumPy 1.23;
            # ndarray.item() is the documented equivalent.
            self._last_value = self._last_value.item()

    def _get_optimizer(self, trainer):
        # Fall back to the updater's main optimizer when none was supplied.
        return self._optimizer or trainer.updater.get_optimizer('main')

    def _update_value(self, optimizer, value):
        setattr(optimizer, self._attr, value)
        self._last_value = value
|
{
"content_hash": "d501077e4ec4387170efd0f3a4097893",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 35.776595744680854,
"alnum_prop": 0.6092774308652988,
"repo_name": "rezoo/chainer",
"id": "5c4a9935b8d25bd0788bff3355ce98a2dea2630f",
"size": "3363",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chainer/training/extensions/polynomial_shift.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "Dockerfile",
"bytes": "1238"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "4367165"
}
],
"symlink_target": ""
}
|
from django import forms
from . import models
class StatusForm(forms.ModelForm):
    """ModelForm for Status that binds the posting user at save time.

    The view must pass the authenticated user via the ``user`` keyword
    argument; it is attached to the instance, not exposed as a form field.
    """

    message = forms.CharField(widget=forms.Textarea)

    def __init__(self, *args, **kwargs):
        # Pull 'user' out before the base class sees the kwargs.
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)

    def save(self, commit=True):
        status = super().save(commit=False)
        status.user = self.user
        if not commit:
            return status
        status.save()
        return status

    class Meta:
        model = models.Status
        fields = ['message']
|
{
"content_hash": "724b1efa91d34650a967fcb5506e1343",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 52,
"avg_line_length": 22.695652173913043,
"alnum_prop": 0.5919540229885057,
"repo_name": "consideratecode/csrf_example",
"id": "5cdf89ac63ee5e2da1fd26161ab76a98e3c8fc50",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chirp/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4525"
},
{
"name": "Python",
"bytes": "10678"
}
],
"symlink_target": ""
}
|
"""
Class for Handling KeystoneEvents in OpenStack's RabbitMQ/QPID
Uses either pika or proton libraries for handling the AMQP protocol, depending whether the message broker is RabbitMQ or QPID, and then implements
the necessary callbacks for Keystone events, such as tenant creation
"""
__copyright__ = "Istituto Nazionale di Fisica Nucleare (INFN)"
__license__ = "Apache 2"
import json
import pika
import logging
class ProjectEvents:
    """Listens on the OpenStack message bus for Keystone project events and
    mirrors project create/delete operations as Zabbix host groups.

    Only the RabbitMQ (pika) transport is implemented; QPID is a stub.
    Python 2 code (uses ``except KeyError, e`` syntax).
    """
    def __init__(self, rpc_type, rpc_host, rpc_user, rpc_pass, zabbix_handler):
        # Broker connection settings ('rabbitmq' or 'qpid') and the Zabbix
        # facade used to create/delete host groups.
        self.rpc_type = rpc_type
        self.rpc_host = rpc_host
        self.rpc_user = rpc_user
        self.rpc_pass = rpc_pass
        self.zabbix_handler = zabbix_handler
        self.logger = logging.getLogger('ZCP')
        self.logger.info('Projects listener started')
    def keystone_amq_rabbitmq(self):
        """
        Listen for Keystone events on RabbitMQ.

        Declares an exclusive queue bound to the 'openstack' and 'keystone'
        topic exchanges and blocks forever in start_consuming(), dispatching
        each message to keystone_callback_rabbitmq().
        """
        connection = pika.BlockingConnection(pika.ConnectionParameters(host = self.rpc_host, credentials = pika.PlainCredentials(username = self.rpc_user, password = self.rpc_pass)))
        channel = connection.channel()
        result = channel.queue_declare(exclusive = True)
        queue_name = result.method.queue
        # exchange name should be made available as option, maybe advanced
        channel.exchange_declare(exchange = 'openstack', type = 'topic')
        channel.exchange_declare(exchange = 'keystone', type = 'topic')
        channel.queue_bind(exchange = 'openstack', queue = queue_name, routing_key = 'notifications.#')
        channel.queue_bind(exchange = 'keystone', queue = queue_name, routing_key = 'keystone.#')
        # no_ack=True: messages are not re-delivered if processing fails
        channel.basic_consume(self.keystone_callback_rabbitmq, queue = queue_name, no_ack = True)
        channel.start_consuming()
    def keystone_callback_rabbitmq(self, ch, method, properties, body):
        """
        Handle one Keystone notification message.

        Creates a Zabbix host group when a project is created and deletes it
        when the project is deleted; any other event type is ignored.

        :param ch: refers to the head of the protocol
        :param method: refers to the method used in callback
        :param properties: refers to the proprieties of the message
        :param body: JSON-encoded notification payload
        """
        payload = json.loads(body)
        try:
            if payload['event_type'] == 'identity.project.created':
                tenant_id = payload['payload']['resource_info']
                tenants = self.zabbix_handler.get_tenants()
                tenant_name = self.zabbix_handler.get_tenant_name(tenants, tenant_id)
                self.zabbix_handler.group_list.append([tenant_name, tenant_id])
                self.zabbix_handler.create_host_group(tenant_name)
                self.logger.info("New project (%s) created -> corresponding host group created on zabbix" %(tenant_name))
            elif payload['event_type'] == 'identity.project.deleted':
                tenant_id = payload['payload']['resource_info']
                tenants = self.zabbix_handler.get_tenants()
                tenant_name = self.zabbix_handler.get_tenant_name(tenants, tenant_id)
                self.zabbix_handler.project_delete(tenant_id)
                self.logger.info("Project %s deleted -> Corresponding host group deleted from zabbix" %(tenant_name))
        except KeyError, e:
            # Malformed/unexpected payloads are skipped rather than crashing
            # the consumer loop.
            self.logger.info("JSON KeyError, skipping message..")
            pass
    def keystone_listener(self):
        """Entry point: dispatch to the transport matching self.rpc_type."""
        self.logger.info("Contacting keystone rpc on host %s (rpc type %s) " %(self.rpc_host, self.rpc_type))
        if self.rpc_type == 'rabbitmq':
            self.keystone_amq_rabbitmq()
        elif self.rpc_type == 'qpid':
            # NOTE(review): nova_amq_qpid is not defined in this class, so the
            # 'qpid' branch raises AttributeError at runtime — confirm intent.
            self.nova_amq_qpid()
            ## SUPPORT FOR QPID TO BE ADDED
|
{
"content_hash": "5e3c20214067f8defe0af555e5ec3502",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 182,
"avg_line_length": 46,
"alnum_prop": 0.6476113794954375,
"repo_name": "egiorgio/ProZaC",
"id": "9c733d4757fd563a0c1b2877e20f4668f8225c90",
"size": "3726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52399"
},
{
"name": "Shell",
"bytes": "1960"
}
],
"symlink_target": ""
}
|
""" An adapter factory that caches adapters per instance. """
import weakref
from traits.api import Any, Bool, HasTraits, Property
from traits.util.api import import_symbol
class CachedAdapterFactory(HasTraits):
    """ An adapter factory that caches adapters per instance.

    We provide this class to provide the caching functionality of the
    old traits 'adapts' implementation. However, note that the cache will
    not be cleared unless you take care of cleaning the 'adaptee' trait once
    your adapter are deleted.

    This class will be removed when the 'adapts' function is removed.
    """
    #### 'object' protocol #####################################################
    def __call__(self, adaptee):
        """ The adapter manager uses callables for adapter factories. """
        # NOTE(review): None doubles as the cache-miss sentinel, so a factory
        # that legitimately returns None will be re-invoked on every call.
        adapter = self._adapter_cache.get(adaptee, None)
        if adapter is None:
            adapter = self.factory(adaptee)
            self._adapter_cache[adaptee] = adapter
        return adapter
    #### 'CachedAdapterFactory' protocol #######################################
    #: A callable that actually creates the adapters!
    #:
    #: The factory must ba callable that takes exactly one argument which is
    #: the object to be adapted (known as the adaptee), and returns an
    #: adapter from the `from_protocol` to the `to_protocol`.
    #:
    #: The factory can be specified as either a callable, or a string in the
    #: form 'foo.bar.baz' which is turned into an import statement
    #: 'from foo.bar import baz' and imported when the trait is first accessed.
    factory = Property(Any)
    #: True if the cache is empty, otherwise False.
    #:
    #: This method is mostly here to help testing - the framework does not
    #: rely on it for any other purpose.
    is_empty = Property(Bool)
    def _get_is_empty(self):
        # Property getter for 'is_empty' (traits maps it by naming convention).
        return len(self._adapter_cache) == 0
    #### Private protocol ######################################################
    # Weak-keyed cache: entries vanish when the adaptee is garbage-collected.
    _adapter_cache = Any
    def __adapter_cache_default(self):
        return weakref.WeakKeyDictionary()
    #: Shadow trait for the corresponding property.
    _factory = Any
    # Lazy-import guard: True once a string factory has been resolved.
    _factory_loaded = Bool(False)
    def _get_factory(self):
        """ Trait property getter. """
        # Resolve a dotted-string factory to the actual callable on first
        # access. NOTE(review): 'basestring' implies Python 2 — under
        # Python 3 this isinstance check would raise NameError; confirm the
        # supported interpreter.
        if not self._factory_loaded:
            if isinstance(self._factory, basestring):
                self._factory = import_symbol(self._factory)
            self._factory_loaded = True
        return self._factory
    def _set_factory(self, factory):
        """ Trait property setter. """
        self._factory = factory
        return
#### EOF #######################################################################
|
{
"content_hash": "c74aa44546c38d91c6f26ec65eafbd04",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 80,
"avg_line_length": 32.46987951807229,
"alnum_prop": 0.5996289424860853,
"repo_name": "burnpanck/traits",
"id": "11af5f10638bc30280b76481215dd767825b50d4",
"size": "3283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "traits/adaptation/cached_adapter_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "660"
},
{
"name": "C",
"bytes": "186780"
},
{
"name": "Python",
"bytes": "1085281"
}
],
"symlink_target": ""
}
|
from .. utils import TranspileTestCase, BuiltinFunctionTestCase
class AbsTests(TranspileTestCase):
    """Transpiler tests for the abs() builtin."""
    def test_abs_not_implemented(self):
        # abs() on an object with no __abs__ must raise TypeError; the
        # transpiled program is expected to print the same error message
        # as CPython.
        self.assertCodeExecution("""
            class NotAbsLike:
                pass
            x = NotAbsLike()
            try:
                print(abs(x))
            except TypeError as err:
                print(err)
            """)
class BuiltinAbsFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    # Builtins exercised by the generic BuiltinFunctionTestCase machinery.
    functions = ["abs"]
    # Generated test names expected to fail for abs() — presumably skipped
    # by the base test case; confirm against BuiltinFunctionTestCase.
    not_implemented = [
        'test_class',
        'test_frozenset',
    ]
|
{
"content_hash": "5b5604d3f3cd7acf264573ad41c89695",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 24.608695652173914,
"alnum_prop": 0.568904593639576,
"repo_name": "gEt-rIgHt-jR/voc",
"id": "6f127df258687291b819f50816b87c6b79aaab47",
"size": "566",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/builtins/test_abs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "764713"
},
{
"name": "Python",
"bytes": "976467"
}
],
"symlink_target": ""
}
|
import pygame, sys, os, utils, animation, button, options, world, save, game
# Module-level helper instance; Menu uses util.remap() for parallax scrolling.
util = utils.Utils()
class Menu(object):
    """Main-menu screen: owns the pygame event/draw loop and dispatches
    button clicks to the world selector, tutorial, options screen and quit.

    Note: __init__ runs the whole menu loop itself — constructing a Menu
    blocks until leave() is called.
    """
    def __init__(self, screen, clock, fps, resolution, version):
        self.screen = screen
        self.clock = clock
        self.fps = fps
        self.resolution = resolution
        self.halfResolution = (self.resolution[0] // 2, self.resolution[1] // 2)
        self.fullscreen = False
        self.version = version
        self.volume = pygame.mixer.music.get_volume()
        self.menuObj = None
        # Persisted options ("opt" slot): volume, fps cap, fullscreen flag.
        self.save = save.Save("opt")
        self.volume = float(self.save.get("volume"))
        if 0 <= self.volume <= 20:
            # Stored volume uses a 0-20 scale; pygame expects 0.0-1.0.
            pygame.mixer.music.set_volume(self.volume * 0.05)
        self.fps = int(self.save.get("fps"))
        self.fullscreen = bool(int(self.save.get("fullscreen")))
        if self.fullscreen:
            pygame.display.set_mode(self.resolution, pygame.FULLSCREEN)
        self.running = True
        # Use the player's "standingRight" animation as the menu logo;
        # filenames encode animation parameters as name+a+b.ext.
        for anim in os.listdir("assets/characters/player"):
            info = anim[:anim.find(".")].split("+")
            if info[0] == "standingRight":
                self.logo = animation.Animation("assets/characters/player/%s" % anim, 50, 50, float(info[1]), float(info[2]))
        self.splice = self.logo.getSplice()
        self.background = pygame.image.load("assets/sprites/backgrounds/world1.png").convert_alpha()
        self.bgPos = (0, 0)
        self.smallFont = pygame.font.Font("assets/fonts/OpenSans-Semibold.ttf", 14)
        self.mediumFont = pygame.font.Font("assets/fonts/OpenSans-Semibold.ttf", 30)
        self.bigFont = pygame.font.Font("assets/fonts/OpenSans-Semibold.ttf", 72)
        self.mainText = self.bigFont.render("Layer Switcher", 1, (0, 0, 0))
        self.versionText = self.mediumFont.render(self.version, 1, (0, 0, 0))
        self.currentMenu = ""
        self.mainMenu()
        # Main menu loop: runs until leave() flips self.running.
        while self.running:
            dt = self.clock.tick(self.fps)
            pygame.display.set_caption("Layer Switcher %3d FPS" % (self.clock.get_fps()), "Layer Switcher")
            mouseTrigger = False
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.leave()
                if event.type == pygame.KEYDOWN:
                    # ESC: quit from the main menu, otherwise back to it.
                    if event.key == pygame.K_ESCAPE:
                        if self.currentMenu == "main":
                            self.leave()
                        else:
                            self.mainMenu()
                    # SPACE: advance (main -> world select -> start map).
                    if event.key == pygame.K_SPACE:
                        if self.currentMenu == "main":
                            world.World(self)
                        elif self.currentMenu == "world":
                            self.world.start()
                    # A/D: cycle maps (wrapping across worlds) on world select.
                    if self.currentMenu == "world":
                        if event.key == pygame.K_a:
                            if self.menuObj.mapIndex > 0:
                                self.menuObj.mapDown()
                            elif self.menuObj.worldIndex > 0:
                                self.menuObj.worldDown()
                                self.menuObj.mapIndex = len(self.menuObj.maps) - 2
                                self.menuObj.mapUp()
                        if event.key == pygame.K_d:
                            if self.menuObj.mapIndex < len(self.menuObj.maps) - 1:
                                self.menuObj.mapUp()
                            elif self.menuObj.worldIndex < len(self.menuObj.worlds) - 1:
                                self.menuObj.worldUp()
                if event.type == pygame.MOUSEBUTTONUP:
                    if event.button == 1:
                        mouseTrigger = True
            self.screen.fill((82, 246, 255))
            mPos = pygame.mouse.get_pos()
            # Parallax: background scrolls opposite the mouse position.
            self.bgPos = (
                -util.remap(mPos[0], 0, self.resolution[0], 0, self.background.get_width() - self.resolution[0]),
                -util.remap(mPos[1], 0, self.resolution[1], 0, self.background.get_height() - self.resolution[1])
            )
            self.screen.blit(self.background, self.bgPos)
            self.screen.blit(self.mainText, (self.halfResolution[0] - self.mainText.get_width() // 2, 100))
            self.screen.blit(self.versionText, (5, self.resolution[1] - self.versionText.get_height()))
            if self.currentMenu == "main":
                # dt is milliseconds; the animation expects seconds.
                self.logo.update(dt * 0.001)
                self.screen.blit(self.splice, (self.halfResolution[0] - self.splice.get_width() // 2, 270))
            for butt in button.Button.group:
                butt.updateAndDraw(self.screen, mPos, mouseTrigger)
            pygame.display.flip()
    def mainMenu(self):
        """Rebuild the main-menu button set (and the optional haiku tip)."""
        self.currentMenu = "main"
        button.Button.group = []
        button.Button("big", self.mediumFont, "World", (0, self.resolution[1] - 330), self.resolution, lambda: world.World(self))
        button.Button("big", self.mediumFont, "Tutorial", (0, self.resolution[1] - 255), self.resolution, self.tutorial)
        button.Button("big", self.mediumFont, "Options", (0, self.resolution[1] - 180), self.resolution, lambda: options.Options(self))
        button.Button("big", self.mediumFont, "Quit", (0, self.resolution[1] - 105), self.resolution, self.leave)
        if int(self.save.get("displayTip")):
            button.Button("text", self.mediumFont, "Menu Haiku:", (self.halfResolution[0] - 250, self.halfResolution[1] - 150), self.resolution)
            button.Button("medium", self.smallFont, "SPACE makes you advance \\ESCAPE will help you go back \\A and D change map",
                (self.halfResolution[0] - 250, self.halfResolution[1] - 100), self.resolution, self.hideTip, True)
            button.Button("text", self.smallFont, "Click to hide", (self.halfResolution[0] - 250, self.halfResolution[1] + 5), self.resolution)
    def hideTip(self):
        """Remove the three tip buttons and persist the 'seen it' flag."""
        button.Button.group[-3:] = []
        self.save.set("displayTip", 0)
    def tutorial(self):
        # Launch the tutorial map directly.
        game.Game(self, "Game", "Tutorial")
    def leave(self):
        """Stop the menu loop and shut down pygame and the process."""
        self.running = False
        pygame.quit()
        sys.exit(0)
|
{
"content_hash": "c04ad878d29447e81f2ba09345f4c0c4",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 135,
"avg_line_length": 35.673758865248224,
"alnum_prop": 0.6671968190854871,
"repo_name": "pedro-b/layer-switcher",
"id": "38c101839758b46475ae649507b77c7cccc0625f",
"size": "5030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "141200"
}
],
"symlink_target": ""
}
|
""" CISCO_IETF_FRR_MIB
This MIB module contains managed object definitions for MPLS
Fast Reroute (FRR) as defined in\:Pan, P., Gan, D., Swallow, G.,
Vasseur, J.Ph., Cooper, D., Atlas, A., Jork, M., Fast Reroute
Techniques in RSVP\-TE, draft\-ietf\-mpls\-rsvp\-lsp\-fastreroute\-
00.txt, January 2002.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class CiscoIetfFrrMib(object):
"""
.. attribute:: cmplsfrrconsttable
This table shows detour setup constraints
**type**\: :py:class:`Cmplsfrrconsttable <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrconsttable>`
.. attribute:: cmplsfrrfacroutedbtable
The mplsFrrFacRouteDBTable provides information about the fast reroute database. Each entry belongs to an interface, protecting backup tunnel and protected tunnel. MPLS interfaces defined on this node are protected by backup tunnels and are indexed by mplsFrrFacRouteProtectedIndex. Backup tunnels defined to protect the tunnels traversing an interface, and are indexed by mplsFrrFacRouteProtectingTunIndex. Note that the tunnel instance index is not required, since it is implied to be 0, which indicates the tunnel head interface for the protecting tunnel. The protecting tunnel is defined to exist on the PLR in the FRR specification. Protected tunnels are the LSPs that traverse the protected link. These LSPs are uniquely identified by mplsFrrFacRouteProtectedTunIndex, mplsFrrFacRouteProtectedTunInstance, mplsFrrFacRouteProtectedTunIngressLSRId, and mplsFrrFacRouteProtectedTunEgressLSRId
**type**\: :py:class:`Cmplsfrrfacroutedbtable <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrfacroutedbtable>`
.. attribute:: cmplsfrrlogtable
The fast reroute log table records fast reroute events such as protected links going up or down or the FRR feature kicking in
**type**\: :py:class:`Cmplsfrrlogtable <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrlogtable>`
.. attribute:: cmplsfrrscalars
**type**\: :py:class:`Cmplsfrrscalars <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrscalars>`
"""
_prefix = 'CISCO-IETF-FRR-MIB'
_revision = '2008-04-29'
def __init__(self):
self.cmplsfrrconsttable = CiscoIetfFrrMib.Cmplsfrrconsttable()
self.cmplsfrrconsttable.parent = self
self.cmplsfrrfacroutedbtable = CiscoIetfFrrMib.Cmplsfrrfacroutedbtable()
self.cmplsfrrfacroutedbtable.parent = self
self.cmplsfrrlogtable = CiscoIetfFrrMib.Cmplsfrrlogtable()
self.cmplsfrrlogtable.parent = self
self.cmplsfrrscalars = CiscoIetfFrrMib.Cmplsfrrscalars()
self.cmplsfrrscalars.parent = self
class Cmplsfrrscalars(object):
"""
.. attribute:: cmplsfrractprotectedifs
Indicates the number of interfaces currently being protected by the FRR feature if mplsFrrConstProtectionMethod is set to facilityBackup(1), otherwise this value should return 0 to indicate that LSPs traversing any interface may be protected. This value MUST be less than or equal to mplsFrrConfIfs
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrractprotectedlsps
Indicates the number of LSPs currently protected by the FRR feature. If mplsFrrConstProtectionMethod is set to facilityBackup(1)this object MUST return 0
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrractprotectedtuns
Indicates the number of bypass tunnels indicated in mplsFrrConfProtectingTuns whose operStatus is up(1) indicating that they are currently protecting facilities on this LSR using the FRR feature. This object MUST return 0 if mplsFrrConstProtectionMethod is set to facilityBackup(1)
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrconfprotectingtuns
Indicates the number of bypass tunnels configured to protect facilities on this LSR using the FRR feature if mplsFrrConstProtectionMethod is set to facilityBackup(1), otherwise this value MUST return 0
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrconstprotectionmethod
Indicates which protection method is to be used for fast reroute. Some devices may require a reboot of their routing processors if this variable is changed. An agent which does not wish to reboot or modify its FRR mode MUST return an inconsistentValue error. Please consult the device's agent capability statement for more details
**type**\: :py:class:`CmplsfrrconstprotectionmethodEnum <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrscalars.CmplsfrrconstprotectionmethodEnum>`
.. attribute:: cmplsfrrdetourincoming
The number of detour LSPs entering the device if mplsFrrConstProtectionMethod is set to oneToOneBackup(0), or or 0 if mplsFrrConstProtectionMethod is set to facilityBackup(1)
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrdetouroriginating
The number of detour LSPs originating at this PLR if mplsFrrConstProtectionMethod is set to oneToOneBackup(0). This object MUST return 0 if the mplsFrrConstProtectionMethod is set to facilityBackup(1)
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrdetouroutgoing
The number of detour LSPs leaving the device if mplsFrrConstProtectionMethod is set to oneToOneBackup(0), or 0 if mplsFrrConstProtectionMethod is set to to facilityBackup(1)
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrlogtablecurrentries
Indicates the current number of entries in the FRR log table
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrlogtablemaxentries
Indicates the maximum number of entries allowed in the FRR Log table. Agents receiving SETs for values that cannot be used must return an inconsistent value error. If a manager sets this value to 0, this indicates that no logging should take place by the agent. If this value is returned as 0, this indicates that no additional log entries will be added to the current table either because the table has been completely filled or logging has been disabled. However, agents may wish to not delete existing entries in the log table so that managers may review them in the future. It is implied that when mplsFrrLogTableCurrEntries has reached the value of this variable, that logging entries may not continue to be added to the table, although existing ones may remain. Furthermore, an agent may begin to delete existing (perhaps the oldest entries) entries to make room for new ones
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrnotifmaxrate
This variable indicates the number of milliseconds that must elapse between notification emissions. If events occur more rapidly, the implementation may simply fail to emit these notifications during that period, or may queue them until an appropriate time in the future. A value of 0 means no minimum elapsed period is specified
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrnotifsenabled
Enables or disables FRR notifications defined in this MIB module. Notifications are disabled by default
**type**\: bool
.. attribute:: cmplsfrrnumofconfifs
Indicates the number of MPLS interfaces configured for protection by the FRR feature, otherwise this value MUST return 0 to indicate that LSPs traversing any interface may be protected
**type**\: int
**range:** 0..4294967295
.. attribute:: cmplsfrrswitchover
The number of tunnel instances that are switched over to their corresponding detour LSP if mplsFrrConstProtectionMethod is set to oneToOneBackup(0), or tunnels being switched over if mplsFrrConstProtectionMethod is set to facilityBackup(1)
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'CISCO-IETF-FRR-MIB'
_revision = '2008-04-29'
def __init__(self):
    """Initialize every scalar leaf to None (unset)."""
    self.parent = None
    # Scalar leaves all start out unset; they are filled in when data is
    # decoded from the device.
    for leaf in ('cmplsfrractprotectedifs',
                 'cmplsfrractprotectedlsps',
                 'cmplsfrractprotectedtuns',
                 'cmplsfrrconfprotectingtuns',
                 'cmplsfrrconstprotectionmethod',
                 'cmplsfrrdetourincoming',
                 'cmplsfrrdetouroriginating',
                 'cmplsfrrdetouroutgoing',
                 'cmplsfrrlogtablecurrentries',
                 'cmplsfrrlogtablemaxentries',
                 'cmplsfrrnotifmaxrate',
                 'cmplsfrrnotifsenabled',
                 'cmplsfrrnumofconfifs',
                 'cmplsfrrswitchover'):
        setattr(self, leaf, None)
class CmplsfrrconstprotectionmethodEnum(Enum):
    """
    CmplsfrrconstprotectionmethodEnum

    Indicates which protection method is to be used for fast
    reroute. Some devices may require a reboot of their routing
    processors if this variable is changed. An agent which
    does not wish to reboot or modify its FRR mode
    MUST return an inconsistentValue error. Please
    consult the device's agent capability statement
    for more details.

    .. data:: oneToOneBackup = 0

    .. data:: facilityBackup = 1
    """

    # Members are plain integer class attributes (ydk Enum convention).
    oneToOneBackup = 0

    facilityBackup = 1

    @staticmethod
    def _meta_info():
        # Deferred import of the generated meta table for this module.
        from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
        return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrscalars.CmplsfrrconstprotectionmethodEnum']
@property
def _common_path(self):
    # Absolute XPath of the cmplsFrrScalars container; it is keyless, so
    # the path is a fixed string.
    return '/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB/CISCO-IETF-FRR-MIB:cmplsFrrScalars'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This subtree models operational (read-only) state.
    return False
def _has_data(self):
    """Return True when at least one scalar leaf has been assigned."""
    leaves = (
        self.cmplsfrractprotectedifs,
        self.cmplsfrractprotectedlsps,
        self.cmplsfrractprotectedtuns,
        self.cmplsfrrconfprotectingtuns,
        self.cmplsfrrconstprotectionmethod,
        self.cmplsfrrdetourincoming,
        self.cmplsfrrdetouroriginating,
        self.cmplsfrrdetouroutgoing,
        self.cmplsfrrlogtablecurrentries,
        self.cmplsfrrlogtablemaxentries,
        self.cmplsfrrnotifmaxrate,
        self.cmplsfrrnotifsenabled,
        self.cmplsfrrnumofconfifs,
        self.cmplsfrrswitchover,
    )
    return any(leaf is not None for leaf in leaves)
@staticmethod
def _meta_info():
    # Deferred import of the generated meta table for this module.
    from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
    return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrscalars']['meta_info']
class Cmplsfrrconsttable(object):
    """
    This table shows detour setup constraints.

    .. attribute:: cmplsfrrconstentry

        An entry in this table represents detour LSP or bypass tunnel
        setup constraints for a tunnel instance to be protected by detour
        LSPs or a tunnel. Agents must allow entries in this table to be
        created only for tunnel instances that require fast-reroute.
        Entries indexed with mplsFrrConstIfIndex set to 0 apply to all
        interfaces on this device for which the FRR feature can operate on.

        **type**\\: list of :py:class:`Cmplsfrrconstentry <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrconsttable.Cmplsfrrconstentry>`
    """

    _prefix = 'CISCO-IETF-FRR-MIB'
    _revision = '2008-04-29'

    def __init__(self):
        self.parent = None
        # Rows are kept in a YList; each row points back at the table so
        # the runtime can build full XPaths.
        self.cmplsfrrconstentry = YList()
        self.cmplsfrrconstentry.parent = self
        self.cmplsfrrconstentry.name = 'cmplsfrrconstentry'

    class Cmplsfrrconstentry(object):
        """
        One row of cmplsFrrConstTable: detour LSP / bypass tunnel setup
        constraints for a single protected tunnel instance.

        Keys: cmplsfrrconstifindex (0 = all FRR-capable interfaces,
        range 0..2147483647), cmplsfrrconsttunnelindex (0..65535) and
        cmplsfrrconsttunnelinstance (0..4294967295).

        Data leaves: cmplsfrrconstbandwidth (detour bandwidth, Kbps),
        cmplsfrrconstsetupprio / cmplsfrrconstholdingprio (0..7),
        cmplsfrrconsthoplimit (1..65535), the include-any / include-all /
        exclude-all affinity words, the protected / protecting tunnel
        counters and cmplsfrrconstrowstatus (RowstatusEnum).
        """

        _prefix = 'CISCO-IETF-FRR-MIB'
        _revision = '2008-04-29'

        def __init__(self):
            self.parent = None
            # Keys first, then data leaves; everything starts unset.
            for leaf in ('cmplsfrrconstifindex',
                         'cmplsfrrconsttunnelindex',
                         'cmplsfrrconsttunnelinstance',
                         'cmplsfrrconstbandwidth',
                         'cmplsfrrconstexclallaffinity',
                         'cmplsfrrconstholdingprio',
                         'cmplsfrrconsthoplimit',
                         'cmplsfrrconstinclallaffinity',
                         'cmplsfrrconstinclanyaffinity',
                         'cmplsfrrconstnumprotectedtunonif',
                         'cmplsfrrconstnumprotectingtunonif',
                         'cmplsfrrconstrowstatus',
                         'cmplsfrrconstsetupprio'):
                setattr(self, leaf, None)

        @property
        def _common_path(self):
            # All three list keys must be set before an XPath can be built.
            for key in ('cmplsfrrconstifindex',
                        'cmplsfrrconsttunnelindex',
                        'cmplsfrrconsttunnelinstance'):
                if getattr(self, key) is None:
                    raise YPYModelError('Key property %s is None' % key)
            return ('/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB'
                    '/CISCO-IETF-FRR-MIB:cmplsFrrConstTable'
                    '/CISCO-IETF-FRR-MIB:cmplsFrrConstEntry'
                    '[CISCO-IETF-FRR-MIB:cmplsFrrConstIfIndex = %s]'
                    '[CISCO-IETF-FRR-MIB:cmplsFrrConstTunnelIndex = %s]'
                    '[CISCO-IETF-FRR-MIB:cmplsFrrConstTunnelInstance = %s]'
                    % (self.cmplsfrrconstifindex,
                       self.cmplsfrrconsttunnelindex,
                       self.cmplsfrrconsttunnelinstance))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # A row counts as populated when any key or data leaf is set.
            return any(getattr(self, leaf) is not None
                       for leaf in ('cmplsfrrconstifindex',
                                    'cmplsfrrconsttunnelindex',
                                    'cmplsfrrconsttunnelinstance',
                                    'cmplsfrrconstbandwidth',
                                    'cmplsfrrconstexclallaffinity',
                                    'cmplsfrrconstholdingprio',
                                    'cmplsfrrconsthoplimit',
                                    'cmplsfrrconstinclallaffinity',
                                    'cmplsfrrconstinclanyaffinity',
                                    'cmplsfrrconstnumprotectedtunonif',
                                    'cmplsfrrconstnumprotectingtunonif',
                                    'cmplsfrrconstrowstatus',
                                    'cmplsfrrconstsetupprio'))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
            return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrconsttable.Cmplsfrrconstentry']['meta_info']

    @property
    def _common_path(self):
        # Absolute XPath of the (keyless) table container.
        return '/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB/CISCO-IETF-FRR-MIB:cmplsFrrConstTable'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # The table has data when any child row does.
        entries = self.cmplsfrrconstentry
        return entries is not None and any(row._has_data() for row in entries)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
        return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrconsttable']['meta_info']
class Cmplsfrrlogtable(object):
    """
    The fast reroute log table records fast reroute events such as
    protected links going up or down or the FRR feature kicking in.

    .. attribute:: cmplsfrrlogentry

        One entry per fast reroute event. Entries are created and
        destroyed only by the agent implementation; the table size is
        bounded by the cmplsFrrLogTableMaxEntries scalar.

        **type**\\: list of :py:class:`Cmplsfrrlogentry <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry>`
    """

    _prefix = 'CISCO-IETF-FRR-MIB'
    _revision = '2008-04-29'

    def __init__(self):
        self.parent = None
        # Rows are kept in a YList; each row points back at the table.
        self.cmplsfrrlogentry = YList()
        self.cmplsfrrlogentry.parent = self
        self.cmplsfrrlogentry.name = 'cmplsfrrlogentry'

    class Cmplsfrrlogentry(object):
        """
        One fast reroute event.

        Key: cmplsfrrlogindex (0..4294967295).

        Data leaves: cmplsfrrlogeventduration (0..4294967295),
        cmplsfrrlogeventreasonstring (implementation-specific text, up to
        128 chars), cmplsfrrlogeventtime (time ticks since the event
        occurred), cmplsfrrlogeventtype (CmplsfrrlogeventtypeEnum) and
        cmplsfrrloginterface (affected ifIndex; may be 0 when
        mplsFrrConstProtectionMethod is oneToOneBackup(0)).
        """

        _prefix = 'CISCO-IETF-FRR-MIB'
        _revision = '2008-04-29'

        def __init__(self):
            self.parent = None
            for leaf in ('cmplsfrrlogindex',
                         'cmplsfrrlogeventduration',
                         'cmplsfrrlogeventreasonstring',
                         'cmplsfrrlogeventtime',
                         'cmplsfrrlogeventtype',
                         'cmplsfrrloginterface'):
                setattr(self, leaf, None)

        class CmplsfrrlogeventtypeEnum(Enum):
            """
            CmplsfrrlogeventtypeEnum

            Describes what type of fast reroute event occurred.

            .. data:: other = 1

            .. data:: protected = 2
            """

            other = 1

            protected = 2

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
                return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry.CmplsfrrlogeventtypeEnum']

        @property
        def _common_path(self):
            # The single list key must be set before the XPath can be built.
            if self.cmplsfrrlogindex is None:
                raise YPYModelError('Key property cmplsfrrlogindex is None')
            return ('/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB'
                    '/CISCO-IETF-FRR-MIB:cmplsFrrLogTable'
                    '/CISCO-IETF-FRR-MIB:cmplsFrrLogEntry'
                    '[CISCO-IETF-FRR-MIB:cmplsFrrLogIndex = %s]'
                    % (self.cmplsfrrlogindex,))

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # A row counts as populated when any key or data leaf is set.
            return any(getattr(self, leaf) is not None
                       for leaf in ('cmplsfrrlogindex',
                                    'cmplsfrrlogeventduration',
                                    'cmplsfrrlogeventreasonstring',
                                    'cmplsfrrlogeventtime',
                                    'cmplsfrrlogeventtype',
                                    'cmplsfrrloginterface'))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
            return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrlogtable.Cmplsfrrlogentry']['meta_info']

    @property
    def _common_path(self):
        # Absolute XPath of the (keyless) table container.
        return '/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB/CISCO-IETF-FRR-MIB:cmplsFrrLogTable'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # The table has data when any child row does.
        entries = self.cmplsfrrlogentry
        return entries is not None and any(row._has_data() for row in entries)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
        return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrlogtable']['meta_info']
class Cmplsfrrfacroutedbtable(object):
    """
    The mplsFrrFacRouteDBTable provides information about the fast
    reroute database. Each entry belongs to an interface, protecting
    backup tunnel and protected tunnel. MPLS interfaces defined on this
    node are protected by backup tunnels and are indexed by
    mplsFrrFacRouteProtectedIndex; backup tunnels protecting the tunnels
    traversing an interface are indexed by
    mplsFrrFacRouteProtectingTunIndex (tunnel instance implied 0, the
    tunnel head interface on the PLR). Protected tunnels — the LSPs
    traversing the protected link — are identified by the four
    mplsFrrFacRouteProtectedTun* indices.

    .. attribute:: cmplsfrrfacroutedbentry

        One protected LSP, protected by a backup tunnel and defined for a
        specific protected interface. See the mplsTunnelTable (MPLS-TE
        MIB) and ifEntry (IF-MIB) for details of the tunnels/interface.

        **type**\\: list of :py:class:`Cmplsfrrfacroutedbentry <ydk.models.cisco_ios_xe.CISCO_IETF_FRR_MIB.CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry>`
    """

    _prefix = 'CISCO-IETF-FRR-MIB'
    _revision = '2008-04-29'

    def __init__(self):
        self.parent = None
        # Rows are kept in a YList; each row points back at the table.
        self.cmplsfrrfacroutedbentry = YList()
        self.cmplsfrrfacroutedbentry.parent = self
        self.cmplsfrrfacroutedbentry.name = 'cmplsfrrfacroutedbentry'

    class Cmplsfrrfacroutedbentry(object):
        """
        One row of the FRR facility-route database: a single protected
        LSP, protected by a backup tunnel on a specific interface.

        Keys (in XPath order): cmplsfrrfacrouteprotectedifindex
        (1..2147483647), cmplsfrrfacrouteprotectingtunindex (0..65535),
        cmplsfrrfacrouteprotectedtunindex (0..65535),
        cmplsfrrfacrouteprotectedtuninstance (0..4294967295),
        cmplsfrrfacrouteprotectedtuningresslsrid and
        cmplsfrrfacrouteprotectedtunegresslsrid (4-octet strings).

        Data leaves: cmplsfrrfacrouteprotectedtunstatus
        (active/ready/partial), cmplsfrrfacrouteprotectingtunprotectiontype
        (link/node protection) and cmplsfrrfacrouteprotectingtunresvbw
        (bandwidth actually reserved by the backup tunnel on the PLR).
        """

        _prefix = 'CISCO-IETF-FRR-MIB'
        _revision = '2008-04-29'

        def __init__(self):
            self.parent = None
            for leaf in ('cmplsfrrfacrouteprotectedifindex',
                         'cmplsfrrfacrouteprotectingtunindex',
                         'cmplsfrrfacrouteprotectedtunindex',
                         'cmplsfrrfacrouteprotectedtuninstance',
                         'cmplsfrrfacrouteprotectedtuningresslsrid',
                         'cmplsfrrfacrouteprotectedtunegresslsrid',
                         'cmplsfrrfacrouteprotectedtunstatus',
                         'cmplsfrrfacrouteprotectingtunprotectiontype',
                         'cmplsfrrfacrouteprotectingtunresvbw'):
                setattr(self, leaf, None)

        class CmplsfrrfacrouteprotectedtunstatusEnum(Enum):
            """
            CmplsfrrfacrouteprotectedtunstatusEnum

            State of the protected tunnel: active — the tunnel's label is
            in the LFIB and ready to be applied to incoming packets;
            ready — the label entry exists but is not yet in the LFIB;
            partial — the label entry has not been fully created.

            .. data:: active = 1

            .. data:: ready = 2

            .. data:: partial = 3
            """

            active = 1

            ready = 2

            partial = 3

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
                return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry.CmplsfrrfacrouteprotectedtunstatusEnum']

        class CmplsfrrfacrouteprotectingtunprotectiontypeEnum(Enum):
            """
            CmplsfrrfacrouteprotectingtunprotectiontypeEnum

            Indicates type of the resource protection.

            .. data:: linkProtection = 0

            .. data:: nodeProtection = 1
            """

            linkProtection = 0

            nodeProtection = 1

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
                return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry.CmplsfrrfacrouteprotectingtunprotectiontypeEnum']

        @property
        def _common_path(self):
            # All six list keys must be set; validate them in declaration
            # order while assembling the key predicates.
            predicates = ''
            for yang_name, attr in (
                    ('cmplsFrrFacRouteProtectedIfIndex', 'cmplsfrrfacrouteprotectedifindex'),
                    ('cmplsFrrFacRouteProtectingTunIndex', 'cmplsfrrfacrouteprotectingtunindex'),
                    ('cmplsFrrFacRouteProtectedTunIndex', 'cmplsfrrfacrouteprotectedtunindex'),
                    ('cmplsFrrFacRouteProtectedTunInstance', 'cmplsfrrfacrouteprotectedtuninstance'),
                    ('cmplsFrrFacRouteProtectedTunIngressLSRId', 'cmplsfrrfacrouteprotectedtuningresslsrid'),
                    ('cmplsFrrFacRouteProtectedTunEgressLSRId', 'cmplsfrrfacrouteprotectedtunegresslsrid')):
                value = getattr(self, attr)
                if value is None:
                    raise YPYModelError('Key property %s is None' % attr)
                predicates += '[CISCO-IETF-FRR-MIB:%s = %s]' % (yang_name, value)
            return ('/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB'
                    '/CISCO-IETF-FRR-MIB:cmplsFrrFacRouteDBTable'
                    '/CISCO-IETF-FRR-MIB:cmplsFrrFacRouteDBEntry' + predicates)

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # A row counts as populated when any key or data leaf is set.
            return any(getattr(self, leaf) is not None
                       for leaf in ('cmplsfrrfacrouteprotectedifindex',
                                    'cmplsfrrfacrouteprotectingtunindex',
                                    'cmplsfrrfacrouteprotectedtunindex',
                                    'cmplsfrrfacrouteprotectedtuninstance',
                                    'cmplsfrrfacrouteprotectedtuningresslsrid',
                                    'cmplsfrrfacrouteprotectedtunegresslsrid',
                                    'cmplsfrrfacrouteprotectedtunstatus',
                                    'cmplsfrrfacrouteprotectingtunprotectiontype',
                                    'cmplsfrrfacrouteprotectingtunresvbw'))

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
            return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable.Cmplsfrrfacroutedbentry']['meta_info']

    @property
    def _common_path(self):
        # Absolute XPath of the (keyless) table container.
        return '/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB/CISCO-IETF-FRR-MIB:cmplsFrrFacRouteDBTable'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # The table has data when any child row does.
        entries = self.cmplsfrrfacroutedbentry
        return entries is not None and any(row._has_data() for row in entries)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
        return meta._meta_table['CiscoIetfFrrMib.Cmplsfrrfacroutedbtable']['meta_info']
@property
def _common_path(self):
    # Root XPath of the CISCO-IETF-FRR-MIB module container.
    return '/CISCO-IETF-FRR-MIB:CISCO-IETF-FRR-MIB'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # The whole MIB models operational (read-only) state.
    return False
def _has_data(self):
    """Return True when any child table/container holds data."""
    children = (
        self.cmplsfrrconsttable,
        self.cmplsfrrfacroutedbtable,
        self.cmplsfrrlogtable,
        self.cmplsfrrscalars,
    )
    return any(child is not None and child._has_data() for child in children)
@staticmethod
def _meta_info():
    # Deferred import of the generated meta table for this module.
    from ydk.models.cisco_ios_xe._meta import _CISCO_IETF_FRR_MIB as meta
    return meta._meta_table['CiscoIetfFrrMib']['meta_info']
|
{
"content_hash": "c403b990c5b495d16079a538b0ddf17c",
"timestamp": "",
"source": "github",
"line_count": 984,
"max_line_length": 915,
"avg_line_length": 42.11585365853659,
"alnum_prop": 0.6140630278461464,
"repo_name": "111pontes/ydk-py",
"id": "92cbd92e5edb7001a14549068f9e3b83463cd01a",
"size": "41442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_IETF_FRR_MIB.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
}
|
# Standard library
import difflib
import json
import os
import sys
from collections import OrderedDict

# Local modules
import bac
import bcm
import util
# Checkout of the generated GitHub Pages site, relative to this script.
OUT = "../../../sf4tools-gh-pages/"
# SSFIV:AE installation directory the game data files are read from.
BASE = "C:\\Program Files (x86)\\Steam\\steamapps\\common\\Super Street Fighter IV - Arcade Edition\\"
# Jekyll front matter prepended to every generated page.
HEADER = '''---
layout: default
---
'''
# Markup appended to every generated page (currently empty).
FOOTER = ''''''
def diffCollectionsHeader(col1, col2, log, level=0):
    """Write a nav-pill summary of the differences between two collections.

    Emits one <li> link per key that was removed from, added to, or changed
    between *col1* and *col2* (dict-like collections), writing to *log*
    (a writable file-like object).  The anchors match those emitted by
    diffCollections().  The surrounding <ul> belongs to the top level only.

    BUG FIX: the closing </ul> was previously written unconditionally while
    the opening tag is only written at level == 0, producing a stray </ul>
    for nested calls.
    """
    def fancy(col, key):
        # Prefer a human-readable "Name" field when the value carries one;
        # fall back to the raw key (best effort, any lookup error ignored).
        try:
            return col[key]["Name"]
        except Exception:
            return key

    old_keys = set(col1.keys())
    new_keys = set(col2.keys())

    if level == 0:
        log.write('<ul class="nav nav-pills">')
    for removed in old_keys - new_keys:
        log.write("<li class='bg-danger'><a href='#change-{0}{1}'><span class='glyphicon glyphicon-minus'></span>Removed {1}</a></li>".format(removed, fancy(col1, removed)))
    for added in new_keys - old_keys:
        log.write("<li class='bg-success'><a href='#change-{0}{1}'><span class='glyphicon glyphicon-plus'></span>Added {1}</a></li>".format(added, fancy(col2, added)))
    for both in new_keys & old_keys:
        if repr(col1[both]) == repr(col2[both]):
            continue  # unchanged entry: nothing to link to
        log.write("<li class='bg-warning'><a href='#change-{0}{1}'><span class='glyphicon glyphicon-pencil'></span>Changed {1}</a></li>".format(both, fancy(col2, both)))
    if level == 0:
        log.write("</ul>")
def diffCollections(col1, col2, log, level=0):
    """Write Bootstrap panels describing the differences between two
    dict-like collections.

    For each removed/added key the full JSON dump of the value is emitted;
    for changed keys, dict values recurse, list values get a difflib HTML
    side-by-side table, and scalars get old/new <pre> blocks.  *log* is a
    writable file-like object.

    Fixes: the precomputed intersection is now reused (it was recomputed at
    the changed-keys loop); `isinstance` replaces exact ``type() is`` checks
    so any dict subclass (not just OrderedDict) recurses correctly.
    """
    def fancy(col, key):
        # Prefer a human-readable "Name" field when the value carries one;
        # fall back to the raw key (best effort, any lookup error ignored).
        try:
            return col[key]["Name"]
        except Exception:
            return key

    old_keys = set(col1.keys())
    new_keys = set(col2.keys())

    for removed in old_keys - new_keys:
        log.write("<div class='panel panel-danger'>")
        log.write("<span class='anchor' id='change-{0}{1}'></span><div class='panel-heading'>Removed {1}</div>".format(removed, fancy(col1, removed)))
        log.write("<div class='panel-body'><pre>")
        log.write(json.dumps(col1[removed], indent=5))
        log.write("</pre></div></div>")
    for added in new_keys - old_keys:
        log.write("<div class='panel panel-success'>")
        log.write("<span class='anchor' id='change-{0}{1}'></span><div class='panel-heading'>Added {1}</div>".format(added, fancy(col2, added)))
        log.write("<div class='panel-body'><pre>")
        log.write(json.dumps(col2[added], indent=5))
        log.write("</pre></div></div>")
    for both in new_keys & old_keys:
        if repr(col1[both]) == repr(col2[both]):
            continue  # unchanged entry: no panel
        log.write("<div class='panel panel-warning'>")
        log.write("<span class='anchor' id='change-{0}{1}'></span><div class='panel-heading'>Changed {1}</div><div class='panel-body'>".format(both, fancy(col2, both)))
        if isinstance(col1[both], dict):
            # Mappings (incl. OrderedDict) are diffed key-by-key.
            diffCollections(col1[both], col2[both], log, level + 1)
        elif isinstance(col1[both], list):
            # Lists get a side-by-side HTML diff of their JSON dumps.
            differ = difflib.HtmlDiff()
            log.write(differ.make_table(json.dumps(col1[both], indent=5).splitlines(),
                                        json.dumps(col2[both], indent=5).splitlines()))
        else:
            # Scalars: plain old/new dumps.
            log.write("<div class='container-fluid'>")
            log.write("<div class='col-md-5'>Old<br />")
            log.write("<pre>")
            log.write(json.dumps(col1[both], indent=5))
            log.write("</pre></div>")
            log.write("<div class='col-md-5'>New<br />")
            log.write("<pre>")
            log.write(json.dumps(col2[both], indent=5))
            log.write("</pre></div>")
            log.write("</div>")
        log.write("</div></div>")
def getVersionData():
    """Return (paths, names): parallel lists mapping each regulation data
    directory (relative to BASE) to its human-readable version label."""
    versions = [
        ("resource\\battle\\chara", "Super"),
        ("dlc\\03_character_free\\battle\\regulation\\latest", "AE"),
        ("patch\\battle\\regulation\\latest_ae", "AE2012"),
        ("patch\\battle\\regulation\\v101", "AE2012 v1.01"),
        ("patch\\battle\\regulation\\v104", "AE2012 v1.04"),
        ("dlc\\04_ae2\\battle\\regulation\\ae2", "Ultra v1.01"),
        ("patch_ae2_tu1\\battle\\regulation\\ae2_109", "Ultra v1.02"),
        ("patch_ae2_tu1b\\battle\\regulation\\ae2_109b", "Ultra v1.03"),
        ("patch_ae2_tu2\\battle\\regulation\\ae2_110", "Ultra v1.04"),
        ("patch_ae2_tu3\\battle\\regulation\\ae2_111", "Ultra v1.05"),
    ]
    paths = [path for path, _ in versions]
    names = [name for _, name in versions]
    return paths, names
def rebuildIndex():
    """Regenerate the version-comparison matrix at OUT/_includes/table.html.

    One row per character; each cell links to the diff page between two
    versions that actually exists on disk, spanning the skipped columns.
    The header row is re-emitted every 15 characters for readability.
    """
    paths, names = getVersionData()
    global OUT, BASE
    index = open(OUT+"_includes\\table.html","w")
    charcount = 0
    # Characters are enumerated from the Ultra v1.01 regulation directory,
    # which holds one subdirectory per character.
    for char in os.listdir(BASE+"\\dlc\\04_ae2\\battle\\regulation\\ae2"):
        if charcount % 15 == 0:
            # NOTE(review): this header literal duplicates the labels from
            # getVersionData(); keep the two in sync.
            index.write( """ <tr>
<th class="brright">Character</th>
<th class="vertical" ><div>Super</div></th>
<th class="vertical" colspan="2"><div>AE</div></th>
<th class="vertical" colspan="2"><div>AE2012</div></th>
<th class="vertical" colspan="2"><div>AE2012 v1.01</div></th>
<th class="vertical" colspan="2" ><div>AE2012 v1.04</div></th>
<th class="vertical" colspan="2"><div>Ultra v1.01</div></th>
<th class="vertical" colspan="2"><div>Ultra v1.02</div></th>
<th class="vertical" colspan="2"><div>Ultra v1.03</div></th>
<th class="vertical" colspan="2"><div>Ultra v1.04</div></th>
<th class="vertical" ><div>Ultra v1.05</div></th>
</tr>""")
        charcount += 1
        print(char)
        index.write("<tr><th>{0}</th>".format(char))
        i = 0
        # For each version, find the nearest later version whose diff page
        # exists on disk and link to it; the link cell spans the skipped
        # version columns ((g - i) * 2).
        while i < len(names)-1:
            found = False
            for g in range(i+1,len(names)):
                name = names[i]+"_TO_"+names[g]
                pname = names[i]+" > "+names[g]
                if not os.path.exists(OUT+"characters\\"+char+"\\"+name+".html"):
                    pass
                    #index.write('<td colspan="2">&mdash;</td>')
                else:
                    found = True
                    index.write('<td colspan="{1}" class="success"><a href="{{{{ site.baseurl }}}}{0}">{2}</a></td>'.format("characters/"+char+"/"+name+".html",(g-i)*2,pname))
                    # Resume scanning from the version we just linked to.
                    i = g-1
                    break
            if not found:
                # No later version has a diff page: emit an empty cell.
                index.write('<td colspan="{0}">&mdash;</td>'.format(2))
            i += 1
        index.write("</tr>")
    index.close()
def compareChar(char, index=None):
    """Generate per-version diff pages for one character.

    Parses the character's .bac/.bcm files for every game version that
    ships them, dumps each parse to ../json/, then writes one HTML diff
    page (OUT/characters/<char>/<verA>_TO_<verB>.html) per adjacent pair
    of versions: a nav-pill summary first, then the detailed diff panels.

    *index* is accepted for interface compatibility but unused.

    Fixes: removed an unused local and a no-op ``isfile: pass`` check; all
    file handles are now managed with ``with`` (the old early return leaked
    an open, header-only output file); the py2-only print statements were
    replaced by sys.stdout.write with identical output.
    """
    postfix = char + "\\" + char
    global BASE, OUT
    if not os.path.exists(OUT + "characters\\" + char):
        os.makedirs(OUT + "characters\\" + char)
    paths, names = getVersionData()
    bacs = []
    bcms = []
    namescopy = list(names)
    for i, version in enumerate(list(paths)):
        # Skip versions that do not ship data for this character, dropping
        # them from the parallel lists so later indexing stays aligned.
        if not os.path.isfile(BASE + version + "\\" + postfix + ".bac"):
            sys.stdout.write(char + " doesn't have " + namescopy[i] + "\n")
            paths.remove(version)
            names.remove(namescopy[i])
            continue
        sys.stdout.write("\t" + namescopy[i] + "\n")
        # Parse and archive the .bac (moves/actions) data.
        tmp = bac.BACFile(BASE + version + "\\" + postfix + ".bac")
        with open("../json/" + char + "_" + namescopy[i] + ".bac.json", "w") as dump:
            dump.write(tmp.toJSON())
        bacs.append(tmp)
        # Parse and archive the .bcm (command/input) data.
        tmp = bcm.BCMFile(BASE + version + "\\" + postfix + ".bcm")
        with open("../json/" + char + "_" + namescopy[i] + ".bcm.json", "w") as dump:
            dump.write(tmp.toJSON())
        bcms.append(tmp)
    # Need at least two parsed versions to diff anything.  (Previously this
    # check sat inside the loop below, after the output file had already
    # been opened and the header written, leaking that handle.)
    if len(bacs) < 2 or len(bcms) < 2:
        return
    for i in range(0, len(paths) - 1):
        name = names[i] + "_TO_" + names[i + 1]
        sys.stdout.write("\tDoing  " + name + "\n")
        with open(OUT + "characters\\" + char + "\\" + name + ".html", "w") as log:
            # HEADER has no format placeholders; format() is a no-op kept
            # for compatibility should placeholders be added later.
            log.write(HEADER.format(char, name))
            firstBAC = bacs[i]
            secondBAC = bacs[i + 1]
            firstBCM = bcms[i]
            secondBCM = bcms[i + 1]
            # First pass: navigation pills for every non-list section.
            for k in firstBCM.keys():
                log.write("<a class='anchor' id='" + k + "'></a><h2>" + k + "</h2>")
                if type(firstBCM[k]) is not list:
                    diffCollectionsHeader(firstBCM[k], secondBCM[k], log)
            for k in firstBAC.keys():
                log.write("<a class='anchor' id='" + k + "'></a><h2>" + k + "</h2>")
                if type(firstBAC[k]) is not list:
                    diffCollectionsHeader(firstBAC[k], secondBAC[k], log)
            # Second pass: detailed diff panels.
            for k in firstBCM.keys():
                log.write("<a></a><h2>" + k + "</h2>")
                if type(firstBCM[k]) is list:
                    if repr(firstBCM[k]) != repr(secondBCM[k]):
                        log.write("<div class='container-fluid'>")
                        log.write("<div class='col-md-5'>Old<br />")
                        log.write("<pre>")
                        log.write(json.dumps(firstBCM[k], indent=5))
                        log.write("</pre></div>")
                        log.write("<div class='col-md-5'>New<br />")
                        log.write("<pre>")
                        log.write(json.dumps(secondBCM[k], indent=5))
                        log.write("</pre></div>")
                        log.write("</div>")
                else:
                    diffCollections(firstBCM[k], secondBCM[k], log)
            for k in firstBAC.keys():
                log.write("<a></a><h2>" + k + "</h2>")
                if type(firstBAC[k]) is list:
                    if repr(firstBAC[k]) != repr(secondBAC[k]):
                        log.write("<div class='container-fluid'>")
                        # NOTE(review): col-md-6 here vs col-md-5 in the BCM
                        # branch; kept as-is for output parity with the
                        # previous version.
                        log.write("<div class='col-md-6'>Old<br />")
                        log.write("<pre>")
                        log.write(json.dumps(firstBAC[k], indent=5))
                        log.write("</pre></div>")
                        log.write("<div class='col-md-6'>New<br />")
                        log.write("<pre>")
                        log.write(json.dumps(secondBAC[k], indent=5))
                        log.write("</pre></div>")
                        log.write("</div>")
                else:
                    diffCollections(firstBAC[k], secondBAC[k], log)
            log.write(FOOTER)
import os
# Examples of diffing a single character:
#compareChar("RYU")
#compareChar("SGT")
# Bulk mode: diff every character shipped with the AE2 regulation data.
#for char in os.listdir("C:\\Program Files (x86)\\Steam\\steamapps\\common\\Super Street Fighter IV - Arcade Edition\\dlc\\04_ae2\\battle\\regulation\\ae2"):
#    print(char)
#    compareChar(char)
# Regenerate the top-level index page from whatever diff pages exist.
rebuildIndex()
|
{
"content_hash": "6140fefc3d4acc0829dd57b5d6dd5a98",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 175,
"avg_line_length": 39.90747330960854,
"alnum_prop": 0.5291599785981809,
"repo_name": "dantarion/sf4tools",
"id": "0fa0b23ef91297fe212e692be31aaafdbfcf2c5b",
"size": "11214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SFUltraDiff/src/diff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47307"
}
],
"symlink_target": ""
}
|
"""
Module containing the FooWrapper1 class
"""
import subprocess
import time
from wopmars.utils.Logger import Logger
from wopmars.models.ToolWrapper import ToolWrapper
class FooWrapper4(ToolWrapper):
    """Example/testing ToolWrapper: declares one input and one output file,
    and on run() simply touches the output file.

    Do not modify casually -- the test suite depends on this exact behavior.
    """
    __mapper_args__ = {'polymorphic_identity': "FooWrapper4"}

    def specify_input_file(self):
        # Single declared input slot.
        return ["input1"]

    def specify_output_file(self):
        # Single declared output slot.
        return ["output1"]

    def run(self):
        Logger.instance().info(self.__class__.__name__ + " is running...")
        # Create/refresh the declared output file, then pause briefly so
        # timestamps differ between wrappers.
        proc = subprocess.Popen(["touch", self.output_file("output1")])
        proc.wait()
        time.sleep(0.1)
|
{
"content_hash": "dd109e381e8778f0e1517a988baca93d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 25.607142857142858,
"alnum_prop": 0.6513249651324965,
"repo_name": "aitgon/wopmars",
"id": "78723894f343f909f10f0c39f921cf177b1838fc",
"size": "717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wopmars/tests/resource/wrapper/FooWrapper4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "338509"
},
{
"name": "Shell",
"bytes": "1526"
}
],
"symlink_target": ""
}
|
import json
import requests
from django.conf import settings
from .models import Token
def check_token(token):
    """Return True when the remote API still accepts `token`, False on 404."""
    response = requests.get(settings.HEROES_URL.format(token))
    return response.status_code != 404
def new_token():
    """Request a fresh API key; return '' when the response has no 'apiKey'."""
    raw = requests.get(settings.GET_API_KEY_URL).content
    parsed = json.loads(raw)
    try:
        return parsed['apiKey']
    except KeyError:
        return ''
def get_token():
    '''
    Utility to get the current unexpired token
    If expired, it fetches a new one.
    '''
    try:
        record = Token.objects.get(pk=1)
    except Token.DoesNotExist:
        # No stored token yet: fetch one and persist it.
        return Token.objects.create(token=new_token())
    if not check_token(record):
        # Stored token rejected by the API: refresh in place.
        record.token = new_token()
        record.save()
    return record
def get_heroes():
    '''
    Function to fetch all the heroes
    '''
    resp = requests.get(settings.HEROES_URL.format(get_token().token))
    # Parsed unconditionally (matches original behavior even on non-200).
    payload = json.loads(resp.content)
    from heroes.heroes.functions import create_from_dict
    if resp.status_code == 200:
        create_from_dict(payload)
def post_hero(hero):
    '''
    Function to post a new hero to the API.

    Serializes `hero` and POSTs it to the heroes endpoint using the
    current token. Returns True on HTTP 201 (created), False otherwise.
    '''
    token = get_token()
    # Deferred import avoids a circular dependency at module load time.
    from heroes.heroes.serializers import HeroSerializer
    serializer = HeroSerializer(hero)
    # BUG FIX: was `setttings` (typo), which raised NameError on every call.
    resp = requests.post(settings.HEROES_URL.format(token.token),
                         data=serializer.data)
    return resp.status_code == 201
|
{
"content_hash": "5bf2ddac5cada6e08cd44dcd4b1bc0f7",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 63,
"avg_line_length": 20.08955223880597,
"alnum_prop": 0.7109955423476969,
"repo_name": "devrishik/Heroes",
"id": "da84ea520726f6c4c9e41bed0ad54b66b1a602e8",
"size": "1346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/request.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "HTML",
"bytes": "23986"
},
{
"name": "JavaScript",
"bytes": "817"
},
{
"name": "Python",
"bytes": "57731"
},
{
"name": "Shell",
"bytes": "4200"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Page(models.Model):
    """A wiki page with directed links to other pages."""
    # Directed links from this page to others; the reverse accessor
    # `inlinks` yields the pages that link here (symmetrical=False keeps
    # the relation one-directional).
    outlinks = models.ManyToManyField('self', related_name='inlinks', symmetrical=False)
    title = models.TextField()
    contents = models.TextField()
    # Probability that this page is accessed -- TODO confirm units/range with caller.
    access_probability = models.FloatField()
    # Indexed to speed up date-range queries.
    date = models.DateTimeField(db_index=True)
|
{
"content_hash": "6f2c82681f12ac42eab8ed5d0db39097",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 88,
"avg_line_length": 33,
"alnum_prop": 0.7303030303030303,
"repo_name": "eob/synckit-research",
"id": "60626ab08dd700da4be0ef02f6f09b077df5405a",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/wiki/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "65239505"
},
{
"name": "PHP",
"bytes": "15712"
},
{
"name": "Python",
"bytes": "125913"
},
{
"name": "R",
"bytes": "21637"
},
{
"name": "Shell",
"bytes": "2697"
}
],
"symlink_target": ""
}
|
import chain_flow
import datetime
import time
class Execute_Cf_Environment():
    """Drives a chain-flow interpreter: polls the wall clock ~10x/second and
    queues second/minute/hour/day tick events, then steps the interpreter.
    """

    def __init__(self, cf):
        self.cf = cf

    def execute(self):
        """Run the event loop forever; never returns.

        Re-raises any exception from cf.execute() after logging where the
        interpreter was.
        """
        time_stamp = datetime.datetime.today()
        old_day = time_stamp.day
        old_hour = time_stamp.hour
        old_minute = time_stamp.minute
        old_second = time_stamp.second
        self.cf.execute_initialize()
        while True:
            time.sleep(.1)
            #self.cf.queue_event("SUB_SECOND_TICK",10)
            time_stamp = datetime.datetime.today()
            hour = time_stamp.hour
            minute = time_stamp.minute
            second = time_stamp.second
            day = time_stamp.day
            if old_second != second:
                self.cf.queue_event("TIME_TICK", second)
            if old_minute != minute:
                self.cf.queue_event("MINUTE_TICK", minute)
            if old_hour != hour:
                # BUG FIX: previously passed `minute`; an hour event should
                # carry the new hour value.
                self.cf.queue_event("HOUR_TICK", hour)
            if old_day != day:
                self.cf.queue_event("DAY_TICK", day)
            old_hour = hour
            old_minute = minute
            old_second = second
            old_day = day
            try:
                self.cf.execute()
            except:
                # Report where the interpreter failed, then re-raise
                # (bare except kept deliberately so the context is always
                # printed, even for KeyboardInterrupt).
                print("chain flow exception")
                print("current chain is ", self.cf.current_chain["name"])
                print("current link is ", self.cf.current_link)
                raise
class CF_Interpreter(chain_flow.CF_Base_Interpreter):
    """Thin builder API over CF_Base_Interpreter.

    Each helper appends one link (an opcode name plus its parameter list)
    to the chain currently being defined, via insert_link.
    """

    def __init__(self):
        chain_flow.CF_Base_Interpreter.__init__(self)

    def terminate(self, link_name):
        # NOTE(review): registers an opcode handler instead of inserting a
        # link, and `link_name` is unused. Preserved as-is -- confirm intent.
        self.opcodes["Terminate"] = self.terminate_code

    def halt(self, link):
        self.insert_link(link, "Halt", [])

    def one_step(self, link, function, parameters):
        # NOTE(review): opcode "Reset" here looks copy-pasted from reset()
        # below -- verify against the interpreter's opcode table.
        self.insert_link(link, "Reset", [function, parameters])

    def reset(self, link):
        self.insert_link(link, "Reset", [])

    def send_event(self, link, event_name, data):
        # BUG FIX: the original signature omitted `link` while the body used
        # it, so every call raised NameError. The signature now follows the
        # (self, link, ...) convention used by every other builder here.
        self.insert_link(link, "SendEvent", [event_name, data])

    # note: python day-of-week is Monday 0 .. Sunday 6
    def wait_tod(self, link, dow, hour, minute, second):
        self.insert_link(link, "WaitTod", [dow, hour, minute, second])

    def wait_event(self, link, event_name):
        self.insert_link(link, "WaitEvent", [event_name])

    def wait_time(self, link, time_tick):
        self.insert_link(link, "WaitTime", [time_tick])

    def wait_event_reset(self, link, event_name, time_count):
        self.insert_link(link, "WaitEvent_Reset", [event_name, time_count, 0])

    def wait_event_count(self, link, event_name, count):
        self.insert_link(link, "WaitEvent_Reset", [event_name, count, 0])

    def wait_condition(self, link, function, parameters):
        # The original defined wait_condition twice; the earlier variant
        # (opcode "Wait") was shadowed by this one, so "Wait_Reset" was the
        # only behavior ever in effect. The dead duplicate was removed.
        self.insert_link(link, "Wait_Reset", [function, parameters])

    def verify_condition(self, link, function, parameters):
        self.insert_link(link, "Verify", [function, parameters])

    def nop(self, link):
        # NOTE(review): called without a parameter list, unlike its
        # siblings; relies on insert_link's default -- confirm.
        self.insert_link(link, "Nop")

    def log(self, link, message):
        self.insert_link(link, "Log", [message])

    def enable_chain(self, link, chain_names):
        self.insert_link(link, "Enable_Chain", [chain_names])

    def disable_chain(self, link, chain_names):
        self.insert_link(link, "Disable_Chain", [chain_names])

    def init_state(self, link):
        # NOTE(review): registers this method itself as the opcode handler
        # and inserts no link; preserved as-is -- confirm intent.
        self.opcodes["Init_State_Machine"] = self.init_state

    def change_state(self, link, chain, state):
        self.insert_link(link, "Change_State", [chain, state])

    def system_reset(self, link):
        self.insert_link(link, "RESET_SYSTEM")
# test code
if __name__ == "__main__":
    # Smoke test: build two always-enabled chains that each log a message
    # and then reset, then pump ten TEST events through the interpreter.
    cf = CF_Interpreter()
    cf.define_chain( "Chain_1", True )
    cf.log( "test1","Chain_1 +++ is printed" )
    cf.reset("test2")
    cf.define_chain( "Chain_2", True )
    cf.log( "test1","Chain_2 +++ is printed" )
    cf.reset("test2")
    cf.execute_initialize()
    for i in range(0,10):
        print( i )
        cf.queue_event("TEST", [] )
        cf.execute( )
    print("done")
|
{
"content_hash": "55a32cbffbb47613a5456825cd593d65",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 73,
"avg_line_length": 26.67515923566879,
"alnum_prop": 0.579512893982808,
"repo_name": "glenn-edgar/local_controller_2",
"id": "f74994e64ddb29be14c738d7dc623dc75f682c27",
"size": "4188",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py_cf/cf_interpreter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1392"
},
{
"name": "Batchfile",
"bytes": "2452"
},
{
"name": "CSS",
"bytes": "3169864"
},
{
"name": "HTML",
"bytes": "1762520"
},
{
"name": "JavaScript",
"bytes": "7044628"
},
{
"name": "Makefile",
"bytes": "5136"
},
{
"name": "PHP",
"bytes": "93357"
},
{
"name": "Python",
"bytes": "3189928"
},
{
"name": "Shell",
"bytes": "532"
},
{
"name": "Smalltalk",
"bytes": "189"
},
{
"name": "TeX",
"bytes": "3153"
}
],
"symlink_target": ""
}
|
from kubernetes_py.utils import is_valid_string, filter_model
class ConfigMapEnvSource(object):
    """
    https://kubernetes.io/docs/api-reference/v1.8/#configmapenvsource-v1-core
    Selects a ConfigMap whose Data entries are exposed to a container as
    environment variables (key-value pairs).
    """

    def __init__(self, model=None):
        super(ConfigMapEnvSource, self).__init__()
        self._name = None
        self._optional = None
        if model is not None:
            self._build_with_model(filter_model(model))

    def _build_with_model(self, model=None):
        # Populate via the property setters so validation applies.
        if "name" in model:
            self.name = model["name"]
        if "optional" in model:
            self.optional = model["optional"]

    # ------------------------------------------------------------------------------------- name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name=None):
        # Name must be a non-empty-ish string per is_valid_string.
        if not is_valid_string(name):
            raise SyntaxError("ConfigMapVolumeSource: name: [ {0} ] is invalid.".format(name))
        self._name = name

    # ------------------------------------------------------------------------------------- optional

    @property
    def optional(self):
        return self._optional

    @optional.setter
    def optional(self, v=None):
        if not isinstance(v, bool):
            raise SyntaxError("ConfigMapVolumeSource: optional: [ {0} ] is invalid.".format(v))
        self._optional = v

    # ------------------------------------------------------------------------------------- serialize

    def serialize(self):
        # Emit only the fields that were explicitly set.
        data = {}
        for key, value in (("name", self._name), ("optional", self._optional)):
            if value is not None:
                data[key] = value
        return data
|
{
"content_hash": "a12bc3e6c42152380fb69bcfb33ca39d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 114,
"avg_line_length": 31.278688524590162,
"alnum_prop": 0.5225366876310272,
"repo_name": "mnubo/kubernetes-py",
"id": "db661ce0151cdfbdae024e47c519a25f85b0d2ff",
"size": "2086",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kubernetes_py/models/v1/ConfigMapEnvSource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1073836"
},
{
"name": "Shell",
"bytes": "423"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this
    # project's settings, then dispatch the command-line arguments.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "spring_batch_dashboard.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
{
"content_hash": "1d87aee6619b0121182bb94ef9e65861",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 86,
"avg_line_length": 38,
"alnum_prop": 0.62531328320802,
"repo_name": "vishu-guntupalli/spring-batch-dashboard",
"id": "cd11932a93c9184f0e0114e038fe8f0e59015ad7",
"size": "820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spring_batch_dashboard/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "896"
},
{
"name": "Dockerfile",
"bytes": "812"
},
{
"name": "HTML",
"bytes": "8010"
},
{
"name": "JavaScript",
"bytes": "3367"
},
{
"name": "Python",
"bytes": "20354"
}
],
"symlink_target": ""
}
|
from typing import Union
from selene.core.entity import Element
from selene.core.wait import Command
# noinspection PyPep8Naming
class js:
    """JavaScript-injection variants of common element/browser commands.

    Each attribute or factory yields a Command whose action is performed
    via execute_script instead of native WebDriver calls. Commands taking
    an `entity` accept either a single element or an iterable of elements.
    """

    @staticmethod
    def set_value(value: Union[str, int]) -> Command[Element]:
        """Command that replaces the element's value, truncating to any
        `maxlength` attribute the element declares."""

        def fn(element: Element):
            element.execute_script(
                """
                var text = arguments[0];
                var maxlength = element.getAttribute('maxlength') === null
                    ? -1
                    : parseInt(element.getAttribute('maxlength'));
                element.value = maxlength === -1
                    ? text
                    : text.length <= maxlength
                        ? text
                        : text.substring(0, maxlength);
                return null;
                """,
                str(value),
            )

        return Command(f'set value by js: {value}', fn)

    @staticmethod
    def type(keys: Union[str, int]) -> Command[Element]:
        """Command that appends `keys` to the element's current value,
        truncating to any `maxlength` attribute."""

        def fn(element: Element):
            element.execute_script(
                """
                textToAppend = arguments[0];
                var value = element.value || '';
                var text = value + textToAppend;
                var maxlength = element.getAttribute('maxlength') === null
                    ? -1
                    : parseInt(element.getAttribute('maxlength'));
                element.value = maxlength === -1
                    ? text
                    : text.length <= maxlength
                        ? text
                        : text.substring(0, maxlength);
                return null;
                """,
                str(keys),
            )

        # BUG FIX: description previously read 'set value by js: ...'
        # (copy-pasted from set_value); it now names the actual action.
        return Command(f'type by js: {keys}', fn)

    scroll_into_view = Command(
        'scroll into view',
        lambda element: element.execute_script('element.scrollIntoView(true)'),
    )

    click = Command(
        'click',
        lambda element: element.execute_script('element.click()'),
    )

    clear_local_storage = Command(
        'clear local storage',
        lambda browser: browser.driver.execute_script(
            'window.localStorage.clear()'
        ),
    )

    clear_session_storage = Command(
        # BUG FIX: description previously said 'clear local storage'
        # (copy-paste); it now matches what the command does.
        'clear session storage',
        lambda browser: browser.driver.execute_script(
            'window.sessionStorage.clear()'
        ),
    )

    remove = Command(
        'remove',
        lambda entity: (
            entity.execute_script('element.remove()')
            if not hasattr(entity, '__iter__')
            else [
                element.execute_script('element.remove()')
                for element in entity
            ]
        ),
    )

    @staticmethod
    def set_style_property(
        name: str, value: Union[str, int]
    ) -> Command[Element]:
        """Command that sets an inline CSS style property on the element
        (or on each element of an iterable)."""
        return Command(
            f'set element.style.{name}="{value}"',
            lambda entity: (
                entity.execute_script(f'element.style.{name}="{value}"')
                if not hasattr(entity, '__iter__')
                else [
                    element.execute_script(f'element.style.{name}="{value}"')
                    for element in entity
                ]
            ),
        )

    set_style_display_to_none = Command(
        'set element.style.display="none"',
        lambda entity: (
            entity.execute_script('element.style.display="none"')
            if not hasattr(entity, '__iter__')
            else [
                element.execute_script('element.style.display="none"')
                for element in entity
            ]
        ),
    )

    set_style_display_to_block = Command(
        'set element.style.display="block"',
        lambda entity: (
            entity.execute_script('element.style.display="block"')
            if not hasattr(entity, '__iter__')
            else [
                element.execute_script('element.style.display="block"')
                for element in entity
            ]
        ),
    )

    set_style_visibility_to_hidden = Command(
        'set element.style.visibility="hidden"',
        lambda entity: (
            entity.execute_script('element.style.visibility="hidden"')
            if not hasattr(entity, '__iter__')
            else [
                element.execute_script('element.style.visibility="hidden"')
                for element in entity
            ]
        ),
    )

    set_style_visibility_to_visible = Command(
        'set element.style.visibility="visible"',
        lambda entity: (
            entity.execute_script('element.style.visibility="visible"')
            if not hasattr(entity, '__iter__')
            else [
                element.execute_script('element.style.visibility="visible"')
                for element in entity
            ]
        ),
    )
|
{
"content_hash": "7828153fad43a282036a9ac1396a66fd",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 31.5364238410596,
"alnum_prop": 0.49601007979840406,
"repo_name": "yashaka/selene",
"id": "ecd9cef2104013b9f84bd2c7381a7c049c2510c9",
"size": "5878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "selene/core/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4424"
},
{
"name": "JavaScript",
"bytes": "4519"
},
{
"name": "Python",
"bytes": "371020"
},
{
"name": "Shell",
"bytes": "677"
}
],
"symlink_target": ""
}
|
# Development-only Django settings: verbose errors, console email,
# dummy cache, debug toolbar hooks.
from .common import *
DEBUG = True
# Print outgoing email to the console instead of sending it.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Dummy cache: accepts writes, never returns hits.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
# Accept any Host header in development.
ALLOWED_HOSTS = ['*']
# Extra middleware/apps for django-debug-toolbar; leading underscore
# suggests these are merged into the real settings elsewhere -- confirm.
_MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
)
_INSTALLED_APPS = (
    'debug_toolbar',
)
# IPs allowed to see django-debug-toolbar output.
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
    'SHOW_TOOLBAR_CALLBACK': None,
    'EXTRA_SIGNALS': [],
    'HIDE_DJANGO_SQL': True,
    'SHOW_TEMPLATE_CONTEXT': True,
    'TAG': 'body',
}
|
{
"content_hash": "6f090d6222b383a8edbe05cb6ddc8f32",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 65,
"avg_line_length": 19.393939393939394,
"alnum_prop": 0.6453125,
"repo_name": "LucasMagnum/pyexplain",
"id": "4a29b8a9c4bd6287c2ee8b7c994bc0a09ac096fa",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyexplain/pyexplain/settings/dev.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5450"
},
{
"name": "HTML",
"bytes": "15419"
},
{
"name": "JavaScript",
"bytes": "25539"
},
{
"name": "Python",
"bytes": "32714"
}
],
"symlink_target": ""
}
|
def test_formula(salt):
'''
Test that the states are synced to minion
'''
dirs = salt('cp.list_master_dirs')
assert 'states' in dirs
def test_wordpress_module(salt):
'''
Test that the wordpress dir grain was set on the minion
'''
wordpressdir = salt('grains.get', 'wordpressdir')
assert salt('wordpress.is_installed', wordpressdir)
|
{
"content_hash": "48e563a6209381ba71990ef016b8749c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 28.692307692307693,
"alnum_prop": 0.6541554959785523,
"repo_name": "saltstack/salt",
"id": "ac23d2c543c5ea392b650a3f9c73ef6ceb852bc9",
"size": "373",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kitchen/tests/wordpress/tests/salt/test_salt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from base64 import b64encode
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.networkinterface import (
NetworkInterfaceCollection, NetworkInterfaceSpecification)
from .clusterconfig import ClusterConfiguration
from .instanceinfo import get_instance_id, get_region, get_vpc_id
import sys
from sys import argv
from time import gmtime, sleep, strftime, time
amazon_linux_ami = {
"ap-northeast-1": "ami-4985b048",
"ap-southeast-1": "ami-ac5c7afe",
"ap-southeast-2": "ami-63f79559",
"cn-north-1": "ami-ce46d4f7",
"eu-central-1": "ami-b43503a9",
"eu-west-1": "ami-6e7bd919",
"sa-east-1": "ami-8737829a",
"us-east-1": "ami-b66ed3de",
"us-west-1": "ami-4b6f650e",
"us-west-2": "ami-b5a7ea85",
}
init_script = """\
#!/bin/sh
hostname '%(nodename)s'
instance_id=`curl --silent http://169.254.169.254/latest/meta-data/instance-id`
aws --region %(region)s ec2 create-tags --resources $instance_id --tags \
'Key=SLURMHostname,Value=%(nodename)s' \
'Key=SLURMS3Root,Value=%(slurm_s3_root)s' \
'Key=Name,Value=SLURM Computation Node %(nodename)s'
cat > /etc/slurm-ec2.conf <<.EOF
%(slurm_ec2_conf)s
.EOF
if [[ ! -z "%(os_packages)s" ]]; then
yum -y install %(os_packages)s;
fi;
for package in %(external_packages)s; do
tmpdir=`mktemp -d`
aws s3 cp %(slurm_s3_root)s/packages/$package $tmpdir/$package
case $package in
*.rpm )
rpm --install $tmpdir/$package;;
*.tgz | *.tar.gz )
tar -C / -x -z -f $tmpdir/$package;;
*.tbz2 | *.tar.bz2 )
tar -C / -x -j -f $tmpdir/$package;;
*.tZ | *.tar.Z )
tar -C / -x -Z -f $tmpdir/$package;;
* )
chmod 755 $tmpdir/$package;
mv $tmpdir/$package /usr/bin;;
esac;
rm -rf $tmpdir;
done
aws s3 cp %(slurm_s3_root)s/packages/slurm-ec2-bootstrap \
/usr/bin/slurm-ec2-bootstrap
chmod 755 /usr/bin/slurm-ec2-bootstrap
/usr/bin/slurm-ec2-bootstrap --slurm-s3-root '%(slurm_s3_root)s'
"""
def start_logging():
    """Redirect this process's stdout and stderr to the powersave log
    (opened in append mode)."""
    log_fd = open("/var/log/slurm/slurm-ec2-powersave.log", "a")
    sys.stdout = log_fd
    sys.stderr = log_fd
def start_node():
    """SLURM ResumeProgram entry point: launch the EC2 instance for one node.

    Expects argv = [prog, nodename]. Builds run_instances/request_spot_instances
    keyword arguments from the cluster configuration (AMI, instance profile,
    key, instance type, optional spot bid, fixed private IP, ephemeral disks,
    bootstrap user-data), then either runs an on-demand instance and tags it,
    or files a spot request. Returns 0 on success, 1 on usage/connection error.

    NOTE(review): this is Python 2 code (`xrange`, b64encode on str).
    """
    start_logging()
    print(" ".join(argv))
    if len(argv) != 2:
        print("Usage: %s <nodename>" % (argv[0],), file=sys.stderr)
        return 1
    nodename = argv[1]
    cc = ClusterConfiguration.from_config()
    region = get_region()
    ec2 = boto.ec2.connect_to_region(region)
    if not ec2:
        print("Could not connect to EC2 endpoint in region %r" % (region,),
              file=sys.stderr)
        return 1
    kw = {}
    slurm_s3_root = cc.slurm_s3_root
    # Fall back to the stock Amazon Linux AMI when no compute AMI is set.
    kw['image_id'] = (
        cc.compute_ami if cc.compute_ami is not None
        else amazon_linux_ami[region])
    # The instance profile may be given as a full ARN or a bare name.
    if cc.instance_profile is not None:
        if cc.instance_profile.startswith("arn:"):
            kw['instance_profile_arn'] = cc.instance_profile
        else:
            kw['instance_profile_name'] = cc.instance_profile
    kw['key_name'] = cc.key_name
    kw['instance_type'] = cc.compute_instance_type
    # A bid price switches us to a spot request, valid for 24 hours.
    if cc.compute_bid_price is not None:
        end = time() + 24 * 60 * 60 # FIXME: Don't hardcode this.
        kw['price'] = cc.compute_bid_price
        kw['valid_until'] = strftime("%Y-%m-%dT%H:%M:%SZ", gmtime(end))
    # Each nodename maps to a fixed private address/subnet.
    node_address = cc.get_address_for_nodename(nodename)
    node_subnet = cc.get_subnet_for_address(node_address)
    # Render the bootstrap script and pass it base64-encoded as user-data.
    user_data = init_script % {
        "region": region,
        "nodename": nodename,
        "os_packages": " ".join(
            cc.compute_os_packages
            if cc.compute_os_packages is not None
            else []),
        "external_packages": " ".join(
            cc.compute_external_packages
            if cc.compute_external_packages is not None
            else []),
        "slurm_ec2_conf": cc.slurm_ec2_configuration,
        "slurm_s3_root": slurm_s3_root,
    }
    user_data = b64encode(user_data)
    kw['user_data'] = user_data
    # Map the ethernet interface to the correct IP address
    eth0 = NetworkInterfaceSpecification(
        associate_public_ip_address=True,
        delete_on_termination=True,
        device_index=0,
        groups=cc.security_groups,
        private_ip_address=str(node_address),
        subnet_id=node_subnet.id)
    kw['network_interfaces'] = NetworkInterfaceCollection(eth0)
    # Attach any ephemeral storage devices
    block_device_map = BlockDeviceMapping()
    block_device_map['/dev/xvda'] = BlockDeviceType(size=32, volume_type="gp2")
    devices = cc.ephemeral_stores[cc.compute_instance_type]
    for i, device in enumerate(devices):
        # Ephemeral disks are mapped to /dev/sdb, /dev/sdc, ...
        drive = "/dev/sd" + chr(ord('b') + i)
        block_device_map[drive] = BlockDeviceType(
            ephemeral_name="ephemeral%d" % i)
    kw['block_device_map'] = block_device_map
    if cc.compute_bid_price is None:
        print("run_instances: %r" % kw)
        reservation = ec2.run_instances(**kw)
        tags = {
            'SLURMHostname': nodename,
            'SLURMS3Root': slurm_s3_root,
            'Name': "SLURM Computation Node %s" % nodename,
        }
        print("instances: %s" %
              " ".join([instance.id for instance in reservation.instances]))
        # create-tags can fail at times since the tag resource database is
        # a bit behind EC2's actual state.
        for i in xrange(10):
            try:
                ec2.create_tags([
                    instance.id for instance in reservation.instances], tags)
                break
            except Exception as e:
                print("Failed to tag instance: %s" % e, file=sys.stderr)
                sleep(0.5 * i)
    else:
        print("request_spot_instances: %r" % kw, file=sys.stderr)
        requests = ec2.request_spot_instances(**kw)
        print("requests: %s" % " ".join([request.id for request in requests]))
    return 0
def stop_node():
    """SLURM SuspendProgram entry point: terminate the EC2 instance(s)
    tagged with the given nodename. Returns 0 on success, 1 on error."""
    start_logging()
    print(" ".join(argv))
    if len(argv) != 2:
        print("Usage: %s <nodename>" % (argv[0],), file=sys.stderr)
        return 1
    nodename = argv[1]
    # Loaded for its side effects/validation, mirroring start_node().
    cc = ClusterConfiguration.from_config()
    ec2 = boto.ec2.connect_to_region(get_region())
    # Find instances by the SLURMHostname tag written at launch.
    instances = ec2.get_only_instances(
        filters={"tag:SLURMHostname": nodename})
    if len(instances) == 0:
        print("No instances found for %r" % nodename)
        return 1
    instance_ids = [instance.id for instance in instances]
    print("Terminating instance(s): %s" % " ".join(instance_ids))
    ec2.terminate_instances(instance_ids)
    return 0
|
{
"content_hash": "5f0453bf092e4285404d3f19824f79f0",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 31.985645933014354,
"alnum_prop": 0.6016454749439043,
"repo_name": "dacut/slurm-ec2-utils",
"id": "433d20bc4b3203318e5fae455793550f103011fb",
"size": "6703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slurmec2utils/powersave.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51620"
},
{
"name": "Shell",
"bytes": "16558"
}
],
"symlink_target": ""
}
|
"""Class that is responsible for building and assessing proposed.
bonding patterns.
"""
import operator
from typing import List
import numpy as np
from smu import dataset_pb2
from smu.parser import smu_utils_lib
class MatchingParameters:
  """A class to specify optional matching parameters for TopologyMolecule.place_bonds."""

  def __init__(self):
    # Require every candidate bond to be matched (see TopologyMolecule).
    self.must_match_all_bonds: bool = True
    # SMILES generation options -- TODO confirm exact effect downstream.
    self.smiles_with_h: bool = False
    self.smiles_with_labels: bool = False
    # A variant on matching is to consider all N and O as neutral forms during
    # matching, and then as a post processing step, see whether a valid,
    # neutral, molecule can be formed.
    self.neutral_forms_during_bond_matching: bool = True
    # If not a bond is being considered during matching.
    self.consider_not_bonded = True
    # Avoid destroying rings if not bonded is enabled.
    # Note that only the ring atom count is considered.
    self.ring_atom_count_cannot_decrease = False
    # Should we verify that the hydrogens have an appropriate bond length?
    self.check_hydrogen_dists = False
def add_bond(a1, a2, btype, destination):
  """Append a Bond between atoms `a1` and `a2` to `destination.bonds`.

  Args:
    a1: index of the first atom.
    a2: index of the second atom.
    btype: integer bond order, translated via INTEGER_TO_BOND_TYPE.
    destination: BondTopology receiving the new bond.
  """
  new_bond = dataset_pb2.BondTopology.Bond(
      atom_a=a1,
      atom_b=a2,
      bond_type=smu_utils_lib.INTEGER_TO_BOND_TYPE[btype])
  destination.bonds.append(new_bond)
class TopologyMolecule:
"""Holds information about partially built molecules."""
  def __init__(self, hydrogens_attached, bonds_to_scores, matching_parameters):
    """Class to perform bonding assessments.

    Args:
      hydrogens_attached: a BondTopology that has all atoms, and the bonds
        associated with the Hydrogen atoms.
      bonds_to_scores: A dict that maps tuples of pairs of atoms, to a numpy
        array of scores [0,3], for each possible bond type.
      matching_parameters: contains possible optional behaviour modifiers.

    Returns:
    """
    self._starting_bond_topology = hydrogens_attached
    self._natoms = len(hydrogens_attached.atoms)
    # Count of non-hydrogen atoms.
    self._heavy_atoms = sum(1 for atom in hydrogens_attached.atoms
                            if atom != dataset_pb2.BondTopology.ATOM_H)
    self._contains_both_oxygen_and_nitrogen = False
    # If the molecule contains both N and O atoms, then we can
    # do more extensive atom type matching if requested.
    if matching_parameters.neutral_forms_during_bond_matching:
      self.set_contains_both_oxygen_and_nitrogen(hydrogens_attached)
    # For each atom, the maximum number of bonds that can be attached.
    self._max_bonds = np.zeros(self._natoms, dtype=np.int32)
    if matching_parameters.neutral_forms_during_bond_matching and self._contains_both_oxygen_and_nitrogen:
      for i in range(0, self._natoms):
        self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS_ANY_FORM[
            hydrogens_attached.atoms[i]]
    else:
      for i in range(0, self._natoms):
        self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS[
            hydrogens_attached.atoms[i]]
    # With the Hydrogens attached, the number of bonds to each atom.
    self._bonds_with_hydrogens_attached = np.zeros((self._natoms),
                                                   dtype=np.int32)
    for bond in hydrogens_attached.bonds:
      self._bonds_with_hydrogens_attached[bond.atom_a] += 1
      self._bonds_with_hydrogens_attached[bond.atom_b] += 1
    self._current_bonds_attached = np.zeros((self._natoms), dtype=np.int32)
    # We turn bonds_to_scores into two arrays. So they can be iterated
    # via itertools.
    self._bonds = list(bonds_to_scores.keys())
    self._scores = list(bonds_to_scores.values())
    # Initialize for probability type accumulation
    self._initial_score = 1.0
    self._accumulate_score = operator.mul
    # For testing, it can be convenient to allow for partial matches
    # For example this allows matching C-C and C=C without the need
    # to add explicit hydrogens
    self._must_match_all_bonds = matching_parameters.must_match_all_bonds
def set_contains_both_oxygen_and_nitrogen(self, bt):
"""Examine `bt` and set self._contains_both_oxygen_and_nitrogen.
Args:
bt: BondTopology
"""
self._contains_both_oxygen_and_nitrogen = False
oxygen_count = 0
nitrogen_count = 0
for atom in bt.atoms:
if atom in [
dataset_pb2.BondTopology.ATOM_N, dataset_pb2.BondTopology.ATOM_NPOS
]:
nitrogen_count += 1
elif atom in [
dataset_pb2.BondTopology.ATOM_O, dataset_pb2.BondTopology.ATOM_ONEG
]:
oxygen_count += 1
if oxygen_count > 0 and nitrogen_count > 0:
self._contains_both_oxygen_and_nitrogen = True
def set_initial_score_and_incrementer(self, initial_score, op):
"""Update values used for computing scores."""
self._initial_score = initial_score
self._accumulate_score = op
def _initialize(self):
"""Make the molecule ready for adding bonds between heavy atoms."""
self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)
def _place_bond(self, a1, a2, btype):
"""Possibly add a new bond to the current config.
If the bond can be placed, updates self._current_bonds_attached for
both `a`` and `a2`.
Args:
a1:
a2:
btype:
Returns:
Bool.
"""
if self._current_bonds_attached[a1] + btype > self._max_bonds[a1]:
return False
if self._current_bonds_attached[a2] + btype > self._max_bonds[a2]:
return False
self._current_bonds_attached[a1] += btype
self._current_bonds_attached[a2] += btype
return True
def generate_search_state(self):
"""For each pair of atoms, return a list of plausible bond types.
This will be passed to itertools.product, which thereby enumerates all
possible bonding combinations.
Args:
Returns:
List of lists - one for each atom pair.
"""
result: List[List[int]] = []
for ndx in range(0, len(self._bonds)):
# For each pair of atoms, the plausible bond types - non zero score.
plausible_types: List[int] = []
for i, score in enumerate(self._scores[ndx]):
if score > 0.0:
plausible_types.append(i)
result.append(plausible_types)
return result
  def place_bonds_inner(self, state):
    """Place bonds corresponding to `state`.

    No validity checking is done, the calling function is responsible
    for that.

    Args:
      state: for each pair of atoms, the kind of bond to be placed.

    Returns:
      If successful, a BondTopology with a `score` accumulated via
      self._accumulate_score; None if a bond cannot be placed or some
      heavy atom ends up with no bonds at all.
    """
    # Reset counts to the hydrogens-only baseline before placing bonds.
    self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)

    result = dataset_pb2.BondTopology()
    result.CopyFrom(self._starting_bond_topology)  # only Hydrogens attached.
    result.score = self._initial_score

    # Make sure each atoms gets at least one bond
    atom_got_bond = np.zeros(self._heavy_atoms)

    for i, btype in enumerate(state):
      if btype != dataset_pb2.BondTopology.BOND_UNDEFINED:
        a1 = self._bonds[i][0]
        a2 = self._bonds[i][1]

        # _place_bond fails if either atom would exceed its valence.
        if not self._place_bond(a1, a2, btype):
          return None

        add_bond(a1, a2, btype, result)
        atom_got_bond[a1] = 1
        atom_got_bond[a2] = 1
        # Fold this bond's score into the running topology score.
        result.score = self._accumulate_score(result.score,
                                              self._scores[i][btype])
    if not np.all(atom_got_bond):
      return None

    return result
def place_bonds(self, state, matching_parameters):
"""Place bonds corresponding to `state`.
Args:
state: bonding pattern to be placed.
matching_parameters: optional settings
Returns:
If successful, a BondTopology
"""
bt = self.place_bonds_inner(state)
if not bt:
return None
if matching_parameters.neutral_forms_during_bond_matching and self._contains_both_oxygen_and_nitrogen:
if not self.assign_charged_atoms(bt):
return None
# all bonds matched has already been checked.
return bt
# Optionally check whether all bonds have been matched
if not self._must_match_all_bonds:
return bt
if not np.array_equal(self._current_bonds_attached, self._max_bonds):
return None
return bt
def assign_charged_atoms(self, bt):
"""Assign (N, N+) and (O, O-) possibilities in `bt`.
bt must contain both N and O atoms.
Note that we assume _must_match_all_bonds, and return None if that cannot
be achieved.
Args:
bt: BondTopology, bt.atoms are updated in place
Returns:
True if successful, False otherwise
"""
carbon = dataset_pb2.BondTopology.ATOM_C
hydrogen = dataset_pb2.BondTopology.ATOM_H
fluorine = dataset_pb2.BondTopology.ATOM_F
nitrogen = dataset_pb2.BondTopology.ATOM_N
npos = dataset_pb2.BondTopology.ATOM_NPOS
oxygen = dataset_pb2.BondTopology.ATOM_O
oneg = dataset_pb2.BondTopology.ATOM_ONEG
net_charge = 0
for i, atom in enumerate(bt.atoms):
if atom in [carbon, hydrogen, fluorine]:
if self._max_bonds[i] != self._current_bonds_attached[i]:
return False
elif atom in [nitrogen, npos]:
if self._current_bonds_attached[i] == 4:
bt.atoms[i] = npos
net_charge += 1
elif self._current_bonds_attached[i] == 3:
bt.atoms[i] = nitrogen
else:
return False
elif atom in [oxygen, oneg]:
if self._current_bonds_attached[i] == 2:
bt.atoms[i] = oxygen
elif self._current_bonds_attached[i] == 1:
bt.atoms[i] = oneg
net_charge -= 1
else: # not attached.
return False
if net_charge != 0:
return False
return True
|
{
"content_hash": "5e7da0ffaca89d867c290f28c2682a13",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 106,
"avg_line_length": 32.20065789473684,
"alnum_prop": 0.6527735212994177,
"repo_name": "google-research/google-research",
"id": "90de92e32dc740256962b58783421b63a9ff9fdb",
"size": "11582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smu/geometry/topology_molecule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
"""
Systemd services
================
This module provides low-level tools for managing `systemd`_ services.
.. _systemd: http://www.freedesktop.org/wiki/Software/systemd
"""
from __future__ import print_function
from fabric.api import hide, settings
from burlap.utils import run_as_root
def action(action, service):
    """Run a ``systemctl`` sub-command against ``service``, as root."""
    command = 'systemctl %s %s.service' % (action, service)
    return run_as_root(command)
def enable(service):
    """
    Enable a service (so it starts at boot).

    ::

        burlap.systemd.enable('httpd')

    .. note:: This function is idempotent.
    """
    action('enable', service)
def disable(service):
    """
    Disable a service (so it does not start at boot).

    ::

        burlap.systemd.disable('httpd')

    .. note:: This function is idempotent.
    """
    action('disable', service)
def is_running(service):
    """
    Check if a service is running.

    ::

        if burlap.systemd.is_running('httpd'):
            print("Service httpd is running!")
    """
    # warn_only keeps a non-zero exit status (service stopped) from aborting
    # the task; ``.succeeded`` reflects the ``systemctl status`` exit status.
    with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):
        return action('status', service).succeeded
def start(service):
    """
    Start a service.

    ::

        if not burlap.systemd.is_running('httpd'):
            burlap.systemd.start('httpd')

    .. note:: This function is idempotent.
    """
    action('start', service)
def stop(service):
    """
    Stop a service.

    ::

        if burlap.systemd.is_running('foo'):
            burlap.systemd.stop('foo')

    .. note:: This function is idempotent.
    """
    action('stop', service)
def restart(service):
    """
    Restart a service (stops it first if it is already running).

    ::

        if burlap.systemd.is_running('httpd'):
            burlap.systemd.restart('httpd')
        else:
            burlap.systemd.start('httpd')
    """
    action('restart', service)
def reload(service):  # pylint: disable=redefined-builtin
    """
    Reload a service's configuration without restarting it.

    ::

        burlap.systemd.reload('foo')

    .. warning::
        The service needs to support the ``reload`` operation.
    """
    action('reload', service)
def start_and_enable(service):
    """
    Start and enable a service (convenience function).

    .. note:: This function is idempotent.
    """
    start(service)
    enable(service)
def stop_and_disable(service):
    """
    Stop and disable a service (convenience function).

    .. note:: This function is idempotent.
    """
    stop(service)
    disable(service)
|
{
"content_hash": "ea41544f52711601ac43215c1b9549e2",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 83,
"avg_line_length": 17.970149253731343,
"alnum_prop": 0.5930232558139535,
"repo_name": "chrisspen/burlap",
"id": "b24ce36983a1af32e02fb7576588df2978f0ad7d",
"size": "2408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "burlap/systemd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "722479"
},
{
"name": "Shell",
"bytes": "11659"
}
],
"symlink_target": ""
}
|
"""
The module provides all database models at current HEAD.
Its purpose is to create comparable metadata with current database schema.
Based on this comparison database can be healed with healing migration.
"""
from neutron.db import agents_db # noqa
from neutron.db import agentschedulers_db # noqa
from neutron.db import allowedaddresspairs_db # noqa
from neutron.db import dvr_mac_db # noqa
from neutron.db import external_net_db # noqa
from neutron.db import extradhcpopt_db # noqa
from neutron.db import extraroute_db # noqa
from neutron.db import l3_agentschedulers_db # noqa
from neutron.db import l3_attrs_db # noqa
from neutron.db import l3_db # noqa
from neutron.db import l3_dvrscheduler_db # noqa
from neutron.db import l3_gwmode_db # noqa
from neutron.db import l3_hamode_db # noqa
from neutron.db.metering import metering_db # noqa
from neutron.db import model_base
from neutron.db import models_v2 # noqa
from neutron.db import portbindings_db # noqa
from neutron.db import portsecurity_db # noqa
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_db # noqa
from neutron.db import servicetype_db # noqa
from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa
from neutron.plugins.bigswitch.db import consistency_db # noqa
from neutron.plugins.bigswitch import routerrule_db # noqa
from neutron.plugins.brocade.db import models as brocade_models # noqa
from neutron.plugins.cisco.db.l3 import l3_models # noqa
from neutron.plugins.cisco.db import n1kv_models_v2 # noqa
from neutron.plugins.cisco.db import network_models_v2 # noqa
from neutron.plugins.linuxbridge.db import l2network_models_v2 # noqa
from neutron.plugins.metaplugin import meta_models_v2 # noqa
from neutron.plugins.ml2.drivers.arista import db # noqa
from neutron.plugins.ml2.drivers.brocade.db import ( # noqa
models as ml2_brocade_models)
from neutron.plugins.ml2.drivers.cisco.apic import apic_model # noqa
from neutron.plugins.ml2.drivers.cisco.n1kv import n1kv_models # noqa
from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa
nexus_models_v2 as ml2_nexus_models_v2)
from neutron.plugins.ml2.drivers.cisco.ucsm import ucsm_model # noqa
from neutron.plugins.ml2.drivers import type_flat # noqa
from neutron.plugins.ml2.drivers import type_gre # noqa
from neutron.plugins.ml2.drivers import type_vlan # noqa
from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.plugins.ml2 import models # noqa
from neutron.plugins.nec.db import models as nec_models # noqa
from neutron.plugins.nuage import nuage_models # noqa
from neutron.plugins.openvswitch import ovs_models_v2 # noqa
from neutron.plugins.vmware.dbexts import nsx_models # noqa
from neutron.plugins.vmware.dbexts import nsxv_models # noqa
from neutron.plugins.vmware.dbexts import vcns_models # noqa
def get_metadata():
    """Return the SQLAlchemy MetaData collecting all models imported above."""
    return model_base.BASEV2.metadata
|
{
"content_hash": "102aa2513060a869460ccf9a90ad9a57",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 74,
"avg_line_length": 47.85245901639344,
"alnum_prop": 0.7855429941760877,
"repo_name": "bgxavier/neutron",
"id": "a2649a12237dae5da3ded4915d51c2d511128f56",
"size": "3560",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/db/migration/models/head.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7224545"
},
{
"name": "Shell",
"bytes": "12807"
}
],
"symlink_target": ""
}
|
# DEAP tutorial script (Python 2: uses print statements).
import random

from deap import base
from deap import creator
from deap import tools

IND_SIZE = 5

# Minimizing fitness with two objectives; an Individual is a list of floats.
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)

toolbox = base.Toolbox()
toolbox.register("attr_float", random.random)
toolbox.register("individual", tools.initRepeat, creator.Individual,
                 toolbox.attr_float, n=IND_SIZE)

# A fresh individual has no fitness values yet, so its fitness is invalid.
ind1 = toolbox.individual()
print ind1               # [0.86..., 0.27..., 0.70..., 0.03..., 0.87...]
print ind1.fitness.valid # False

## 3.2 Evaluation
def evaluate(individual):
    """Fitness function: (sum of genes, 1 / number of genes)."""
    total = sum(individual)
    size = len(individual)
    return total, 1. / size
# Assigning fitness values makes the fitness valid.
ind1.fitness.values = evaluate(ind1)
print ind1.fitness.valid # True
print ind1.fitness       # (2.73, 0.2)

## 3.3 Mutation
# Clone before mutating so ind1 is untouched; the mutated copy's fitness
# must be deleted because it no longer matches the genome.
mutant = toolbox.clone(ind1)
ind2, = tools.mutGaussian(mutant, mu=0.0, sigma=0.2, indpb=0.2)
del mutant.fitness.values
print ind2 is mutant # True
print mutant is ind1 # False

## 3.4 Crossover
# cxBlend modifies both children in place; their fitnesses are now stale.
child1, child2 = [toolbox.clone(ind) for ind in (ind1, ind2)]
tools.cxBlend(child1, child2, 0.5)
del child1.fitness.values
del child2.fitness.values

## 3.5 Selection
selected = tools.selBest([child1, child2], 2)
print child1 in selected # True

## 3.5 Note
# Selection returns references into the population; clone before modifying.
LAMBDA = 10
toolbox.register("select", tools.selRandom)
population = [ind1, ind2]*10
selected = toolbox.select(population, LAMBDA)
offspring = [toolbox.clone(ind) for ind in selected]
{
"content_hash": "315c56008d183e35214aca8f5a2bffd4",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 26.607142857142858,
"alnum_prop": 0.6973154362416107,
"repo_name": "GrimRanger/GeneticAlgorithm",
"id": "ff59e088ac3f333e64530cfb29b6ed6afbd24705",
"size": "1516",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "helps/deap/deap-master/doc/code/tutorials/part_3/3_next_step.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "48558"
},
{
"name": "C++",
"bytes": "24037"
},
{
"name": "Java",
"bytes": "15591"
},
{
"name": "Makefile",
"bytes": "3143"
},
{
"name": "Python",
"bytes": "622361"
},
{
"name": "R",
"bytes": "1032"
}
],
"symlink_target": ""
}
|
from verta._swagger.base_type import BaseType
class ModeldbCreateJob(BaseType):
  """Swagger-generated request model for ModelDB's CreateJob endpoint.

  NOTE(review): this looks like generated code — presumably edits belong in
  the generator template, not here.
  """
  def __init__(self, description=None, start_time=None, end_time=None, metadata=None, job_status=None, job_type=None):
    # Which constructor arguments are mandatory. All are optional here, so
    # the required-field check below never fires; the pattern is shared by
    # the other generated models.
    required = {
      "description": False,
      "start_time": False,
      "end_time": False,
      "metadata": False,
      "job_status": False,
      "job_type": False,
    }
    self.description = description
    self.start_time = start_time
    self.end_time = end_time
    self.metadata = metadata
    self.job_status = job_status
    self.job_type = job_type

    # BaseType provides dict-style access (self[k]); raise if a required
    # attribute was left unset.
    for k, v in required.items():
      if self[k] is None and v:
        raise ValueError('attribute {} is required'.format(k))

  @staticmethod
  def from_json(d):
    """Build a ModeldbCreateJob from a JSON-decoded dict.

    NOTE(review): mutates `d` in place while converting nested fields,
    then splats it into the constructor.
    """
    from .CommonKeyValue import CommonKeyValue
    from .JobStatusEnumJobStatus import JobStatusEnumJobStatus
    from .JobTypeEnumJobType import JobTypeEnumJobType

    tmp = d.get('description', None)
    if tmp is not None:
      d['description'] = tmp
    tmp = d.get('start_time', None)
    if tmp is not None:
      d['start_time'] = tmp
    tmp = d.get('end_time', None)
    if tmp is not None:
      d['end_time'] = tmp
    tmp = d.get('metadata', None)
    if tmp is not None:
      d['metadata'] = [CommonKeyValue.from_json(tmp) for tmp in tmp]
    tmp = d.get('job_status', None)
    if tmp is not None:
      d['job_status'] = JobStatusEnumJobStatus.from_json(tmp)
    tmp = d.get('job_type', None)
    if tmp is not None:
      d['job_type'] = JobTypeEnumJobType.from_json(tmp)

    return ModeldbCreateJob(**d)
|
{
"content_hash": "e0d7d4f40d60a9c4a471c19bdb39c21a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 118,
"avg_line_length": 28.69090909090909,
"alnum_prop": 0.6311787072243346,
"repo_name": "mitdbg/modeldb",
"id": "f1ebc9168b07400f34161183d0edbe447536e65c",
"size": "1621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/verta/verta/_swagger/_public/modeldb/model/ModeldbCreateJob.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
}
|
import pytest
import cupy
from cupy import cuda
from cupy import testing
import cupyx
@pytest.mark.skipif(cuda.runtime.is_hip,
                    reason='HIP does not support this')
@pytest.mark.skipif(cuda.driver.get_build_version() < 10010,
                    reason='Only CUDA 10.1+ supports this')
class TestGraph:
    """Tests for CUDA graph capture/launch via cupy.cuda.Stream.

    The ``_helper*`` methods provide small workloads; the ``test_*`` methods
    capture them into a CUDA graph (``begin_capture``/``end_capture``),
    optionally ``upload()`` the graph (CUDA 11.1+ only), ``launch()`` it, and
    compare against eager execution. The ``*_failure*`` tests pin down the
    error codes raised by operations that are illegal during capture.
    """

    def _helper1(self, a):
        # this tests ufuncs involving simple arithmetic
        a = a + 3
        a = a * 7.6
        return a**2

    def _helper2(self, a):
        # this tests ufuncs involving math API calls
        a = 3 * cupy.sin(a)
        return cupy.sqrt(a)

    def _helper3(self, a):
        # this tests CUDA library calls
        a = a * cupy.fft.fft(a)
        return cupy.fft.ifft(a)

    def _helper4(self, a):
        # this tests a common pattern in CuPy internal in which the host
        # operation depends on intermediate outcome on GPU (and thus requires
        # synchronization)
        result = cupy.zeros((1,), dtype=cupy.int32)
        if a.sum() > 0:  # synchronize!
            result += 1
        if a[-1] >= 0:  # synchronize!
            result += 2
        return result

    @pytest.mark.parametrize('upload', (True, False))
    def test_capture_run_on_same_stream(self, upload):
        """Capture and launch on the same non-blocking stream."""
        s = cupy.cuda.Stream(non_blocking=True)

        for n in range(3):
            func = getattr(self, '_helper{}'.format(n+1))
            a = cupy.random.random((100,))
            with s:
                s.begin_capture()
                out1 = func(a)
                g = s.end_capture()
                if upload and cuda.runtime.runtimeGetVersion() >= 11010:
                    g.upload()
                g.launch()
            s.synchronize()
            out2 = func(a)
            testing.assert_array_equal(out1, out2)

    @pytest.mark.parametrize('upload', (True, False))
    def test_capture_run_on_different_streams(self, upload):
        """Capture on one stream, launch on another."""
        s1 = cupy.cuda.Stream(non_blocking=True)
        s2 = cupy.cuda.Stream(non_blocking=True)

        for n in range(3):
            func = getattr(self, '_helper{}'.format(n+1))
            a = cupy.random.random((100,))
            with s1:
                s1.begin_capture()
                out1 = func(a)
                g = s1.end_capture()
            with s2:
                if upload and cuda.runtime.runtimeGetVersion() >= 11010:
                    g.upload()
                g.launch()
            s2.synchronize()
            out2 = func(a)
            testing.assert_array_equal(out1, out2)

    @pytest.mark.parametrize('upload', (True, False))
    def test_stream_is_capturing(self, upload):
        """is_capturing() is True only between begin_capture/end_capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        a = cupy.random.random((100,))
        with s:
            s.begin_capture()
            assert s.is_capturing()
            assert not cuda.Stream.null.is_capturing()
            b = a * 3
            g = s.end_capture()
        assert not s.is_capturing()
        assert not cuda.Stream.null.is_capturing()
        # check the graph integrity
        if upload and cuda.runtime.runtimeGetVersion() >= 11010:
            g.upload()
        g.launch()
        s.synchronize()
        testing.assert_array_equal(b, 3 * a)

    @pytest.mark.parametrize('upload', (True, False))
    def test_stream_fork_join(self, upload):
        """A fork (s1 -> s2) joined back via events can be captured."""
        s1 = cupy.cuda.Stream(non_blocking=True)
        s2 = cupy.cuda.Stream(non_blocking=True)
        e1 = cupy.cuda.Event()
        e2 = cupy.cuda.Event()
        a = cupy.random.random((100,))

        def func(x):
            return 3 * x + 1

        with s1:
            s1.begin_capture()
            out1 = a * 100
            e1.record(s1)
            s2.wait_event(e1)
            with s2:
                out2 = func(out1)
            e2.record(s2)
            s1.wait_event(e2)
            g = s1.end_capture()

        # check integrity
        assert not s1.is_capturing()
        assert not s2.is_capturing()
        if upload and cuda.runtime.runtimeGetVersion() >= 11010:
            g.upload()
        g.launch()
        s1.synchronize()
        testing.assert_array_equal(out2, func(a * 100))

    @pytest.mark.parametrize('upload', (True, False))
    def test_null_stream_cannot_capture(self, upload):
        """Querying the legacy null stream while a blocking stream captures raises."""
        s = cupy.cuda.Stream(non_blocking=False)
        a = cupy.random.random((100,))
        with s:
            s.begin_capture()
            b = a + 4
            assert s.is_capturing()
            # cudaStreamLegacy is unhappy when a blocking stream is capturing
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                cuda.Stream.null.is_capturing()
            assert 'cudaErrorStreamCaptureImplicit' in str(e.value)
            g = s.end_capture()
        assert not s.is_capturing()
        assert not cuda.Stream.null.is_capturing()
        # check the graph integrity
        if upload and cuda.runtime.runtimeGetVersion() >= 11010:
            g.upload()
        g.launch()
        s.synchronize()
        testing.assert_array_equal(b, a + 4)

    def test_stream_capture_failure1(self):
        """Synchronizing a capturing stream invalidates the capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        with s:
            s.begin_capture()
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                s.synchronize()
            assert 'cudaErrorStreamCaptureUnsupported' in str(e.value)
            # invalid operation causes the capture sequence to be invalidated
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                g = s.end_capture()  # noqa
            assert 'cudaErrorStreamCaptureInvalidated' in str(e.value)
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure2(self):
        """Ending capture on the wrong stream raises the documented errors."""
        s1 = cupy.cuda.Stream(non_blocking=True)
        s2 = cupy.cuda.Stream(non_blocking=True)
        e2 = cupy.cuda.Event()
        a = cupy.random.random((100,))
        with s1:
            s1.begin_capture()
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                g = s2.end_capture()
            assert 'cudaErrorIllegalState' in str(e.value)
            e2.record(s1)
            s2.wait_event(e2)
            with s2:
                b = a**3  # noqa
                with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                    g = s2.end_capture()
                assert 'cudaErrorStreamCaptureUnmatched' in str(e.value)
            # invalid operation causes the capture sequence to be invalidated
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                g = s1.end_capture()  # noqa
            assert 'cudaErrorStreamCaptureInvalidated' in str(e.value)
        # check both s1 and s2 left the capture mode and permit normal usage
        assert not s1.is_capturing()
        assert not s2.is_capturing()
        s1.synchronize()
        s2.synchronize()

    def test_stream_capture_failure3(self):
        """A synchronizing op on a forked stream leaves the capture unjoined."""
        s1 = cupy.cuda.Stream(non_blocking=True)
        s2 = cupy.cuda.Stream(non_blocking=True)
        e2 = cupy.cuda.Event()
        a = cupy.random.random((100,))
        with s1:
            s1.begin_capture()
            e2.record(s1)
            s2.wait_event(e2)
            with s2:
                # internally the function requires synchronization, which is
                # incompatible with stream capturing and so we raise
                with pytest.raises(RuntimeError) as e:
                    b = cupy.where(a > 0.5)  # noqa
                assert 'is capturing' in str(e.value)
            # invalid operation causes the capture sequence to be invalidated
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                g = s1.end_capture()  # noqa
            assert 'cudaErrorStreamCaptureUnjoined' in str(e.value)
        # check both s1 and s2 left the capture mode and permit normal usage
        assert not s1.is_capturing()
        assert not s2.is_capturing()
        s1.synchronize()
        s2.synchronize()

    def test_stream_capture_failure4(self):
        """Querying stream status during capture invalidates the capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        with s:
            s.begin_capture()
            # query the stream status is illegal during capturing
            s.done
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                s.end_capture()
            assert 'cudaErrorStreamCaptureInvalidated' in str(e.value)
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure5(self):
        """A workload that synchronizes internally cannot be captured."""
        s = cupy.cuda.Stream(non_blocking=True)
        func = self._helper4
        a = cupy.random.random((100,))
        with s:
            s.begin_capture()
            # internally the function requires synchronization, which is
            # incompatible with stream capturing and so we raise
            with pytest.raises(RuntimeError) as e:
                func(a)
            assert 'is capturing' in str(e.value)
            s.end_capture()
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure6(self):
        """Stream synchronization during capture invalidates the capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        with s:
            s.begin_capture()
            # synchronize the stream is illegal during capturing
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                s.synchronize()
            assert 'cudaErrorStreamCaptureUnsupported' in str(e.value)
            with pytest.raises(cuda.runtime.CUDARuntimeError) as e:
                s.end_capture()
            assert 'cudaErrorStreamCaptureInvalidated' in str(e.value)
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure_cublas(self):
        """cuBLAS calls are rejected during capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        a = cupy.random.random((3, 4))
        b = cupy.random.random((4, 5))
        with s:
            s.begin_capture()
            with pytest.raises(NotImplementedError) as e:
                cupy.matmul(a, b)
            assert 'cuBLAS' in str(e.value)
            s.end_capture()
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure_cusolver(self):
        """cuSOLVER calls are rejected during capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        a = cupy.random.random((8, 8))
        a += a.T
        with s:
            s.begin_capture()
            with pytest.raises(NotImplementedError) as e:
                cupy.linalg.svd(a)
            assert 'cuSOLVER' in str(e.value)
            s.end_capture()
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure_curand(self):
        """cuRAND calls are rejected during capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        with s:
            s.begin_capture()
            with pytest.raises(NotImplementedError) as e:
                cupy.random.random(100)
            assert 'cuRAND' in str(e.value)
            s.end_capture()
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()

    def test_stream_capture_failure_cusparse(self):
        """cuSPARSE calls are rejected during capture."""
        s = cupy.cuda.Stream(non_blocking=True)
        a = cupy.zeros((3, 4))
        a[0] = 1
        a = cupyx.scipy.sparse.csr_matrix(a)
        a.has_canonical_format  # avoid launching custom kernels during capture
        with s:
            s.begin_capture()
            with pytest.raises(NotImplementedError) as e:
                a * a.T
            assert 'cuSPARSE' in str(e.value)
            s.end_capture()
        # check s left the capture mode and permits normal usage
        assert not s.is_capturing()
        s.synchronize()
|
{
"content_hash": "8f458f8cf16ab1033528969d74d61850",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 79,
"avg_line_length": 34.405797101449274,
"alnum_prop": 0.5653748946925021,
"repo_name": "cupy/cupy",
"id": "6d70f85806bf7ab184ac2515abe6c6840096626e",
"size": "11870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cupy_tests/cuda_tests/test_graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "C",
"bytes": "712019"
},
{
"name": "C++",
"bytes": "895316"
},
{
"name": "Cuda",
"bytes": "151799"
},
{
"name": "Cython",
"bytes": "1996454"
},
{
"name": "Dockerfile",
"bytes": "40251"
},
{
"name": "PowerShell",
"bytes": "7361"
},
{
"name": "Python",
"bytes": "4841354"
},
{
"name": "Shell",
"bytes": "24521"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the poll's description, initiator and location fields optional
    (``blank=True``); max lengths and verbose names are restated unchanged.
    """

    dependencies = [
        ('foodle_polls', '0006_auto_20160211_2050'),
    ]

    operations = [
        migrations.AlterField(
            model_name='poll',
            name='description',
            field=models.CharField(blank=True, max_length=100, verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='poll',
            name='initiator',
            field=models.CharField(blank=True, max_length=20, verbose_name='Initiator'),
        ),
        migrations.AlterField(
            model_name='poll',
            name='location',
            field=models.CharField(blank=True, max_length=50, verbose_name='Location'),
        ),
    ]
|
{
"content_hash": "249f50858e6dcc92aa8974258a17031b",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 91,
"avg_line_length": 28.892857142857142,
"alnum_prop": 0.5834363411619283,
"repo_name": "FabianWe/foodle",
"id": "f1feeab714b1ac9a83499cd0e370d19d353992bf",
"size": "881",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foodle/foodle_polls/migrations/0007_auto_20160212_0054.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1392"
},
{
"name": "HTML",
"bytes": "32514"
},
{
"name": "JavaScript",
"bytes": "23028"
},
{
"name": "Python",
"bytes": "71358"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
}
|
"""Self-tests for (some of) Crypto.Util.number"""
__revision__ = "$Id$"
from Crypto.Util.python_compat import *
import unittest
# NB: In some places, we compare tuples instead of just output values so that
# if any inputs cause a test failure, we'll be able to tell which ones.
class MiscTests(unittest.TestCase):
    def setUp(self):
        # Import lazily into module-level globals so an import failure shows
        # up as a test error rather than breaking test collection.
        global number, math
        from Crypto.Util import number
        import math
    def test_ceil_shift(self):
        """Util.number.ceil_shift

        ceil_shift(n, b) should equal ceil(n / 2**b) for n, b >= 0.
        """
        # Negative arguments are rejected.
        self.assertRaises(AssertionError, number.ceil_shift, -1, 1)
        self.assertRaises(AssertionError, number.ceil_shift, 1, -1)

        # b = 0
        self.assertEqual(0, number.ceil_shift(0, 0))
        self.assertEqual(1, number.ceil_shift(1, 0))
        self.assertEqual(2, number.ceil_shift(2, 0))
        self.assertEqual(3, number.ceil_shift(3, 0))

        # b = 1
        self.assertEqual(0, number.ceil_shift(0, 1))
        self.assertEqual(1, number.ceil_shift(1, 1))
        self.assertEqual(1, number.ceil_shift(2, 1))
        self.assertEqual(2, number.ceil_shift(3, 1))

        # b = 2
        self.assertEqual(0, number.ceil_shift(0, 2))
        self.assertEqual(1, number.ceil_shift(1, 2))
        self.assertEqual(1, number.ceil_shift(2, 2))
        self.assertEqual(1, number.ceil_shift(3, 2))
        self.assertEqual(1, number.ceil_shift(4, 2))
        self.assertEqual(2, number.ceil_shift(5, 2))
        self.assertEqual(2, number.ceil_shift(6, 2))
        self.assertEqual(2, number.ceil_shift(7, 2))
        self.assertEqual(2, number.ceil_shift(8, 2))
        self.assertEqual(3, number.ceil_shift(9, 2))

        # For larger b, check values around each power of two against an
        # independently-computed expectation (divmod + round up).
        for b in range(3, 1+129, 3):    # 3, 6, ... , 129
            self.assertEqual(0, number.ceil_shift(0, b))

            n = 1L
            while n <= 2L**(b+2):
                (q, r) = divmod(n-1, 2L**b)
                expected = q + int(not not r)
                # Compare tuples so a failure reports the offending inputs.
                self.assertEqual((n-1, b, expected),
                                 (n-1, b, number.ceil_shift(n-1, b)))

                (q, r) = divmod(n, 2L**b)
                expected = q + int(not not r)
                self.assertEqual((n, b, expected),
                                 (n, b, number.ceil_shift(n, b)))

                (q, r) = divmod(n+1, 2L**b)
                expected = q + int(not not r)
                self.assertEqual((n+1, b, expected),
                                 (n+1, b, number.ceil_shift(n+1, b)))

                n *= 2
def test_ceil_div(self):
"""Util.number.ceil_div"""
self.assertRaises(TypeError, number.ceil_div, "1", 1)
self.assertRaises(ZeroDivisionError, number.ceil_div, 1, 0)
self.assertRaises(ZeroDivisionError, number.ceil_div, -1, 0)
# b = -1
self.assertEqual(0, number.ceil_div(0, -1))
self.assertEqual(-1, number.ceil_div(1, -1))
self.assertEqual(-2, number.ceil_div(2, -1))
self.assertEqual(-3, number.ceil_div(3, -1))
# b = 1
self.assertEqual(0, number.ceil_div(0, 1))
self.assertEqual(1, number.ceil_div(1, 1))
self.assertEqual(2, number.ceil_div(2, 1))
self.assertEqual(3, number.ceil_div(3, 1))
# b = 2
self.assertEqual(0, number.ceil_div(0, 2))
self.assertEqual(1, number.ceil_div(1, 2))
self.assertEqual(1, number.ceil_div(2, 2))
self.assertEqual(2, number.ceil_div(3, 2))
self.assertEqual(2, number.ceil_div(4, 2))
self.assertEqual(3, number.ceil_div(5, 2))
# b = 3
self.assertEqual(0, number.ceil_div(0, 3))
self.assertEqual(1, number.ceil_div(1, 3))
self.assertEqual(1, number.ceil_div(2, 3))
self.assertEqual(1, number.ceil_div(3, 3))
self.assertEqual(2, number.ceil_div(4, 3))
self.assertEqual(2, number.ceil_div(5, 3))
self.assertEqual(2, number.ceil_div(6, 3))
self.assertEqual(3, number.ceil_div(7, 3))
# b = 4
self.assertEqual(0, number.ceil_div(0, 4))
self.assertEqual(1, number.ceil_div(1, 4))
self.assertEqual(1, number.ceil_div(2, 4))
self.assertEqual(1, number.ceil_div(3, 4))
self.assertEqual(1, number.ceil_div(4, 4))
self.assertEqual(2, number.ceil_div(5, 4))
self.assertEqual(2, number.ceil_div(6, 4))
self.assertEqual(2, number.ceil_div(7, 4))
self.assertEqual(2, number.ceil_div(8, 4))
self.assertEqual(3, number.ceil_div(9, 4))
# b = -4
self.assertEqual(3, number.ceil_div(-9, -4))
self.assertEqual(2, number.ceil_div(-8, -4))
self.assertEqual(2, number.ceil_div(-7, -4))
self.assertEqual(2, number.ceil_div(-6, -4))
self.assertEqual(2, number.ceil_div(-5, -4))
self.assertEqual(1, number.ceil_div(-4, -4))
self.assertEqual(1, number.ceil_div(-3, -4))
self.assertEqual(1, number.ceil_div(-2, -4))
self.assertEqual(1, number.ceil_div(-1, -4))
self.assertEqual(0, number.ceil_div(0, -4))
self.assertEqual(0, number.ceil_div(1, -4))
self.assertEqual(0, number.ceil_div(2, -4))
self.assertEqual(0, number.ceil_div(3, -4))
self.assertEqual(-1, number.ceil_div(4, -4))
self.assertEqual(-1, number.ceil_div(5, -4))
self.assertEqual(-1, number.ceil_div(6, -4))
self.assertEqual(-1, number.ceil_div(7, -4))
self.assertEqual(-2, number.ceil_div(8, -4))
self.assertEqual(-2, number.ceil_div(9, -4))
    def test_exact_log2(self):
        """Util.number.exact_log2

        exact_log2(n) returns e iff n == 2**e; any other input raises.
        """
        self.assertRaises(TypeError, number.exact_log2, "0")
        self.assertRaises(ValueError, number.exact_log2, -1)
        self.assertRaises(ValueError, number.exact_log2, 0)
        self.assertEqual(0, number.exact_log2(1))
        self.assertEqual(1, number.exact_log2(2))
        self.assertRaises(ValueError, number.exact_log2, 3)
        self.assertEqual(2, number.exact_log2(4))
        self.assertRaises(ValueError, number.exact_log2, 5)
        self.assertRaises(ValueError, number.exact_log2, 6)
        self.assertRaises(ValueError, number.exact_log2, 7)

        # Walk every n in [8, 2**16): only exact powers of two succeed.
        e = 3
        n = 8
        while e < 16:
            if n == 2**e:
                self.assertEqual(e, number.exact_log2(n), "expected=2**%d, n=%d" % (e, n))
                e += 1
            else:
                self.assertRaises(ValueError, number.exact_log2, n)
            n += 1

        # Spot-check larger powers of two and their immediate neighbours.
        for e in range(16, 1+64, 2):
            self.assertRaises(ValueError, number.exact_log2, 2L**e-1)
            self.assertEqual(e, number.exact_log2(2L**e))
            self.assertRaises(ValueError, number.exact_log2, 2L**e+1)
def test_exact_div(self):
"""Util.number.exact_div"""
# Positive numbers
self.assertEqual(1, number.exact_div(1, 1))
self.assertRaises(ValueError, number.exact_div, 1, 2)
self.assertEqual(1, number.exact_div(2, 2))
self.assertRaises(ValueError, number.exact_div, 3, 2)
self.assertEqual(2, number.exact_div(4, 2))
# Negative numbers
self.assertEqual(-1, number.exact_div(-1, 1))
self.assertEqual(-1, number.exact_div(1, -1))
self.assertRaises(ValueError, number.exact_div, -1, 2)
self.assertEqual(1, number.exact_div(-2, -2))
self.assertEqual(-2, number.exact_div(-4, 2))
# Zero dividend
self.assertEqual(0, number.exact_div(0, 1))
self.assertEqual(0, number.exact_div(0, 2))
# Zero divisor (allow_divzero == False)
self.assertRaises(ZeroDivisionError, number.exact_div, 0, 0)
self.assertRaises(ZeroDivisionError, number.exact_div, 1, 0)
# Zero divisor (allow_divzero == True)
self.assertEqual(0, number.exact_div(0, 0, allow_divzero=True))
self.assertRaises(ValueError, number.exact_div, 1, 0, allow_divzero=True)
def test_floor_div(self):
"""Util.number.floor_div"""
self.assertRaises(TypeError, number.floor_div, "1", 1)
for a in range(-10, 10):
for b in range(-10, 10):
if b == 0:
self.assertRaises(ZeroDivisionError, number.floor_div, a, b)
else:
self.assertEqual((a, b, int(math.floor(float(a) / b))),
(a, b, number.floor_div(a, b)))
def test_getStrongPrime(self):
"""Util.number.getStrongPrime"""
self.assertRaises(ValueError, number.getStrongPrime, 256)
self.assertRaises(ValueError, number.getStrongPrime, 513)
bits = 512
x = number.getStrongPrime(bits)
self.assertNotEqual(x % 2, 0)
self.assertEqual(x > (1L << bits-1)-1, 1)
self.assertEqual(x < (1L << bits), 1)
e = 2**16+1
x = number.getStrongPrime(bits, e)
self.assertEqual(number.GCD(x-1, e), 1)
self.assertNotEqual(x % 2, 0)
self.assertEqual(x > (1L << bits-1)-1, 1)
self.assertEqual(x < (1L << bits), 1)
e = 2**16+2
x = number.getStrongPrime(bits, e)
self.assertEqual(number.GCD((x-1)>>1, e), 1)
self.assertNotEqual(x % 2, 0)
self.assertEqual(x > (1L << bits-1)-1, 1)
self.assertEqual(x < (1L << bits), 1)
def test_isPrime(self):
"""Util.number.isPrime"""
self.assertEqual(number.isPrime(2), True)
self.assertEqual(number.isPrime(3), True)
self.assertEqual(number.isPrime(4), False)
self.assertEqual(number.isPrime(2L**1279-1), True)
# test some known gmp pseudo-primes taken from
# http://www.trnicely.net/misc/mpzspsp.html
for composite in (43 * 127 * 211, 61 * 151 * 211, 15259 * 30517,
346141L * 692281L, 1007119L * 2014237L, 3589477L * 7178953L,
4859419L * 9718837L, 2730439L * 5460877L,
245127919L * 490255837L, 963939391L * 1927878781L,
4186358431L * 8372716861L, 1576820467L * 3153640933L):
self.assertEqual(number.isPrime(long(composite)), False)
def get_tests(config={}):
    """Collect this module's test cases.

    The *config* argument is accepted for interface compatibility with the
    other self-test modules and is not used here.
    """
    from Crypto.SelfTest.st_common import list_test_cases
    cases = list_test_cases(MiscTests)
    return cases
if __name__ == '__main__':
    # Run this module's suite directly when executed as a script.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
{
"content_hash": "0c1cd634ad5624284ecc2922bdc372a1",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 90,
"avg_line_length": 41.17131474103586,
"alnum_prop": 0.5745113218502033,
"repo_name": "cipicip/appengine",
"id": "1ce8e9071c4e07729e07a40ecc3815c1bd4e1449",
"size": "11473",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "test_crypto/Crypto/SelfTest/Util/test_number.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "HTML",
"bytes": "18890"
},
{
"name": "JavaScript",
"bytes": "100184"
},
{
"name": "Python",
"bytes": "541939"
},
{
"name": "Shell",
"bytes": "60"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from pyhlm.model import WeakLimitHDPHLM, WeakLimitHDPHLMPython
from pyhlm.internals.hlm_states import WeakLimitHDPHLMStates
from pyhlm.word_model import LetterHSMM, LetterHSMMPython
import pyhsmm
import warnings
from tqdm import trange
# Suppress library warnings emitted during Gibbs sampling.
warnings.filterwarnings('ignore')
import time

#%%
def load_datas():
    """Load all observation sequences listed in files.txt.

    Returns:
        list of numpy.ndarray: one array per line in files.txt, read from
        DATA/<name>.txt in listing order.
    """
    names = np.loadtxt("files.txt", dtype=str)
    # The original also bound an unused alias `files = names`; removed.
    return [np.loadtxt("DATA/" + name + ".txt") for name in names]
def unpack_durations(dur):
    """Expand a duration vector into a 0/1 segment-boundary sequence.

    The result has length dur.sum(); the last frame of every segment
    except the final one is marked with 1.0, all other frames are 0.
    """
    total_frames = int(dur.sum())
    flags = np.zeros(total_frames)
    # Cumulative sums of all but the last duration give the (1-based)
    # positions of the segment ends; shift to 0-based indices.
    boundaries = np.cumsum(dur[:-1])
    flags[boundaries - 1] = 1.0
    return flags
def save_stateseq(model):
    """Append each sequence's sampled state sequence to results/<name>_l.txt.

    File order follows files.txt, which is assumed to match the order in
    which sequences were added to the model via add_data.
    """
    # Save sampled states sequences.
    names = np.loadtxt("files.txt", dtype=str)
    for i, s in enumerate(model.states_list):
        # with open("results/" + names[i] + "_s.txt", "a") as f:
        #     np.savetxt(f, s.stateseq)
        # with open("results/" + names[i] + "_l.txt", "a") as f:
        #     np.savetxt(f, s.letter_stateseq)
        # with open("results/" + names[i] + "_d.txt", "a") as f:
        #     np.savetxt(f, unpack_durations(s.durations_censored))
        # NOTE(review): per the commented-out code above, the "_l" suffix
        # used to hold letter_stateseq but now receives stateseq — confirm
        # this filename/content pairing is intentional.
        with open("results/" + names[i] + "_l.txt", "a") as f:
            np.savetxt(f, s.stateseq)
def save_params(itr_idx, model):
    """Write the model's parameters to parameters/l_ITR_<itr_idx>.txt."""
    out_path = "parameters/l_ITR_{0:04d}.txt".format(itr_idx)
    with open(out_path, "w") as param_file:
        param_file.write(str(model.params))
#%%
# Create the output directories on first run.
if not os.path.exists('results'):
    os.mkdir('results')
if not os.path.exists('parameters'):
    os.mkdir('parameters')
if not os.path.exists('summary_files'):
    os.mkdir('summary_files')

#%%
# Sampler configuration.
thread_num = 4    # worker processes used by resample_model
train_iter = 100  # number of Gibbs sampling iterations
trunc = 60        # passed to add_data — presumably a duration truncation; confirm with pyhlm docs
obs_dim = 3       # dimensionality of each observation vector
letter_upper = 10  # weak-limit bound on the number of letter states
word_upper = 10    # weak-limit bound on the number of word states

# NOTE(review): model_hypparams is defined but never used in this script.
model_hypparams = {'num_states': word_upper, 'alpha': 10, 'gamma': 10, 'init_state_concentration': 10}
obs_hypparams = {
    'mu_0':np.zeros(obs_dim),
    'sigma_0':np.identity(obs_dim),
    'kappa_0':0.01,
    'nu_0':obs_dim+5
}
dur_hypparams = {
    'alpha_0':200,
    'beta_0':10
}

#%%
# One Gaussian emission and one Poisson duration distribution per letter state.
letter_obs_distns = [pyhsmm.distributions.Gaussian(**obs_hypparams) for state in range(letter_upper)]
letter_dur_distns = [pyhsmm.distributions.PoissonDuration(**dur_hypparams) for state in range(letter_upper)]
# dur_distns = [pyhsmm.distributions.PoissonDuration(lmbda=20) for state in range(word_upper)]
# length_distn = pyhsmm.distributions.PoissonDuration(alpha_0=30, beta_0=10, lmbda=3)

#%%
letter_hsmm = LetterHSMM(alpha=10, gamma=10, init_state_concentration=10, obs_distns=letter_obs_distns, dur_distns=letter_dur_distns)

#%%
files = np.loadtxt("files.txt", dtype=str)
datas = load_datas()

#%% Save init params and pyper params
with open("parameters/l_hypparams.txt", "w") as f:
    f.write(str(letter_hsmm.hypparams))
save_params(0, letter_hsmm)
# save_loglikelihood(model)

#%% Pre training.
print("Training...")
for d in datas:
    letter_hsmm.add_data(d, trunc=trunc)
for t in trange(train_iter):
    # One full Gibbs sweep, then persist the sampled states and parameters.
    letter_hsmm.resample_model(num_procs=thread_num)
    save_stateseq(letter_hsmm)
    save_params(t+1, letter_hsmm)
print("Done!")
|
{
"content_hash": "d2b0b35c53b85eaed0d653f45dfc0599",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 133,
"avg_line_length": 29.93069306930693,
"alnum_prop": 0.6619252398279855,
"repo_name": "EmergentSystemLabStudent/NPB_DAA",
"id": "a4382dc4cfdea1251ac954ac793d44317c79c58a",
"size": "3023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample/letterhsmm_sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "9810"
},
{
"name": "Python",
"bytes": "31906"
}
],
"symlink_target": ""
}
|
from . import Validator
from ..errors import InvalidLength
class Length(Validator):
    """Validates that a value's length falls within configured bounds."""

    def __init__(self, min=None, max=None):
        """
        Args:
            min (int): minimum length (inclusive), or None for no lower bound
            max (int): maximum length (inclusive), or None for no upper bound
        """
        self._min = min
        self._max = max

    def __call__(self, value, path):
        """Validate that len(value) lies within [min, max].

        (The previous docstring — "value is contained in the expected ones" —
        was copy-pasted from another validator and did not describe this one.)

        Args:
            value (object): value to validate; must support len()
            path (list): error path

        Raises:
            InvalidLength: if value has no length, or its length is out of bounds
        """
        try:
            length = len(value)
        except TypeError:
            # Value has no length at all; report with length=None.
            raise InvalidLength(None, self._min, self._max, path)
        # Named sub-conditions avoid relying on and/or precedence.
        too_short = self._min is not None and length < self._min
        too_long = self._max is not None and length > self._max
        if too_short or too_long:
            raise InvalidLength(length, self._min, self._max, path)
|
{
"content_hash": "8dd030386faab91a2fdefdcb84a9af96",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 67,
"avg_line_length": 28.096774193548388,
"alnum_prop": 0.5419058553386912,
"repo_name": "holinnn/lupin",
"id": "53cb5ccbbdf802f10acb3428051a048eb8b88cea",
"size": "871",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/lupin/validators/length.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107452"
}
],
"symlink_target": ""
}
|
# Browser smoke test: open Yahoo in Firefox and submit a search via Selenium.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

browser = webdriver.Firefox()
browser.get('http://www.yahoo.com')
# Sanity check that the page actually loaded.
assert 'Yahoo' in browser.title

# NOTE(review): find_element_by_name is the legacy Selenium 3 API, removed in
# Selenium 4 (use find_element(By.NAME, 'p')) — confirm the pinned version.
elem = browser.find_element_by_name('p')  # Find the search box
elem.send_keys('seleniumhq' + Keys.RETURN)  # Type the query and submit

browser.quit()
|
{
"content_hash": "5cbe1bc4bd5cb69045b962f6c83c5366",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 63,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.7582781456953642,
"repo_name": "dichovsky/tests",
"id": "f878fa09e72f86645a6a026542a5fc26ccc3e523",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6822"
}
],
"symlink_target": ""
}
|
import json
import os
import platform
import tempfile
import time
import unittest
from unittest import mock
import uuid
from knack.util import CLIError
from azure.cli.testsdk.scenario_tests import AllowLargeResponse, record_only
from azure.cli.core.profiles import ResourceType
from azure.cli.testsdk import (
ScenarioTest, ResourceGroupPreparer, LiveScenarioTest, api_version_constraint,
StorageAccountPreparer)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
TEST_SSH_KEY_PUB = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCbIg1guRHbI0lV11wWDt1r2cUdcNd27CJsg+SfgC7miZeubtwUhbsPdhMQsfDyhOWHq1+ZL0M+nJZV63d/1dhmhtgyOqejUwrPlzKhydsbrsdUor+JmNJDdW01v7BXHyuymT8G4s09jCasNOwiufbP/qp72ruu0bIA1nySsvlf9pCQAuFkAnVnf/rFhUlOkhtRpwcq8SUNY2zRHR/EKb/4NWY1JzR4sa3q2fWIJdrrX0DvLoa5g9bIEd4Df79ba7v+yiUBOS0zT2ll+z4g9izHK3EO5d8hL4jYxcjKs+wcslSYRWrascfscLgMlMGh0CdKeNTDjHpGPncaf3Z+FwwwjWeuiNBxv7bJo13/8B/098KlVDl4GZqsoBCEjPyJfV6hO0y/LkRGkk7oHWKgeWAfKtfLItRp00eZ4fcJNK9kCaSMmEugoZWcI7NGbZXzqFWqbpRI7NcDP9+WIQ+i9U5vqWsqd/zng4kbuAJ6UuKqIzB0upYrLShfQE3SAck8oaLhJqqq56VfDuASNpJKidV+zq27HfSBmbXnkR/5AK337dc3MXKJypoK/QPMLKUAP5XLPbs+NddJQV7EZXd29DLgp+fRIg3edpKdO7ZErWhv7d+3Kws+e1Y+ypmR2WIVSwVyBEUfgv2C8Ts9gnTF4pNcEY/S2aBicz5Ew2+jdyGNQQ== test@example.com\n"
def _write_config_file(user_name):
public_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8InHIPLAu6lMc0d+5voyXqigZfT5r6fAM1+FQAi+mkPDdk2hNq1BG0Bwfc88G'
'm7BImw8TS+x2bnZmhCbVnHd6BPCDY7a+cHCSqrQMW89Cv6Vl4ueGOeAWHpJTV9CTLVz4IY1x4HBdkLI2lKIHri9+z7NIdvFk7iOk'
'MVGyez5H1xDbF2szURxgc4I2/o5wycSwX+G8DrtsBvWLmFv9YAPx+VkEHQDjR0WWezOjuo1rDn6MQfiKfqAjPuInwNOg5AIxXAOR'
'esrin2PUlArNtdDH1zlvI4RZi36+tJO7mtm3dJiKs4Sj7G6b1CjIU6aaj27MmKy3arIFChYav9yYM3IT')
config = {
'username': user_name,
'ssh_key': public_key
}
_, config_file = tempfile.mkstemp()
with open(config_file, 'w') as outfile:
json.dump(config, outfile)
return config_file
# class VMImageListByAliasesScenarioTest(ScenarioTest):
# def test_vm_image_list_by_alias(self):
# result = self.cmd('vm image list --offer ubuntu').get_output_in_json()
# self.assertTrue(len(result) >= 1)
# self.assertEqual(result[0]['publisher'], 'Canonical')
# self.assertTrue(result[0]['sku'].endswith('LTS'))
# class VMUsageScenarioTest(ScenarioTest):
# def test_vm_usage(self):
# self.cmd('vm list-usage --location westus',
# checks=self.check('type(@)', 'array'))
# class VMImageListThruServiceScenarioTest(ScenarioTest):
# @AllowLargeResponse()
# def test_vm_images_list_thru_services(self):
# result = self.cmd('vm image list -l westus --publisher Canonical --offer UbuntuServer -o tsv --all').output
# assert result.index('16.04') >= 0
# result = self.cmd('vm image list -p Canonical -f UbuntuServer -o tsv --all').output
# assert result.index('16.04') >= 0
# class VMOpenPortTest(ScenarioTest):
# @ResourceGroupPreparer(name_prefix='cli_test_open_port')
# def test_vm_open_port(self, resource_group):
# self.kwargs.update({
# 'vm': 'vm1'
# })
# self.cmd('vm create -g {rg} -l westus -n {vm} --admin-username ubuntu --image Canonical:UbuntuServer:14.04.4-LTS:latest --admin-password @PasswordPassword1! --public-ip-address-allocation dynamic --authentication-type password')
# # min params - apply to existing NIC (updates existing NSG)
# self.kwargs['nsg_id'] = self.cmd('vm open-port -g {rg} -n {vm} --port "*" --priority 900').get_output_in_json()['id']
# self.kwargs['nsg'] = os.path.split(self.kwargs['nsg_id'])[1]
# self.cmd('network nsg show -g {rg} -n {nsg}',
# checks=self.check("length(securityRules[?name == 'open-port-all'])", 1))
# # apply to subnet (creates new NSG)
# self.kwargs['nsg'] = 'newNsg'
# self.cmd('vm open-port -g {rg} -n {vm} --apply-to-subnet --nsg-name {nsg} --port "*" --priority 900')
# self.cmd('network nsg show -g {rg} -n {nsg}',
# checks=self.check("length(securityRules[?name == 'open-port-all'])", 1))
# class VMShowListSizesListIPAddressesScenarioTest(ScenarioTest):
# @ResourceGroupPreparer(name_prefix='cli_test_vm_list_ip')
# def test_vm_show_list_sizes_list_ip_addresses(self, resource_group):
# self.kwargs.update({
# 'loc': 'centralus',
# 'vm': 'vm-with-public-ip',
# 'allocation': 'dynamic',
# 'zone': 2
# })
# # Expecting no results at the beginning
# self.cmd('vm list-ip-addresses --resource-group {rg}', checks=self.is_empty())
# self.cmd('vm create --resource-group {rg} --location {loc} -n {vm} --admin-username ubuntu --image Canonical:UbuntuServer:14.04.4-LTS:latest'
# ' --admin-password testPassword0 --public-ip-address-allocation {allocation} --authentication-type password --zone {zone}')
# result = self.cmd('vm show --resource-group {rg} --name {vm} -d', checks=[
# self.check('type(@)', 'object'),
# self.check('name', '{vm}'),
# self.check('location', '{loc}'),
# self.check('resourceGroup', '{rg}')
# ]).get_output_in_json()
# self.assertEqual(4, len(result['publicIps'].split('.')))
# result = self.cmd('vm list --resource-group {rg} -d', checks=[
# self.check('[0].name', '{vm}'),
# self.check('[0].location', '{loc}'),
# self.check('[0].resourceGroup', '{rg}'),
# self.check('[0].powerState', 'VM running')
# ]).get_output_in_json()
# self.assertEqual(4, len(result[0]['publicIps'].split('.')))
# self.cmd('vm list-vm-resize-options --resource-group {rg} --name {vm}',
# checks=self.check('type(@)', 'array'))
# # Expecting the one we just added
# self.kwargs['rg_caps'] = resource_group.upper() # test the command handles name with casing diff.
# self.cmd('vm list-ip-addresses --resource-group {rg_caps}', checks=[
# self.check('length(@)', 1),
# self.check('[0].virtualMachine.name', '{vm}'),
# self.check('[0].virtualMachine.resourceGroup', '{rg}'),
# self.check('length([0].virtualMachine.network.publicIpAddresses)', 1),
# self.check('[0].virtualMachine.network.publicIpAddresses[0].ipAllocationMethod', self.kwargs['allocation'].title()),
# self.check('type([0].virtualMachine.network.publicIpAddresses[0].ipAddress)', 'string'),
# self.check('[0].virtualMachine.network.publicIpAddresses[0].zone', '{zone}'),
# self.check('type([0].virtualMachine.network.publicIpAddresses[0].name)', 'string'),
# self.check('[0].virtualMachine.network.publicIpAddresses[0].resourceGroup', '{rg}')
# ])
class VMSizeListScenarioTest(ScenarioTest):
    """Smoke test for `vm list-sizes`."""

    def test_vm_size_list(self):
        # The command must return a JSON array of VM size descriptions.
        type_check = self.check('type(@)', 'array')
        self.cmd('vm list-sizes --location westus', checks=type_check)
class VMImageListOffersScenarioTest(ScenarioTest):
    """Tests for `vm image list-offers`."""

    def test_vm_image_list_offers(self):
        self.kwargs.update({
            'loc': 'westus',
            'pub': 'Canonical'
        })
        result = self.cmd('vm image list-offers --location {loc} --publisher {pub}').get_output_in_json()
        self.assertTrue(len(result) > 0)
        # Every returned offer must belong to the requested location.
        mismatched = [i for i in result if i['location'].lower() != self.kwargs['loc']]
        self.assertFalse(mismatched)
class VMImageListPublishersScenarioTest(ScenarioTest):
    """Tests for `vm image list-publishers`."""

    @AllowLargeResponse()
    def test_vm_image_list_publishers(self):
        self.kwargs.update({
            'loc': 'westus'
        })
        # Expect a JSON array in which every entry is in the requested region.
        expected = [
            self.check('type(@)', 'array'),
            self.check("length([?location == '{loc}']) == length(@)", True),
        ]
        self.cmd('vm image list-publishers --location {loc}', checks=expected)
class VMImageListSkusScenarioTest(ScenarioTest):
    """Tests for `vm image list-skus`."""

    def test_vm_image_list_skus(self):
        self.kwargs.update({
            'loc': 'westus',
            'pub': 'Canonical',
            'offer': 'UbuntuServer'
        })
        # The JMESPath query counts SKU ids embedding the publisher/offer path;
        # a positive count proves the listing is scoped correctly.
        list_skus_cmd = ("vm image list-skus --location {loc} -p {pub} --offer {offer} "
                         "--query \"length([].id.contains(@, "
                         "'/Publishers/{pub}/ArtifactTypes/VMImage/Offers/{offer}/Skus/'))\"")
        sku_count = self.cmd(list_skus_cmd).get_output_in_json()
        self.assertTrue(sku_count > 0)
class VMImageShowScenarioTest(ScenarioTest):
    """Covers `vm image show` for a fully-qualified image version."""

    def test_vm_image_show(self):
        # A specific Canonical Ubuntu publisher/offer/sku/version tuple.
        self.kwargs.update({
            'loc': 'westus',
            'pub': 'Canonical',
            'offer': 'UbuntuServer',
            'sku': '14.04.2-LTS',
            'ver': '14.04.201503090'
        })
        self.cmd('vm image show --location {loc} --publisher {pub} --offer {offer} --sku {sku} --version {ver}', checks=[
            self.check('type(@)', 'object'),
            self.check('location', '{loc}'),
            self.check('name', '{ver}'),
            # The resource id must embed the full publisher/offer/sku/version path.
            self.check("contains(id, '/Publishers/{pub}/ArtifactTypes/VMImage/Offers/{offer}/Skus/{sku}/Versions/{ver}')", True)
        ])
class VMGeneralizeScenarioTest(ScenarioTest):
    """End-to-end flows: generalize a VM, then capture custom images from it."""

    @ResourceGroupPreparer(name_prefix='cli_test_generalize_vm')
    def test_vm_generalize(self, resource_group):
        self.kwargs.update({
            'vm': 'vm-generalize'
        })
        self.cmd('vm create -g {rg} -n {vm} --admin-username ubuntu --image UbuntuLTS --admin-password testPassword0 --authentication-type password --use-unmanaged-disk')
        self.cmd('vm stop -g {rg} -n {vm}')
        # Should be able to generalize the VM after it has been stopped
        self.cmd('vm generalize -g {rg} -n {vm}', checks=self.is_empty())
        vm = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
        self.cmd('vm capture -g {rg} -n {vm} --vhd-name-prefix vmtest')
        # capture to a custom image
        self.kwargs['image'] = 'myImage'
        self.cmd('image create -g {rg} -n {image} --source {vm}', checks=[
            self.check('name', '{image}'),
            self.check('sourceVirtualMachine.id', vm['id']),
            # Without --zone-resilient the captured image is not zone resilient.
            self.check('storageProfile.zoneResilient', None)
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_generalize_vm')
    def test_vm_capture_zone_resilient_image(self, resource_group):
        # francecentral is used here — presumably because it supports
        # zone-resilient image capture; confirm if changing regions.
        self.kwargs.update({
            'loc': 'francecentral',
            'vm': 'vm-generalize'
        })
        self.cmd('vm create -g {rg} --location {loc} -n {vm} --admin-username ubuntu --image centos --admin-password testPassword0 --authentication-type password')
        self.cmd('vm deallocate -g {rg} -n {vm}')
        # Should be able to generalize the VM after it has been stopped
        self.cmd('vm generalize -g {rg} -n {vm}', checks=self.is_empty())
        vm = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
        # capture to a custom image
        self.kwargs['image'] = 'myImage2'
        self.cmd('image create -g {rg} -n {image} --source {vm} --zone-resilient -l {loc}', checks=[
            self.check('name', '{image}'),
            self.check('sourceVirtualMachine.id', vm['id']),
            self.check('storageProfile.zoneResilient', True)
        ])
class VMVMSSWindowsLicenseTest(ScenarioTest):
    """Round-trips the Windows license type on a VM and a VM scale set."""

    @ResourceGroupPreparer(name_prefix='cli_test_windows_license_type')
    def test_vm_vmss_windows_license_type(self, resource_group):
        self.kwargs.update({
            'vm': 'winvm',
            'vmss': 'winvmss'
        })
        # VM: create with Windows_Server license, then clear it via update.
        self.cmd('vm create -g {rg} -n {vm} --image Win2012R2Datacenter --admin-username clitest1234 --admin-password Test123456789# --license-type Windows_Server')
        self.cmd('vm show -g {rg} -n {vm}', checks=[
            self.check('licenseType', 'Windows_Server')
        ])
        self.cmd('vm update -g {rg} -n {vm} --license-type None', checks=[
            self.check('licenseType', 'None')
        ])
        # VMSS: same round-trip against the scale set's virtualMachineProfile.
        self.cmd('vmss create -g {rg} -n {vmss} --image Win2012R2Datacenter --admin-username clitest1234 --admin-password Test123456789# --license-type Windows_Server --instance-count 1')
        self.cmd('vmss show -g {rg} -n {vmss}', checks=[
            self.check('virtualMachineProfile.licenseType', 'Windows_Server')
        ])
        self.cmd('vmss update -g {rg} -n {vmss} --license-type None', checks=[
            self.check('virtualMachineProfile.licenseType', 'None')
        ])
class VMCustomImageTest(ScenarioTest):
    """Builds custom images from managed/unmanaged VMs and creates VMs/VMSS from them."""

    @ResourceGroupPreparer(name_prefix='cli_test_vm_custom_image')
    def test_vm_custom_image(self, resource_group):
        self.kwargs.update({
            'vm1': 'vm-unmanaged-disk',
            'vm2': 'vm-managed-disk',
            'newvm1': 'fromimage1',
            'newvm2': 'fromimage2',
            'image1': 'img-from-unmanaged',
            'image2': 'img-from-managed',
        })
        # Source VM 1: unmanaged disk.
        self.cmd('vm create -g {rg} -n {vm1} --image ubuntults --use-unmanaged-disk --admin-username sdk-test-admin --admin-password testPassword0')
        # deprovision the VM, but we have to do it async to avoid hanging the run-command itself
        self.cmd('vm run-command invoke -g {rg} -n {vm1} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes"')
        time.sleep(70)
        self.cmd('vm deallocate -g {rg} -n {vm1}')
        self.cmd('vm generalize -g {rg} -n {vm1}')
        self.cmd('image create -g {rg} -n {image1} --source {vm1}')
        # Source VM 2: managed disks plus two data disks.
        self.cmd('vm create -g {rg} -n {vm2} --image ubuntults --storage-sku standard_lrs --data-disk-sizes-gb 1 1 --admin-username sdk-test-admin --admin-password testPassword0')
        self.cmd('vm run-command invoke -g {rg} -n {vm2} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes"')
        time.sleep(70)
        self.cmd('vm deallocate -g {rg} -n {vm2}')
        self.cmd('vm generalize -g {rg} -n {vm2}')
        self.cmd('image create -g {rg} -n {image2} --source {vm2}')
        # New VM/VMSS from the unmanaged-sourced image.
        self.cmd('vm create -g {rg} -n {newvm1} --image {image1} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password')
        self.cmd('vm show -g {rg} -n {newvm1}', checks=[
            self.check('storageProfile.imageReference.resourceGroup', '{rg}'),
            self.check('storageProfile.osDisk.createOption', 'FromImage')
        ])
        self.cmd('vmss create -g {rg} -n vmss1 --image {image1} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password', checks=[
            self.check('vmss.virtualMachineProfile.storageProfile.imageReference.resourceGroup', '{rg}'),
            self.check('vmss.virtualMachineProfile.storageProfile.osDisk.createOption', 'FromImage')
        ])
        # New VM/VMSS from the managed-sourced image: data disks carry over.
        self.cmd('vm create -g {rg} -n {newvm2} --image {image2} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password')
        self.cmd('vm show -g {rg} -n {newvm2}', checks=[
            self.check('storageProfile.imageReference.resourceGroup', '{rg}'),
            self.check('storageProfile.osDisk.createOption', 'FromImage'),
            self.check("length(storageProfile.dataDisks)", 2),
            self.check("storageProfile.dataDisks[0].createOption", 'FromImage'),
            self.check('storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Standard_LRS')
        ])
        self.cmd('vmss create -g {rg} -n vmss2 --image {image2} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password', checks=[
            self.check('vmss.virtualMachineProfile.storageProfile.imageReference.resourceGroup', '{rg}'),
            self.check('vmss.virtualMachineProfile.storageProfile.osDisk.createOption', 'FromImage'),
            self.check("length(vmss.virtualMachineProfile.storageProfile.dataDisks)", 2),
            self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[0].createOption", 'FromImage'),
            self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[0].managedDisk.storageAccountType", 'Standard_LRS'),
            self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[1].createOption", 'FromImage'),
            self.check("vmss.virtualMachineProfile.storageProfile.dataDisks[1].managedDisk.storageAccountType", 'Standard_LRS')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_vm_custom_image_conflict')
    def test_vm_custom_image_name_conflict(self, resource_group):
        self.kwargs.update({
            'vm': 'test-vm',
            'image1': 'img-from-vm',
            'image2': 'img-from-vm-id',
            'image3': 'img-from-disk-id',
        })
        self.cmd('vm create -g {rg} -n {vm} --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')
        vm1_info = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
        self.cmd('vm stop -g {rg} -n {vm}')
        # set variables up to test against name conflict between disk and vm.
        self.kwargs.update({
            'os_disk_vhd_uri': vm1_info['storageProfile']['osDisk']['vhd']['uri'],
            'vm_id': vm1_info['id'],
            'os_disk': vm1_info['name']
        })
        # create disk with same name as vm
        disk_info = self.cmd('disk create -g {rg} -n {os_disk} --source {os_disk_vhd_uri} --os-type linux').get_output_in_json()
        self.kwargs.update({'os_disk_id': disk_info['id']})
        # Deallocate and generalize vm. Do not need to deprovision vm as this test will not recreate a vm from the image.
        self.cmd('vm deallocate -g {rg} -n {vm}')
        self.cmd('vm generalize -g {rg} -n {vm}')
        # Create image from vm
        self.cmd('image create -g {rg} -n {image1} --source {vm}', checks=[
            self.check("sourceVirtualMachine.id", '{vm_id}'),
            self.check("storageProfile.osDisk.managedDisk", None)
        ])
        # Create image from vm id
        self.cmd('image create -g {rg} -n {image2} --source {vm_id}', checks=[
            self.check("sourceVirtualMachine.id", '{vm_id}'),
            self.check("storageProfile.osDisk.managedDisk", None)
        ])
        # Create image from disk id
        self.cmd('image create -g {rg} -n {image3} --source {os_disk_id} --os-type linux', checks=[
            self.check("sourceVirtualMachine", None),
            self.check("storageProfile.osDisk.managedDisk.id", '{os_disk_id}')
        ])
class VMImageWithPlanTest(ScenarioTest):
    """Creates a VM from a marketplace image with purchase-plan info, then from a captured copy."""

    @unittest.skip('You cannot purchase reservation because required AAD tenant information is missing')
    @ResourceGroupPreparer()
    def test_vm_create_with_market_place_image(self, resource_group, resource_group_location):
        # test 2 scenarios, 1. create vm from market place image, 2. create from a custom image captured from such vms
        self.kwargs.update({
            'location': resource_group_location,
            'publisher': 'microsoft-ads',
            'offer': 'linux-data-science-vm-ubuntu',
            'sku': 'linuxdsvmubuntu',
            'vm1': 'vm1',
            'vm2': 'vm2',
            'image': 'image1'
        })
        self.kwargs['urn'] = '{publisher}:{offer}:{sku}:latest'.format(**self.kwargs)
        # extract out the plan info to be used when create the vm from the captured image
        plan = self.cmd('vm image show --urn {urn}').get_output_in_json()['plan']
        self.kwargs['plan_name'] = plan['name']
        self.kwargs['plan_product'] = plan['product']
        self.kwargs['plan_publisher'] = plan['publisher']
        # let us accept the term
        self.cmd('vm image accept-terms --urn {urn}', checks=self.check('accepted', True))
        # create a vm and capture an image from it
        self.cmd('vm create -g {rg} -n {vm1} --image {urn} --admin-username sdk-test-admin --admin-password testPassword0')
        # deprovision the VM, but we have to do it async to avoid hanging the run-command itself
        self.cmd('vm run-command invoke -g {rg} -n {vm1} --command-id RunShellScript --scripts "echo \'sudo waagent -deprovision+user --force\' | at -M now + 1 minutes"')
        time.sleep(70)
        self.cmd('vm deallocate -g {rg} -n {vm1}')
        self.cmd('vm generalize -g {rg} -n {vm1}')
        self.cmd('image create -g {rg} -n {image} --source {vm1}')
        # Creating from the captured image requires re-supplying the plan info.
        self.cmd('vm create -g {rg} -n {vm2} --image {image} --admin-username sdk-test-admin --admin-password testPassword0 --authentication-type password --plan-publisher {plan_publisher} --plan-name {plan_name} --plan-product {plan_product}')
        self.cmd('vm show -g {rg} -n {vm2}', checks=self.check('provisioningState', 'Succeeded'))
class VMCreateFromUnmanagedDiskTest(ScenarioTest):
    """Imports an unmanaged OS disk into a managed disk and boots a VM from it."""

    @ResourceGroupPreparer(name_prefix='cli_test_vm_from_unmanaged_disk')
    def test_vm_create_from_unmanaged_disk(self, resource_group):
        # create a vm with unmanaged os disk
        self.kwargs.update({
            'loc': 'westus',
            'vm': 'vm1'
        })
        self.cmd('vm create -g {rg} -n {vm} --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')
        vm1_info = self.cmd('vm show -g {rg} -n {vm}', checks=[
            self.check('name', '{vm}'),
            self.check('licenseType', None)
        ]).get_output_in_json()
        self.cmd('vm stop -g {rg} -n {vm}')
        # import the unmanaged os disk into a specialized managed disk
        # (note: 'vm' is rebound to the second VM's name from here on)
        self.kwargs.update({
            'os_disk_vhd_uri': vm1_info['storageProfile']['osDisk']['vhd']['uri'],
            'vm': 'vm2',
            'os_disk': 'os1'
        })
        self.cmd('disk create -g {rg} -n {os_disk} --source {os_disk_vhd_uri} --os-type linux',
                 checks=[self.check('name', '{os_disk}'), self.check('osType', 'Linux')])
        # create a vm by attaching to it
        self.cmd('vm create -g {rg} -n {vm} --attach-os-disk {os_disk} --os-type linux',
                 checks=self.check('powerState', 'VM running'))
class VMCreateWithSpecializedUnmanagedDiskTest(ScenarioTest):
    """Re-creates VMs by attaching specialized unmanaged OS/data disks from a deleted VM."""

    @ResourceGroupPreparer(name_prefix='cli_test_vm_with_specialized_unmanaged_disk')
    def test_vm_create_with_specialized_unmanaged_disk(self, resource_group):
        self.kwargs.update({
            'loc': 'westus'
        })
        # create a vm with unmanaged os disk
        self.cmd('vm create -g {rg} -n vm1 --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')
        vm1_info = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()
        self.kwargs['disk_uri'] = vm1_info['storageProfile']['osDisk']['vhd']['uri']
        # Deleting the VM releases the VHD lease so it can be re-attached.
        self.cmd('vm delete -g {rg} -n vm1 -y')
        # create a vm by attaching the OS disk from the deleted VM
        self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {disk_uri} --os-type linux --use-unmanaged-disk',
                 checks=self.check('powerState', 'VM running'))

    @ResourceGroupPreparer(name_prefix='cli_test_vm_with_specialized_unmanaged_disk')
    def test_vm_create_with_unmanaged_data_disks(self, resource_group):
        self.kwargs.update({
            'vm': 'vm1',
            'vm2': 'vm2'
        })
        # create a unmanaged bm with 2 unmanaged disks
        vm_create_cmd = 'vm create -g {rg} -n vm1 --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password'
        self.cmd(vm_create_cmd)
        self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} --new --size-gb 1')
        self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} --new --size-gb 2')
        vm1_info = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
        self.kwargs['disk_uri'] = vm1_info['storageProfile']['osDisk']['vhd']['uri']
        self.kwargs['data_disk'] = vm1_info['storageProfile']['dataDisks'][0]['vhd']['uri']
        self.kwargs['data_disk2'] = vm1_info['storageProfile']['dataDisks'][1]['vhd']['uri']
        self.cmd('vm delete -g {rg} -n vm1 -y')
        # create a vm by attaching the OS disk from the deleted VM
        vm_create_cmd = ('vm create -g {rg} -n {vm2} --attach-os-disk {disk_uri} --os-type linux --use-unmanaged-disk '
                         '--attach-data-disks {data_disk} {data_disk2} --data-disk-caching 0=ReadWrite 1=ReadOnly')
        self.cmd(vm_create_cmd)
        # Per-LUN caching settings from --data-disk-caching must be honored.
        self.cmd('vm show -g {rg} -n {vm2} -d', checks=[
            self.check('storageProfile.dataDisks[0].caching', 'ReadWrite'),
            self.check('storageProfile.dataDisks[0].lun', 0),
            self.check('storageProfile.dataDisks[1].caching', 'ReadOnly'),
            self.check('storageProfile.dataDisks[1].lun', 1)
        ])
class VMAttachDisksOnCreate(ScenarioTest):
    """Creates VMs by attaching existing OS and data disks at creation time."""

    @ResourceGroupPreparer()
    def test_vm_create_by_attach_os_and_data_disks(self, resource_group):
        # the testing below follow a real custom's workflow requiring the support of attaching data disks on create
        # creating a vm
        self.cmd('vm create -g {rg} -n vm1 --image centos --admin-username centosadmin --admin-password testPassword0 --authentication-type password --data-disk-sizes-gb 2')
        result = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()
        self.kwargs.update({
            'origin_os_disk': result['storageProfile']['osDisk']['name'],
            'origin_data_disk': result['storageProfile']['dataDisks'][0]['name'],
            # snapshot the os & data disks
            'os_snapshot': 'oSnapshot',
            'os_disk': 'sDisk',
            'data_snapshot': 'dSnapshot',
            'data_disk': 'dDisk'
        })
        # Clone both disks via snapshots so they can be attached to a new VM.
        self.cmd('snapshot create -g {rg} -n {os_snapshot} --source {origin_os_disk}')
        self.cmd('disk create -g {rg} -n {os_disk} --source {os_snapshot}')
        self.cmd('snapshot create -g {rg} -n {data_snapshot} --source {origin_data_disk}')
        self.cmd('disk create -g {rg} -n {data_disk} --source {data_snapshot}')
        # rebuild a new vm
        # (os disk can be resized)
        self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {os_disk} --attach-data-disks {data_disk} --data-disk-sizes-gb 3 --os-disk-size-gb 100 --os-type linux',
                 checks=self.check('powerState', 'VM running'))
        self.cmd('vm show -g {rg} -n vm2', checks=[
            self.check('length(storageProfile.dataDisks)', 2),
            self.check('storageProfile.dataDisks[0].diskSizeGb', 3),
            self.check('storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Premium_LRS'),
            self.check('storageProfile.osDisk.diskSizeGb', 100)
        ])

    @ResourceGroupPreparer()
    def test_vm_create_by_attach_unmanaged_os_and_data_disks(self, resource_group):
        # creating a vm
        self.cmd('vm create -g {rg} -n vm1 --use-unmanaged-disk --image centos --admin-username centosadmin --admin-password testPassword0 --authentication-type password')
        self.cmd('vm unmanaged-disk attach -g {rg} --vm-name vm1 --new --size-gb 2')
        result = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()
        self.kwargs['os_disk_vhd'] = result['storageProfile']['osDisk']['vhd']['uri']
        self.kwargs['data_disk_vhd'] = result['storageProfile']['dataDisks'][0]['vhd']['uri']
        # delete the vm to end vhd's leases so they can be used to create a new vm through attaching
        self.cmd('vm deallocate -g {rg} -n vm1')
        self.cmd('vm delete -g {rg} -n vm1 -y')
        # rebuild a new vm
        self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {os_disk_vhd} --attach-data-disks {data_disk_vhd} --os-type linux --use-unmanaged-disk',
                 checks=self.check('powerState', 'VM running'))
class VMManagedDiskScenarioTest(ScenarioTest):
    """Scenario tests for managed disk / snapshot / image lifecycle:
    create, update, grant-access, create-from-source, and building images
    from snapshots and disks with various SKU/caching options."""

    @ResourceGroupPreparer(name_prefix='cli_test_managed_disk')
    def test_vm_managed_disk(self, resource_group):
        self.kwargs.update({
            'loc': 'westus',
            'disk1': 'd1',
            'disk2': 'd2',
            'snapshot1': 's1',
            'snapshot2': 's2',
            'image': 'i1',
            'image_2': 'i2',
            'image_3': 'i3'
        })

        # create a disk and update
        data_disk = self.cmd('disk create -g {rg} -n {disk1} --size-gb 1 --tags tag1=d1', checks=[
            self.check('sku.name', 'Premium_LRS'),
            self.check('diskSizeGb', 1),
            self.check('tags.tag1', 'd1')
        ]).get_output_in_json()
        self.cmd('disk update -g {rg} -n {disk1} --size-gb 10 --sku Standard_LRS', checks=[
            self.check('sku.name', 'Standard_LRS'),
            self.check('diskSizeGb', 10)
        ])

        # get SAS token; 'sv=' is the storage-service-version query parameter of a SAS URL
        result = self.cmd('disk grant-access -g {rg} -n {disk1} --duration-in-seconds 10').get_output_in_json()
        self.assertTrue('sv=' in result['accessSas'])

        # create another disk by importing from the disk1
        self.kwargs['disk1_id'] = data_disk['id']
        data_disk2 = self.cmd('disk create -g {rg} -n {disk2} --source {disk1_id}').get_output_in_json()

        # create a snapshot
        os_snapshot = self.cmd('snapshot create -g {rg} -n {snapshot1} --size-gb 1 --sku Premium_LRS --tags tag1=s1', checks=[
            self.check('sku.name', 'Premium_LRS'),
            self.check('diskSizeGb', 1),
            self.check('tags.tag1', 's1')
        ]).get_output_in_json()
        # update the sku
        self.cmd('snapshot update -g {rg} -n {snapshot1} --sku Standard_LRS', checks=[
            self.check('sku.name', 'Standard_LRS'),
            self.check('diskSizeGb', 1)
        ])

        # create another snapshot by importing from the disk1
        data_snapshot = self.cmd('snapshot create -g {rg} -n {snapshot2} --source {disk1} --sku Premium_LRS').get_output_in_json()
        self.kwargs.update({
            'snapshot1_id': os_snapshot['id'],
            'snapshot2_id': data_snapshot['id'],
            'disk2_id': data_disk2['id']
        })

        # till now, image creation doesn't inspect the disk for os, so the command below should succeed with junk disk
        self.cmd('image create -g {rg} -n {image} --source {snapshot1} --data-disk-sources {disk1} {snapshot2_id} {disk2_id} --os-type Linux --tags tag1=i1', checks=[
            self.check('storageProfile.osDisk.osType', 'Linux'),
            self.check('storageProfile.osDisk.snapshot.id', '{snapshot1_id}'),
            self.check('length(storageProfile.dataDisks)', 3),
            self.check('storageProfile.dataDisks[0].lun', 0),
            self.check('storageProfile.dataDisks[1].lun', 1),
            self.check('tags.tag1', 'i1')
        ])

        # test that images can be created with different storage skus and os disk caching settings.
        self.cmd('image create -g {rg} -n {image_2} --source {snapshot1} --data-disk-sources {disk1} {snapshot2_id} {disk2_id}'
                 ' --os-type Linux --tags tag1=i1 --storage-sku Premium_LRS --os-disk-caching None',
                 checks=[
                     self.check('storageProfile.osDisk.storageAccountType', 'Premium_LRS'),
                     self.check('storageProfile.osDisk.osType', 'Linux'),
                     self.check('storageProfile.osDisk.snapshot.id', '{snapshot1_id}'),
                     self.check('length(storageProfile.dataDisks)', 3),
                     self.check('storageProfile.dataDisks[0].lun', 0),
                     self.check('storageProfile.dataDisks[1].lun', 1),
                     self.check('storageProfile.osDisk.caching', 'None'),
                     self.check('tags.tag1', 'i1')
                 ])

        self.cmd('image create -g {rg} -n {image_3} --source {snapshot1} --data-disk-sources {disk1} {snapshot2_id} {disk2_id}'
                 ' --os-type Linux --tags tag1=i1 --storage-sku Standard_LRS --os-disk-caching ReadWrite',
                 checks=[
                     self.check('storageProfile.osDisk.storageAccountType', 'Standard_LRS'),
                     self.check('storageProfile.osDisk.caching', 'ReadWrite')
                 ])
class ComputeListSkusScenarioTest(ScenarioTest):
    """Tests for `vm list-skus` table output and filters.

    Both tests are currently skipped ("Need to check this") but kept for
    manual verification against the (very large) SKU listing API.
    """

    @unittest.skip("Need to check this")
    @AllowLargeResponse(size_kb=99999)
    def test_list_compute_skus_table_output(self):
        result = self.cmd('vm list-skus -l eastus2 -otable')
        lines = result.output.split('\n')
        # 1st line is header
        self.assertEqual(lines[0].split(), ['ResourceType', 'Locations', 'Name', 'Zones', 'Restrictions'])
        # spot check that a few known rows appear somewhere in the table
        fd_found, ud_found, size_found, zone_found = False, False, False, False
        for line in lines[2:]:
            parts = line.split()
            if not parts:
                # skip blank rows, including the empty trailing element produced
                # by split('\n') when the output ends with a newline
                continue
            if not fd_found and (parts[:4] == ['availabilitySets', 'eastus2', 'Classic', 'None']):
                fd_found = True
            elif not ud_found and (parts[:4] == ['availabilitySets', 'eastus2', 'Aligned', 'None']):
                ud_found = True
            elif not size_found and parts[:3] == ['disks', 'eastus2', 'Standard_LRS']:
                size_found = True
            # guard the direct index: rows with fewer than 4 columns (e.g. an empty
            # Zones cell) previously raised IndexError here
            elif not zone_found and len(parts) > 3 and parts[3] == '1,2,3':
                zone_found = True
        self.assertTrue(fd_found)
        self.assertTrue(ud_found)
        self.assertTrue(size_found)
        self.assertTrue(zone_found)

    @unittest.skip("Need to check this")
    @AllowLargeResponse(size_kb=16144)
    def test_list_compute_skus_filter(self):
        # --zone restricts the result to SKUs with availability-zone support
        result = self.cmd('vm list-skus -l eastus2 --size Standard_DS1_V2 --zone').get_output_in_json()
        self.assertTrue(result and len(result) == len([x for x in result if x['name'] == 'Standard_DS1_v2' and x['locationInfo'][0]['zones']]))
        result = self.cmd('vm list-skus -l westus --resource-type disks').get_output_in_json()
        self.assertTrue(result and len(result) == len([x for x in result if x['resourceType'] == 'disks']))
class VMExtensionScenarioTest(ScenarioTest):
    """Scenario tests for `vm extension` set/list/show/delete, including
    --force-update and custom extension instance names."""

    @ResourceGroupPreparer(name_prefix='cli_test_vm_extension')
    def test_vm_extension(self, resource_group):
        user_name = 'foouser1'
        # protected-settings file for the VMAccessForLinux extension
        config_file = _write_config_file(user_name)

        self.kwargs.update({
            'vm': 'myvm',
            'pub': 'Microsoft.OSTCExtensions',
            'ext': 'VMAccessForLinux',
            'config': config_file,
            'user': user_name
        })

        self.cmd('vm create -n {vm} -g {rg} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password testPassword0')

        # a freshly created VM has no extensions
        self.cmd('vm extension list --vm-name {vm} --resource-group {rg}',
                 checks=self.check('length([])', 0))
        self.cmd('vm extension set -n {ext} --publisher {pub} --version 1.2 --vm-name {vm} --resource-group {rg} --protected-settings "{config}" --force-update')
        result = self.cmd('vm get-instance-view -n {vm} -g {rg}', checks=[
            self.check('*.extensions[0].name', ['VMAccessForLinux']),
        ]).get_output_in_json()

        # ensure the resolved minor version is greater than 2
        # (assertGreater is strict: exactly 2 would fail)
        minor_version = int(result['instanceView']['extensions'][0]['typeHandlerVersion'].split('.')[1])
        self.assertGreater(minor_version, 2)

        result = self.cmd('vm extension show --resource-group {rg} --vm-name {vm} --name {ext}', checks=[
            self.check('type(@)', 'object'),
            self.check('name', '{ext}'),
            self.check('resourceGroup', '{rg}'),
        ]).get_output_in_json()
        # --force-update should have stamped a well-formed UUID tag;
        # uuid.UUID raises ValueError (failing the test) if it is malformed
        uuid.UUID(result['forceUpdateTag'])
        self.cmd('vm extension delete --resource-group {rg} --vm-name {vm} --name {ext}')

    @ResourceGroupPreparer(name_prefix='cli_test_vm_extension_2')
    def test_vm_extension_instance_name(self, resource_group):
        user_name = 'foouser1'
        config_file = _write_config_file(user_name)

        self.kwargs.update({
            'vm': 'myvm',
            'pub': 'Microsoft.OSTCExtensions',
            'ext_type': 'VMAccessForLinux',
            'config': config_file,
            'user': user_name,
            'ext_name': 'MyAccessExt'
        })

        self.cmd('vm create -n {vm} -g {rg} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password testPassword0')

        # install under a custom instance name that differs from the extension type
        self.cmd('vm extension set -n {ext_type} --publisher {pub} --version 1.2 --vm-name {vm} --resource-group {rg} '
                 '--protected-settings "{config}" --extension-instance-name {ext_name}')

        self.cmd('vm extension show --resource-group {rg} --vm-name {vm} --name {ext_name}', checks=[
            self.check('name', '{ext_name}'),
            self.check('typePropertiesType', '{ext_type}')
        ])
        self.cmd('vm extension delete --resource-group {rg} --vm-name {vm} --name {ext_name}')
class VMMachineExtensionImageScenarioTest(ScenarioTest):
    """Read-only checks for `vm extension image` list-names, list-versions and show."""

    def test_vm_machine_extension_image(self):
        # fixture values consumed by the {placeholder} command templates below
        fixture = {
            'loc': 'westus',
            'pub': 'Microsoft.Azure.Diagnostics',
            'ext': 'IaaSDiagnostics',
            'ver': '1.6.4.0',
        }
        self.kwargs.update(fixture)

        def location_scoped_array_checks():
            # every returned entry must be scoped to the requested location
            return [
                self.check('type(@)', 'array'),
                self.check("length([?location == '{loc}']) == length(@)", True),
            ]

        self.cmd('vm extension image list-names --location {loc} --publisher {pub}',
                 checks=location_scoped_array_checks())
        self.cmd('vm extension image list-versions --location {loc} -p {pub} --name {ext}',
                 checks=location_scoped_array_checks())

        show_checks = [
            self.check('type(@)', 'object'),
            self.check('location', '{loc}'),
            self.check("contains(id, '/Providers/Microsoft.Compute/Locations/{loc}/Publishers/{pub}/ArtifactTypes/VMExtension/Types/{ext}/Versions/{ver}')", True)
        ]
        self.cmd('vm extension image show --location {loc} -p {pub} --name {ext} --version {ver}',
                 checks=show_checks)
class VMExtensionImageSearchScenarioTest(LiveScenarioTest):
    """Live-only search of extension images filtered by publisher and name."""

    def test_vm_extension_image_search(self):
        # Pin the search to one publisher/name pair so the query stays under a
        # single publisher; broader searches fan out in parallel and produce
        # incomplete VCR recordings.
        self.kwargs['pub'] = 'Test.Microsoft.VisualStudio.Services'
        self.kwargs['image'] = 'TeamServicesAgentLinux1'

        expected = [
            self.check('type(@)', 'array'),
            self.check("length([?name == '{image}']) == length(@)", True)
        ]
        self.cmd('vm extension image list -l westus --publisher {pub} --name {image}',
                 checks=expected)
class VMCreateUbuntuScenarioTest(ScenarioTest):
    """Create an Ubuntu VM with SSH auth and a cached data disk, then verify
    the resulting profile and the idempotency of repeating the same command."""

    @ResourceGroupPreparer(name_prefix='cli_test_vm_create_ubuntu')
    def test_vm_create_ubuntu(self, resource_group, resource_group_location):
        self.kwargs.update({
            'username': 'ubuntu',
            'vm': 'cli-test-vm2',
            'image': 'UbuntuLTS',
            'auth': 'ssh',
            'ssh_key': TEST_SSH_KEY_PUB,
            'loc': resource_group_location
        })
        self.cmd('vm create --resource-group {rg} --admin-username {username} --name {vm} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' --location {loc} --data-disk-sizes-gb 1 --data-disk-caching ReadOnly')

        # SSH auth implies password login disabled and the public key installed
        self.cmd('vm show -g {rg} -n {vm}', checks=[
            self.check('provisioningState', 'Succeeded'),
            self.check('osProfile.adminUsername', '{username}'),
            self.check('osProfile.computerName', '{vm}'),
            self.check('osProfile.linuxConfiguration.disablePasswordAuthentication', True),
            self.check('osProfile.linuxConfiguration.ssh.publicKeys[0].keyData', '{ssh_key}'),
            self.check('storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Premium_LRS'),
            self.check('storageProfile.osDisk.managedDisk.storageAccountType', 'Premium_LRS'),
            self.check('storageProfile.dataDisks[0].lun', 0),
            self.check('storageProfile.dataDisks[0].caching', 'ReadOnly'),
        ])

        # test for idempotency--no need to reverify, just ensure the command doesn't fail
        self.cmd('vm create --resource-group {rg} --admin-username {username} --name {vm} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' --location {loc} --data-disk-sizes-gb 1 --data-disk-caching ReadOnly ')
class VMCreateExistingOptions(ScenarioTest):
    """Create a VM wired to pre-existing resources (availability set, public IP,
    vnet/subnet, NSG, storage account) referenced by name."""

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vm_create_existing')
    @StorageAccountPreparer()
    def test_vm_create_existing_options(self, resource_group, storage_account):
        self.kwargs.update({
            'availset': 'vrfavailset',
            'pubip': 'vrfpubip',
            'vnet': 'vrfvnet',
            'subnet': 'vrfsubnet',
            'nsg': 'vrfnsg',
            'vm': 'vrfvm',
            'disk': 'vrfosdisk',
            'container': 'vrfcontainer',
            'ssh_key': TEST_SSH_KEY_PUB
        })

        # pre-create each dependency the VM will reference by name
        self.cmd('vm availability-set create --name {availset} -g {rg} --unmanaged --platform-fault-domain-count 3 --platform-update-domain-count 3')
        self.cmd('network public-ip create --name {pubip} -g {rg}')
        self.cmd('network vnet create --name {vnet} -g {rg} --subnet-name {subnet}')
        self.cmd('network nsg create --name {nsg} -g {rg}')

        self.cmd('vm create --image UbuntuLTS --os-disk-name {disk} --vnet-name {vnet} --subnet {subnet} --availability-set {availset} --public-ip-address {pubip} -l "West US" --nsg {nsg} --use-unmanaged-disk --size Standard_DS2 --admin-username user11 --storage-account {sa} --storage-container-name {container} -g {rg} --name {vm} --ssh-key-value \'{ssh_key}\'')

        # the service reports the VM name upper-cased inside the availability-set member id
        self.cmd('vm availability-set show -n {availset} -g {rg}',
                 checks=self.check('virtualMachines[0].id.ends_with(@, \'{}\')'.format(self.kwargs['vm'].upper()), True))
        self.cmd('network nsg show -n {nsg} -g {rg}',
                 checks=self.check('networkInterfaces[0].id.ends_with(@, \'{vm}VMNic\')', True))
        self.cmd('network nic show -n {vm}VMNic -g {rg}',
                 checks=self.check('ipConfigurations[0].publicIpAddress.id.ends_with(@, \'{pubip}\')', True))
        # unmanaged OS disk must land in the requested storage account/container
        self.cmd('vm show -n {vm} -g {rg}',
                 checks=self.check('storageProfile.osDisk.vhd.uri', 'https://{sa}.blob.core.windows.net/{container}/{disk}.vhd'))
class VMCreateExistingIdsOptions(ScenarioTest):
    """Same scenario as VMCreateExistingOptions, but the pre-existing resources
    are passed to `vm create` as full resource IDs instead of names."""

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vm_create_existing_ids')
    @StorageAccountPreparer()
    def test_vm_create_existing_ids_options(self, resource_group, storage_account):
        from msrestazure.tools import resource_id, is_valid_resource_id
        # the ScenarioTest helper supplies the subscription id used to build the ids
        subscription_id = self.get_subscription_id()

        self.kwargs.update({
            'availset': 'vrfavailset',
            'pubip': 'vrfpubip',
            'vnet': 'vrfvnet',
            'subnet': 'vrfsubnet',
            'nsg': 'vrfnsg',
            'vm': 'vrfvm',
            'disk': 'vrfosdisk',
            'container': 'vrfcontainer',
            'ssh_key': TEST_SSH_KEY_PUB
        })

        # pre-create each dependency, then reference it by id below
        self.cmd('vm availability-set create --name {availset} -g {rg} --unmanaged --platform-fault-domain-count 3 --platform-update-domain-count 3')
        self.cmd('network public-ip create --name {pubip} -g {rg}')
        self.cmd('network vnet create --name {vnet} -g {rg} --subnet-name {subnet}')
        self.cmd('network nsg create --name {nsg} -g {rg}')

        rg = self.kwargs['rg']
        self.kwargs.update({
            'availset_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Compute', type='availabilitySets', name=self.kwargs['availset']),
            'pubip_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='publicIpAddresses', name=self.kwargs['pubip']),
            'subnet_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='virtualNetworks', child_type_1='subnets', name=self.kwargs['vnet'], child_name_1=self.kwargs['subnet']),
            'nsg_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='networkSecurityGroups', name=self.kwargs['nsg'])
        })

        # sanity-check the constructed ids before handing them to the CLI
        assert is_valid_resource_id(self.kwargs['availset_id'])
        assert is_valid_resource_id(self.kwargs['pubip_id'])
        assert is_valid_resource_id(self.kwargs['subnet_id'])
        assert is_valid_resource_id(self.kwargs['nsg_id'])

        self.cmd('vm create --image UbuntuLTS --os-disk-name {disk} --subnet {subnet_id} --availability-set {availset_id} --public-ip-address {pubip_id} -l "West US" --nsg {nsg_id} --use-unmanaged-disk --size Standard_DS2 --admin-username user11 --storage-account {sa} --storage-container-name {container} -g {rg} --name {vm} --ssh-key-value \'{ssh_key}\'')

        # the service reports the VM name upper-cased inside the availability-set member id
        self.cmd('vm availability-set show -n {availset} -g {rg}',
                 checks=self.check('virtualMachines[0].id.ends_with(@, \'{}\')'.format(self.kwargs['vm'].upper()), True))
        self.cmd('network nsg show -n {nsg} -g {rg}',
                 checks=self.check('networkInterfaces[0].id.ends_with(@, \'{vm}VMNic\')', True))
        self.cmd('network nic show -n {vm}VMNic -g {rg}',
                 checks=self.check('ipConfigurations[0].publicIpAddress.id.ends_with(@, \'{pubip}\')', True))
        self.cmd('vm show -n {vm} -g {rg}',
                 checks=self.check('storageProfile.osDisk.vhd.uri', 'https://{sa}.blob.core.windows.net/{container}/{disk}.vhd'))
# region VMSS Tests
class VMSSCreateAndModify(ScenarioTest):
    """End-to-end VMSS lifecycle: create, list/show, update instances,
    stop/start/restart, scale, delete instances, deallocate, delete."""

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_and_modify')
    def test_vmss_create_and_modify(self):
        self.kwargs.update({
            'vmss': 'vmss1',
            'count': 5,
            'new_count': 4
        })

        self.cmd('vmss create --admin-password testPassword0 --name {vmss} -g {rg} --admin-username myadmin --image Win2012R2Datacenter --instance-count {count}')
        self.cmd('vmss show --name {vmss} -g {rg}', checks=[
            self.check('virtualMachineProfile.priority', None),
            self.check('sku.name', 'Standard_DS1_v2'),
        ])

        self.cmd('vmss list',
                 checks=self.check('type(@)', 'array'))

        self.cmd('vmss list --resource-group {rg}', checks=[
            self.check('type(@)', 'array'),
            self.check('length(@)', 1),
            self.check('[0].name', '{vmss}'),
            self.check('[0].resourceGroup', '{rg}')
        ])
        self.cmd('vmss list-skus --resource-group {rg} --name {vmss}',
                 checks=self.check('type(@)', 'array'))
        self.cmd('vmss show --resource-group {rg} --name {vmss}', checks=[
            self.check('type(@)', 'object'),
            self.check('name', '{vmss}'),
            self.check('resourceGroup', '{rg}')
        ])

        # update two of the five instances (ids at list positions 3 and 4)
        result = self.cmd('vmss list-instances --resource-group {rg} --name {vmss} --query "[].instanceId"').get_output_in_json()
        self.kwargs['instance_ids'] = result[3] + ' ' + result[4]
        self.cmd('vmss update-instances --resource-group {rg} --name {vmss} --instance-ids {instance_ids}')
        self.cmd('vmss get-instance-view --resource-group {rg} --name {vmss}', checks=[
            self.check('type(@)', 'object'),
            self.check('type(virtualMachine)', 'object'),
            self.check('type(statuses)', 'array')
        ])

        self.cmd('vmss stop --resource-group {rg} --name {vmss}')
        self.cmd('vmss start --resource-group {rg} --name {vmss}')
        self.cmd('vmss restart --resource-group {rg} --name {vmss}')

        # scale down from 5 to 4 instances
        self.cmd('vmss scale --resource-group {rg} --name {vmss} --new-capacity {new_count}')
        self.cmd('vmss show --resource-group {rg} --name {vmss}', checks=[
            self.check('sku.capacity', '{new_count}'),
            self.check('virtualMachineProfile.osProfile.windowsConfiguration.enableAutomaticUpdates', True)
        ])

        # delete two instances, leaving new_count - 2 running
        result = self.cmd('vmss list-instances --resource-group {rg} --name {vmss} --query "[].instanceId"').get_output_in_json()
        self.kwargs['instance_ids'] = result[2] + ' ' + result[3]
        self.cmd('vmss delete-instances --resource-group {rg} --name {vmss} --instance-ids {instance_ids}')
        self.cmd('vmss get-instance-view --resource-group {rg} --name {vmss}', checks=[
            self.check('type(@)', 'object'),
            self.check('type(virtualMachine)', 'object'),
            self.check('virtualMachine.statusesSummary[0].count', self.kwargs['new_count'] - 2)
        ])
        self.cmd('vmss deallocate --resource-group {rg} --name {vmss}')
        self.cmd('vmss delete --resource-group {rg} --name {vmss}')
        self.cmd('vmss list --resource-group {rg}', checks=self.is_empty())
class VMSSCreateOptions(ScenarioTest):
    """VMSS creation options: caching/upgrade policy/data disks, per-instance
    disk attach/detach, and the three authentication modes."""

    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_options')
    def test_vmss_create_options(self, resource_group):
        self.kwargs.update({
            'vmss': 'vrfvmss',
            'count': 2,
            'caching': 'ReadWrite',
            'update': 'automatic',
            'ip': 'vrfpubip'
        })

        self.cmd('network public-ip create --name {ip} -g {rg}')

        self.cmd('vmss create --image Debian --admin-password testPassword0 -l westus -g {rg} -n {vmss} --disable-overprovision --instance-count {count} --os-disk-caching {caching} --upgrade-policy-mode {update} --authentication-type password --admin-username myadmin --public-ip-address {ip} --data-disk-sizes-gb 1 --vm-sku Standard_D2_v2')
        # the auto-created load balancer front end should reuse the existing public IP
        self.cmd('network lb show -g {rg} -n {vmss}lb ',
                 checks=self.check('frontendIpConfigurations[0].publicIpAddress.id.ends_with(@, \'{ip}\')', True))
        self.cmd('vmss show -g {rg} -n {vmss}', checks=[
            self.check('sku.capacity', '{count}'),
            self.check('virtualMachineProfile.storageProfile.osDisk.caching', '{caching}'),
            # the service reports the upgrade mode title-cased ('Automatic')
            self.check('upgradePolicy.mode', self.kwargs['update'].title()),
            self.check('singlePlacementGroup', True),
        ])

        self.kwargs['id'] = self.cmd('vmss list-instances -g {rg} -n {vmss} --query "[].instanceId"').get_output_in_json()[0]
        self.cmd('vmss show -g {rg} -n {vmss} --instance-id {id}',
                 checks=self.check('instanceId', '{id}'))

        # attach a second data disk at the VMSS (model) level, then detach it
        self.cmd('vmss disk attach -g {rg} --vmss-name {vmss} --size-gb 3')
        self.cmd('vmss show -g {rg} -n {vmss}', checks=[
            self.check('length(virtualMachineProfile.storageProfile.dataDisks)', 2),
            self.check('virtualMachineProfile.storageProfile.dataDisks[0].diskSizeGb', 1),
            self.check('virtualMachineProfile.storageProfile.dataDisks[0].managedDisk.storageAccountType', 'Standard_LRS'),
            self.check('virtualMachineProfile.storageProfile.dataDisks[1].diskSizeGb', 3),
            self.check('virtualMachineProfile.storageProfile.dataDisks[1].managedDisk.storageAccountType', 'Standard_LRS'),
        ])
        self.cmd('vmss disk detach -g {rg} --vmss-name {vmss} --lun 1')
        self.cmd('vmss show -g {rg} -n {vmss}', checks=[
            self.check('length(virtualMachineProfile.storageProfile.dataDisks)', 1),
            self.check('virtualMachineProfile.storageProfile.dataDisks[0].lun', 0),
            self.check('virtualMachineProfile.storageProfile.dataDisks[0].diskSizeGb', 1)
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_options')
    def test_vmss_update_instance_disks(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss1',
            'caching': 'ReadWrite',
            'update': 'automatic',
            'ip': 'vrfpubip',
            'disk': 'd1',
            'instance_id': '1',
            'sku': 'Standard_LRS'
        })

        self.cmd('vmss create --image Debian --admin-username clitest1 --admin-password testPassword0 -l westus -g {rg} -n {vmss}  --storage-sku {sku}')
        self.cmd('disk create -g {rg} -n {disk} --size-gb 1 --sku {sku}')
        instances = self.cmd('vmss list-instances -g {rg} -n {vmss}').get_output_in_json()
        # overwrite the placeholder with a real instance id from the scale set
        self.kwargs['instance_id'] = instances[0]['instanceId']

        # attach an existing managed disk to a single instance (not the VMSS model)
        self.cmd('vmss disk attach -g {rg} --vmss-name {vmss} --instance-id {instance_id} --disk {disk} --caching {caching}')
        self.cmd("vmss list-instances -g {rg} -n {vmss} --query \"[?instanceId=='{instance_id}']\"", checks=[
            self.check('length([0].storageProfile.dataDisks)', 1),
            self.check('[0].storageProfile.dataDisks[0].caching', self.kwargs['caching'])
        ])
        self.cmd('vmss disk detach -g {rg} --vmss-name {vmss} --instance-id {instance_id} --lun 0')
        self.cmd("vmss list-instances -g {rg} -n {vmss} --query \"[?instanceId=='{instance_id}']\"", checks=[
            self.check('length([0].storageProfile.dataDisks)', 0)
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_options')
    def test_vmss_create_auth(self, resource_group):
        self.kwargs.update({
            'vmss_1': 'vmss1',
            'vmss_2': 'vmss2',
            'ssh_key': TEST_SSH_KEY_PUB,
        })

        # 'all' keeps password login enabled AND installs the SSH key
        self.cmd('vmss create --image Debian -l westus -g {rg} -n {vmss_1} --authentication-type all '
                 ' --admin-username myadmin --admin-password testPassword0 --ssh-key-value \'{ssh_key}\'',
                 checks=[
                     self.check('vmss.virtualMachineProfile.osProfile.linuxConfiguration.disablePasswordAuthentication', False),
                     self.check('vmss.virtualMachineProfile.osProfile.linuxConfiguration.ssh.publicKeys[0].keyData', TEST_SSH_KEY_PUB)
                 ])

        # 'ssh' disables password authentication entirely
        self.cmd('vmss create --image Debian -l westus -g {rg} -n {vmss_2} --authentication-type ssh '
                 ' --admin-username myadmin --ssh-key-value \'{ssh_key}\'',
                 checks=[
                     self.check('vmss.virtualMachineProfile.osProfile.linuxConfiguration.disablePasswordAuthentication', True)
                 ])
class VMSSCreateBalancerOptionsTest(ScenarioTest):  # pylint: disable=too-many-instance-attributes
    """VMSS load-balancing options: no balancer at all, app gateway backend,
    pre-existing load balancer, and the standard-LB default when
    single-placement-group is disabled."""

    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_none')
    def test_vmss_create_none_options(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss1',
            'ssh_key': TEST_SSH_KEY_PUB,
            # empty-string argument spelling differs between Windows and POSIX shells
            'quotes': '""' if platform.system() == 'Windows' else "''"
        })
        # passing empty values opts out of load balancer, public IP and tags
        self.cmd('vmss create -n {vmss} -g {rg} --image Debian --load-balancer {quotes} --admin-username ubuntu --ssh-key-value \'{ssh_key}\' --public-ip-address {quotes} --tags {quotes} --vm-sku Basic_A1')
        self.cmd('vmss show -n {vmss} -g {rg}', checks=[
            self.check('tags', {}),
            self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations.ipConfigurations.loadBalancerBackendAddressPools', None),
            self.check('sku.name', 'Basic_A1'),
            self.check('sku.tier', 'Basic')
        ])
        self.cmd('vmss update -g {rg} -n {vmss} --set tags.test=success',
                 checks=self.check('tags.test', 'success'))
        # no public IP should have been created
        self.cmd('network public-ip show -n {vmss}PublicIP -g {rg}', expect_failure=True)

    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_w_ag')
    def test_vmss_create_with_app_gateway(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss1',
            'ssh_key': TEST_SSH_KEY_PUB
        })
        self.cmd("vmss create -n {vmss} -g {rg} --image Debian --admin-username clittester --ssh-key-value '{ssh_key}' --app-gateway apt1 --instance-count 5",
                 checks=self.check('vmss.provisioningState', 'Succeeded'))
        # spot check it is using gateway
        self.cmd('vmss show -n {vmss} -g {rg}', checks=[
            self.check('sku.capacity', 5),
            self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].ipConfigurations[0].applicationGatewayBackendAddressPools[0].resourceGroup', '{rg}')
        ])

    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_lb')
    def test_vmss_existing_lb(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss1',
            'lb': 'lb1'
        })
        # creation should succeed against a pre-existing load balancer
        self.cmd('network lb create -g {rg} -n {lb} --backend-pool-name test')
        self.cmd('vmss create -g {rg} -n {vmss} --load-balancer {lb} --image UbuntuLTS --admin-username clitester --admin-password TestTest12#$')

    @ResourceGroupPreparer()
    def test_vmss_single_placement_group_default_to_std_lb(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss123'
        })
        self.cmd('vmss create -g {rg} -n {vmss} --admin-username clitester --admin-password PasswordPassword1! --image debian --single-placement-group false')
        self.cmd('vmss show -g {rg} -n {vmss}', checks=[
            self.check('singlePlacementGroup', False)
        ])
        # disabling single placement group should default both LB and public IP to Standard SKU
        self.cmd('network lb list -g {rg}', checks=[
            self.check('[0].sku.name', 'Standard')
        ])
        self.cmd('network public-ip list -g {rg}', checks=[
            self.check('[0].sku.name', 'Standard')
        ])
class VMSSCreatePublicIpPerVm(ScenarioTest):  # pylint: disable=too-many-instance-attributes
    """VMSS with a per-instance public IP, custom DNS servers, a VM domain
    name, and an attached NSG."""

    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_w_ips')
    def test_vmss_public_ip_per_vm_custom_domain_name(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss1',
            'nsg': 'testnsg',
            'ssh_key': TEST_SSH_KEY_PUB,
            'dns_label': self.create_random_name('clivmss', 20)
        })
        nsg_result = self.cmd('network nsg create -g {rg} -n {nsg}').get_output_in_json()
        self.cmd("vmss create -n {vmss} -g {rg} --image Debian --admin-username clittester --ssh-key-value '{ssh_key}' --vm-domain-name {dns_label} --public-ip-per-vm --dns-servers 10.0.0.6 10.0.0.5 --nsg {nsg}",
                 checks=self.check('vmss.provisioningState', 'Succeeded'))
        # the NIC configuration must carry both DNS servers (in order) and the NSG
        result = self.cmd("vmss show -n {vmss} -g {rg}", checks=[
            self.check('length(virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].dnsSettings.dnsServers)', 2),
            self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].dnsSettings.dnsServers[0]', '10.0.0.6'),
            self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].dnsSettings.dnsServers[1]', '10.0.0.5'),
            self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].networkSecurityGroup.id', nsg_result['NewNSG']['id'])
        ])
        # spot check we have the domain name and have a public ip
        result = self.cmd('vmss list-instance-public-ips -n {vmss} -g {rg}').get_output_in_json()
        self.assertEqual(len(result[0]['ipAddress'].split('.')), 4)
        self.assertTrue(result[0]['dnsSettings']['domainNameLabel'].endswith(self.kwargs['dns_label']))
# class SecretsScenarioTest(ScenarioTest): # pylint: disable=too-many-instance-attributes
# @ResourceGroupPreparer(name_prefix='cli_test_vm_secrets')
# def test_vm_create_linux_secrets(self, resource_group, resource_group_location):
# self.kwargs.update({
# 'admin': 'ubuntu',
# 'loc': 'westus',
# 'image': 'UbuntuLTS',
# 'auth': 'ssh',
# 'ssh_key': TEST_SSH_KEY_PUB,
# 'vm': 'vm-name',
# 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': []}]),
# 'vault': self.create_random_name('vmlinuxkv', 20)
# })
# message = 'Secret is missing vaultCertificates array or it is empty at index 0'
# with self.assertRaisesRegex(CLIError, message):
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' -l {loc} --secrets \'{secrets}\' --nsg-rule NONE')
# vault_out = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()
# time.sleep(60)
# self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')
# self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @"{policy_path}"')
# self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query "[?attributes.enabled].id" -o tsv').output.strip()
# vm_format = self.cmd('vm secret format -s {secret_out}').get_output_in_json()
# self.kwargs['secrets'] = json.dumps(vm_format)
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --authentication-type {auth} --image {image} --ssh-key-value \'{ssh_key}\' -l {loc} --secrets \'{secrets}\' --nsg-rule NONE')
# self.cmd('vm show -g {rg} -n {vm}', checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check('osProfile.secrets[0].sourceVault.id', vault_out['id']),
# self.check('osProfile.secrets[0].vaultCertificates[0].certificateUrl', '{secret_out}')
# ])
# @ResourceGroupPreparer()
# def test_vm_create_windows_secrets(self, resource_group, resource_group_location):
# self.kwargs.update({
# 'admin': 'windowsUser',
# 'loc': 'westus',
# 'image': 'Win2012R2Datacenter',
# 'vm': 'vm-name',
# 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': [{'certificateUrl': 'certurl'}]}]),
# 'vault': self.create_random_name('vmkeyvault', 20)
# })
# message = 'Secret is missing certificateStore within vaultCertificates array at secret index 0 and ' \
# 'vaultCertificate index 0'
# with self.assertRaisesRegex(CLIError, message):
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --admin-password VerySecret!12 --image {image} -l {loc} --secrets \'{secrets}\' --nsg-rule NONE')
# vault_out = self.cmd(
# 'keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()
# time.sleep(60)
# self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')
# self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @"{policy_path}"')
# self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query "[?attributes.enabled].id" -o tsv').output.strip()
# self.kwargs['secrets'] = self.cmd('vm secret format -s {secret_out} --certificate-store "My"').get_output_in_json()
# self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --admin-password VerySecret!12 --image {image} -l {loc} --secrets "{secrets}" --nsg-rule NONE')
# self.cmd('vm show -g {rg} -n {vm}', checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check('osProfile.secrets[0].sourceVault.id', vault_out['id']),
# self.check('osProfile.secrets[0].vaultCertificates[0].certificateUrl', self.kwargs['secret_out']),
# self.check('osProfile.secrets[0].vaultCertificates[0].certificateStore', 'My')
# ])
# class VMSSCreateLinuxSecretsScenarioTest(ScenarioTest):
# @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_linux_secrets')
# @AllowLargeResponse()
# def test_vmss_create_linux_secrets(self, resource_group):
# self.kwargs.update({
# 'loc': 'westus',
# 'vmss': 'vmss1-name',
# 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': []}]),
# 'vault': self.create_random_name('vmsslinuxkv', 20),
# 'secret': 'mysecret',
# 'ssh_key': TEST_SSH_KEY_PUB
# })
# vault_out = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()
# time.sleep(60)
# self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')
# self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @"{policy_path}"')
# self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query "[?attributes.enabled].id" -o tsv').output.strip()
# vm_format = self.cmd('vm secret format -s {secret_out}').get_output_in_json()
# self.kwargs['secrets'] = json.dumps(vm_format)
# self.cmd('vmss create -n {vmss} -g {rg} --image Debian --admin-username deploy --ssh-key-value \'{ssh_key}\' --secrets \'{secrets}\'')
# self.cmd('vmss show -n {vmss} -g {rg}', checks=[
# self.check('provisioningState', 'Succeeded'),
# self.check('virtualMachineProfile.osProfile.secrets[0].sourceVault.id', vault_out['id']),
# self.check('virtualMachineProfile.osProfile.secrets[0].vaultCertificates[0].certificateUrl', '{secret_out}')
# ])
class VMSSCreateExistingOptions(ScenarioTest):
    """Verify 'vmss create' reuses pre-existing vnet/subnet, load balancer and
    storage container instead of creating new ones."""
    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_options')
    def test_vmss_create_existing_options(self):
        self.kwargs.update({
            'vmss': 'vrfvmss',
            'os_disk': 'vrfosdisk',
            'container': 'vrfcontainer',
            'sku': 'Standard_A3',
            'vnet': 'vrfvnet',
            'subnet': 'vrfsubnet',
            'lb': 'vrflb',
            'bepool': 'mybepool',
            'ssh_key': TEST_SSH_KEY_PUB
        })
        # Pre-create the network resources the scale set must reuse.
        self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name {subnet}')
        self.cmd('network lb create --name {lb} -g {rg} --backend-pool-name {bepool}')
        self.cmd('vmss create --image CentOS --os-disk-name {os_disk} --admin-username ubuntu --vnet-name {vnet} --subnet {subnet} -l "West US" --vm-sku {sku} --storage-container-name {container} -g {rg} --name {vmss} --load-balancer {lb} --ssh-key-value \'{ssh_key}\' --backend-pool-name {bepool} --use-unmanaged-disk')
        self.cmd('vmss show --name {vmss} -g {rg}', checks=[
            self.check('sku.name', '{sku}'),
            self.check('virtualMachineProfile.storageProfile.osDisk.name', '{os_disk}'),
            self.check('virtualMachineProfile.storageProfile.osDisk.vhdContainers[0].ends_with(@, \'{container}\')', True)
        ])
        # The scale set's instances must show up inside the existing LB pool and subnet.
        self.cmd('network lb show --name {lb} -g {rg}',
                 checks=self.check('backendAddressPools[0].backendIpConfigurations[0].id.contains(@, \'{vmss}\')', True))
        self.cmd('network vnet show --name {vnet} -g {rg}',
                 checks=self.check('subnets[0].ipConfigurations[0].id.contains(@, \'{vmss}\')', True))
class VMSSCreateExistingIdsOptions(ScenarioTest):
    """Same as VMSSCreateExistingOptions, but passes full resource IDs (not
    names) for the subnet and load balancer to 'vmss create'."""
    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_ids')
    def test_vmss_create_existing_ids_options(self, resource_group):
        from msrestazure.tools import resource_id, is_valid_resource_id
        subscription_id = self.get_subscription_id()
        self.kwargs.update({
            'vmss': 'vrfvmss',
            'os_disk': 'vrfosdisk',
            'container': 'vrfcontainer',
            'sku': 'Standard_A3',
            'vnet': 'vrfvnet',
            'subnet': 'vrfsubnet',
            'lb': 'vrflb',
            'bepool': 'mybepool',
            'ssh_key': TEST_SSH_KEY_PUB
        })
        self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name {subnet}')
        self.cmd('network lb create --name {lb} -g {rg} --backend-pool-name {bepool}')
        # Build full ARM resource IDs for the resources created above.
        self.kwargs.update({
            'subnet_id': resource_id(subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='virtualNetworks', child_type_1='subnets', name=self.kwargs['vnet'], child_name_1=self.kwargs['subnet']),
            'lb_id': resource_id(subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='loadBalancers', name=self.kwargs['lb'])
        })
        assert is_valid_resource_id(self.kwargs['subnet_id'])
        assert is_valid_resource_id(self.kwargs['lb_id'])
        self.cmd('vmss create --image CentOS --os-disk-name {os_disk} --admin-username ubuntu --subnet {subnet_id} -l "West US" --vm-sku {sku} --storage-container-name {container} -g {rg} --name {vmss} --load-balancer {lb_id} --ssh-key-value \'{ssh_key}\' --backend-pool-name {bepool} --use-unmanaged-disk')
        self.cmd('vmss show --name {vmss} -g {rg}', checks=[
            self.check('sku.name', '{sku}'),
            self.check('virtualMachineProfile.storageProfile.osDisk.name', '{os_disk}'),
            self.check('virtualMachineProfile.storageProfile.osDisk.vhdContainers[0].ends_with(@, \'{container}\')', True)
        ])
        self.cmd('network lb show --name {lb} -g {rg}',
                 checks=self.check('backendAddressPools[0].backendIpConfigurations[0].id.contains(@, \'{vmss}\')', True))
        self.cmd('network vnet show --name {vnet} -g {rg}',
                 checks=self.check('subnets[0].ipConfigurations[0].id.contains(@, \'{vmss}\')', True))
class VMSSVMsScenarioTest(ScenarioTest):
    """Exercise per-instance VMSS commands: list/show instances, restart, stop,
    start, deallocate, and delete-instances."""
    def _check_vms_power_state(self, *args):
        # Assert every tracked instance's power-state code is one of *args*.
        for iid in self.kwargs['instance_ids']:
            result = self.cmd('vmss get-instance-view --resource-group {{rg}} --name {{vmss}} --instance-id {}'.format(iid)).get_output_in_json()
            # statuses[1] carries the PowerState/* code.
            self.assertTrue(result['statuses'][1]['code'] in args)
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_vms')
    def test_vmss_vms(self, resource_group):
        self.kwargs.update({
            'vmss': 'vmss1',
            'count': 2,
            'instance_ids': []
        })
        self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --authentication-type password --admin-username admin123 --admin-password TestTest12#$ --instance-count {count}')
        instance_list = self.cmd('vmss list-instances --resource-group {rg} --name {vmss}', checks=[
            self.check('type(@)', 'array'),
            self.check('length(@)', '{count}'),
            self.check("length([].name.starts_with(@, '{vmss}'))", self.kwargs['count'])
        ]).get_output_in_json()
        self.kwargs['instance_ids'] = [x['instanceId'] for x in instance_list]
        self.kwargs['id'] = self.kwargs['instance_ids'][0]
        self.cmd('vmss show --resource-group {rg} --name {vmss} --instance-id {id}', checks=[
            self.check('type(@)', 'object'),
            self.check('instanceId', '{id}')
        ])
        # NAT pool default maps SSH for instance 0 to front-end port 50000.
        result = self.cmd('vmss list-instance-connection-info --resource-group {rg} --name {vmss}').get_output_in_json()
        self.assertEqual(result['instance 0'].split(':')[1], '50000')
        # Walk the instances through the full power-state life cycle.
        self.cmd('vmss restart --resource-group {rg} --name {vmss} --instance-ids *')
        self._check_vms_power_state('PowerState/running', 'PowerState/starting')
        self.cmd('vmss stop --resource-group {rg} --name {vmss} --instance-ids *')
        self._check_vms_power_state('PowerState/stopped')
        self.cmd('vmss start --resource-group {rg} --name {vmss} --instance-ids *')
        self._check_vms_power_state('PowerState/running', 'PowerState/starting')
        self.cmd('vmss deallocate --resource-group {rg} --name {vmss} --instance-ids *')
        self._check_vms_power_state('PowerState/deallocated')
        self.cmd('vmss delete-instances --resource-group {rg} --name {vmss} --instance-ids *')
        self.cmd('vmss list-instances --resource-group {rg} --name {vmss}')
class VMSSCustomDataScenarioTest(ScenarioTest):
    """Verify a scale set provisions successfully when cloud-init custom data
    is supplied at creation time."""
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_custom_data')
    def test_vmss_create_custom_data(self):
        self.kwargs.update({
            'vmss': 'vmss-custom-data',
            'ssh_key': TEST_SSH_KEY_PUB
        })
        self.cmd('vmss create -n {vmss} -g {rg} --image Debian --admin-username deploy --ssh-key-value "{ssh_key}" --custom-data "#cloud-config\nhostname: myVMhostname"')
        # custom data is write only, hence we have no automatic way to cross check. Here we just verify VM was provisioned
        self.cmd('vmss show -n {vmss} -g {rg}',
                 checks=self.check('provisioningState', 'Succeeded'))
class VMSSNicScenarioTest(ScenarioTest):
    """Cover 'vmss nic list', 'list-vm-nics' and 'show' for scale-set NICs."""
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_nics')
    def test_vmss_nics(self):
        self.kwargs.update({
            'vmss': 'vmss1',
        })
        self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image Win2012R2Datacenter')
        self.cmd('vmss nic list -g {rg} --vmss-name {vmss}', checks=[
            self.check('type(@)', 'array'),
            self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
        ])
        # Pick a real instance id so per-VM NIC commands can be exercised.
        result = self.cmd('vmss list-instances -g {rg} -n {vmss}').get_output_in_json()
        self.kwargs['iid'] = result[0]['instanceId']
        nic_list = self.cmd('vmss nic list-vm-nics -g {rg} --vmss-name {vmss} --instance-id {iid}', checks=[
            self.check('type(@)', 'array'),
            self.check("length([?resourceGroup == '{rg}']) == length(@)", True)
        ]).get_output_in_json()
        self.kwargs['nic'] = nic_list[0].get('name')
        self.cmd('vmss nic show --resource-group {rg} --vmss-name {vmss} --instance-id {iid} -n {nic}', checks=[
            self.check('type(@)', 'object'),
            self.check('name', '{nic}'),
            self.check('resourceGroup', '{rg}')
        ])
class VMSSCreateIdempotentTest(ScenarioTest):
    """Running the same 'vmss create' twice must succeed and must not duplicate
    the implicitly-created network resources."""
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_idempotent')
    def test_vmss_create_idempotent(self, resource_group):
        self.kwargs.update({'vmss': 'vmss1'})
        # run the command twice with the same parameters and verify it does not fail
        self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image UbuntuLTS --use-unmanaged-disk')
        self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image UbuntuLTS --use-unmanaged-disk')
        # still 1 vnet and 1 subnet inside
        self.cmd('network vnet list -g {rg}', checks=[
            self.check('length([])', 1),
            self.check('[0].name', self.kwargs['vmss'] + 'VNET'),
            self.check('[0].subnets[0].addressPrefix', '10.0.0.0/24'),
            self.check('length([0].subnets)', 1),
        ])
class VMSSILBTest(ScenarioTest):
    """A scale set created without a public IP gets an internal LB;
    list-instance-connection-info is expected to fail for it."""
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_ilb')
    def test_vmss_with_ilb(self, resource_group):
        self.kwargs.update({'vmss': 'vmss1'})
        # --public-ip-address "" forces an internal load balancer.
        self.cmd('vmss create -g {rg} -n {vmss} --admin-username admin123 --admin-password PasswordPassword1! --image centos --instance-count 1 --public-ip-address ""')
        # TODO: restore error validation when #5155 is addressed
        # with self.assertRaises(AssertionError) as err:
        self.cmd('vmss list-instance-connection-info -g {rg} -n {vmss}', expect_failure=True)
        # self.assertTrue('internal load balancer' in str(err.exception))
@api_version_constraint(ResourceType.MGMT_NETWORK, min_api='2017-08-01')
class VMSSLoadBalancerWithSku(ScenarioTest):
    """LB/public-IP SKU behavior on 'vmss create': Basic by default, Standard
    (with static allocation) when --lb-sku standard is given."""
    @AllowLargeResponse()
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_lb_sku')
    def test_vmss_lb_sku(self, resource_group):
        self.kwargs.update({
            'vmss0': 'vmss0',
            'vmss': 'vmss1',
            'lb': 'lb1',
            'ip': 'pubip1',
            'sku': 'standard',
            'loc': 'eastus2'
        })
        # default to Basic
        self.cmd('vmss create -g {rg} -l {loc} -n {vmss0} --image UbuntuLTS --admin-username admin123 --admin-password PasswordPassword1!')
        self.cmd('network lb list -g {rg}', checks=self.check('[0].sku.name', 'Basic'))
        self.cmd('network public-ip list -g {rg}', checks=[
            self.check('[0].sku.name', 'Basic'),
            self.check('[0].publicIpAllocationMethod', 'Dynamic')
        ])
        # but you can overrides the defaults
        self.cmd('vmss create -g {rg} -l {loc} -n {vmss} --lb {lb} --lb-sku {sku} --public-ip-address {ip} --image UbuntuLTS --admin-username admin123 --admin-password PasswordPassword1!')
        self.cmd('network lb show -g {rg} -n {lb}',
                 checks=self.check('sku.name', 'Standard'))
        self.cmd('network public-ip show -g {rg} -n {ip}', checks=[
            self.check('sku.name', 'Standard'),
            self.check('publicIpAllocationMethod', 'Static')
        ])
class VMLiveScenarioTest(LiveScenarioTest):
    """Live-only test: capture CLI progress output during 'vm create' and
    sanity-check its format."""
    @ResourceGroupPreparer(name_prefix='cli_test_vm_create_progress')
    def test_vm_create_progress(self, resource_group):
        from azure.cli.testsdk.utilities import force_progress_logging
        self.kwargs.update({'vm': 'vm123'})
        with force_progress_logging() as test_io:
            self.cmd('vm create -g {rg} -n {vm} --admin-username {vm} --admin-password PasswordPassword1! --image debian')
        content = test_io.getvalue()
        # check log has okay format
        lines = content.splitlines()
        for line in lines:
            self.assertTrue(line.split(':')[0] in ['Accepted', 'Succeeded'])
        # spot check we do have some relevant progress messages coming out
        # (Note, CLI's progress controller does routine "sleep" before sample the LRO response.
        # This has the consequence that it can't promise each resource's result wil be displayed)
        self.assertTrue(any(line.startswith('Succeeded:') or line.startswith('Accepted:') for line in lines))
@api_version_constraint(ResourceType.MGMT_COMPUTE, min_api='2017-03-30')
class VMZoneScenarioTest(ScenarioTest):
    """Create a zoned managed disk and verify the zone shows up in JSON and
    table output of 'disk show'/'disk list'."""
    @ResourceGroupPreparer(name_prefix='cli_test_disk_zones', location='eastus2')
    def test_vm_disk_create_zones(self, resource_group, resource_group_location):
        self.kwargs.update({
            'zones': '2',
            'disk': 'disk123',
            'size': 1
        })
        self.cmd('disk create -g {rg} -n {disk} --size-gb {size} --zone {zones}', checks=[
            self.check('zones[0]', '{zones}')
        ])
        self.cmd('disk show -g {rg} -n {disk}',
                 checks=self.check('zones[0]', '{zones}'))
        # Table output: row 2 (after the two header lines) holds the disk's values.
        result = self.cmd('disk show -g {rg} -n {disk} -otable')
        table_output = set(result.output.splitlines()[2].split())
        self.assertTrue(set([resource_group, resource_group_location, self.kwargs['disk'], self.kwargs['zones'], str(self.kwargs['size']), 'Premium_LRS']).issubset(table_output))
        result = self.cmd('disk list -g {rg} -otable')
        table_output = set(result.output.splitlines()[2].split())
        self.assertTrue(set([resource_group, resource_group_location, self.kwargs['disk'], self.kwargs['zones']]).issubset(table_output))
class VMRunCommandScenarioTest(ScenarioTest):
    """Exercise 'vm run-command' list/show/invoke, end-to-end and with
    parameters."""
    @ResourceGroupPreparer(name_prefix='cli_test_vm_run_command')
    def test_vm_run_command_e2e(self, resource_group, resource_group_location):
        self.kwargs.update({
            'vm': 'test-run-command-vm',
            'loc': resource_group_location
        })
        self.cmd('vm run-command list -l {loc}')
        self.cmd('vm run-command show --command-id RunShellScript -l {loc}')
        public_ip = self.cmd('vm create -g {rg} -n {vm} --image ubuntults --admin-username clitest1 --admin-password Test12345678!!').get_output_in_json()['publicIpAddress']
        self.cmd('vm open-port -g {rg} -n {vm} --port 80')
        # Install nginx via run-command, then verify it serves over the open port.
        self.cmd('vm run-command invoke -g {rg} -n{vm} --command-id RunShellScript --script "sudo apt-get update && sudo apt-get install -y nginx"')
        time.sleep(15)  # 15 seconds should be enough for nginx started(Skipped under playback mode)
        import requests
        r = requests.get('http://' + public_ip)
        self.assertTrue('Welcome to nginx' in str(r.content))
    @ResourceGroupPreparer(name_prefix='cli_test_vm_run_command_w_params')
    def test_vm_run_command_with_parameters(self, resource_group):
        self.kwargs.update({'vm': 'test-run-command-vm2'})
        self.cmd('vm create -g {rg} -n {vm} --image debian --admin-username clitest1 --admin-password Test12345678!!')
        self.cmd('vm run-command invoke -g {rg} -n{vm} --command-id RunShellScript --scripts "echo $0 $1" --parameters hello world')
    # @ResourceGroupPreparer(name_prefix='cli_test_vm_encryption', location='westus')
    # def test_vm_disk_encryption_e2e(self, resource_group, resource_group_location):
    #     self.kwargs.update({
    #         'vault': self.create_random_name('vault', 10),
    #         'vm': 'vm1'
    #     })
    #     self.cmd('keyvault create -g {rg} -n {vault} --enabled-for-disk-encryption "true"')
    #     time.sleep(60)  # to avoid 504(too many requests) on a newly created vault
    #     self.cmd('vm create -g {rg} -n {vm} --image win2012datacenter --admin-username clitester1 --admin-password Test123456789!')
    #     self.cmd('vm encryption enable -g {rg} -n {vm} --disk-encryption-keyvault {vault}')
    #     self.cmd('vm encryption show -g {rg} -n {vm}', checks=[self.check('disks[0].statuses[0].code', 'EncryptionState/encrypted')])
    #     self.cmd('vm encryption disable -g {rg} -n {vm}')
@api_version_constraint(ResourceType.MGMT_COMPUTE, min_api='2017-03-30')
class VMSSRollingUpgrade(ScenarioTest):
    """End-to-end rolling-upgrade flow: LB with health probe, web-server
    extension, switch to rolling mode, start/query/cancel an upgrade."""
    @ResourceGroupPreparer(name_prefix='cli_test_vmss_rolling_update')
    def test_vmss_rolling_upgrade(self, resource_group):
        self.kwargs.update({
            'lb': 'lb1',
            'probe': 'probe1',
            'vmss': 'vmss1'
        })
        # set up a LB with the probe for rolling upgrade
        self.cmd('network lb create -g {rg} -n {lb}')
        self.cmd('network lb probe create -g {rg} --lb-name {lb} -n {probe} --protocol http --port 80 --path /')
        self.cmd('network lb rule create -g {rg} --lb-name {lb} -n rule1 --protocol tcp --frontend-port 80 --backend-port 80 --probe-name {probe}')
        self.cmd('network lb inbound-nat-pool create -g {rg} --lb-name {lb} -n nat-pool1 --backend-port 22 --frontend-port-range-start 50000 --frontend-port-range-end 50119 --protocol Tcp --frontend-ip-name LoadBalancerFrontEnd')
        # create a scaleset to use the LB, note, we start with the manual mode as we are not done with the setup yet
        self.cmd('vmss create -g {rg} -n {vmss} --image ubuntults --admin-username clitester1 --admin-password Testqwer1234! --lb {lb} --health-probe {probe}')
        # install the web server
        _, settings_file = tempfile.mkstemp()
        with open(settings_file, 'w') as outfile:
            json.dump({
                "commandToExecute": "sudo apt-get update && sudo apt-get install -y nginx",
            }, outfile)
        # Escape backslashes so the Windows temp path survives CLI parsing.
        settings_file = settings_file.replace('\\', '\\\\')
        self.kwargs['settings'] = settings_file
        self.cmd('vmss extension set -g {rg} --vmss-name {vmss} -n customScript --publisher Microsoft.Azure.Extensions --settings {settings} --version 2.0')
        self.cmd('vmss update-instances -g {rg} -n {vmss} --instance-ids "*"')
        # now we are ready for the rolling upgrade mode
        self.cmd('vmss update -g {rg} -n {vmss} --set upgradePolicy.mode=rolling')
        # make sure the web server works
        result = self.cmd('vmss list-instance-connection-info -g {rg} -n {vmss} -o tsv')
        time.sleep(15)  # 15 seconds should be enough for nginx started(Skipped under playback mode)
        import requests
        r = requests.get('http://' + result.output.split(':')[0])
        self.assertTrue('Welcome to nginx' in str(r.content))
        # do some rolling upgrade, maybe nonsense, but we need to test the command anyway
        self.cmd('vmss rolling-upgrade start -g {rg} -n {vmss}')
        result = self.cmd('vmss rolling-upgrade get-latest -g {rg} -n {vmss}').get_output_in_json()
        self.assertTrue(('policy' in result) and ('progress' in result))  # spot check that it is about rolling upgrade
        # 'cancel' should fail as we have no active upgrade to cancel
        self.cmd('vmss rolling-upgrade cancel -g {rg} -n {vmss}', expect_failure=True)
class VMCreateWithExistingNic(ScenarioTest):
    """'vm create --nics' with a pre-built NIC (dual-stack ip-configs) should
    surface both public and private IPv4 addresses."""
    @ResourceGroupPreparer(name_prefix='cli_test_create_vm_existing_nic')
    def test_vm_create_existing_nic(self, resource_group):
        import re
        self.cmd('network public-ip create -g {rg} -n my-pip')
        self.cmd('network vnet create -g {rg} -n my-vnet --subnet-name my-subnet1')
        self.cmd('network nic create -g {rg} -n my-nic --subnet my-subnet1 --vnet-name my-vnet --public-ip-address my-pip')
        self.cmd('network nic ip-config create -n my-ipconfig2 -g {rg} --nic-name my-nic --private-ip-address-version IPv6')
        self.cmd('vm create -g {rg} -n vm1 --image ubuntults --nics my-nic --generate-ssh-keys --admin-username ubuntuadmin')
        result = self.cmd('vm show -g {rg} -n vm1 -d').get_output_in_json()
        # Both addresses must look like dotted-quad IPv4.
        self.assertTrue(re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', result['publicIps']))
        self.assertTrue(re.match(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', result['privateIps']))
class VMOsDiskSwap(ScenarioTest):
    """Swap a VM's OS disk for a copy via 'vm update --os-disk' and verify the
    new disk is attached."""
    @ResourceGroupPreparer()
    def test_vm_os_disk_swap(self, resource_group):
        self.kwargs.update({
            'vm': 'vm1',
            'backupDisk': 'disk1',
        })
        self.cmd('vm create -g {rg} -n {vm} --image centos --admin-username clitest123 --generate-ssh-keys')
        res = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()
        original_disk_id = res['storageProfile']['osDisk']['managedDisk']['id']
        # Clone the current OS disk, then swap it in while the VM is stopped.
        backup_disk_id = self.cmd('disk create -g {{rg}} -n {{backupDisk}} --source {}'.format(original_disk_id)).get_output_in_json()['id']
        self.cmd('vm stop -g {rg} -n {vm}')
        self.cmd('vm update -g {{rg}} -n {{vm}} --os-disk {}'.format(backup_disk_id))
        self.cmd('vm show -g {rg} -n {vm}', checks=[
            self.check('storageProfile.osDisk.managedDisk.id', backup_disk_id),
            self.check('storageProfile.osDisk.name', self.kwargs['backupDisk'])
        ])
class VMGenericUpdate(ScenarioTest):
    """Cover generic 'vm update --set/--remove' property edits."""
    @ResourceGroupPreparer()
    def test_vm_generic_update(self, resource_group):
        self.kwargs.update({
            'vm': 'vm1',
        })
        self.cmd('vm create -g {rg} -n {vm} --image debian --data-disk-sizes-gb 1 2 --admin-username cligenerics --generate-ssh-keys')
        # we will try all kinds of generic updates we can
        self.cmd('vm update -g {rg} -n {vm} --set identity.type="SystemAssigned"', checks=[
            self.check('identity.type', 'SystemAssigned')
        ])
        self.cmd('vm update -g {rg} -n {vm} --set storageProfile.dataDisks[0].caching="ReadWrite"', checks=[
            self.check('storageProfile.dataDisks[0].caching', 'ReadWrite')
        ])
        self.cmd('vm update -g {rg} -n {vm} --remove storageProfile.dataDisks', checks=[
            self.check('storageProfile.dataDisks', [])
        ])
# endregion
# Allow running this test module directly with the stdlib test runner.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "14ecb8613c5cf4794227b904e9a4a380",
"timestamp": "",
"source": "github",
"line_count": 1697,
"max_line_length": 764,
"avg_line_length": 51.58868591632292,
"alnum_prop": 0.6102963013729925,
"repo_name": "yugangw-msft/azure-cli",
"id": "4b46db317f2238f330b946fadc98c7a1bcaeae9f",
"size": "87924",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/vm/tests/hybrid_2020_09_01/test_vm_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
}
|
"""
Providing iterator functions that are not in all versions of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
def is_iterable(x):
    """Return True if *x* can be iterated over, False otherwise.

    Implementation-independent: anything accepted by ``iter()`` counts
    as iterable, regardless of its concrete type.
    """
    try:
        iter(x)
    except TypeError:
        return False
    return True
|
{
"content_hash": "d343aeb716e1450008594a4fea6cdcb2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 78,
"avg_line_length": 26.733333333333334,
"alnum_prop": 0.6758104738154613,
"repo_name": "diego-d5000/MisValesMd",
"id": "70627e0be4840d2fd205a419ccba95e78e8c910a",
"size": "401",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/django/utils/itercompat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "115465"
},
{
"name": "Groff",
"bytes": "22"
},
{
"name": "HTML",
"bytes": "1415583"
},
{
"name": "JavaScript",
"bytes": "1381588"
},
{
"name": "PowerShell",
"bytes": "8325"
},
{
"name": "Python",
"bytes": "8107650"
},
{
"name": "Shell",
"bytes": "11786"
}
],
"symlink_target": ""
}
|
import unittest
import googleapiclient.discovery
from google.cloud import storage
from google.cloud import billing_v1
from resources.base import Resource
# Module-level Google Cloud API clients shared by the tests below.
storage_client = storage.Client()
billing_client = billing_v1.CloudBillingClient()
# Cloud Resource Manager v3 client, used for testIamPermissions calls.
resource_client = googleapiclient.discovery.build("cloudresourcemanager", "v3")
# Permissions the test identity is expected to hold at project scope.
project_permissions = ["bigquery.datasets.create", "storage.buckets.create"]
# Permissions the test identity is expected to hold at organization scope.
org_permissions = [
    "resourcemanager.folders.create",
    "resourcemanager.projects.create",
    "resourcemanager.projects.delete",
]
class TestPermissions(unittest.TestCase):
    """Verify the running identity holds the IAM permissions required by the
    tool, at both project and organization scope."""
    def test_can_create_project_resources(self):
        """testIamPermissions must echo back every project_permissions entry."""
        request = resource_client.projects().testIamPermissions(
            resource="projects/{id}".format(id=Resource.TEST_PROJECT_ID),
            body={"permissions": project_permissions},
        )
        returnedPermissions = request.execute()
        # The API returns only the permissions the caller actually holds.
        self.assertEqual(
            set(project_permissions), set(returnedPermissions["permissions"])
        )
    def test_can_create_org_resources(self):
        """testIamPermissions must echo back every org_permissions entry."""
        request = resource_client.organizations().testIamPermissions(
            resource=Resource.TEST_ORG,
            body={"permissions": org_permissions},
        )
        returnedPermissions = request.execute()
        self.assertEqual(
            set(org_permissions), set(returnedPermissions["permissions"])
        )
|
{
"content_hash": "282f1a2e11770c317a49caea9a9ceaf5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 35.07692307692308,
"alnum_prop": 0.7010233918128655,
"repo_name": "GoogleCloudPlatform/professional-services",
"id": "97619e15c51187e7f6ac9be92ba01370de8c3358",
"size": "1970",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/iam-permissions-copier/tests/test_permissions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "117994"
},
{
"name": "C++",
"bytes": "174"
},
{
"name": "CSS",
"bytes": "13405"
},
{
"name": "Component Pascal",
"bytes": "798"
},
{
"name": "Dockerfile",
"bytes": "15093"
},
{
"name": "Go",
"bytes": "352968"
},
{
"name": "HCL",
"bytes": "204776"
},
{
"name": "HTML",
"bytes": "1229668"
},
{
"name": "Java",
"bytes": "338810"
},
{
"name": "JavaScript",
"bytes": "59905"
},
{
"name": "Jinja",
"bytes": "60083"
},
{
"name": "Makefile",
"bytes": "14129"
},
{
"name": "Python",
"bytes": "2250081"
},
{
"name": "Scala",
"bytes": "978327"
},
{
"name": "Shell",
"bytes": "109299"
},
{
"name": "Smarty",
"bytes": "19839"
},
{
"name": "TypeScript",
"bytes": "147194"
}
],
"symlink_target": ""
}
|
"""MySQLdb type conversion module
This module handles all the type conversions for MySQL. If the default
type conversions aren't what you need, you can make your own. The
dictionary conversions maps some kind of type to a conversion function
which returns the corresponding value:
Key: FIELD_TYPE.* (from MySQLdb.constants)
Conversion function:
Arguments: string
Returns: Python object
Key: Python type object (from types) or class
Conversion function:
Arguments: Python object of indicated type or class AND
conversion dictionary
Returns: SQL literal value
Notes: Most conversion functions can ignore the dictionary, but
it is a required parameter. It is necessary for converting
things like sequences and instances.
Don't modify conversions if you can avoid it. Instead, make copies
(with the copy() method), modify the copies, and then pass them to
MySQL.connect().
"""
from _mysql import string_literal, escape_sequence, escape_dict, escape, NULL
from MySQLdb.constants import FIELD_TYPE, FLAG
from MySQLdb.times import *
# Python 2/3 compatibility shims: the ``types`` aliases exist natively on
# Python 2; on Python 3 they are mapped onto the builtin types instead.
try:
    from types import IntType, LongType, FloatType, NoneType, TupleType, ListType, DictType, InstanceType, \
        StringType, UnicodeType, ObjectType, BooleanType, ClassType, TypeType
except ImportError:
    # Python 3
    long = int
    IntType, LongType, FloatType, NoneType = int, long, float, type(None)
    TupleType, ListType, DictType, InstanceType = tuple, list, dict, None
    StringType, UnicodeType, ObjectType, BooleanType = bytes, str, object, bool
    # NOTE(review): ClassType and TypeType are NOT defined on this branch,
    # so any code that references them is broken under Python 3.
import array
# 'set' is a builtin from Python 2.4 on; older interpreters need the
# (long-removed) sets module.
try:
    set
except NameError:
    from sets import Set as set
def Bool2Str(s, d):
    """Render a boolean as the SQL literal '0' or '1'; *d* is ignored."""
    return str(int(s))
def Str2Set(s):
    """Parse a MySQL SET column value like 'a,b,c' into a Python set,
    dropping empty elements."""
    return {item for item in s.split(',') if item}
def Set2Str(s, d):
    # Serialize a Python set into a quoted SQL SET literal by joining the
    # elements with commas; quoting is delegated to _mysql.string_literal.
    return string_literal(','.join(s), d)
def Thing2Str(s, d):
    """Convert something into a string via str(); *d* is ignored."""
    return "{0!s}".format(s)
def Unicode2Str(s, d):
    """Convert a unicode object to a string using the default encoding.
    This is only used as a placeholder for the real function, which
    is connection-dependent."""
    # The connection-specific converter replaces this with one that uses
    # the connection's charset; here we rely on the interpreter default.
    return s.encode()
# Longs render the same as any other value run through str().
Long2Int = Thing2Str
def Float2Str(o, d):
    """Format a float as a SQL literal with 15 significant digits."""
    return format(o, '.15g')
def None2NULL(o, d):
    """Convert None to NULL."""
    # NULL is the literal string constant exported by _mysql.
    return NULL  # duh
def Thing2Literal(o, d):
    """Convert something into a SQL string literal. If using
    MySQL-3.23 or newer, string_literal() is a method of the
    _mysql.MYSQL object, and this function will be overridden with
    that method when the connection is created."""
    return string_literal(o, d)
def Instance2Str(o, d):
    """
    Convert an instance to a string representation.

    If the instance's exact class has an entry in *d*, that converter is
    used directly.  Otherwise the first class in d.keys() that *o* is an
    instance of is used (skipping entries mapped to Instance2Str itself,
    which would recurse), the result is cached under o.__class__, and
    the converter is applied.  Falls back to the StringType converter.

    Fixes over the legacy version: the old code referenced the never-
    imported ``types`` module (NameError) and subscripted a ``filter``
    object (TypeError on Python 3); ClassType/TypeType are also undefined
    on the Python 3 compat branch.  isinstance(x, type) subsumes both
    old-style-class and type checks on Python 3.
    """
    if o.__class__ in d:
        return d[o.__class__](o, d)
    # First class key that o is an instance of; exclude the default
    # Instance2Str entries to avoid infinite recursion.
    cl = [x for x in d.keys()
          if isinstance(x, type) and isinstance(o, x)
          and d[x] is not Instance2Str]
    if not cl:
        return d[StringType](o, d)
    # Cache so the next instance of this exact class converts directly.
    d[o.__class__] = d[cl[0]]
    return d[cl[0]](o, d)
def char_array(s):
    # Wrap a string in a char array.
    # NOTE(review): the 'c' typecode is Python 2 only -- it was removed in
    # Python 3, where this call raises ValueError.
    return array.array('c', s)
def array2Str(o, d):
    """Convert an array.array to a SQL string literal of its raw bytes.

    Fix: array.tostring() was removed in Python 3.9; prefer tobytes()
    (available since Python 3.2 / present alongside tostring earlier)
    and fall back only on interpreters that lack it.
    """
    raw = o.tobytes() if hasattr(o, 'tobytes') else o.tostring()
    return Thing2Literal(raw, d)
# Default conversion map.  Python type keys map to encoders producing SQL
# literals; FIELD_TYPE.* keys map to decoders for values returned by MySQL.
conversions = {
    # Python objects -> SQL literals
    IntType: Thing2Str,
    LongType: Long2Int,
    FloatType: Float2Str,
    NoneType: None2NULL,
    TupleType: escape_sequence,
    ListType: escape_sequence,
    DictType: escape_dict,
    InstanceType: Instance2Str,
    array.ArrayType: array2Str,
    StringType: Thing2Literal, # default
    UnicodeType: Unicode2Str,
    ObjectType: Instance2Str,
    BooleanType: Bool2Str,
    DateTimeType: DateTime2literal,
    DateTimeDeltaType: DateTimeDelta2literal,
    set: Set2Str,
    # MySQL field types -> Python values
    FIELD_TYPE.TINY: int,
    FIELD_TYPE.SHORT: int,
    FIELD_TYPE.LONG: long,
    FIELD_TYPE.FLOAT: float,
    FIELD_TYPE.DOUBLE: float,
    # DECIMAL/NEWDECIMAL are upgraded to decimal.Decimal below when available.
    FIELD_TYPE.DECIMAL: float,
    FIELD_TYPE.NEWDECIMAL: float,
    FIELD_TYPE.LONGLONG: long,
    FIELD_TYPE.INT24: int,
    FIELD_TYPE.YEAR: int,
    FIELD_TYPE.SET: Str2Set,
    FIELD_TYPE.TIMESTAMP: mysql_timestamp_converter,
    FIELD_TYPE.DATETIME: DateTime_or_None,
    FIELD_TYPE.TIME: TimeDelta_or_None,
    FIELD_TYPE.DATE: Date_or_None,
    # String-ish columns: decode as str only when the BINARY flag is absent.
    FIELD_TYPE.BLOB: [
        (FLAG.BINARY, str),
    ],
    FIELD_TYPE.STRING: [
        (FLAG.BINARY, str),
    ],
    FIELD_TYPE.VAR_STRING: [
        (FLAG.BINARY, str),
    ],
    FIELD_TYPE.VARCHAR: [
        (FLAG.BINARY, str),
    ],
}
# Prefer exact decimal.Decimal for DECIMAL/NEWDECIMAL columns when the
# decimal module exists (Python >= 2.4); otherwise keep the lossy float
# converters registered above.
try:
    from decimal import Decimal
    conversions[FIELD_TYPE.DECIMAL] = Decimal
    conversions[FIELD_TYPE.NEWDECIMAL] = Decimal
except ImportError:
    pass
|
{
"content_hash": "df224bec99ea4cb1c9d43596362db8eb",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 108,
"avg_line_length": 27.674033149171272,
"alnum_prop": 0.6612098223198243,
"repo_name": "mdsafwan/Deal-My-Stuff",
"id": "14b1f522846e7f8d296e9976a3946004e2a05e5b",
"size": "5009",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Lib/site-packages/MySQLdb/converters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "898"
},
{
"name": "C",
"bytes": "521537"
},
{
"name": "C++",
"bytes": "125678"
},
{
"name": "CSS",
"bytes": "127882"
},
{
"name": "HTML",
"bytes": "172987"
},
{
"name": "JavaScript",
"bytes": "256471"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "7186078"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter WorkEntry.end to a plain DateTimeField with verbose name 'end'."""
    dependencies = [
        ('work_entries', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='workentry',
            name='end',
            field=models.DateTimeField(verbose_name='end'),
        ),
    ]
|
{
"content_hash": "4e4f2d0391d1ae2a61dd7a479afe7c54",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 59,
"avg_line_length": 21.166666666666668,
"alnum_prop": 0.5905511811023622,
"repo_name": "Clarity-89/clarityv2",
"id": "734c490986c737309d87cad2ca5eca3dce7eb13c",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/clarityv2/work_entries/migrations/0002_auto_20161219_1914.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "272"
},
{
"name": "Dockerfile",
"bytes": "2230"
},
{
"name": "HTML",
"bytes": "46778"
},
{
"name": "JavaScript",
"bytes": "5460"
},
{
"name": "Python",
"bytes": "131598"
},
{
"name": "SCSS",
"bytes": "18878"
},
{
"name": "Shell",
"bytes": "2008"
}
],
"symlink_target": ""
}
|
from fabric.api import cd
from fabric.api import run
from fabric.api import env
from fabric.api import prefix
from fabric.contrib.files import exists
# Fabric deployment target and SSH behavior.
env.user = 'dhites'
env.host_string = 'hites.org'
# Forward the local SSH agent so the remote git clone can authenticate.
env.forward_agent = True
env.no_agent = True
# Remote filesystem layout.
REPO_DIR = '~/repositories'
HITES_REPO_DIR = '~/repositories/hites'
PROJECT_DIR = '~/flask_env'
def _setup_repo():
    # First-time setup: create the repositories directory and clone the
    # project over SSH (relies on agent forwarding enabled above).
    run('mkdir -p %s' % REPO_DIR)
    with cd(REPO_DIR):
        run('git clone git@github.com:Nizebulous/hites.git')
def deploy():
    """Deploy the latest master: clone if needed, pull, install deps into the
    virtualenv, relink the app, and signal the server to restart."""
    if not exists(HITES_REPO_DIR):
        _setup_repo()
    # Update the working copy to the tip of origin/master.
    with cd(HITES_REPO_DIR):
        run('git fetch origin')
        run('git checkout master')
        run('git pull --rebase origin master')
    # Install requirements inside the project's virtualenv.
    with cd(HITES_REPO_DIR):
        with prefix('source %s/bin/activate' % PROJECT_DIR):
            run('pip install -r requirements.txt')
    with cd(PROJECT_DIR):
        run('ln -sf %s/hites' % (HITES_REPO_DIR))
        # Touching tmp/restart.txt tells the app server to reload the app.
        run('touch tmp/restart.txt')
{
"content_hash": "db82546b099c940ec4d9dde2c7273e60",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 60,
"avg_line_length": 25.783783783783782,
"alnum_prop": 0.640461215932914,
"repo_name": "Nizebulous/hites",
"id": "46aee94a30420ff34aefa2a86847fda8b6782c75",
"size": "954",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "532"
},
{
"name": "JavaScript",
"bytes": "4436"
},
{
"name": "Python",
"bytes": "8476"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
}
|
"""
Utils for parsing wikipedia category graph
"""
import sys
import re
from string import lower
from bz2 import *
from collections import defaultdict
from operator import itemgetter
from glob import glob
from math import log, exp
def open_or_bz2(file):
    """Open *file* for reading, transparently decompressing ``.bz2`` files."""
    if not file.endswith('.bz2'):
        return open(file)
    import bz2
    return bz2.BZ2File(file)
def log_add(x, y):
    """Return log(exp(x) + exp(y)) computed stably in log space.

    A value of exactly 0 is treated as "empty" and the other operand is
    returned unchanged.  When the operands differ by more than 16, the
    smaller contributes nothing at double precision, so the larger wins.
    """
    if x == 0:
        return y
    if y == 0:
        return x
    big, small = (x, y) if x > y else (y, x)
    if big - small > 16:
        return big
    return big + log(1 + exp(small - big))
# Minimum per-topic probability mass for a word to be kept when loading
# samples (see the load_append_*_samples functions).
TOLERANCE = 0.000001
def read_docify(docify_file, parse_counts=True):
    """Iterate over a (possibly bz2-compressed) docify file.

    Each line is tab-separated: the document title followed by its word
    tokens.  Yields (doc_title, words) tuples; when parse_counts is true
    each word token is run through parse_count first.

    NOTE(review): parse_count is not defined anywhere in this module, so
    the parse_counts=True path would raise NameError as written -- confirm
    whether it is expected to be injected by an importing module.
    """
    # NOTE(review): the extension check duplicates open_or_bz2, which
    # already dispatches on '.bz2'.
    if docify_file.endswith('.bz2'):
        f = open_or_bz2(docify_file)
    else:
        f = open(docify_file)
    for line in f:
        tokens = line.strip().split('\t')
        if parse_counts:
            doc_title, words = tokens[0], map(parse_count, tokens[1:])
        else:
            doc_title, words = tokens[0], tokens[1:]
        yield doc_title, words
def read_hlda_dictionary(dictionary_file):
    """Parse an hLDA dictionary file.

    The file has two sections separated by a blank line: first the
    document-to-category map (document rows interleaved with the category
    id/name rows they announce), then the word dictionary (id -> word).

    Returns (category_of, word_of, doc_of, doc_to_categories): the first
    two map numeric ids to names, doc_of maps a running document index to
    its title, and doc_to_categories maps a title to its category ids.
    """
    category_of = {}
    word_of = {}
    doc_of = {}
    doc_to_categories = {}
    # Category ids announced by the current document row whose id -> name
    # rows have not been seen yet.
    unvisited_categories = []
    visited_categories = set()
    word_dictionary_section = False
    sys.stderr.write('Loading document-topic map...\n')
    doc_count = 0
    lines = open_or_bz2(dictionary_file)
    for line in lines:
        if line.strip():
            tokens = line.strip().split('\t')
            if word_dictionary_section:
                # Word row: "<id>\t<word>"; base 0 parses hex ("0x...")
                # and decimal ids alike.
                assert(len(tokens) == 2)
                key, word = tokens
                key = int(key, 0)
                assert(key not in word_of)
                word_of[key] = word
            elif not unvisited_categories and not line.startswith('0x'): # REALLY NASTY HACK
                # Document row: title, document number, category ids.
                document, doc_no, categories = tokens[0], tokens[1], tokens[2:]
                categories = [int(x.strip(),0) for x in categories]
                # print 'original', set(categories)
                # print 'adding', set(categories).difference(visited_categories)
                # TODO: fix this bug where there can be multiple _c entries for
                # the same topic
                unvisited_categories = [x for x in categories if not x in visited_categories]
                doc_to_categories[document] = categories
                doc_of[doc_count] = document
                doc_count += 1
            else:
                # Category row for one of the ids announced by the most
                # recent document row: "<id>\t<name>".
                # print 'here bc not visited', unvisited_categories
                assert(len(tokens) == 2) # What
                key, category = tokens
                key = int(key, 0)
                # print '[%s]' % category
                # assert(category in unvisited_categories)
                assert(key not in category_of)
                if not key in unvisited_categories:
                    # NOTE(review): 'document' can be stale here (or unbound
                    # if a category row ever precedes any document row);
                    # this branch only logs, but confirm the input format
                    # guarantees a document row comes first.
                    sys.stderr.write('BUG: dup [%s] for %s\n' % (category,
                                                                 document))
                else:
                    unvisited_categories.remove(key)
                visited_categories.add(key)
                category_of[key] = category
        else:
            # A blank line separates the doc-topic map from the word
            # dictionary section.
            sys.stderr.write('Loading word dictionary...\n')
            word_dictionary_section = True
    return (category_of, word_of, doc_of, doc_to_categories)
def glob_samples(Moniker, MapOrBayes, ToKeep=20):
    """Return up to ToKeep sample file paths matching Moniker.

    'map' mode matches MAP-estimate files (*-best.hlda*); anything else
    matches posterior sample files (*-sample*).  Exits the process when
    nothing matches.
    """
    suffix = '*-best.hlda*' if MapOrBayes == 'map' else '*-sample*'
    Header = Moniker + suffix
    Samples = glob(Header)
    sys.stderr.write('%s\n' % Header)
    sys.stderr.write('%d\n' % len(Samples))
    if not Samples:
        sys.stderr.write('FAIL No samples matched %s\n' % Header)
        sys.exit()
    if len(Samples) > ToKeep:
        # Keep only the last ToKeep matches, reversed.
        Samples = Samples[::-1][:ToKeep]
        sys.stderr.write('keeping these %r\n' % Samples)
    return Samples
def load_all_ncrp_samples(Moniker, MapOrBayes, restrict_docs=False, ToKeep=20):
    """Load every matching ncrp sample and accumulate sufficient statistics.

    Returns (node_term_dist, term_node_dist, node_doc_dist, doc_node_dist,
    prev, alpha, eta, V, T): four nested count tables, the DAG structure,
    and the hyperparameters/vocabulary size read from the samples.
    """
    def _counts():
        return defaultdict(int)
    node_term_dist = defaultdict(_counts)
    term_node_dist = defaultdict(_counts)
    node_doc_dist = defaultdict(_counts)
    doc_node_dist = defaultdict(_counts)
    prev = defaultdict(set)
    sys.stderr.write('Quantizing to TOLERANCE=%f\n' % TOLERANCE)
    sample_files = glob_samples(Moniker, MapOrBayes, ToKeep)
    alpha, eta, V, T = load_append_ncrp_samples(sample_files,
                                                node_term_dist,
                                                term_node_dist,
                                                node_doc_dist,
                                                doc_node_dist,
                                                prev,
                                                restrict_docs=restrict_docs)
    return (node_term_dist, term_node_dist, node_doc_dist, doc_node_dist, prev,
            alpha, eta, V, T)
def ncrp_sample_iterator(sample):
    """
    Yields node data from an ncrp sample file.

    For each node row yields (node, parsed_nw, parsed_nd, nwsum, tables,
    alpha, eta, L): parsed word/doc count pairs, the node's total word
    mass, the raw table field, and the hyperparameters read from the
    sample's 'll =' header line.

    NOTE(review): alpha/eta/L are only bound once a header line has been
    seen; a sample file without one would raise NameError at the first
    node row -- confirm the format always starts with a header.
    """
    visited = set()
    for (line_no, line) in enumerate(open_or_bz2(sample)):
        if line.startswith('ll ='):
            # Header line carries the hyperparameters.
            m = re.search('alpha = (.*) eta = (.*) gamma .* L = (.*)', line)
            alpha = float(m.group(1))
            eta = float(m.group(2))
            L = int(m.group(3))
            sys.stderr.write('Got alpha = %f eta = %f L = %d\n' % (alpha, eta, L))
            continue
        line = line.replace('\n','')
        # Node rows are '||'-separated; stop on the first malformed row.
        try:
            (node, _, m, raw_nw, raw_nd, tables) = line.split('||')
        except:
            sys.stderr.write('Excepted on %s\n' % sample)
            break
        # nodename is the actual memory address (uid of the node) nodelabel
        # is the tree label
        node = int(node.replace('\t',''), 0)
        assert node not in visited
        visited.add(node)
        # Count fields are tab-separated '<name>@@@<count>' pairs.
        parsed_nw = [x.rsplit('@@@') for x in raw_nw.split('\t') if x]
        parsed_nd = [x.rsplit('@@@') for x in raw_nd.split('\t') if x]
        nwsum = float(sum([int(c) for _,c in parsed_nw]))
        yield (node, parsed_nw, parsed_nd, nwsum, tables, alpha, eta, L)
def collect_term_term_count(Samples):
    """
    Count how often two terms co-occur in the same topic across samples.

    Returns (joint, marginal): joint maps a sorted (term, term) pair to
    its co-occurrence count; marginal maps each term to its pair-weighted
    occurrence count.  Interned strings are used as keys to save memory.
    """
    # This version doesn't use the intermediate per-node tables.
    joint = defaultdict(float)
    marginal = defaultdict(float)
    for sample_no, sample_file in enumerate(Samples):
        for (node, nw, nd, nwsum, tables, alpha, eta, L) in ncrp_sample_iterator(sample_file):
            for left_idx, (left_word, _lc) in enumerate(nw):
                for right_idx, (right_word, _rc) in enumerate(nw):
                    marginal[intern(left_word)] += 1.0
                    marginal[intern(right_word)] += 1.0
                    if left_idx < right_idx:
                        (w1, w2) = sorted([left_word, right_word])
                        joint[(intern(w1),intern(w2))] += 1.0
    return (joint, marginal)
def collect_term_pmi(Samples):
    """
    Compute pointwise mutual information for every co-occurring term pair.

    Returns a dict mapping (term, term) -> (pmi, co-occurrence frequency).
    """
    joint, marginal = collect_term_term_count(Samples)
    pmi_and_freq = defaultdict(float)
    for (left, right), freq in joint.iteritems():
        score = log(freq) - log(marginal[left]*marginal[right])
        pmi_and_freq[(intern(left),intern(right))] = (score, freq)
    return pmi_and_freq
def load_append_ncrp_samples(Samples, node_term_dist, term_node_dist,
                             node_doc_dist, doc_node_dist, prev, restrict_docs=set()):
    """Accumulate counts from each ncrp sample into the given tables.

    Mutates the four *_dist tables and prev in place; returns
    (alpha, eta, V, L) where V is the retained vocabulary size.

    NOTE(review): restrict_docs is accepted but never consulted here --
    confirm whether doc filtering was meant to happen in this loop.  The
    return also relies on loop variables, so an empty Samples list raises
    NameError.  The mutable set() default is shared across calls.
    """
    V = set()
    for file_no, file in enumerate(Samples):
        for (node, nw, nd, nwsum, tables, alpha, eta, L) in ncrp_sample_iterator(file):
            for (word, count) in nw:
                # Drop words whose share of the node's mass is negligible.
                if float(count) / nwsum > TOLERANCE:
                    node_term_dist[node][intern(word)] += int(count)
                    term_node_dist[intern(word)][node] += int(count)
                    V.add(intern(word))
            for (_, doc_name, count) in nd:
                node_doc_dist[node][intern(doc_name)] += int(count)
                doc_node_dist[intern(doc_name)][node] += int(count)
            # prev stores the DAG structure
            tables = [int(x,0) for x in tables.split('\t') if x != '']
            for t in tables:
                prev[t].add(node)
    return (alpha, eta, len(V), L)
def get_smoothed_terms_for(doc, doc_to_categories, node_doc_dist,
                           node_term_dist, word_of, category_of, alpha=0,
                           eta=0):
    """Return a smoothed log-probability distribution over words for doc.

    Mixes the word distributions of the document's categories, weighting
    each category by the document's (alpha-smoothed) share of it and
    smoothing word probabilities with eta.  Returns a dict mapping
    interned word -> log probability; empty when the document is unknown.
    """
    pw = defaultdict(float)
    if not doc_to_categories.has_key(doc):
        sys.stderr.write('missing [%s]\n' % doc)
        return pw
    sys.stderr.write('Quantizing to TOLERANCE=%f\n' % TOLERANCE)
    # T = number of mixture components, V = vocabulary size (smoothing
    # denominators).
    T = len(doc_to_categories[doc])
    V = len(word_of)
    if alpha != None:
        sys.stderr.write('Smoothing with alpha=%f eta=%f\n' % (alpha,eta))
    # Total category mass assigned to this document.
    dsum = float(sum([node_doc_dist[category_of[c]][doc] for c in
                      doc_to_categories[doc]]))
    for raw_node in doc_to_categories[doc]:
        node = category_of[raw_node]
        d = node_doc_dist[node][doc]
        # print node, 'in ndd?', node_doc_dist.has_key(node)
        # print node, 'in ntd?', node_term_dist.has_key(node)
        sys.stderr.write('found %d/%d of [%s] in [%s]\n' % (d, dsum, doc, node))
        # log P(category | doc), alpha-smoothed.
        lpd = log(float(d)+alpha) - log(dsum+alpha*T)
        wsum = float(sum(node_term_dist[node].itervalues()))
        for word, w in node_term_dist[node].iteritems():
            # log P(word, category | doc) with eta smoothing; accumulate
            # across categories in log space.
            lp = lpd + log(float(w)+eta) - log(wsum+eta*V)
            pw[intern(word)] = log_add(lp, pw[intern(word)])
    return pw
def HACK_ncrp_get_smoothed_terms_for(doc, doc_node_dist, node_doc_dist, node_term_dist, V, T, alpha=0, eta=0):
    """
    ncrp variant of get_smoothed_terms_for.

    The bug here is that we can't get a list of all the possible nodes for doc;
    hence instead we have to just rely on where it is actually present
    (problematic).  Returns a dict mapping interned word -> log probability.
    """
    pw = defaultdict(float)
    sys.stderr.write('USING HACK SMOOTHED TERMS\n')
    sys.stderr.write('Quantizing to TOLERANCE=%f\n' % TOLERANCE)
    if alpha != None:
        sys.stderr.write('Smoothing with alpha=%f eta=%f\n' % (alpha,eta))
    # Total node mass for this document.
    dsum = float(sum(doc_node_dist[doc].itervalues()))
    assert doc_node_dist.has_key(doc)
    for node, d in doc_node_dist[doc].iteritems():
        sys.stderr.write('found %d/%d of [%s] in [%s]\n' % (d, dsum, doc, node))
        # log P(node | doc), alpha-smoothed.
        lpd = log(float(d)+alpha) - log(dsum+alpha*T)
        wsum = float(sum(node_term_dist[node].itervalues()))
        for word, w in node_term_dist[node].iteritems():
            # log P(word, node | doc) with eta smoothing, accumulated in
            # log space.
            lp = lpd + log(float(w)+eta) - log(wsum+eta*V)
            pw[intern(word)] = log_add(lp, pw[intern(word)])
    return pw
def build_sort(dist, to_show=100):
    """
    Summarize a nested {concept: {attribute: count}} table.

    Returns a list of (total_count, concept, formatted_attributes) tuples
    where formatted_attributes lists the top `to_show` attributes by
    count, one per tab-indented line.
    """
    summary = []
    for concept in dist.keys():
        ranked = sorted(dist[concept].items(), key=itemgetter(1),
                        reverse=True)
        pretty = ['%s %d' % (name, count) for (name, count) in ranked[:to_show]]
        attribs = '\t%s' % '\n\t'.join(pretty)
        total = sum(dist[concept].values())
        summary.append((total, concept, attribs))
    return summary
def load_append_llda_samples(samples, word_of, doc_of, category_of, node_term_dist,
        term_node_dist, node_doc_dist, doc_node_dist, restrict_categories=set(),
        skip_noise=False):
    """Accumulate word/doc counts from labeled-LDA sample files.

    Mutates the four *_dist tables in place, keyed by (interned) category
    and word/doc *names* rather than numeric ids.  Returns the
    (alpha, eta) hyperparameters read from the samples' header lines.

    NOTE(review): parse_int_float_count is not defined in this module --
    confirm it is supplied elsewhere, otherwise count parsing raises
    NameError.  Also, when a row has only two fields and w != '||', d is
    a list and d.split('\\t') would raise -- confirm the format rules this
    out.
    """
    alpha, eta = 0, 0
    sys.stderr.write('Loading samples...\n')
    for sample in samples:
        sys.stderr.write('%s\n' % sample)
        f = open_or_bz2(sample)
        for line in f:
            if line.startswith('ll ='):
                # Header line carrying the hyperparameters, e.g.:
                # ll = -434060662.375486 (-434060662.375486 at 123) -627048446 alpha = 0.001000 eta = 0.100000 L = 454
                # ll = -432994653.484200 (-432994653.484200 at 100) -1071382526 alpha = 0.001000 eta = 0.100000 L = 211
                m = re.search('ll = .* alpha = (.*) eta = (.*) L = .*', line)
                alpha = float(m.group(1))
                eta = float(m.group(2))
                sys.stderr.write('Got alpha = %f eta = %f\n' % (alpha, eta))
                continue
            # Node row: "<concept id> \t||\t <m> \t||\t <words> [\t||\t <docs>]".
            concept, _, rest = line.partition('\t||\t')
            concept = int(concept, 0)
            # Optionally skip NOISE categories and anything outside the
            # restricted category set.
            if (not skip_noise or not category_of[concept].startswith('NOISE')) and \
               (not restrict_categories or concept in restrict_categories):
                tokens = rest.strip().split('\t||\t')
                if len(tokens) == 3:
                    m, w, d = tokens
                else:
                    assert len(tokens) == 2
                    m, w = tokens
                    d = []
                if w != '||':
                    d = map(parse_int_float_count, d.split('\t'))
                    w = map(parse_int_float_count, w.split('\t'))
                    # print 'Found', len(d), 'docs and', len(w), 'terms at', category_of[concept]
                    assert concept in category_of
                    nw_sum = float(sum([c for _,c in w]))
                    for ww, c in w:
                        assert ww in word_of
                        if nw_sum < 10000 or float(c) / nw_sum > TOLERANCE: # Allow small nodes and high prob things
                            node_term_dist[intern(category_of[concept])][intern(word_of[ww])] += c
                            term_node_dist[intern(word_of[ww])][intern(category_of[concept])] += c
                    for dd, c in d:
                        assert dd in doc_of
                        node_doc_dist[intern(category_of[concept])][intern(doc_of[dd])] += c
                        doc_node_dist[intern(doc_of[dd])][intern(category_of[concept])] += c
    return alpha, eta
def load_all_llda_samples(DictionaryFile, MapOrBayes, restrict_docs=set(),
                          skip_noise=False):
    """Load the dictionary and every llda sample associated with it.

    Returns (word_of, doc_of, category_of, doc_to_categories,
    node_term_dist, term_node_dist, node_doc_dist, doc_node_dist,
    alpha, eta).
    """
    def _counts():
        return defaultdict(int)
    node_term_dist = defaultdict(_counts)
    term_node_dist = defaultdict(_counts)
    node_doc_dist = defaultdict(_counts)
    doc_node_dist = defaultdict(_counts)
    sys.stderr.write('Quantizing to TOLERANCE=%f\n' % TOLERANCE)
    # MAP estimates live in *-best* files, posterior draws in *-sample*.
    base = DictionaryFile.split('.dictionary')[0]
    Header = base + ('-best' if MapOrBayes == 'map' else '-sample')
    Samples = glob(Header+'*')
    sys.stderr.write('%s\n' % Header)
    sys.stderr.write('%d\n' % len(Samples))
    # Load the dictionary.
    (category_of, word_of, doc_of, doc_to_categories) = read_hlda_dictionary(DictionaryFile)
    # Compute all the categories covered by the interesting docs.
    restrict_categories = set()
    for doc in restrict_docs:
        if doc in doc_to_categories:
            sys.stderr.write('FOUND [%s] at %r\n' % (doc, [category_of[c] for c
                                                           in doc_to_categories[doc]]))
            restrict_categories.update(doc_to_categories[doc])
        else:
            sys.stderr.write('MISSING [%s]\n' % (doc))
    # Actually load the samples.
    alpha, eta = load_append_llda_samples(Samples, word_of, doc_of, category_of,
            node_term_dist, term_node_dist, node_doc_dist, doc_node_dist,
            restrict_categories=restrict_categories, skip_noise=skip_noise)
    return (word_of, doc_of, category_of, doc_to_categories, node_term_dist,
            term_node_dist, node_doc_dist, doc_node_dist, alpha, eta)
|
{
"content_hash": "dd247b60a6e220ca91da5b738015fca8",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 139,
"avg_line_length": 39.00246305418719,
"alnum_prop": 0.5532680770445216,
"repo_name": "josephreisinger/lvm-toolkit",
"id": "71acec579d261bcf34eb953e0db6ec2978480eb1",
"size": "15835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/model_file_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "366013"
},
{
"name": "Python",
"bytes": "16840"
},
{
"name": "Shell",
"bytes": "10750"
}
],
"symlink_target": ""
}
|
from PyObjCTools.TestSupport import *
from AppKit import *
# On Python 3 the builtin `unicode` is gone; alias it to `str` so the
# isinstance checks below work under both interpreters.
try:
    unicode
except NameError:
    unicode = str
class TestNSPrintInfo (TestCase):
    """Sanity checks for the NSPrintInfo constants and method signatures
    exposed by the PyObjC AppKit bindings."""
    def testConstants(self):
        """The enum values are fixed integers; the print-info dictionary
        keys are all NSString constants (bridged to unicode)."""
        # Orientation and pagination enums.
        self.assertEqual(NSPortraitOrientation, 0)
        self.assertEqual(NSLandscapeOrientation, 1)
        self.assertEqual(NSAutoPagination, 0)
        self.assertEqual(NSFitPagination, 1)
        self.assertEqual(NSClipPagination, 2)
        # Job disposition constants.
        self.assertIsInstance(NSPrintSpoolJob, unicode)
        self.assertIsInstance(NSPrintPreviewJob, unicode)
        self.assertIsInstance(NSPrintSaveJob, unicode)
        self.assertIsInstance(NSPrintCancelJob, unicode)
        # Page layout keys.
        self.assertIsInstance(NSPrintPaperName, unicode)
        self.assertIsInstance(NSPrintPaperSize, unicode)
        self.assertIsInstance(NSPrintOrientation, unicode)
        self.assertIsInstance(NSPrintScalingFactor, unicode)
        self.assertIsInstance(NSPrintLeftMargin, unicode)
        self.assertIsInstance(NSPrintRightMargin, unicode)
        self.assertIsInstance(NSPrintTopMargin, unicode)
        self.assertIsInstance(NSPrintBottomMargin, unicode)
        self.assertIsInstance(NSPrintHorizontallyCentered, unicode)
        self.assertIsInstance(NSPrintVerticallyCentered, unicode)
        self.assertIsInstance(NSPrintHorizontalPagination, unicode)
        self.assertIsInstance(NSPrintVerticalPagination, unicode)
        # Print job keys.
        self.assertIsInstance(NSPrintPrinter, unicode)
        self.assertIsInstance(NSPrintCopies, unicode)
        self.assertIsInstance(NSPrintAllPages, unicode)
        self.assertIsInstance(NSPrintFirstPage, unicode)
        self.assertIsInstance(NSPrintLastPage, unicode)
        self.assertIsInstance(NSPrintMustCollate, unicode)
        self.assertIsInstance(NSPrintReversePageOrder, unicode)
        self.assertIsInstance(NSPrintJobDisposition, unicode)
        self.assertIsInstance(NSPrintSavePath, unicode)
        self.assertIsInstance(NSPrintPagesAcross, unicode)
        self.assertIsInstance(NSPrintPagesDown, unicode)
        self.assertIsInstance(NSPrintTime, unicode)
        self.assertIsInstance(NSPrintDetailedErrorReporting, unicode)
        self.assertIsInstance(NSPrintFaxNumber, unicode)
        self.assertIsInstance(NSPrintPrinterName, unicode)
        self.assertIsInstance(NSPrintHeaderAndFooter, unicode)
        self.assertIsInstance(NSPrintFormName, unicode)
        self.assertIsInstance(NSPrintJobFeatures, unicode)
        self.assertIsInstance(NSPrintManualFeed, unicode)
        self.assertIsInstance(NSPrintPagesPerSheet, unicode)
        self.assertIsInstance(NSPrintPaperFeed, unicode)
        # Fax keys.
        self.assertIsInstance(NSPrintFaxCoverSheetName, unicode)
        self.assertIsInstance(NSPrintFaxHighResolution, unicode)
        self.assertIsInstance(NSPrintFaxModem, unicode)
        self.assertIsInstance(NSPrintFaxReceiverNames, unicode)
        self.assertIsInstance(NSPrintFaxReceiverNumbers, unicode)
        self.assertIsInstance(NSPrintFaxReturnReceipt, unicode)
        self.assertIsInstance(NSPrintFaxSendTime, unicode)
        self.assertIsInstance(NSPrintFaxTrimPageEnds, unicode)
        self.assertIsInstance(NSPrintFaxUseCoverSheet, unicode)
        self.assertIsInstance(NSPrintFaxJob, unicode)
    def testMethods(self):
        """BOOL-typed results/arguments must be bridged as Python bools."""
        self.assertResultIsBOOL(NSPrintInfo.isHorizontallyCentered)
        self.assertResultIsBOOL(NSPrintInfo.isVerticallyCentered)
        self.assertArgIsBOOL(NSPrintInfo.setHorizontallyCentered_, 0)
        self.assertArgIsBOOL(NSPrintInfo.setVerticallyCentered_, 0)
    @min_os_level('10.6')
    def testConstants10_6(self):
        # Constants introduced in OS X 10.6.
        self.assertIsInstance(NSPrintSelectionOnly, unicode)
        self.assertIsInstance(NSPrintJobSavingURL, unicode)
        self.assertIsInstance(NSPrintJobSavingFileNameExtensionHidden, unicode)
    @min_os_level('10.6')
    def testMethods10_6(self):
        # Methods introduced in OS X 10.6.
        self.assertResultIsBOOL(NSPrintInfo.isSelectionOnly)
        self.assertArgIsBOOL(NSPrintInfo.setSelectionOnly_, 0)
# `main` comes from the PyObjCTools.TestSupport star import -- presumably
# the framework's unittest runner; confirm against that module.
if __name__ == "__main__":
    main()
|
{
"content_hash": "f8a11e017222f548e0d34b8c302a2d2a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 46.12643678160919,
"alnum_prop": 0.7530525791178669,
"repo_name": "Khan/pyobjc-framework-Cocoa",
"id": "c9999fd144635b40828563032abfd1669a87da33",
"size": "4014",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PyObjCTest/test_nsprintinfo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "M",
"bytes": "5481"
},
{
"name": "Objective-C",
"bytes": "213902"
},
{
"name": "Python",
"bytes": "2450939"
}
],
"symlink_target": ""
}
|
# Module authorship metadata.
__author__ = 'Aaron Hosford'
import unittest
from xcs.bitstrings import BitString
from xcs.scenarios import MUXProblem
class TestMUXProblem(unittest.TestCase):
    """Behavioral tests for the multiplexer (MUX) benchmark scenario."""

    def setUp(self):
        # Fresh scenario with 10 training cycles for every test.
        self.scenario = MUXProblem(10)

    def test_get_possible_actions(self):
        # The MUX problem is a binary decision: answer True or False.
        actions = self.scenario.get_possible_actions()
        self.assertTrue(len(actions) == 2)
        self.assertTrue(True in actions)
        self.assertTrue(False in actions)

    def test_sense(self):
        # A situation is the address bits plus one bit per addressable line.
        expected_len = (self.scenario.address_size +
                        (1 << self.scenario.address_size))
        first = self.scenario.sense()
        self.assertIsInstance(first, BitString)
        self.assertTrue(len(first) == expected_len)
        # Situations must vary over time; fail if they never change.
        while self.scenario.more():
            current = self.scenario.sense()
            self.assertIsInstance(current, BitString)
            self.assertTrue(len(current) == expected_len)
            if current != first:
                break
        else:
            self.fail("All situations are the same.")

    def test_execute(self):
        # Reward is 1 for reporting the addressed bit, 0 for its negation.
        situation = self.scenario.sense()
        address = int(situation[:self.scenario.address_size])
        addressed_bit = situation[self.scenario.address_size + address]
        self.assertEqual(1, self.scenario.execute(addressed_bit))
        self.assertEqual(0, self.scenario.execute(not addressed_bit))

    def test_more(self):
        # The scenario ends exactly after the configured training cycles.
        self.scenario.reset()
        for _ in range(self.scenario.initial_training_cycles):
            self.scenario.sense()
            self.assertTrue(self.scenario.more())
            self.scenario.execute(False)
        self.assertFalse(self.scenario.more())
def main():
    """Entry point: run this module's test cases."""
    unittest.main()
if __name__ == "__main__":
    main()
|
{
"content_hash": "298ef47c23193cc67b5675595b80a43d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 62,
"avg_line_length": 31.581818181818182,
"alnum_prop": 0.6234887737478411,
"repo_name": "hosford42/xcs",
"id": "ef5b1ebe1816280dbd8a43787049e74637aa7fc9",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_MUXProblem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "200294"
}
],
"symlink_target": ""
}
|
import os
import importlib
import random
import logging
from flask.ext.script import Command
# Module logger.  NOTE(review): this grabs the *root* logger; the usual
# convention is logging.getLogger(__name__) -- confirm root is intended.
logger = logging.getLogger()
def load_from_packages(app):
    """Load blueprints from installed packages (not implemented yet)."""
    pass
def load_from_folder(app):
    """Discover and register blueprints living in the app's modules folder.

    Scans BLUEPRINTS_PATH (default 'modules') under PROJECT_ROOT for
    packages, imports each one, and registers the blueprint object exposed
    under BLUEPRINTS_OBJECT_NAME (default 'module').  A package is skipped
    when it contains a DISABLED marker file or lacks __init__.py.  Name
    clashes are resolved by appending random bits to the blueprint name.

    Bulk of the code taken from:
    https://github.com/smartboyathome/
    Cheshire-Engine/blob/master/ScoringServer/utils.py
    """
    blueprints_path = app.config.get('BLUEPRINTS_PATH', 'modules')
    path = os.path.join(
        app.config.get('PROJECT_ROOT', '..'),
        blueprints_path
    )
    base_module_name = ".".join([app.name, blueprints_path])
    dir_list = os.listdir(path)
    mods = {}
    object_name = app.config.get('BLUEPRINTS_OBJECT_NAME', 'module')
    for fname in dir_list:
        # Eligible: a package directory with __init__.py and no DISABLED
        # marker file.
        if not os.path.exists(os.path.join(path, fname, 'DISABLED')) and \
                os.path.isdir(os.path.join(path, fname)) and \
                os.path.exists(os.path.join(path, fname, '__init__.py')):
            # register blueprint object
            module_name = ".".join([base_module_name, fname])
            mods[fname] = importlib.import_module(module_name)
            blueprint = getattr(mods[fname], object_name)
            if blueprint.name not in app.blueprints:
                app.register_blueprint(blueprint)
            else:
                # Duplicate blueprint name: rename with random bits so both
                # can coexist, and warn about the conflict.
                blueprint.name += str(random.getrandbits(8))
                app.register_blueprint(blueprint)
                logger.warning(
                    "CONFLICT:%s already registered, using %s",
                    fname,
                    blueprint.name
                )
            # register the package's optional admin submodule
            try:
                importlib.import_module(".".join([module_name, 'admin']))
            except ImportError:
                logger.info("%s module does not define admin", fname)
    logger.info("%s modules loaded", mods.keys())
def load_blueprint_commands(manager):
    """Register each blueprint package's management commands with manager.

    Mirrors load_from_folder's directory scan, importing the optional
    `commands` submodule of each enabled package and adding every Command
    subclass it defines; duplicate command names get random bits appended.
    """
    app = manager.app
    blueprints_path = app.config.get('BLUEPRINTS_PATH', 'modules')
    path = os.path.join(
        app.config.get('PROJECT_ROOT', '..'),
        blueprints_path
    )
    base_module_name = ".".join([app.name, blueprints_path])
    dir_list = os.listdir(path)
    mods = {}
    for fname in dir_list:
        # Same eligibility rule as load_from_folder.
        if not os.path.exists(os.path.join(path, fname, 'DISABLED')) and \
                os.path.isdir(os.path.join(path, fname)) and \
                os.path.exists(os.path.join(path, fname, '__init__.py')):
            # register management commands
            module_name = ".".join([base_module_name, fname])
            try:
                mod = importlib.import_module(
                    ".".join([module_name, 'commands'])
                )
                mods[fname] = mod
                for obj_name in dir(mod):
                    obj = getattr(mod, obj_name)
                    # NOTE(review): type(obj) == type only matches classes
                    # whose metaclass is exactly `type`; isinstance(obj, type)
                    # would be the broader idiom -- confirm the narrower
                    # test is intentional.
                    if obj_name != 'Command' and type(obj) == type and \
                            issubclass(obj, Command):
                        # Commands may advertise a custom name.
                        name = getattr(obj, 'command_name', obj_name.lower())
                        if name in manager._commands:
                            name += str(random.getrandbits(8))
                        logger.info("registering command %s", name)
                        manager.add_command(name, obj())
            except ImportError:
                logger.info("%s module does not define commands", fname)
    logger.info("%s management commands loaded", mods.keys())
|
{
"content_hash": "f78de54b469df2fa67280530775985bd",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 77,
"avg_line_length": 37.173469387755105,
"alnum_prop": 0.5492725775459786,
"repo_name": "felipevolpone/quokka",
"id": "6e976ebc0852a94e49b670419c8af0468e0efa77",
"size": "3659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quokka/ext/blueprints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "104"
},
{
"name": "CSS",
"bytes": "68245"
},
{
"name": "HTML",
"bytes": "169100"
},
{
"name": "JavaScript",
"bytes": "546416"
},
{
"name": "Makefile",
"bytes": "466"
},
{
"name": "Python",
"bytes": "178190"
},
{
"name": "Shell",
"bytes": "9253"
}
],
"symlink_target": ""
}
|
__author__ = 'Marko A. Rodriguez (http://markorodriguez.com)'
import datetime
import calendar
import json
import uuid
import math
from decimal import *
from mock import Mock
from gremlin_python.statics import *
from gremlin_python.structure.graph import Vertex, Edge, Property, VertexProperty, Path
from gremlin_python.structure.io.graphsonV3d0 import GraphSONWriter, GraphSONReader, GraphSONUtil
import gremlin_python.structure.io.graphsonV3d0
from gremlin_python.process.traversal import P, Merge, Barrier, Order, Operator, Direction
from gremlin_python.process.strategies import SubgraphStrategy
from gremlin_python.process.graph_traversal import __
# `from` is a Python keyword, so the Direction.OUT alias gets a trailing
# underscore.  NOTE(review): unused in the visible tests -- confirm it is
# kept deliberately.
from_ = Direction.OUT
class TestGraphSONReader(object):
graphson_reader = GraphSONReader()
def test_collections(self):
x = self.graphson_reader.read_object(
json.dumps({"@type": "g:List", "@value": [{"@type": "g:Int32", "@value": 1},
{"@type": "g:Int32", "@value": 2},
"3"]}))
assert isinstance(x, list)
assert x[0] == 1
assert x[1] == 2
assert x[2] == "3"
##
x = self.graphson_reader.read_object(
json.dumps({"@type": "g:Set", "@value": [{"@type": "g:Int32", "@value": 1},
{"@type": "g:Int32", "@value": 2},
"3"]}))
# return a set as normal
assert isinstance(x, set)
assert x == set([1, 2, "3"])
x = self.graphson_reader.read_object(
json.dumps({"@type": "g:Set", "@value": [{"@type": "g:Int32", "@value": 1},
{"@type": "g:Int32", "@value": 2},
{"@type": "g:Float", "@value": 2.0},
"3"]}))
# coerce to list here because Java might return numerics of different types which python won't recognize
# see comments of TINKERPOP-1844 for more details
assert isinstance(x, list)
assert x == list([1, 2, 2.0, "3"])
##
x = self.graphson_reader.read_object(
json.dumps({"@type": "g:Map",
"@value": ['a', {"@type": "g:Int32", "@value": 1}, 'b', "marko"]}))
assert isinstance(x, dict)
assert x['a'] == 1
assert x['b'] == "marko"
assert len(x) == 2
# BulkSet gets coerced to a List - both have the same behavior
x = self.graphson_reader.read_object(
json.dumps({"@type": "g:BulkSet",
"@value": ["marko", {"@type": "g:Int64", "@value": 1}, "josh", {"@type": "g:Int64", "@value": 3}]}))
assert isinstance(x, list)
assert len(x) == 4
assert x.count("marko") == 1
assert x.count("josh") == 3
def test_number_input(self):
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:Byte",
"@value": 1
}))
assert isinstance(x, SingleByte)
assert 1 == x
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Int32",
"@value": 31
}))
assert isinstance(x, int)
assert 31 == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Int64",
"@value": 31
}))
assert isinstance(x, long)
assert long(31) == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Float",
"@value": 31.3
}))
assert isinstance(x, float)
assert 31.3 == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Double",
"@value": 31.2
}))
assert isinstance(x, float)
assert 31.2 == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Double",
"@value": "NaN"
}))
assert isinstance(x, float)
assert math.isnan(x)
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Double",
"@value": "Infinity"
}))
assert isinstance(x, float)
assert math.isinf(x) and x > 0
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "g:Double",
"@value": "-Infinity"
}))
assert isinstance(x, float)
assert math.isinf(x) and x < 0
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigDecimal",
"@value": 31.2
}))
assert isinstance(x, Decimal)
assert Decimal(31.2) == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigDecimal",
"@value": 123456789987654321123456789987654321
}))
assert isinstance(x, Decimal)
assert Decimal('123456789987654321123456789987654321') == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigDecimal",
"@value": "NaN"
}))
assert isinstance(x, Decimal)
assert math.isnan(x)
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigDecimal",
"@value": "Infinity"
}))
assert isinstance(x, Decimal)
assert math.isinf(x) and x > 0
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigDecimal",
"@value": "-Infinity"
}))
assert isinstance(x, Decimal)
assert math.isinf(x) and x < 0
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigInteger",
"@value": 31
}))
assert isinstance(x, long)
assert 31 == x
##
x = self.graphson_reader.read_object(json.dumps({
"@type": "gx:BigInteger",
"@value": 123456789987654321123456789987654321
}))
assert isinstance(x, long)
assert 123456789987654321123456789987654321 == x
    def test_graph(self):
        """Deserialize the graph element types: Vertex, VertexProperty,
        Edge and Property."""
        vertex = self.graphson_reader.read_object("""
        {"@type":"g:Vertex", "@value":{"id":{"@type":"g:Int32","@value":1},"label":"person","outE":{"created":[{"id":{"@type":"g:Int32","@value":9},"inV":{"@type":"g:Int32","@value":3},"properties":{"weight":{"@type":"g:Double","@value":0.4}}}],"knows":[{"id":{"@type":"g:Int32","@value":7},"inV":{"@type":"g:Int32","@value":2},"properties":{"weight":{"@type":"g:Double","@value":0.5}}},{"id":{"@type":"g:Int32","@value":8},"inV":{"@type":"g:Int32","@value":4},"properties":{"weight":{"@type":"g:Double","@value":1.0}}}]},"properties":{"name":[{"id":{"@type":"g:Int64","@value":0},"value":"marko"}],"age":[{"id":{"@type":"g:Int64","@value":1},"value":{"@type":"g:Int32","@value":29}}]}}}""")
        assert isinstance(vertex, Vertex)
        assert "person" == vertex.label
        assert 1 == vertex.id
        assert isinstance(vertex.id, int)
        # NOTE(review): this equality holding regardless of properties
        # suggests Vertex compares by id only -- confirm in graph.py.
        assert vertex == Vertex(1)
        ##
        # A vertex with no label falls back to the default "vertex".
        vertex = self.graphson_reader.read_object("""
        {"@type":"g:Vertex", "@value":{"id":{"@type":"g:Float","@value":45.23}}}""")
        assert isinstance(vertex, Vertex)
        assert 45.23 == vertex.id
        assert isinstance(vertex.id, FloatType)
        assert "vertex" == vertex.label
        assert vertex == Vertex(45.23)
        ##
        # VertexProperty with its parent vertex reference.
        vertex_property = self.graphson_reader.read_object("""
        {"@type":"g:VertexProperty", "@value":{"id":"anId","label":"aKey","value":true,"vertex":{"@type":"g:Int32","@value":9}}}""")
        assert isinstance(vertex_property, VertexProperty)
        assert "anId" == vertex_property.id
        assert "aKey" == vertex_property.label
        assert vertex_property.value
        assert vertex_property.vertex == Vertex(9)
        ##
        # VertexProperty without a parent vertex.
        vertex_property = self.graphson_reader.read_object("""
        {"@type":"g:VertexProperty", "@value":{"id":{"@type":"g:Int32","@value":1},"label":"name","value":"marko"}}""")
        assert isinstance(vertex_property, VertexProperty)
        assert 1 == vertex_property.id
        assert "name" == vertex_property.label
        assert "marko" == vertex_property.value
        assert vertex_property.vertex is None
        ##
        edge = self.graphson_reader.read_object("""
        {"@type":"g:Edge", "@value":{"id":{"@type":"g:Int64","@value":17},"label":"knows","inV":"x","outV":"y","inVLabel":"xLab","properties":{"aKey":"aValue","bKey":true}}}""")
        # print edge
        assert isinstance(edge, Edge)
        assert 17 == edge.id
        assert "knows" == edge.label
        # NOTE(review): the labels here ("xLabel"/"vertex") do not match the
        # wire data ("xLab") yet the asserts are expected to pass --
        # presumably equality compares vertex ids only; confirm.
        assert edge.inV == Vertex("x", "xLabel")
        assert edge.outV == Vertex("y", "vertex")
        ##
        # Property attached to an element (an Edge here).
        property = self.graphson_reader.read_object("""
        {"@type":"g:Property", "@value":{"key":"aKey","value":{"@type":"g:Int64","@value":17},"element":{"@type":"g:Edge","@value":{"id":{"@type":"g:Int64","@value":122},"label":"knows","inV":"x","outV":"y","inVLabel":"xLab"}}}}""")
        # print property
        assert isinstance(property, Property)
        assert "aKey" == property.key
        assert 17 == property.value
        assert Edge(122, Vertex("x"), "knows", Vertex("y")) == property.element
    def test_path(self):
        """A g:Path exposes its objects by position and by step label."""
        path = self.graphson_reader.read_object(
            """{"@type":"g:Path","@value":{"labels":{"@type":"g:List","@value":[{"@type":"g:Set","@value":["a"]},{"@type":"g:Set","@value":["b","c"]},{"@type":"g:Set","@value":[]}]},"objects":{"@type":"g:List","@value":[{"@type":"g:Vertex","@value":{"id":{"@type":"g:Int32","@value":1},"label":"person","properties":{"name":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":0},"value":"marko","label":"name"}}],"age":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":1},"value":{"@type":"g:Int32","@value":29},"label":"age"}}]}}},{"@type":"g:Vertex","@value":{"id":{"@type":"g:Int32","@value":3},"label":"software","properties":{"name":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":4},"value":"lop","label":"name"}}],"lang":[{"@type":"g:VertexProperty","@value":{"id":{"@type":"g:Int64","@value":5},"value":"java","label":"lang"}}]}}},"lop"]}}}"""
        )
        assert isinstance(path, Path)
        assert "path[v[1], v[3], lop]" == str(path)
        # Objects are addressable by index or by step label.
        assert Vertex(1) == path[0]
        assert Vertex(1) == path["a"]
        assert "lop" == path[2]
        assert 3 == len(path)
    def test_custom_mapping(self):
        """Custom deserializer maps extend or override the defaults without
        mutating the module-level registries."""
        # extended mapping
        class X(object):
            pass
        type_string = "test:Xtype"
        override_string = "g:Int64"
        serdes = Mock()
        reader = GraphSONReader(deserializer_map={type_string: serdes})
        assert type_string in reader.deserializers
        # base dicts are not modified
        assert type_string not in gremlin_python.structure.io.graphsonV3d0._deserializers
        x = X()
        o = reader.to_object({GraphSONUtil.TYPE_KEY: type_string, GraphSONUtil.VALUE_KEY: x})
        # The custom serdes must be invoked exactly once with the raw value.
        serdes.objectify.assert_called_once_with(x, reader)
        assert o is serdes.objectify()
        # overridden mapping
        type_string = "g:Int64"
        serdes = Mock()
        reader = GraphSONReader(deserializer_map={type_string: serdes, override_string: serdes})
        # The reader's copy diverges from the module default for this type.
        assert gremlin_python.structure.io.graphsonV3d0._deserializers[type_string] is not reader.deserializers[
            type_string]
        value = 3
        o = reader.to_object({GraphSONUtil.TYPE_KEY: type_string, GraphSONUtil.VALUE_KEY: value})
        serdes.objectify.assert_called_once_with(value, reader)
        assert o is serdes.objectify()
def test_datetime(self):
    """g:Date carries epoch milliseconds and must read back as a naive UTC datetime."""
    expected = datetime.datetime(2016, 12, 14, 16, 14, 36, 295000)
    epoch_seconds = calendar.timegm(expected.utctimetuple()) + expected.microsecond / 1e6
    epoch_millis = int(round(epoch_seconds * 1000))
    parsed = self.graphson_reader.read_object(json.dumps({"@type": "g:Date", "@value": epoch_millis}))
    assert isinstance(parsed, datetime.datetime)
    # TINKERPOP-1848
    assert expected == parsed
def test_timestamp(self):
    """g:Timestamp arrives as epoch millis and converts to fractional seconds."""
    raw = json.dumps({"@type": "g:Timestamp", "@value": 1481750076295})
    parsed = self.graphson_reader.read_object(raw)
    assert isinstance(parsed, timestamp)
    assert 1481750076.295 == float(parsed)
def test_duration(self):
    """gx:Duration uses ISO-8601 duration strings; PT120H is exactly 5 days."""
    raw = json.dumps({"@type": "gx:Duration", "@value": "PT120H"})
    parsed = self.graphson_reader.read_object(raw)
    assert isinstance(parsed, datetime.timedelta)
    assert datetime.timedelta(hours=120) == parsed
def test_uuid(self):
    """g:UUID deserializes into a real uuid.UUID preserving the string form."""
    raw = json.dumps({'@type': 'g:UUID', '@value': "41d2e28a-20a4-4ab0-b379-d810dede3786"})
    parsed = self.graphson_reader.read_object(raw)
    assert isinstance(parsed, uuid.UUID)
    assert '41d2e28a-20a4-4ab0-b379-d810dede3786' == str(parsed)
def test_metrics(self):
    """g:TraversalMetrics / g:Metrics unwrap to plain dicts (no wrapper class)."""
    prop = self.graphson_reader.read_object(
        json.dumps([{'@type': 'g:TraversalMetrics', '@value': {'dur': 1.468594, 'metrics': [
            {'@type': 'g:Metrics', '@value': {'dur': 1.380957, 'counts': {}, 'name': 'GraphStep(__.V())', 'annotations': {'percentDur': 94.03259171697556}, 'id': '4.0.0()'}},
            {'@type': 'g:Metrics', '@value': {'dur': 0.087637, 'counts': {}, 'name': 'ReferenceElementStep', 'annotations': {'percentDur': 5.967408283024444}, 'id': '3.0.0()'}}
        ]}}]))
    assert isinstance(prop, list)
    # only the '@value' payloads survive; the '@type' envelopes are stripped
    assert prop == [{'dur': 1.468594, 'metrics': [
        {'dur': 1.380957, 'counts': {}, 'name': 'GraphStep(__.V())', 'annotations': {'percentDur': 94.03259171697556}, 'id': '4.0.0()'},
        {'dur': 0.087637, 'counts': {}, 'name': 'ReferenceElementStep', 'annotations': {'percentDur': 5.967408283024444}, 'id': '3.0.0()'}
    ]}]
def test_bytebuffer(self):
    """gx:ByteBuffer carries base64 text and compares equal to the source bytes."""
    raw = json.dumps({"@type": "gx:ByteBuffer", "@value": "c29tZSBieXRlcyBmb3IgeW91"})
    bb = self.graphson_reader.read_object(raw)
    assert isinstance(bb, ByteBufferType)
    assert ByteBufferType("c29tZSBieXRlcyBmb3IgeW91", "utf8") == bb
def test_char(self):
    """gx:Char deserializes to the SingleChar string subtype."""
    parsed = self.graphson_reader.read_object(json.dumps({"@type": "gx:Char", "@value": "L"}))
    assert isinstance(parsed, SingleChar)
    # chr(76) == 'L'
    assert chr(76) == parsed
def test_null(self):
    """A bare JSON null maps straight to Python None."""
    assert self.graphson_reader.read_object(json.dumps(None)) is None
class TestGraphSONWriter(object):
    """Serialization tests for GraphSON 3.0: Python values -> typed GraphSON JSON."""

    # Shared, stateless writer/reader pair used by every test below.
    graphson_writer = GraphSONWriter()
    graphson_reader = GraphSONReader()

    def test_collections(self):
        """Lists, sets and maps become typed g:List/g:Set/g:Map envelopes."""
        assert {"@type": "g:List", "@value": [{"@type": "g:Int32", "@value": 1},
                                              {"@type": "g:Int32", "@value": 2},
                                              {"@type": "g:Int32", "@value": 3}]} == json.loads(
            self.graphson_writer.write_object([1, 2, 3]))
        # duplicate 3 collapses before serialization because the input is a set
        assert {"@type": "g:Set", "@value": [{"@type": "g:Int32", "@value": 1},
                                             {"@type": "g:Int32", "@value": 2},
                                             {"@type": "g:Int32", "@value": 3}]} == json.loads(
            self.graphson_writer.write_object(set([1, 2, 3, 3])))
        # g:Map serializes as a flat [key1, value1, key2, value2, ...] list
        assert {"@type": "g:Map",
                "@value": ['a', {"@type": "g:Int32", "@value": 1}]} == json.loads(
            self.graphson_writer.write_object({'a': 1}))

    def test_numbers(self):
        """Numeric typing: value magnitude/type picks Byte/Int32/Int64/BigInteger/Double/BigDecimal."""
        assert {"@type": "gx:Byte", "@value": 1} == json.loads(self.graphson_writer.write_object(int.__new__(SingleByte, 1)))
        assert {"@type": "g:Int64", "@value": 2} == json.loads(self.graphson_writer.write_object(long(2)))
        assert {"@type": "g:Int64", "@value": 851401972585122} == json.loads(self.graphson_writer.write_object(long(851401972585122)))
        assert {"@type": "g:Int64", "@value": -2} == json.loads(self.graphson_writer.write_object(long(-2)))
        assert {"@type": "g:Int64", "@value": -851401972585122} == json.loads(self.graphson_writer.write_object(long(-851401972585122)))
        assert {"@type": "g:Int32", "@value": 1} == json.loads(self.graphson_writer.write_object(1))
        assert {"@type": "g:Int32", "@value": -1} == json.loads(self.graphson_writer.write_object(-1))
        # plain ints beyond 32 bits are promoted to g:Int64
        assert {"@type": "g:Int64", "@value": 851401972585122} == json.loads(self.graphson_writer.write_object(851401972585122))
        assert {"@type": "g:Double", "@value": 3.2} == json.loads(self.graphson_writer.write_object(3.2))
        # non-finite doubles serialize as the strings NaN / Infinity / -Infinity
        assert {"@type": "g:Double", "@value": "NaN"} == json.loads(self.graphson_writer.write_object(float('nan')))
        assert {"@type": "g:Double", "@value": "Infinity"} == json.loads(self.graphson_writer.write_object(float('inf')))
        assert {"@type": "g:Double", "@value": "-Infinity"} == json.loads(self.graphson_writer.write_object(float('-inf')))
        assert {"@type": "gx:BigDecimal", "@value": "123456789987654321123456789987654321"} == json.loads(self.graphson_writer.write_object(Decimal('123456789987654321123456789987654321')))
        assert {"@type": "gx:BigDecimal", "@value": "NaN"} == json.loads(self.graphson_writer.write_object(Decimal('nan')))
        assert {"@type": "gx:BigDecimal", "@value": "Infinity"} == json.loads(self.graphson_writer.write_object(Decimal('inf')))
        assert {"@type": "gx:BigDecimal", "@value": "-Infinity"} == json.loads(self.graphson_writer.write_object(Decimal('-inf')))
        # ints beyond 64 bits fall through to gx:BigInteger with a string payload
        assert {"@type": "gx:BigInteger", "@value": "123456789987654321123456789987654321"} == json.loads(self.graphson_writer.write_object(long(123456789987654321123456789987654321)))
        assert {"@type": "gx:BigInteger", "@value": "123456789987654321123456789987654321"} == json.loads(self.graphson_writer.write_object(123456789987654321123456789987654321))
        # booleans pass through as bare JSON, no type envelope
        assert """true""" == self.graphson_writer.write_object(True)

    def test_enum(self):
        """Traversal enums serialize with their Gremlin (camelCase) token names."""
        assert {"@type": "g:Merge", "@value": "onMatch"} == json.loads(self.graphson_writer.write_object(Merge.on_match))
        assert {"@type": "g:Order", "@value": "shuffle"} == json.loads(self.graphson_writer.write_object(Order.shuffle))
        assert {"@type": "g:Barrier", "@value": "normSack"} == json.loads(self.graphson_writer.write_object(Barrier.norm_sack))
        assert {"@type": "g:Operator", "@value": "sum"} == json.loads(self.graphson_writer.write_object(Operator.sum_))
        assert {"@type": "g:Operator", "@value": "sumLong"} == json.loads(self.graphson_writer.write_object(Operator.sum_long))
        assert {"@type": "g:Direction", "@value": "OUT"} == json.loads(self.graphson_writer.write_object(Direction.OUT))
        # from_ serializes identically to Direction.OUT
        assert {"@type": "g:Direction", "@value": "OUT"} == json.loads(self.graphson_writer.write_object(from_))

    def test_P(self):
        """Predicates, including and/or composition and the within() variants."""
        result = {'@type': 'g:P',
                  '@value': {
                      'predicate': 'and',
                      'value': [{
                          '@type': 'g:P',
                          '@value': {
                              'predicate': 'or',
                              'value': [{
                                  '@type': 'g:P',
                                  '@value': {'predicate': 'lt', 'value': 'b'}
                              },
                                  {'@type': 'g:P', '@value': {'predicate': 'gt', 'value': 'c'}}
                              ]
                          }
                      },
                          {'@type': 'g:P', '@value': {'predicate': 'neq', 'value': 'd'}}]}}
        assert result == json.loads(
            self.graphson_writer.write_object(P.lt("b").or_(P.gt("c")).and_(P.neq("d"))))
        # within() always carries a g:List whether given a list, a set, or varargs
        result = {'@type': 'g:P', '@value': {'predicate': 'within', 'value': {'@type': 'g:List', '@value': [
            {"@type": "g:Int32", "@value": 1}, {"@type": "g:Int32", "@value": 2}]}}}
        assert result == json.loads(self.graphson_writer.write_object(P.within([1, 2])))
        assert result == json.loads(self.graphson_writer.write_object(P.within({1, 2})))
        assert result == json.loads(self.graphson_writer.write_object(P.within(1, 2)))
        result = {'@type': 'g:P', '@value': {'predicate': 'within', 'value': {'@type': 'g:List', '@value': [
            {"@type": "g:Int32", "@value": 1}]}}}
        assert result == json.loads(self.graphson_writer.write_object(P.within([1])))
        assert result == json.loads(self.graphson_writer.write_object(P.within({1})))
        assert result == json.loads(self.graphson_writer.write_object(P.within(1)))

    def test_strategies(self):
        """Traversal strategies serialize by class name, with optional config."""
        # we have a proxy model for now given that we don't want to have to have g:XXX all registered on the
        # Gremlin traversal machine (yet)
        assert {"@type": "g:SubgraphStrategy", "@value": {}} == json.loads(
            self.graphson_writer.write_object(SubgraphStrategy))
        assert {"@type": "g:SubgraphStrategy", "@value": {
            "vertices": {"@type": "g:Bytecode", "@value": {"step": [["has", "name", "marko"]]}}}} == json.loads(
            self.graphson_writer.write_object(SubgraphStrategy(vertices=__.has("name", "marko"))))

    def test_graph(self):
        """Graph elements serialize to reference-style GraphSON and round-trip."""
        # TODO: this assert is not compatible with python 3 and now that we test with both 2 and 3 it fails
        assert {"@type": "g:Vertex",
                "@value": {"id": {"@type": "g:Int64", "@value": 12}, "label": "person"}} == json.loads(
            self.graphson_writer.write_object(Vertex(long(12), "person")))
        assert {"@type": "g:Edge", "@value": {"id": {"@type": "g:Int32", "@value": 7},
                                              "outV": {"@type": "g:Int32", "@value": 0},
                                              "outVLabel": "person",
                                              "label": "knows",
                                              "inV": {"@type": "g:Int32", "@value": 1},
                                              "inVLabel": "dog"}} == json.loads(
            self.graphson_writer.write_object(Edge(7, Vertex(0, "person"), "knows", Vertex(1, "dog"))))
        assert {"@type": "g:VertexProperty", "@value": {"id": "blah", "label": "keyA", "value": True,
                                                        "vertex": "stephen"}} == json.loads(
            self.graphson_writer.write_object(VertexProperty("blah", "keyA", True, Vertex("stephen"))))
        assert {"@type": "g:Property",
                "@value": {"key": "name", "value": "marko", "element": {"@type": "g:VertexProperty",
                                                                       "@value": {
                                                                           "vertex": "vertexId",
                                                                           "id": {"@type": "g:Int32", "@value": 1234},
                                                                           "label": "aKey"}}}} == json.loads(
            self.graphson_writer.write_object(
                Property("name", "marko", VertexProperty(1234, "aKey", 21345, Vertex("vertexId")))))
        # round-trips: write then read back and compare field by field
        vertex = self.graphson_reader.read_object(self.graphson_writer.write_object(Vertex(1, "person")))
        assert 1 == vertex.id
        assert "person" == vertex.label
        edge = self.graphson_reader.read_object(
            self.graphson_writer.write_object(Edge(3, Vertex(1, "person"), "knows", Vertex(2, "dog"))))
        assert "knows" == edge.label
        assert 3 == edge.id
        assert 1 == edge.outV.id
        assert 2 == edge.inV.id
        vertex_property = self.graphson_reader.read_object(
            self.graphson_writer.write_object(VertexProperty(1, "age", 32, Vertex(1))))
        assert 1 == vertex_property.id
        assert "age" == vertex_property.key
        assert 32 == vertex_property.value
        property = self.graphson_reader.read_object(self.graphson_writer.write_object(Property("age", 32.2, Edge(1, Vertex(2), "knows", Vertex(3)))))
        assert "age" == property.key
        assert 32.2 == property.value

    def test_custom_mapping(self):
        """User serializer maps extend/override defaults without mutating the module-level dict."""
        # extended mapping
        class X(object):
            pass
        serdes = Mock()
        writer = GraphSONWriter(serializer_map={X: serdes})
        assert X in writer.serializers
        # base dicts are not modified
        assert X not in gremlin_python.structure.io.graphsonV3d0._serializers
        obj = X()
        d = writer.to_dict(obj)
        serdes.dictify.assert_called_once_with(obj, writer)
        assert d is serdes.dictify()
        # overridden mapping
        serdes = Mock()
        writer = GraphSONWriter(serializer_map={int: serdes})
        assert gremlin_python.structure.io.graphsonV3d0._serializers[int] is not writer.serializers[int]
        value = 3
        d = writer.to_dict(value)
        serdes.dictify.assert_called_once_with(value, writer)
        assert d is serdes.dictify()

    def test_write_long(self):
        """Plain int maps to g:Int32; the long() wrapper forces g:Int64."""
        mapping = self.graphson_writer.to_dict(1)
        assert mapping['@type'] == 'g:Int32'
        assert mapping['@value'] == 1
        mapping = self.graphson_writer.to_dict(long(1))
        assert mapping['@type'] == 'g:Int64'
        assert mapping['@value'] == 1

    def test_datetime(self):
        """datetime writes as g:Date in epoch milliseconds."""
        expected = json.dumps({"@type": "g:Date", "@value": 1481750076295}, separators=(',', ':'))
        dt = datetime.datetime.utcfromtimestamp(1481750076295 / 1000.0)
        output = self.graphson_writer.write_object(dt)
        assert expected == output

    def test_timestamp(self):
        """timestamp writes as g:Timestamp in epoch milliseconds."""
        expected = json.dumps({"@type": "g:Timestamp", "@value": 1481750076295}, separators=(',', ':'))
        ts = timestamp(1481750076295 / 1000.0)
        output = self.graphson_writer.write_object(ts)
        assert expected == output

    def test_duration(self):
        """timedelta writes as an ISO-8601 gx:Duration (120h == P5D)."""
        expected = json.dumps({"@type": "gx:Duration", "@value": "P5D"}, separators=(',', ':'))
        d = datetime.timedelta(hours=120)
        output = self.graphson_writer.write_object(d)
        assert expected == output

    def test_uuid(self):
        """uuid.UUID writes as g:UUID with its canonical string form."""
        expected = json.dumps({'@type': 'g:UUID', '@value': "41d2e28a-20a4-4ab0-b379-d810dede3786"}, separators=(',', ':'))
        prop = uuid.UUID("41d2e28a-20a4-4ab0-b379-d810dede3786")
        output = self.graphson_writer.write_object(prop)
        assert expected == output

    def test_bytebuffer(self):
        """ByteBufferType writes as gx:ByteBuffer with a base64 payload."""
        expected = json.dumps({'@type': 'gx:ByteBuffer', '@value': 'c29tZSBieXRlcyBmb3IgeW91'}, separators=(',', ':'))
        bb = ByteBufferType("c29tZSBieXRlcyBmb3IgeW91", "utf8")
        output = self.graphson_writer.write_object(bb)
        assert expected == output

    def test_char(self):
        """SingleChar writes as gx:Char; chr(76) == 'L'."""
        expected = json.dumps({'@type': 'gx:Char', '@value': 'L'}, separators=(',', ':'))
        c = str.__new__(SingleChar, chr(76))
        output = self.graphson_writer.write_object(c)
        assert expected == output
|
{
"content_hash": "2eb7ebbc6e29f0b7dc96b7e2ecffa9f3",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 917,
"avg_line_length": 50.9,
"alnum_prop": 0.5421655484301442,
"repo_name": "apache/tinkerpop",
"id": "c80fa4ab681632e5978a134b875055c734940e1b",
"size": "27767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/python/tests/structure/io/test_graphsonV3d0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "59230"
},
{
"name": "Awk",
"bytes": "2335"
},
{
"name": "Batchfile",
"bytes": "3976"
},
{
"name": "C#",
"bytes": "1745461"
},
{
"name": "Dockerfile",
"bytes": "8353"
},
{
"name": "Gherkin",
"bytes": "606034"
},
{
"name": "Go",
"bytes": "776105"
},
{
"name": "Groovy",
"bytes": "337658"
},
{
"name": "Java",
"bytes": "11495240"
},
{
"name": "JavaScript",
"bytes": "596328"
},
{
"name": "Python",
"bytes": "685711"
},
{
"name": "Shell",
"bytes": "71980"
},
{
"name": "TypeScript",
"bytes": "154521"
},
{
"name": "XSLT",
"bytes": "2205"
}
],
"symlink_target": ""
}
|
import os

# Absolute path of the directory containing this settings module.
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load it
# from the environment before any production deployment.
SECRET_KEY = 'wp88fi$)5dbve^!(@-k2%5tqep+16uoz078h*sttghy2%uid7c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost','127.0.0.1']

# Recipients of error notifications (used by the mail_admins LOGGING handler).
ADMINS = (
    ('Admin User', 'admin@domain.com'),
)
MANAGERS = ADMINS

# PostGIS-backed database; GeoDjango requires a spatial database backend.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'geoq', # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'geoq',
        'PASSWORD': 'geoq',
        'HOST': 'localhost', # Empty for local through domain sockets or '127.0.0.1' for local through TCP.
        'PORT': '5432', # Set to empty string for default.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/usr/local/src/geoq'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/images/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_URL_FOLDER = ''  # Can be set to something like 'geoq-test/' if the app is not run at root level
# NOTE(review): developer-specific absolute path; override per deployment.
STATIC_ROOT = '{0}{1}'.format('/Users/srjones/www/static/', STATIC_URL_FOLDER)
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '{0}{1}'.format('/static/', STATIC_URL_FOLDER)
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'static'),
    # TODO: Should we add this static location back in?
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    #'django.contrib.staticfiles.finders.DefaultStorageFinder',
    'compressor.finders.CompressorFinder',
)
#Change back to True after finishing development to verify it still works
COMPRESS_ENABLED = False
# Compile LESS to CSS via the lessc binary (must be on PATH).
COMPRESS_PRECOMPILERS = (
    ('text/less', 'lessc {infile} {outfile}'),
)
# Stylesheets for the Leaflet draw plugin, referenced by LEAFLET_CONFIG.
LEAFLET_CSS = [
    STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw.css',
    os.path.join(STATIC_ROOT, '/static/leaflet/leaflet-draw/leaflet.draw.css')
]
# django-leaflet configuration: default map behaviour plus the Leaflet plugin
# JS/CSS bundles loaded with every map widget. 'repo' entries are informational.
LEAFLET_CONFIG = {
    'RESET_VIEW': False,
    'MAX_ZOOM': 18,
    'PLUGINS': {
        'proj4js': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/proj4-src.js', STATIC_URL + 'leaflet/proj4defs.js', STATIC_URL + 'leaflet/proj4leaflet.js'],
            'repo': 'https://github.com/proj4js'
        },
        'draw': {
            'css': LEAFLET_CSS,
            'js': STATIC_URL + 'leaflet/leaflet-draw/leaflet.draw-src.js',
            'repo': 'https://github.com/Leaflet/Leaflet.draw'
        },
        'esri': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/esri-leaflet-src.js'],
            'repo': 'https://github.com/Esri/esri-leaflet'
        },
        'esriCluster': {
            'css': [STATIC_URL + 'leaflet/MarkerCluster.css'],
            'js': [STATIC_URL + 'leaflet/ClusteredFeatureLayer.js', STATIC_URL + 'leaflet/leaflet.markercluster.js'],
            'repo': 'https://github.com/Esri/esri-leaflet'
        },
        'MakiMarkers': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/Leaflet.MakiMarkers.js'],
            'repo': 'https://github.com/jseppi/Leaflet.MakiMarkers'
        },
        'MediaQ': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/Leaflet.MediaQ.js'],
            'repo': 'https://github.com/stephenrjones/Leaflet.MediaQ'
        },
        'AutoResizeSVG': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/marker-resize-svg.js'],
            'repo': 'https://github.com/john-kilgo/L.Marker.AutoResizeSVG'
        },
        'NWSIcons': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/nws-leaflet.js'],
            'repo': 'https://github.com/john-kilgo/L.Marker.NWS'
        },
        'OpenSensorHub': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/Leaflet.SOS.min.js'],
            'repo': 'https://github.com/opensensorhub/osh-js'
        },
        'WCS': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/NonTiledLayer.WCS.js'],
            'repo': 'https://github.com/stuartmatthews/Leaflet.NonTiledLayer.WCS'
        },
        'WMSHeader': {
            'css': [],
            'js': [STATIC_URL + 'leaflet/leaflet-plugins/layer/tile/leaflet-wms-header.js'],
            # Fixed: URL previously had a doubled scheme ('https://https://...').
            'repo': 'https://github.com/ticinum-aerospace/leaflet-wms-header'
        }
    }
}
# List of callables that know how to import templates from various sources
# Location of template files
# NOTE(review): the commented-out TEMPLATE_* settings below are the pre-1.8
# Django equivalents of the TEMPLATES dict that follows; kept for reference.
#TEMPLATE_DIRS = (
#    os.path.join(SITE_ROOT, 'templates'),
#    SITE_ROOT,
#)
#.
#TEMPLATE_LOADERS = (
#    'django.template.loaders.filesystem.Loader',
#    'django.template.loaders.app_directories.Loader',
#    #'django.template.loaders.eggs.Loader',
#)
#
#TEMPLATE_CONTEXT_PROCESSORS = (
##    'django.contrib.auth.context_processors.auth',
#    'django.core.context_processors.request',
#    'django.core.context_processors.static',
#    'django.contrib.messages.context_processors.messages',
#    'geoq.core.contextprocessors.app_settings',
#)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [ os.path.join(SITE_ROOT, 'templates'),
                  SITE_ROOT ],
        'OPTIONS': {
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
                #'django.template.loaders.eggs.Loader'
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # project-level processor injecting app settings into templates
                'geoq.core.contextprocessors.app_settings'
            ]
        }
    }
]
# Application definition
INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'django.contrib.gis',
    'django.contrib.humanize',
    # Third-party apps
    'django_select2',
    'reversion',
    'easy_thumbnails',
    'userena',
    'guardian',
    'compressor',
    'geoexplorer',
    'bootstrap_toolkit',
    'leaflet',
    'jsonfield',
    'crispy_forms',
    'django_extensions',
    'debug_toolbar',
    #'httpproxy',
    'bootstrap3',
    #'feedgen',
    # GeoQ project apps (AppConfig paths)
    'geoq.feedback.apps.FeedbackConfig',
    'geoq.accounts.apps.AccountsConfig',
    'geoq.locations.apps.LocationsConfig',
    'geoq.mage.apps.MageConfig',
    'geoq.mgrs.apps.MgrsConfig',
    'geoq.proxy.apps.ProxyConfig',
    'geoq.training.apps.TrainingConfig',
    'geoq.core.apps.CoreConfig',
    'geoq.maps.apps.MapsConfig',
    'geoq.workflow.apps.WorkflowConfig',
    'geoq.ontology.apps.OntologyConfig'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware'
]
# removed middleware
#    'geoq.core.middleware.UserPermsMiddleware',
#    'geoq.core.middleware.Http403Middleware',
#    'geoq.core.middleware.UpdateLastActivityMiddleware',
# auth setup
AUTHENTICATION_BACKENDS = (
    'userena.backends.UserenaAuthenticationBackend',
    'guardian.backends.ObjectPermissionBackend',
    'django.contrib.auth.backends.ModelBackend', # default
)
SITE_ID = 1
ANONYMOUS_USER_NAME = "ANONYMOUS_USER_NAME"
AUTH_PROFILE_MODULE = 'accounts.UserProfile'
LOGIN_REDIRECT_URL = '/accounts/%(username)s/' #'/geoq/' #
LOGIN_URL = '/accounts/signin/'
LOGOUT_URL = '/geoq'
# Outgoing email is discarded in development; use a real backend in production.
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# django-userena account settings
USERENA_ACTIVATION_DAYS = 3
USERENA_MUGSHOT_DEFAULT = 'identicon'
USERENA_HIDE_EMAIL = True
USERENA_HTML_EMAIL = False
ROOT_URLCONF = 'geoq.urls'
WSGI_APPLICATION = 'geoq.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
# NOTE(review): LOGGING_CONFIG = None tells Django to skip automatic logging
# setup, so the LOGGING dict below is defined but never applied unless wired
# up manually — confirm this is intentional.
LOGGING_CONFIG = None
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
# take out later
REST_FRAMEWORK = {
    'UNAUTHENTICATED_USER': None,
}
# Set default login location
#LOGIN_REDIRECT_URL = '/'
# Gamification variables
GAMIFICATION_SERVER = ''
GAMIFICATION_PROJECT = 'geoq'
#GeoServer
GEOSERVER_WFS_JOB_LAYER = None
# For Django Debug Toolbar - need to set this to resolve some errors
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# Able to vary what we call workcells
GEOQ_LEXICON = {
    'WORKCELL_NAME': 'Target'
}
# Bootstrap variables to work with django-bootstrap-toolkit
# Comment these out to use cdnjs.cloudflare.com versions of Bootstrap
BOOTSTRAP_BASE_URL = STATIC_URL
BOOTSTRAP_JS_BASE_URL = BOOTSTRAP_BASE_URL + 'bootstrap/js/'
BOOTSTRAP_JS_URL = BOOTSTRAP_JS_BASE_URL + 'bootstrap.min.js'
BOOTSTRAP_CSS_BASE_URL = BOOTSTRAP_BASE_URL + 'bootstrap/css/'
BOOTSTRAP_CSS_URL = BOOTSTRAP_CSS_BASE_URL + 'bootstrap.css'
#Time to check if users online (in milliseconds)
ONLINE_TIME = 10 * 60 * 1000
########## Select2 Settings
# Two in-memory caches: 'select2' keeps widget lookups out of the default cache.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'default-cache',
    },
    'select2': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'select2-cache',
    }
}
SELECT2_CACHE_BACKEND = 'select2'
########## MAGE Settings
# NOTE(review): placeholder MAGE credentials — load real values from the
# environment or local settings, never from source control.
MAGE_USERNAME = 'username'
MAGE_UID = '12345'
MAGE_PASSWORD = 'password'
MAGE_URL = 'https://mage.server.com/api'
########## End MAGE Settings
########## DEBUG TOOLBAR CONFIGURATION
# NOTE(review): duplicate assignment — DEBUG_TOOLBAR_PATCH_SETTINGS is already
# set to False earlier in this file; harmless but one copy should be removed.
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
]
INTERNAL_IPS = ['127.0.0.1']
########## COMPRESSION CONFIGURATION
# COMPRESS_ENABLED = True
# Default : the opposite of DEBUG
# see https://github.com/jezdez/django_compressor/issues/226
COMPRESS_OUTPUT_DIR = 'STATIC_CACHE'
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
COMPRESS_OFFLINE = False
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_STORAGE
COMPRESS_STORAGE = 'compressor.storage.CompressorFileStorage'
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_CSS_FILTERS
COMPRESS_CSS_FILTERS = [
    'compressor.filters.cssmin.CSSMinFilter',
]
# See: http://django_compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_JS_FILTERS
COMPRESS_JS_FILTERS = [
    'compressor.filters.jsmin.JSMinFilter',
]
COMPRESS_DEBUG_TOGGLE = 'nocompress'
COMPRESS_JS_COMPRESSOR = 'compressor.js.JsCompressor'
COMPRESS_CSS_COMPRESSOR = 'compressor.css.CssCompressor'
COMPRESS_PARSER = 'compressor.parser.AutoSelectParser'
COMPRESS_URL = STATIC_URL
COMPRESS_ROOT = STATIC_ROOT
COMPRESS_VERBOSE = False
COMPRESS_CACHEABLE_PRECOMPILERS = (
    'text/coffeescript',
)
########## END COMPRESSION CONFIGURATION
########## BOOTSTRAP 3 CONFIGURATION
# Default settings
BOOTSTRAP3 = {
    # The URL to the jQuery JavaScript file
    'jquery_url': STATIC_URL + 'jquery/jquery.min.js',
    # The Bootstrap base URL
    'base_url': STATIC_URL + 'bootstrap/',
    # The complete URL to the Bootstrap CSS file (None means derive it from base_url)
    'css_url': STATIC_URL + 'bootstrap/css/bootstrap.css',
    # The complete URL to the Bootstrap CSS file (None means no theme)
    'theme_url': STATIC_URL + 'bootstrap/css/bootstrap-theme.css',
    # The complete URL to the Bootstrap JavaScript file (None means derive it from base_url)
    'javascript_url': STATIC_URL + 'bootstrap/js/bootstrap.min.js',
    # Put JavaScript in the HEAD section of the HTML document (only relevant if you use bootstrap3.html)
    'javascript_in_head': False,
    # Include jQuery with Bootstrap JavaScript (affects django-bootstrap3 template tags)
    'include_jquery': False,
    # Label class to use in horizontal forms
    'horizontal_label_class': 'col-md-3',
    # Field class to use in horizontal forms
    'horizontal_field_class': 'col-md-9',
    # Set HTML required attribute on required fields, for Django <= 1.8 only
    'set_required': True,
    # Set HTML disabled attribute on disabled fields, for Django <= 1.8 only
    'set_disabled': False,
    # Set placeholder attributes to label if no placeholder is provided
    'set_placeholder': True,
    # Class to indicate required (better to set this in your Django form)
    'required_css_class': '',
    # Class to indicate error (better to set this in your Django form)
    'error_css_class': 'has-error',
    # Class to indicate success, meaning the field has valid input (better to set this in your Django form)
    'success_css_class': 'has-success',
    # Renderers (only set these if you have studied the source and understand the inner workings)
    'formset_renderers':{
        'default': 'bootstrap3.renderers.FormsetRenderer',
    },
    'form_renderers': {
        'default': 'bootstrap3.renderers.FormRenderer',
    },
    'field_renderers': {
        'default': 'bootstrap3.renderers.FieldRenderer',
        'inline': 'bootstrap3.renderers.InlineFieldRenderer',
    },
}
########## END BOOTSTRAP 3 CONFIGURATION
# Special case
IMAGE_TRACKING = False
# For KML uploads
KML_REPOSITORY_ROOT = 'kml/'
# initialize apps
#django.setup()
# Override production settings with local settings if they exist
# NOTE(review): this commented-out block uses Python-2-only
# `except ImportError, e` syntax; change to `except ImportError:` if re-enabled.
#try:
#    from local_settings import *
#
#except ImportError, e:
#    # local_settings does not exist
#    pass
|
{
"content_hash": "c4c78a287c8ddac86584ec8ae6754162",
"timestamp": "",
"source": "github",
"line_count": 543,
"max_line_length": 133,
"avg_line_length": 31.83977900552486,
"alnum_prop": 0.6662039447047255,
"repo_name": "ngageoint/geoq",
"id": "5ea319112cfa9a3f5127bc3f141f5c16eb5a8799",
"size": "17520",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geoq/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "167032"
},
{
"name": "Dockerfile",
"bytes": "834"
},
{
"name": "HTML",
"bytes": "311431"
},
{
"name": "JavaScript",
"bytes": "6919093"
},
{
"name": "Less",
"bytes": "16412"
},
{
"name": "Python",
"bytes": "575801"
},
{
"name": "Shell",
"bytes": "2484"
}
],
"symlink_target": ""
}
|
from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp6
from pysnmp.entity.rfc3413 import cmdgen

# Create SNMP engine instance
snmpEngine = engine.SnmpEngine()

#
# SNMPv3/USM setup
#

# user: usr-md5-none, auth: MD5, priv NONE
config.addV3User(
    snmpEngine, 'usr-md5-none',
    config.usmHMACMD5AuthProtocol, 'authkey1'
)
# 'my-creds' names this user + authNoPriv security level for use in requests
config.addTargetParams(snmpEngine, 'my-creds', 'usr-md5-none', 'authNoPriv')

#
# Setup transport endpoint and bind it with security settings yielding
# a target name
#

# UDP/IPv6
config.addTransport(
    snmpEngine,
    udp6.domainName,
    udp6.Udp6SocketTransport().openClientMode()
)
# Target 'my-router': SNMP agent at [::1]:161 reached with 'my-creds'
config.addTargetAddr(
    snmpEngine, 'my-router',
    udp6.domainName, ('::1', 161),
    'my-creds'
)
# Error/response receiver
def cbFun(snmpEngine, sendRequestHandle, errorIndication,
          errorStatus, errorIndex, varBindTable, cbCtx):
    """Response callback for the GETNEXT walk.

    Prints each returned OID/value pair. Returns True to ask the dispatcher
    to keep walking, or None to stop (on transport error or an SNMP
    error-status in the response).
    """
    # Engine/transport-level failure: report it and stop the walk.
    if errorIndication:
        print(errorIndication)
        return None

    # Agent reported an SNMP error-status: show it with the offending OID.
    if errorStatus:
        culprit = errorIndex and varBindTable[-1][int(errorIndex) - 1][0] or '?'
        print('%s at %s' % (errorStatus.prettyPrint(), culprit))
        return None  # stop on error

    # Dump every OID/value pair from the returned table.
    for varBindRow in varBindTable:
        for oid, val in varBindRow:
            print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))

    return True  # signal dispatcher to continue
# Prepare initial request to be sent: walk two subtrees via GETNEXT,
# invoking cbFun for each response until it stops returning True.
cmdgen.NextCommandGenerator().sendVarBinds(
    snmpEngine,
    'my-router',
    None, '',  # contextEngineId, contextName
    [ ((1,3,6,1,2,1,1), None),
      ((1,3,6,1,4,1,1), None) ],
    cbFun
)
# Run I/O dispatcher which would send pending queries and process responses
snmpEngine.transportDispatcher.runDispatcher()
|
{
"content_hash": "0b8e1cd1ea3dedb7259c6070c7ad6961",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 26.615384615384617,
"alnum_prop": 0.6757225433526012,
"repo_name": "ww9rivers/pysnmp",
"id": "2a9e93a12015c03c4d1249829d6c00f5a0cd45a9",
"size": "2196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/v3arch/manager/cmdgen/getnext-v3-over-ipv6.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1004861"
}
],
"symlink_target": ""
}
|
"""A library of tasks.
This interface is intended to implement a wide variety of navigation
tasks. See go/navigation_tasks for a list.
"""
import abc
import collections
import math
import threading
import networkx as nx
import numpy as np
import tensorflow as tf
#from pyglib import logging
#import gin
from envs import task_env
from envs import util as envs_util
# Utility functions.
def _pad_or_clip_array(np_arr, arr_len, is_front_clip=True, output_mask=False):
"""Make np_arr array to have length arr_len.
If the array is shorter than arr_len, then it is padded from the front with
zeros. If it is longer, then it is clipped either from the back or from the
front. Only the first dimension is modified.
Args:
np_arr: numpy array.
arr_len: integer scalar.
is_front_clip: a boolean. If true then clipping is done in the front,
otherwise in the back.
output_mask: If True, outputs a numpy array of rank 1 which represents
a mask of which values have been added (0 - added, 1 - actual output).
Returns:
A numpy array and the size of padding (as a python int32). This size is
negative is the array is clipped.
"""
shape = list(np_arr.shape)
pad_size = arr_len - shape[0]
padded_or_clipped = None
if pad_size < 0:
if is_front_clip:
padded_or_clipped = np_arr[-pad_size:, :]
else:
padded_or_clipped = np_arr[:arr_len, :]
elif pad_size > 0:
padding = np.zeros([pad_size] + shape[1:], dtype=np_arr.dtype)
padded_or_clipped = np.concatenate([np_arr, padding], axis=0)
else:
padded_or_clipped = np_arr
if output_mask:
mask = np.ones((arr_len,), dtype=np.int)
if pad_size > 0:
mask[-pad_size:] = 0
return padded_or_clipped, pad_size, mask
else:
return padded_or_clipped, pad_size
def classification_loss(truth, predicted, weights=None, is_one_hot=True):
  """A cross entropy loss.

  Computes the mean of cross entropy losses for all pairs of true labels and
  predictions. It wraps around a tf implementation of the cross entropy loss
  with additional reformating of the inputs. If the truth and predicted are
  n-rank Tensors with n > 2, then these are reshaped to 2-rank Tensors. It
  allows for truth to be specified as one hot vector or class indices. Finally,
  a weight can be specified for each element in truth and predicted.

  Args:
    truth: an n-rank or (n-1)-rank Tensor containing labels. If is_one_hot is
      True, then n-rank Tensor is expected, otherwise (n-1) rank one.
    predicted: an n-rank float Tensor containing prediction probabilities.
    weights: an (n-1)-rank float Tensor of weights
    is_one_hot: a boolean.

  Returns:
    A TF float scalar.
  """
  num_labels = predicted.get_shape().as_list()[-1]
  # Canonicalize the labels into a rank-2 [batch, num_labels] one-hot tensor.
  if is_one_hot:
    truth = tf.reshape(truth, [-1, num_labels])
  else:
    flat_labels = tf.reshape(truth, [-1])
    truth = tf.one_hot(
        flat_labels, depth=num_labels, on_value=1.0, off_value=0.0, axis=-1)
  predicted = tf.reshape(predicted, [-1, num_labels])
  losses = tf.nn.softmax_cross_entropy_with_logits(
      labels=truth, logits=predicted)
  if weights is not None:
    # Keep only the loss entries whose weight is non-zero.
    keep = tf.cast(tf.reshape(weights, [-1]), dtype=tf.bool)
    losses = tf.boolean_mask(losses, keep)
  return tf.reduce_mean(losses)
class UnrolledTaskIOConfig(object):
  """Configuration of task inputs and outputs.

  A task can have multiple inputs, which define the context, and a task query
  which defines what is to be executed in this context. The desired execution
  is encoded in an output. The config defines the shapes of the inputs, the
  query and the outputs.
  """

  def __init__(self, inputs, output, query=None):
    """Constructs a Task input/output config.

    Args:
      inputs: an OrderedDict of tuples keyed by modality type. Each tuple
        represents the configuration of an input, with first element being the
        type (a tf.DType) and the second element the shape (a list).
      output: a tuple representing the configuration of the output.
      query: a tuple representing the configuration of the query. If no query,
        then None.

    Raises:
      ValueError: if any configuration tuple is malformed.
    """
    # A configuration of a single input, output or query. Consists of the type,
    # which can be one of the three specified above, and a shape. The shape must
    # be consistent with the type, e.g. if type == 'image', then shape is a 3
    # valued list.
    io_config = collections.namedtuple('IOConfig', ['type', 'shape'])

    def assert_config(config):
      # Validates that `config` is a (tf.DType, list) pair.
      if not isinstance(config, tuple):
        raise ValueError('config must be a tuple. Received {}'.format(
            type(config)))
      if len(config) != 2:
        raise ValueError('config must have 2 elements, has %d' % len(config))
      if not isinstance(config[0], tf.DType):
        raise ValueError('First element of config must be a tf.DType.')
      if not isinstance(config[1], list):
        raise ValueError('Second element of config must be a list.')

    assert isinstance(inputs, collections.OrderedDict)
    for modality_type in inputs:
      assert_config(inputs[modality_type])
    # items() instead of the Python-2-only iteritems() keeps this code working
    # on both Python 2 and 3.
    self._inputs = collections.OrderedDict(
        [(k, io_config(*value)) for k, value in inputs.items()])
    if query is not None:
      assert_config(query)
      self._query = io_config(*query)
    else:
      self._query = None
    assert_config(output)
    self._output = io_config(*output)

  @property
  def inputs(self):
    return self._inputs

  @property
  def output(self):
    return self._output

  @property
  def query(self):
    return self._query
class UnrolledTask(object):
  """An interface for a Task which can be unrolled during training.

  Each example is called episode and consists of inputs and target output, where
  the output can be considered as desired unrolled sequence of actions for the
  inputs. For the specified tasks, these action sequences are to be
  unambiguously definable.
  """
  __metaclass__ = abc.ABCMeta

  def __init__(self, config):
    """Initializes the task.

    Args:
      config: an UnrolledTaskIOConfig specifying input/query/output shapes.
    """
    assert isinstance(config, UnrolledTaskIOConfig)
    self._config = config
    # A dict of bookkeeping variables.
    self.info = {}
    # Tensorflow input is multithreaded and this lock is needed to prevent
    # race condition in the environment. Without the lock, non-thread safe
    # environments crash.
    self._lock = threading.Lock()

  @property
  def config(self):
    return self._config

  @abc.abstractmethod
  def episode(self):
    """Returns data needed to train and test a single episode.

    Each episode consists of inputs, which define the context of the task, a
    query which defines the task, and a target output, which defines a
    sequence of actions to be executed for this query. This sequence should not
    require feedback, i.e. can be predicted purely from input and query.

    Returns:
      inputs, query, output, where inputs is a list of numpy arrays and query
      and output are numpy arrays. These arrays must be of shape and type as
      specified in the task configuration.
    """
    pass

  def reset(self, observation):
    """Called after the environment is reset."""
    pass

  def episode_batch(self, batch_size):
    """Returns a batch of episodes.

    Args:
      batch_size: size of batch.

    Returns:
      (inputs, query, output, masks) where inputs is list of numpy arrays and
      query, output, and mask are numpy arrays. These arrays must be of shape
      and type as specified in the task configuration with one additional
      preceding dimension corresponding to the batch.

    Raises:
      ValueError: if self.episode() returns illegal values.
    """
    batched_inputs = collections.OrderedDict(
        [[mtype, []] for mtype in self.config.inputs])
    batched_queries = []
    batched_outputs = []
    batched_masks = []
    for _ in range(int(batch_size)):
      with self._lock:
        # The episode function needs to be thread-safe. Since the current
        # implementation for the envs are not thread safe we need to have lock
        # the operations here.
        inputs, query, outputs = self.episode()
      if not isinstance(outputs, tuple):
        raise ValueError('Outputs return value must be tuple.')
      if len(outputs) != 2:
        raise ValueError('Output tuple must be of size 2.')
      if inputs is not None:
        for modality_type in batched_inputs:
          batched_inputs[modality_type].append(
              np.expand_dims(inputs[modality_type], axis=0))
      if query is not None:
        batched_queries.append(np.expand_dims(query, axis=0))
      batched_outputs.append(np.expand_dims(outputs[0], axis=0))
      if outputs[1] is not None:
        batched_masks.append(np.expand_dims(outputs[1], axis=0))
    # items() instead of the Python-2-only iteritems() keeps this code working
    # on both Python 2 and 3.
    batched_inputs = {
        k: np.concatenate(i, axis=0) for k, i in batched_inputs.items()
    }
    if batched_queries:
      batched_queries = np.concatenate(batched_queries, axis=0)
    batched_outputs = np.concatenate(batched_outputs, axis=0)
    if batched_masks:
      batched_masks = np.concatenate(batched_masks, axis=0).astype(np.float32)
    else:
      # When the array is empty, the default np.dtype is float64 which causes
      # py_func to crash in the tests.
      batched_masks = np.array([], dtype=np.float32)
    # Preserve the modality ordering defined by the config.
    batched_inputs = [batched_inputs[k] for k in self._config.inputs]
    return batched_inputs, batched_queries, batched_outputs, batched_masks

  def tf_episode_batch(self, batch_size):
    """A batch of episodes as TF Tensors.

    Same as episode_batch with the difference that the return values are TF
    Tensors.

    Args:
      batch_size: a python float for the batch size.

    Returns:
      inputs, query, output, mask where inputs is a dictionary of tf.Tensor
      where the keys are the modality types specified in the config.inputs.
      query, output, and mask are TF Tensors. These tensors must
      be of shape and type as specified in the task configuration with one
      additional preceding dimension corresponding to the batch. Both mask and
      output have the same shape as output.
    """
    # Define TF outputs.
    touts = []
    shapes = []
    for _, i in self._config.inputs.items():
      touts.append(i.type)
      shapes.append(i.shape)
    if self._config.query is not None:
      touts.append(self._config.query.type)
      shapes.append(self._config.query.shape)
    # Shapes and types for batched_outputs.
    touts.append(self._config.output.type)
    shapes.append(self._config.output.shape)
    # Shapes and types for batched_masks.
    touts.append(self._config.output.type)
    shapes.append(self._config.output.shape[0:1])

    def episode_batch_func():
      # py_func callables cannot take tensors; batch_size is captured.
      if self.config.query is None:
        inp, _, output, masks = self.episode_batch(int(batch_size))
        return tuple(inp) + (output, masks)
      else:
        inp, query, output, masks = self.episode_batch(int(batch_size))
        return tuple(inp) + (query, output, masks)

    tf_episode_batch = tf.py_func(episode_batch_func, [], touts,
                                  stateful=True, name='taskdata')
    # py_func loses static shape information; restore it from the config.
    for episode, shape in zip(tf_episode_batch, shapes):
      episode.set_shape([batch_size] + shape)

    tf_episode_batch_dict = collections.OrderedDict([
        (mtype, episode)
        for mtype, episode in zip(self.config.inputs.keys(), tf_episode_batch)
    ])
    cur_index = len(self.config.inputs.keys())
    tf_query = None
    if self.config.query is not None:
      tf_query = tf_episode_batch[cur_index]
      cur_index += 1
    tf_outputs = tf_episode_batch[cur_index]
    tf_masks = tf_episode_batch[cur_index + 1]

    return tf_episode_batch_dict, tf_query, tf_outputs, tf_masks

  @abc.abstractmethod
  def target_loss(self, true_targets, targets, weights=None):
    """A loss for training a task model.

    This loss measures the discrepancy between the task outputs, the true and
    predicted ones.

    Args:
      true_targets: tf.Tensor of shape and type as defined in the task config
        containing the true outputs.
      targets: tf.Tensor of shape and type as defined in the task config
        containing the predicted outputs.
      weights: a bool tf.Tensor of shape as targets. Only true values are
        considered when formulating the loss.
    """
    pass

  def reward(self, obs, done, info):
    """Returns a reward.

    The tasks has to compute a reward based on the state of the environment. The
    reward computation, though, is task specific. The task is to use the
    environment interface, as defined in task_env.py, to compute the reward. If
    this interface does not expose enough information, it is to be updated.

    Args:
      obs: Observation from environment's step function.
      done: Done flag from environment's step function.
      info: Info dict from environment's step function.

    Returns:
      obs: Observation.
      reward: Floating point value.
      done: Done flag.
      info: Info dict.
    """
    # Default implementation does not do anything.
    return obs, 0.0, done, info
class RandomExplorationBasedTask(UnrolledTask):
  """A Task which starts with a random exploration of the environment."""

  def __init__(self,
               env,
               seed,
               add_query_noise=False,
               query_noise_var=0.0,
               *args,
               **kwargs):  # pylint: disable=keyword-arg-before-vararg
    """Initializes a Task using a random exploration runs.

    Args:
      env: an instance of type TaskEnv and gym.Env.
      seed: a random seed.
      add_query_noise: boolean, if True then whatever queries are generated,
        they are randomly perturbed. The semantics of the queries depends on the
        concrete task implementation.
      query_noise_var: float, the variance of Gaussian noise used for query
        perturbation. Used iff add_query_noise==True.
      *args: see super class.
      **kwargs: see super class.
    """
    super(RandomExplorationBasedTask, self).__init__(*args, **kwargs)
    assert isinstance(env, task_env.TaskEnv)
    self._env = env
    self._env.set_task(self)
    self._rng = np.random.RandomState(seed)
    self._add_query_noise = add_query_noise
    self._query_noise_var = query_noise_var

    # GoToStaticXTask can also take empty config but for the rest of the classes
    # the number of modality types is 1.
    if len(self.config.inputs.keys()) > 1:
      raise NotImplementedError('current implementation supports input '
                                'with only one modality type or less.')

  def _exploration(self):
    """Generates a random exploration run.

    The function uses the environment to generate a run.

    Returns:
      A tuple of numpy arrays. The i-th array contains observation of type and
      shape as specified in config.inputs[i].
      A list of states along the exploration path.
      A list of vertex indices corresponding to the path of the exploration.
    """
    # list(...) around dict views keeps the indexing working on Python 3,
    # where .values() returns a non-indexable view.
    in_seq_len = list(self._config.inputs.values())[0].shape[0]
    path, _, states, step_outputs = self._env.random_step_sequence(
        min_len=in_seq_len)
    obs = {modality_type: [] for modality_type in self._config.inputs}
    for o in step_outputs:
      step_obs, _, done, _ = o
      # It is expected that each value of step_obs is a dict of observations,
      # whose dimensions are consistent with the config.inputs sizes.
      for modality_type in self._config.inputs:
        assert modality_type in step_obs, '{}'.format(type(step_obs))
        o = step_obs[modality_type]
        i = self._config.inputs[modality_type]
        assert len(o.shape) == len(i.shape) - 1
        for dim_o, dim_i in zip(o.shape, i.shape[1:]):
          assert dim_o == dim_i, '{} != {}'.format(dim_o, dim_i)
        obs[modality_type].append(o)
      if done:
        break

    if not obs:
      return obs, states, path
    max_path_len = int(
        round(in_seq_len * float(len(path)) /
              float(len(list(obs.values())[0]))))
    path = path[-max_path_len:]
    states = states[-in_seq_len:]

    # The above obs is a list of tuples of np,array. Re-format them as tuple of
    # np.array, each array containing all observations from all steps.
    def regroup(obs, i):
      """Regroups observations.

      Args:
        obs: a list of tuples of same size. The k-th tuple contains all the
          observations from k-th step. Each observation is a numpy array.
        i: the index of the observation in each tuple to be grouped.

      Returns:
        A numpy array of shape config.inputs[i] which contains all i-th
        observations from all steps. These are concatenated along the first
        dimension. In addition, if the number of observations is different from
        the one specified in config.inputs[i].shape[0], then the array is either
        padded from front or clipped.
      """
      grouped_obs = np.concatenate(
          [np.expand_dims(o, axis=0) for o in obs[i]], axis=0)
      in_seq_len = self._config.inputs[i].shape[0]
      # pylint: disable=unbalanced-tuple-unpacking
      grouped_obs, _ = _pad_or_clip_array(
          grouped_obs, in_seq_len, is_front_clip=True)
      return grouped_obs

    all_obs = {i: regroup(obs, i) for i in self._config.inputs}

    return all_obs, states, path

  def _obs_to_state(self, path, states):
    """Computes mapping between path nodes and states."""
    # Generate a numpy array of locations corresponding to the path vertices.
    # A list comprehension instead of map() so the result is a reusable list
    # on Python 3 as well (py3 map() is a single-pass iterator).
    path_coordinates = [self._env.vertex_to_pose(p) for p in path]
    path_coordinates = np.concatenate(
        [np.reshape(p, [1, 2]) for p in path_coordinates])

    # The observations are taken along a smoothed trajectory following the path.
    # We compute a mapping between the obeservations and the map vertices.
    path_to_obs = collections.defaultdict(list)
    obs_to_state = []
    for i, s in enumerate(states):
      # Assign each state to the closest path vertex (Euclidean distance).
      location = np.reshape(s[0:2], [1, 2])
      index = np.argmin(
          np.reshape(
              np.sum(np.power(path_coordinates - location, 2), axis=1), [-1]))
      index = path[index]
      path_to_obs[index].append(i)
      obs_to_state.append(index)
    return path_to_obs, obs_to_state

  def _perturb_state(self, state, noise_var):
    """Perturbes the state.

    The location are purturbed using a Gaussian noise with variance
    noise_var. The orientation is uniformly sampled.

    Args:
      state: a numpy array containing an env state (x, y locations).
      noise_var: float
    Returns:
      The perturbed state.
    """

    def normal(v, std):
      # Truncated Gaussian noise: clipped to +/- 2 standard deviations.
      if std > 0:
        n = self._rng.normal(0.0, std)
        n = min(n, 2.0 * std)
        n = max(n, -2.0 * std)
        return v + n
      else:
        return v

    state = state.copy()
    state[0] = normal(state[0], noise_var)
    state[1] = normal(state[1], noise_var)
    if state.size > 2:
      state[2] = self._rng.uniform(-math.pi, math.pi)
    return state

  def _sample_obs(self,
                  indices,
                  observations,
                  observation_states,
                  path_to_obs,
                  max_obs_index=None,
                  use_exploration_obs=True):
    """Samples one observation which corresponds to vertex_index in path.

    In addition, the sampled observation must have index in observations less
    than max_obs_index. If these two conditions cannot be satisfied the
    function returns None.

    Args:
      indices: a list of integers.
      observations: a list of numpy arrays containing all the observations.
      observation_states: a list of numpy arrays, each array representing the
        state of the observation.
      path_to_obs: a dict of path indices to lists of observation indices.
      max_obs_index: an integer.
      use_exploration_obs: if True, then the observation is sampled among the
        specified observations, otherwise it is obtained from the environment.

    Returns:
      A tuple of:
      -- A numpy array of size width x height x 3 representing the sampled
        observation.
      -- The index of the sampld observation among the input observations.
      -- The state at which the observation is captured.
    Raises:
      ValueError: if the observation and observation_states lists are of
        different lengths.
    """
    if len(observations) != len(observation_states):
      raise ValueError('observation and observation_states lists must have '
                       'equal lengths')
    if not indices:
      return None, None, None
    vertex_index = self._rng.choice(indices)
    if use_exploration_obs:
      obs_indices = path_to_obs[vertex_index]

      if max_obs_index is not None:
        obs_indices = [i for i in obs_indices if i < max_obs_index]

      if obs_indices:
        index = self._rng.choice(obs_indices)
        if self._add_query_noise:
          xytheta = self._perturb_state(observation_states[index],
                                        self._query_noise_var)
          return self._env.observation(xytheta), index, xytheta
        else:
          return observations[index], index, observation_states[index]
      else:
        return None, None, None
    else:
      xy = self._env.vertex_to_pose(vertex_index)
      xytheta = np.array([xy[0], xy[1], 0.0])
      xytheta = self._perturb_state(xytheta, self._query_noise_var)
      return self._env.observation(xytheta), None, xytheta
class AreNearbyTask(RandomExplorationBasedTask):
  """A task of identifying whether a query is nearby current location or not.

  The query is guaranteed to be in proximity of an already visited location,
  i.e. close to one of the observations. For each observation we have one
  query, which is either close or not to this observation.
  """

  def __init__(
      self,
      max_distance=0,
      *args,
      **kwargs):  # pylint: disable=keyword-arg-before-vararg
    super(AreNearbyTask, self).__init__(*args, **kwargs)
    self._max_distance = max_distance

    if len(self.config.inputs.keys()) != 1:
      raise NotImplementedError('current implementation supports input '
                                'with only one modality type')

  def episode(self):
    """Episode data.

    Returns:
      observations: a tuple with one element. This element is a numpy array of
        size in_seq_len x observation_size x observation_size x 3 containing
        in_seq_len images.
      query: a numpy array of size
        in_seq_len x observation_size X observation_size x 3 containing a query
        image.
      A tuple of size two. First element is a in_seq_len x 2 numpy array of
        either 1.0 or 0.0. The i-th element denotes whether the i-th query
        image is neraby (value 1.0) or not (value 0.0) to the i-th observation.
        The second element in the tuple is a mask, a numpy array of size
        in_seq_len x 1 and values 1.0 or 0.0 denoting whether the query is
        valid or not (it can happen that the query is not valid, e.g. there are
        not enough observations to have a meaningful queries).
    """
    observations, states, path = self._exploration()
    # list(...) keeps dict-values indexing working on Python 3.
    assert len(list(observations.values())[0]) == len(states)

    # The observations are taken along a smoothed trajectory following the path.
    # We compute a mapping between the obeservations and the map vertices.
    path_to_obs, obs_to_path = self._obs_to_state(path, states)

    # Go over all observations, and sample a query. With probability 0.5 this
    # query is a nearby observation (defined as belonging to the same vertex
    # in path).
    g = self._env.graph
    queries = []
    labels = []
    validity_masks = []
    query_index_in_observations = []
    for i, curr_o in enumerate(list(observations.values())[0]):
      p = obs_to_path[i]
      low = max(0, i - self._max_distance)

      # Nearby visited indices, label 1. list(g[p].keys()) because dict_keys
      # cannot be concatenated to a list on Python 3.
      nearby_visited = [
          ii for ii in path[low:i + 1] + list(g[p].keys())
          if ii in obs_to_path[:i]
      ]
      # BUG FIX: the original filtered from index_groups[1], which was still
      # empty at this point, so nearby_visited was always []. Filter the
      # just-computed candidate list instead.
      nearby_visited = [ii for ii in nearby_visited if ii in path_to_obs]
      # NOT Nearby visited indices, label 0.
      not_nearby_visited = [ii for ii in path[:low] if ii not in g[p].keys()]
      # BUG FIX: same overwrite bug as above for index_groups[0].
      not_nearby_visited = [
          ii for ii in not_nearby_visited if ii in path_to_obs
      ]
      # NOT visited indices, label 2.
      not_visited = [
          ii for ii in range(g.number_of_nodes()) if ii not in path[:i + 1]
      ]
      # A list of lists of vertex indices. Each list in this group corresponds
      # to one possible label.
      index_groups = [not_nearby_visited, nearby_visited, not_visited]
      # Consider only labels for which there are indices.
      allowed_labels = [ii for ii, group in enumerate(index_groups) if group]
      label = self._rng.choice(allowed_labels)

      indices = list(set(index_groups[label]))
      max_obs_index = None if label == 2 else i
      use_exploration_obs = False if label == 2 else True
      o, obs_index, _ = self._sample_obs(
          indices=indices,
          observations=list(observations.values())[0],
          observation_states=states,
          path_to_obs=path_to_obs,
          max_obs_index=max_obs_index,
          use_exploration_obs=use_exploration_obs)
      query_index_in_observations.append(obs_index)

      # If we cannot sample a valid query, we mark it as not valid in mask.
      if o is None:
        label = 0.0
        o = curr_o
        validity_masks.append(0)
      else:
        validity_masks.append(1)

      # NOTE(review): this assumes `o` is a dict of modality -> array here;
      # confirm _sample_obs always returns a dict in this code path.
      queries.append(list(o.values())[0])
      labels.append(label)

    query = np.concatenate([np.expand_dims(q, axis=0) for q in queries], axis=0)

    def one_hot(label, num_labels=3):
      # np.float was removed in NumPy 1.24; builtin float is the documented
      # replacement (both mean float64 here).
      a = np.zeros((num_labels,), dtype=float)
      a[int(label)] = 1.0
      return a

    outputs = np.stack([one_hot(l) for l in labels], axis=0)
    validity_mask = np.reshape(
        np.array(validity_masks, dtype=np.int32), [-1, 1])
    self.info['query_index_in_observations'] = query_index_in_observations
    self.info['observation_states'] = states
    return observations, query, (outputs, validity_mask)

  def target_loss(self, truth, predicted, weights=None):
    pass
class NeighboringQueriesTask(RandomExplorationBasedTask):
  """A task of identifying whether two queries are closeby or not.

  The proximity between queries is defined by the length of the shorest path
  between them.
  """

  def __init__(
      self,
      max_distance=1,
      *args,
      **kwargs):  # pylint: disable=keyword-arg-before-vararg
    """Initializes a NeighboringQueriesTask.

    Args:
      max_distance: integer, the maximum distance in terms of number of vertices
        between the two queries, so that they are considered neighboring.
      *args: for super class.
      **kwargs: for super class.
    """
    super(NeighboringQueriesTask, self).__init__(*args, **kwargs)
    self._max_distance = max_distance
    if len(self.config.inputs.keys()) != 1:
      raise NotImplementedError('current implementation supports input '
                                'with only one modality type')

  def episode(self):
    """Episode data.

    Returns:
      observations: a tuple with one element. This element is a numpy array of
        size in_seq_len x observation_size x observation_size x 3 containing
        in_seq_len images.
      query: a numpy array of size
        2 x observation_size X observation_size x 3 containing a pair of query
        images.
      A tuple of size two. First element is a numpy array of size 2 containing
        a one hot vector of whether the two observations are neighobring. Second
        element is a boolean numpy value denoting whether this is a valid
        episode.
    """
    observations, states, path = self._exploration()
    # list(...) keeps dict-values indexing working on Python 3.
    assert len(list(observations.values())[0]) == len(states)
    path_to_obs, _ = self._obs_to_state(path, states)
    # Restrict path to ones for which observations have been generated.
    path = [p for p in path if p in path_to_obs]
    # Sample first query.
    query1_index = self._rng.choice(path)
    # Sample label.
    label = self._rng.randint(2)
    # Sample second query.
    # If label == 1, then second query must be nearby, otherwise not.
    closest_indices = list(nx.single_source_shortest_path(
        self._env.graph, query1_index, self._max_distance).keys())
    if label == 0:
      # Path vertices outside the neighborhood of query1.
      indices = [p for p in path if p not in closest_indices]
    else:
      # Neighborhood vertices which also lie on the path.
      indices = [p for p in closest_indices if p in path]

    query2_index = self._rng.choice(indices)
    # Generate an observation.
    query1, query1_index, _ = self._sample_obs(
        [query1_index],
        list(observations.values())[0],
        states,
        path_to_obs,
        max_obs_index=None,
        use_exploration_obs=True)
    query2, query2_index, _ = self._sample_obs(
        [query2_index],
        list(observations.values())[0],
        states,
        path_to_obs,
        max_obs_index=None,
        use_exploration_obs=True)

    queries = np.concatenate(
        [np.expand_dims(q, axis=0) for q in [query1, query2]])
    labels = np.array([0, 0])
    labels[label] = 1
    is_valid = np.array([1])
    self.info['observation_states'] = states
    self.info['query_indices_in_observations'] = [query1_index, query2_index]
    return observations, queries, (labels, is_valid)

  def target_loss(self, truth, predicted, weights=None):
    pass
#@gin.configurable
class GotoStaticXTask(RandomExplorationBasedTask):
  """Task go to a static X.

  If continuous reward is used only one goal is allowed so that the reward can
  be computed as a delta-distance to that goal..
  """

  def __init__(self,
               step_reward=0.0,
               goal_reward=1.0,
               hit_wall_reward=-1.0,
               done_at_target=False,
               use_continuous_reward=False,
               *args,
               **kwargs):  # pylint: disable=keyword-arg-before-vararg
    super(GotoStaticXTask, self).__init__(*args, **kwargs)
    if len(self.config.inputs.keys()) > 1:
      raise NotImplementedError('current implementation supports input '
                                'with only one modality type or less.')

    self._step_reward = step_reward
    self._goal_reward = goal_reward
    self._hit_wall_reward = hit_wall_reward
    self._done_at_target = done_at_target
    self._use_continuous_reward = use_continuous_reward

    # Cached shortest-path length, maintained by reset()/reward() for the
    # continuous (delta-distance) reward.
    self._previous_path_length = None

  def episode(self):
    """Samples an exploration run and the action deltas to a random goal."""
    observations, _, path = self._exploration()
    if len(path) < 2:
      raise ValueError('The exploration path has only one node.')

    g = self._env.graph
    start = path[-1]
    # Pick a goal distinct from the current (last) vertex of the exploration.
    while True:
      goal = self._rng.choice(path[:-1])
      if goal != start:
        break
    goal_path = nx.shortest_path(g, start, goal)

    init_orientation = self._rng.uniform(0, np.pi, (1,))
    trajectory = np.array(
        [list(self._env.vertex_to_pose(p)) for p in goal_path])
    init_xy = np.reshape(trajectory[0, :], [-1])
    init_state = np.concatenate([init_xy, init_orientation], 0)

    trajectory = trajectory[1:, :]
    deltas = envs_util.trajectory_to_deltas(trajectory, init_state)
    output_seq_len = self._config.output.shape[0]
    arr = _pad_or_clip_array(deltas, output_seq_len, output_mask=True)
    # pylint: disable=unbalanced-tuple-unpacking
    thetas, _, thetas_mask = arr

    # list(...) keeps dict-values indexing working on Python 3.
    query = list(
        self._env.observation(self._env.vertex_to_pose(goal)).values())[0]
    return observations, query, (thetas, thetas_mask)

  def reward(self, obs, done, info):
    """Task-specific reward; see the base class for argument semantics."""
    if 'wall_collision' in info and info['wall_collision']:
      return obs, self._hit_wall_reward, done, info

    reward = 0.0
    current_vertex = self._env.pose_to_vertex(self._env.state)

    if current_vertex in self._env.targets():
      if self._done_at_target:
        done = True
      else:
        obs = self._env.reset()
      reward = self._goal_reward
    else:
      if self._use_continuous_reward:
        if len(self._env.targets()) != 1:
          raise ValueError(
              'FindX task with continuous reward is assuming only one target.')
        goal_vertex = self._env.targets()[0]
        # Reward is the decrease in shortest-path length since the last step.
        path_length = self._compute_path_length(goal_vertex)
        reward = self._previous_path_length - path_length
        self._previous_path_length = path_length
      else:
        reward = self._step_reward

    return obs, reward, done, info

  def _compute_path_length(self, goal_vertex):
    """Length of the shortest path from the current state to goal_vertex."""
    current_vertex = self._env.pose_to_vertex(self._env.state)
    path = nx.shortest_path(self._env.graph, current_vertex, goal_vertex)
    assert len(path) >= 2
    # The first hop is measured exactly from the continuous agent position.
    curr_xy = np.array(self._env.state[:2])
    next_xy = np.array(self._env.vertex_to_pose(path[1]))
    last_step_distance = np.linalg.norm(next_xy - curr_xy)
    return (len(path) - 2) * self._env.cell_size_px + last_step_distance

  def reset(self, observation):
    if self._use_continuous_reward:
      if len(self._env.targets()) != 1:
        raise ValueError(
            'FindX task with continuous reward is assuming only one target.')
      goal_vertex = self._env.targets()[0]
      self._previous_path_length = self._compute_path_length(goal_vertex)

  def target_loss(self, truth, predicted, weights=None):
    """Action classification loss.

    Args:
      truth: a batch_size x sequence length x number of labels float
        Tensor containing a one hot vector for each label in each batch and
        time.
      predicted: a batch_size x sequence length x number of labels float
        Tensor containing a predicted distribution over all actions.
      weights: a batch_size x sequence_length float Tensor of bool
        denoting which actions are valid.

    Returns:
      An average cross entropy over all batches and elements in sequence.
    """
    return classification_loss(
        truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class RelativeLocationTask(RandomExplorationBasedTask):
  """A task of estimating the relative location of a query w.r.t current.

  It is to be used for debugging. It is designed such that the output is a
  single value, out of a discrete set of values, so that it can be phrased as
  a classification problem.
  """

  def __init__(self, num_labels, *args, **kwargs):
    """Initializes a relative location task.

    Args:
      num_labels: integer, number of orientations to bin the relative
        orientation into.
      *args: see super class.
      **kwargs: see super class.
    """
    super(RelativeLocationTask, self).__init__(*args, **kwargs)
    self._num_labels = num_labels
    if len(self.config.inputs.keys()) != 1:
      raise NotImplementedError('current implementation supports input '
                                'with only one modality type')

  def episode(self):
    """Returns exploration observations, a query and its orientation label."""
    observations, states, path = self._exploration()

    # Select a random element from history.
    path_to_obs, _ = self._obs_to_state(path, states)
    use_exploration_obs = not self._add_query_noise
    # list(...) keeps dict-values indexing working on Python 3.
    query, _, query_state = self._sample_obs(
        path[:-1],
        list(observations.values())[0],
        states,
        path_to_obs,
        max_obs_index=None,
        use_exploration_obs=use_exploration_obs)

    x, y, theta = tuple(states[-1])
    q_x, q_y, _ = tuple(query_state)
    t_x, t_y = q_x - x, q_y - y
    # Rotate the displacement into the agent's frame of reference.
    (rt_x, rt_y) = (np.sin(theta) * t_x - np.cos(theta) * t_y,
                    np.cos(theta) * t_x + np.sin(theta) * t_y)
    # Bins are [a(i), a(i+1)] for a(i) = -pi + 0.5 * bin_size + i * bin_size.
    shift = np.pi * (1 - 1.0 / (2.0 * self._num_labels))
    orientation = np.arctan2(rt_y, rt_x) + shift
    if orientation < 0:
      orientation += 2 * np.pi
    label = int(np.floor(self._num_labels * orientation / (2 * np.pi)))

    out_shape = self._config.output.shape
    if len(out_shape) != 1:
      raise ValueError('Output shape should be of rank 1.')
    if out_shape[0] != self._num_labels:
      raise ValueError('Output shape must be of size %d' % self._num_labels)
    output = np.zeros(out_shape, dtype=np.float32)
    output[label] = 1
    return observations, query, (output, None)

  def target_loss(self, truth, predicted, weights=None):
    return classification_loss(
        truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class LocationClassificationTask(UnrolledTask):
  """A task of classifying a location as one of several classes.
  The task does not have an input, but just a query and an output. The query
  is an observation of the current location, e.g. an image taken from the
  current state. The output is a label classifying this location in one of
  predefined set of locations (or landmarks).
  The current implementation classifies locations as intersections based on the
  number and directions of bifurcations. It is expected that a location can
  have at most 4 different directions, aligned with the axes. As each of these
  four directions might be present or not, the number of possible intersections
  are 2^4 = 16.
  """
  def __init__(self, env, seed, *args, **kwargs):
    # env: environment providing graph, vertex_to_pose/pose_to_vertex and
    #   observation helpers.
    # seed: int seed for the private RandomState used to sample locations.
    super(LocationClassificationTask, self).__init__(*args, **kwargs)
    self._env = env
    self._rng = np.random.RandomState(seed)
    # A location property which can be set. If not set, a random one is
    # generated.
    self._location = None
    # NOTE(review): this uses `self.config` while the other methods use
    # `self._config` — confirm UnrolledTask exposes both names.
    if len(self.config.inputs.keys()) > 1:
      raise NotImplementedError('current implementation supports input '
                                'with only one modality type or less.')
  @property
  def location(self):
    # The pinned location, or None when a fresh one is sampled per episode.
    return self._location
  @location.setter
  def location(self, location):
    self._location = location
  def episode(self):
    """Builds one episode: ([], query_observation, (one_hot_label, None))."""
    # Get a location. If not set, sample on at a vertex with a random
    # orientation
    location = self._location
    if location is None:
      num_nodes = self._env.graph.number_of_nodes()
      vertex = int(math.floor(self._rng.uniform(0, num_nodes)))
      xy = self._env.vertex_to_pose(vertex)
      theta = self._rng.uniform(0, 2 * math.pi)
      # location is (x, y, theta) — derived from the concatenation below.
      location = np.concatenate(
          [np.reshape(xy, [-1]), np.array([theta])], axis=0)
    else:
      vertex = self._env.pose_to_vertex(location)
    theta = location[2]
    # NOTE(review): nx 2.x returns an iterator from neighbors(); the single
    # list comprehension below consumes it once, so either nx 1.x or 2.x works.
    neighbors = self._env.graph.neighbors(vertex)
    xy_s = [self._env.vertex_to_pose(n) for n in neighbors]
    def rotate(xy, theta):
      """Rotates a vector around the origin by angle theta.
      Args:
        xy: a numpy darray of shape (2, ) of floats containing the x and y
          coordinates of a vector.
        theta: a python float containing the rotation angle in radians.
      Returns:
        A numpy darray of floats of shape (2,) containing the x and y
        coordinates rotated xy.
      """
      rotated_x = np.cos(theta) * xy[0] - np.sin(theta) * xy[1]
      rotated_y = np.sin(theta) * xy[0] + np.cos(theta) * xy[1]
      return np.array([rotated_x, rotated_y])
    # Rotate all intersection bifurcations by the orientation of the agent as
    # the intersection label is defined in an agent centered fashion. The extra
    # -pi/4 offset centers each axis direction inside its quadrant bin.
    xy_s = [
        rotate(xy - location[0:2], -location[2] - math.pi / 4) for xy in xy_s
    ]
    th_s = [np.arctan2(xy[1], xy[0]) for xy in xy_s]
    out_shape = self._config.output.shape
    if len(out_shape) != 1:
      raise ValueError('Output shape should be of rank 1.')
    num_labels = out_shape[0]
    if num_labels != 16:
      raise ValueError('Currently only 16 labels are supported '
                       '(there are 16 different 4 way intersection types).')
    # Quantize each neighbor direction into one of 4 quadrant bins (0..3).
    # NOTE(review): np.arctan2 can return exactly +pi, which would floor to
    # bin 4 and index one past the 16-label output — confirm unreachable given
    # the -pi/4 offset above.
    th_s = set([int(math.floor(4 * (th / (2 * np.pi) + 0.5))) for th in th_s])
    one_hot_label = np.zeros((num_labels,), dtype=np.float32)
    # Encode the set of present directions as a 4-bit integer label.
    label = 0
    for th in th_s:
      label += pow(2, th)
    one_hot_label[int(label)] = 1.0
    # NOTE(review): `.values()[0]` is Python 2 only (dict views in py3).
    query = self._env.observation(location).values()[0]
    return [], query, (one_hot_label, None)
  def reward(self, obs, done, info):
    # This task is supervised-only; rewards are never consumed.
    raise ValueError('Do not call.')
  def target_loss(self, truth, predicted, weights=None):
    """One-hot classification loss between truth and predicted labels."""
    return classification_loss(
        truth=truth, predicted=predicted, weights=weights, is_one_hot=True)
class GotoStaticXNoExplorationTask(UnrolledTask):
  """An interface for findX tasks without exploration.
  The agent is initialized a random location in a random world and a random goal
  and the objective is for the agent to move toward the goal. This class
  generates episode for such task. Each generates a sequence of observations x
  and target outputs y. x is the observations and is an OrderedDict with keys
  provided from config.inputs.keys() and the shapes provided in the
  config.inputs. The output is a numpy arrays with the shape specified in the
  config.output. The shape of the array is (sequence_length x action_size) where
  action is the number of actions that can be done in the environment. Note that
  config.output.shape should be set according to the number of actions that can
  be done in the env.
  target outputs y are the groundtruth value of each action that is computed
  from the environment graph. The target output for each action is proportional
  to the progress that each action makes. Target value of 1 means that the
  action takes the agent one step closer, -1 means the action takes the agent
  one step farther. Value of -2 means that action should not take place at all.
  This can be because the action leads to collision or it wants to terminate the
  episode prematurely.
  """
  def __init__(self, env, *args, **kwargs):
    # Validates the task config: no query, rank-2 output, and matching
    # sequence lengths between every input modality and the output.
    super(GotoStaticXNoExplorationTask, self).__init__(*args, **kwargs)
    if self._config.query is not None:
      raise ValueError('query should be None.')
    if len(self._config.output.shape) != 2:
      raise ValueError('output should only have two dimensions:'
                       '(sequence_length x number_of_actions)')
    for input_config in self._config.inputs.values():
      if input_config.shape[0] != self._config.output.shape[0]:
        raise ValueError('the first dimension of the input and output should'
                         'be the same.')
    # NOTE(review): duplicate check — the output rank was already validated
    # above; this second branch is unreachable with a different message.
    if len(self._config.output.shape) != 2:
      raise ValueError('output shape should be '
                       '(sequence_length x number_of_actions)')
    self._env = env
  def _compute_shortest_path_length(self, vertex, target_vertices):
    """Computes length of the shortest path from vertex to any target vertexes.
    Args:
      vertex: integer, index of the vertex in the environment graph.
      target_vertices: list of the target vertexes
    Returns:
      integer, minimum distance from the vertex to any of the target_vertices.
    Raises:
      ValueError: if there is no path between the vertex and at least one of
        the target_vertices.
    """
    # NOTE(review): len() of nx.shortest_path counts vertices on the path,
    # i.e. edge count + 1; only differences of these lengths are used below,
    # so the +1 cancels out.
    try:
      return np.min([
          len(nx.shortest_path(self._env.graph, vertex, t))
          for t in target_vertices
      ])
    # NOTE(review): bare except re-raises everything (including
    # KeyboardInterrupt); the original error logging was commented out.
    except:
      #logging.error('there is no path between vertex %d and at least one of '
      #              'the targets %r', vertex, target_vertices)
      raise
  def _compute_gt_value(self, vertex, target_vertices):
    """Computes groundtruth value of all the actions at the vertex.
    The value of each action is the difference each action makes in the length
    of the shortest path to the goal. If an action takes the agent one step
    closer to the goal the value is 1. In case, it takes the agent one step away
    from the goal it would be -1. If it leads to collision or if the agent uses
    action stop before reaching to the goal it is -2. To avoid scale issues the
    gt_values are multiplied by 0.5.
    Args:
      vertex: integer, the index of current vertex.
      target_vertices: list of the integer indexes of the target views.
    Returns:
      numpy array with shape (action_size,) and each element is the groundtruth
      value of each action based on the progress each action makes.
    """
    action_size = self._config.output.shape[1]
    # Default -2 (i.e. -1 after the 0.5 scale) marks invalid/unused actions.
    output_value = np.ones((action_size), dtype=np.float32) * -2
    my_distance = self._compute_shortest_path_length(vertex, target_vertices)
    for adj in self._env.graph[vertex]:
      adj_distance = self._compute_shortest_path_length(adj, target_vertices)
      if adj_distance is None:
        continue
      action_index = self._env.action(
          self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj))
      assert action_index is not None, ('{} is not adjacent to {}. There might '
                                        'be a problem in environment graph '
                                        'connectivity because there is no '
                                        'direct edge between the given '
                                        'vertices').format(
                                            self._env.vertex_to_pose(vertex),
                                            self._env.vertex_to_pose(adj))
      # Progress made by moving to adj: +1 closer, -1 farther (before scaling).
      output_value[action_index] = my_distance - adj_distance
    return output_value * 0.5
  def episode(self):
    """Returns data needed to train and test a single episode.
    Returns:
      (inputs, None, output) where inputs is a dictionary of modality types to
      numpy arrays. The second element is query but we assume that the goal
      is also given as part of observation so it should be None for this task,
      and the outputs is the tuple of ground truth action values with the
      shape of (sequence_length x action_size) that is coming from
      config.output.shape and a numpy array with the shape of
      (sequence_length,) that is 1 if the corresponding element of the
      input and output should be used in the training optimization.
    Raises:
      ValueError: If the output values for env.random_step_sequence is not
        valid.
      ValueError: If the shape of observations coming from the env is not
        consistent with the config.
      ValueError: If there is a modality type specified in the config but the
        environment does not return that.
    """
    # Sequence length is the first dimension of any of the input tensors.
    # NOTE(review): `.values()[0]` is Python 2 only — py3 dict views don't
    # support indexing.
    sequence_length = self._config.inputs.values()[0].shape[0]
    modality_types = self._config.inputs.keys()
    path, _, _, step_outputs = self._env.random_step_sequence(
        max_len=sequence_length)
    target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()]
    if len(path) != len(step_outputs):
      raise ValueError('path, and step_outputs should have equal length'
                       ' {}!={}'.format(len(path), len(step_outputs)))
    # Building up observations. observations will be a OrderedDict of
    # modality types. The values are numpy arrays that follow the given shape
    # in the input config for each modality type.
    # (Each generated [k, []] pair acts as a key/value item.)
    observations = collections.OrderedDict([k, []] for k in modality_types)
    for step_output in step_outputs:
      obs_dict = step_output[0]
      # Only going over the modality types that are specified in the input
      # config.
      for modality_type in modality_types:
        if modality_type not in obs_dict:
          raise ValueError('modality type is not returned from the environment.'
                           '{} not in {}'.format(modality_type,
                                                 obs_dict.keys()))
        obs = obs_dict[modality_type]
        if np.any(
            obs.shape != tuple(self._config.inputs[modality_type].shape[1:])):
          raise ValueError(
              'The observations should have the same size as speicifed in'
              'config for modality type {}. {} != {}'.format(
                  modality_type, obs.shape,
                  self._config.inputs[modality_type].shape[1:]))
        observations[modality_type].append(obs)
    gt_value = [self._compute_gt_value(v, target_vertices) for v in path]
    # pylint: disable=unbalanced-tuple-unpacking
    gt_value, _, value_mask = _pad_or_clip_array(
        np.array(gt_value),
        sequence_length,
        is_front_clip=False,
        output_mask=True,
    )
    # NOTE(review): `.iteritems()` is Python 2 only.
    for modality_type, obs in observations.iteritems():
      observations[modality_type], _, mask = _pad_or_clip_array(
          np.array(obs), sequence_length, is_front_clip=False, output_mask=True)
      assert np.all(mask == value_mask)
    return observations, None, (gt_value, value_mask)
  def reset(self, observation):
    """Called after the environment is reset."""
    pass
  def target_loss(self, true_targets, targets, weights=None):
    """A loss for training a task model.
    This loss measures the discrepancy between the task outputs, the true and
    predicted ones.
    Args:
      true_targets: tf.Tensor of tf.float32 with the shape of
        (batch_size x sequence_length x action_size).
      targets: tf.Tensor of tf.float32 with the shape of
        (batch_size x sequence_length x action_size).
      weights: tf.Tensor of tf.bool with the shape of
        (batch_size x sequence_length).
    Raises:
      ValueError: if the shapes of the input tensors are not consistent.
    Returns:
      L2 loss between the predicted action values and true action values.
    """
    targets_shape = targets.get_shape().as_list()
    true_targets_shape = true_targets.get_shape().as_list()
    if len(targets_shape) != 3 or len(true_targets_shape) != 3:
      raise ValueError('invalid shape for targets or true_targets_shape')
    # NOTE(review): comparing Python lists with != yields a single bool, so
    # np.any() is a no-op wrapper here (same below for weights_shape).
    if np.any(targets_shape != true_targets_shape):
      raise ValueError('the shape of targets and true_targets are not the same'
                       '{} != {}'.format(targets_shape, true_targets_shape))
    if weights is not None:
      # Filtering targets and true_targets using weights.
      weights_shape = weights.get_shape().as_list()
      if np.any(weights_shape != targets_shape[0:2]):
        raise ValueError('The first two elements of weights shape should match'
                         'target. {} != {}'.format(weights_shape,
                                                   targets_shape))
      true_targets = tf.boolean_mask(true_targets, weights)
      targets = tf.boolean_mask(targets, weights)
    return tf.losses.mean_squared_error(tf.reshape(targets, [-1]),
                                        tf.reshape(true_targets, [-1]))
  def reward(self, obs, done, info):
    raise NotImplementedError('reward is not implemented for this task')
################################################################################
class NewTask(UnrolledTask):
  """Goal-directed action-value task over the environment graph.

  NOTE(review): this class is a near-verbatim copy of
  GotoStaticXNoExplorationTask (only the config validation in __init__ is
  dropped and the no-path error is logged). Consider deduplicating by
  inheritance — confirm the missing config checks are intentional first.
  """
  def __init__(self, env, *args, **kwargs):
    super(NewTask, self).__init__(*args, **kwargs)
    self._env = env
  def _compute_shortest_path_length(self, vertex, target_vertices):
    """Computes length of the shortest path from vertex to any target vertexes.
    Args:
      vertex: integer, index of the vertex in the environment graph.
      target_vertices: list of the target vertexes
    Returns:
      integer, minimum distance from the vertex to any of the target_vertices.
    Raises:
      ValueError: if there is no path between the vertex and at least one of
        the target_vertices.
    """
    # NOTE(review): len() of nx.shortest_path counts vertices (edges + 1);
    # only differences of these lengths are consumed, so the offset cancels.
    try:
      return np.min([
          len(nx.shortest_path(self._env.graph, vertex, t))
          for t in target_vertices
      ])
    # NOTE(review): bare except logs and re-raises everything.
    except:
      logging.error('there is no path between vertex %d and at least one of '
                    'the targets %r', vertex, target_vertices)
      raise
  def _compute_gt_value(self, vertex, target_vertices):
    """Computes groundtruth value of all the actions at the vertex.
    The value of each action is the difference each action makes in the length
    of the shortest path to the goal. If an action takes the agent one step
    closer to the goal the value is 1. In case, it takes the agent one step away
    from the goal it would be -1. If it leads to collision or if the agent uses
    action stop before reaching to the goal it is -2. To avoid scale issues the
    gt_values are multiplied by 0.5.
    Args:
      vertex: integer, the index of current vertex.
      target_vertices: list of the integer indexes of the target views.
    Returns:
      numpy array with shape (action_size,) and each element is the groundtruth
      value of each action based on the progress each action makes.
    """
    action_size = self._config.output.shape[1]
    # Default -2 (i.e. -1 after the 0.5 scale) marks invalid/unused actions.
    output_value = np.ones((action_size), dtype=np.float32) * -2
    # own compute _compute_shortest_path_length - returns float
    my_distance = self._compute_shortest_path_length(vertex, target_vertices)
    for adj in self._env.graph[vertex]:
      adj_distance = self._compute_shortest_path_length(adj, target_vertices)
      if adj_distance is None:
        continue
      action_index = self._env.action(
          self._env.vertex_to_pose(vertex), self._env.vertex_to_pose(adj))
      assert action_index is not None, ('{} is not adjacent to {}. There might '
                                        'be a problem in environment graph '
                                        'connectivity because there is no '
                                        'direct edge between the given '
                                        'vertices').format(
                                            self._env.vertex_to_pose(vertex),
                                            self._env.vertex_to_pose(adj))
      # Progress made by moving to adj: +1 closer, -1 farther (before scaling).
      output_value[action_index] = my_distance - adj_distance
    return output_value * 0.5
  def episode(self):
    """Returns data needed to train and test a single episode.
    Returns:
      (inputs, None, output) where inputs is a dictionary of modality types to
      numpy arrays. The second element is query but we assume that the goal
      is also given as part of observation so it should be None for this task,
      and the outputs is the tuple of ground truth action values with the
      shape of (sequence_length x action_size) that is coming from
      config.output.shape and a numpy array with the shape of
      (sequence_length,) that is 1 if the corresponding element of the
      input and output should be used in the training optimization.
    Raises:
      ValueError: If the output values for env.random_step_sequence is not
        valid.
      ValueError: If the shape of observations coming from the env is not
        consistent with the config.
      ValueError: If there is a modality type specified in the config but the
        environment does not return that.
    """
    # Sequence length is the first dimension of any of the input tensors.
    # NOTE(review): `.values()[0]` is Python 2 only.
    sequence_length = self._config.inputs.values()[0].shape[0]
    modality_types = self._config.inputs.keys()
    path, _, _, step_outputs = self._env.random_step_sequence(
        max_len=sequence_length)
    target_vertices = [self._env.pose_to_vertex(x) for x in self._env.targets()]
    if len(path) != len(step_outputs):
      raise ValueError('path, and step_outputs should have equal length'
                       ' {}!={}'.format(len(path), len(step_outputs)))
    # Building up observations. observations will be a OrderedDict of
    # modality types. The values are numpy arrays that follow the given shape
    # in the input config for each modality type.
    observations = collections.OrderedDict([k, []] for k in modality_types)
    for step_output in step_outputs:
      obs_dict = step_output[0]
      # Only going over the modality types that are specified in the input
      # config.
      for modality_type in modality_types:
        if modality_type not in obs_dict:
          raise ValueError('modality type is not returned from the environment.'
                           '{} not in {}'.format(modality_type,
                                                 obs_dict.keys()))
        obs = obs_dict[modality_type]
        if np.any(
            obs.shape != tuple(self._config.inputs[modality_type].shape[1:])):
          raise ValueError(
              'The observations should have the same size as speicifed in'
              'config for modality type {}. {} != {}'.format(
                  modality_type, obs.shape,
                  self._config.inputs[modality_type].shape[1:]))
        observations[modality_type].append(obs)
    gt_value = [self._compute_gt_value(v, target_vertices) for v in path]
    # pylint: disable=unbalanced-tuple-unpacking
    gt_value, _, value_mask = _pad_or_clip_array(
        np.array(gt_value),
        sequence_length,
        is_front_clip=False,
        output_mask=True,
    )
    # NOTE(review): `.iteritems()` is Python 2 only.
    for modality_type, obs in observations.iteritems():
      observations[modality_type], _, mask = _pad_or_clip_array(
          np.array(obs), sequence_length, is_front_clip=False, output_mask=True)
      assert np.all(mask == value_mask)
    return observations, None, (gt_value, value_mask)
  def reset(self, observation):
    """Called after the environment is reset."""
    pass
  def target_loss(self, true_targets, targets, weights=None):
    """A loss for training a task model.
    This loss measures the discrepancy between the task outputs, the true and
    predicted ones.
    Args:
      true_targets: tf.Tensor of tf.float32 with the shape of
        (batch_size x sequence_length x action_size).
      targets: tf.Tensor of tf.float32 with the shape of
        (batch_size x sequence_length x action_size).
      weights: tf.Tensor of tf.bool with the shape of
        (batch_size x sequence_length).
    Raises:
      ValueError: if the shapes of the input tensors are not consistent.
    Returns:
      L2 loss between the predicted action values and true action values.
    """
    targets_shape = targets.get_shape().as_list()
    true_targets_shape = true_targets.get_shape().as_list()
    if len(targets_shape) != 3 or len(true_targets_shape) != 3:
      raise ValueError('invalid shape for targets or true_targets_shape')
    # NOTE(review): list != list yields one bool; np.any() is a no-op wrapper.
    if np.any(targets_shape != true_targets_shape):
      raise ValueError('the shape of targets and true_targets are not the same'
                       '{} != {}'.format(targets_shape, true_targets_shape))
    if weights is not None:
      # Filtering targets and true_targets using weights.
      weights_shape = weights.get_shape().as_list()
      if np.any(weights_shape != targets_shape[0:2]):
        raise ValueError('The first two elements of weights shape should match'
                         'target. {} != {}'.format(weights_shape,
                                                   targets_shape))
      true_targets = tf.boolean_mask(true_targets, weights)
      targets = tf.boolean_mask(targets, weights)
    return tf.losses.mean_squared_error(tf.reshape(targets, [-1]),
                                        tf.reshape(true_targets, [-1]))
  def reward(self, obs, done, info):
    raise NotImplementedError('reward is not implemented for this task')
|
{
"content_hash": "798b2ef6c0db0e58c65f622b1c04c8c5",
"timestamp": "",
"source": "github",
"line_count": 1492,
"max_line_length": 80,
"avg_line_length": 39.40080428954423,
"alnum_prop": 0.6464124111182935,
"repo_name": "derekjchow/models",
"id": "c3ef6ca328f7454ffe9aec61a704d1322d680d31",
"size": "59475",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "research/cognitive_planning/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from math import fabs
import operator
from random import shuffle
from functools import reduce
try:
from scipy.stats.stats import betai
except ImportError:
betai = None
from nltk.compat import xrange, izip
from nltk.util import LazyConcatenation, LazyMap
def accuracy(reference, test):
    """
    Given a list of reference values and a corresponding list of test
    values, return the fraction of corresponding values that are
    equal.  In particular, return the fraction of indices
    ``0<i<=len(test)`` such that ``test[i] == reference[i]``.

    :type reference: list
    :param reference: An ordered list of reference values.
    :type test: list
    :param test: A list of values to compare against the corresponding
        reference values.
    :raise ValueError: If ``reference`` and ``test`` do not have the
        same length.
    """
    if len(reference) != len(test):
        raise ValueError("Lists must have the same length.")
    # The builtin zip is equivalent to the compat izip here (both sequences
    # are fully consumed), and works unchanged on Python 2 and 3.
    return float(sum(x == y for x, y in zip(reference, test))) / len(test)
def precision(reference, test):
    """
    Return the fraction of values in ``test`` that also appear in
    ``reference``, i.e. card(``reference`` intersection ``test``) divided
    by card(``test``).  Returns None when ``test`` is empty.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    :raise TypeError: If either argument does not support set intersection.
    """
    for argument in (reference, test):
        if not hasattr(argument, 'intersection'):
            raise TypeError('reference and test should be sets')
    if not test:
        return None
    overlap = reference.intersection(test)
    return float(len(overlap)) / len(test)
def recall(reference, test):
    """
    Return the fraction of values in ``reference`` that also appear in
    ``test``, i.e. card(``reference`` intersection ``test``) divided by
    card(``reference``).  Returns None when ``reference`` is empty.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    :raise TypeError: If either argument does not support set intersection.
    """
    for argument in (reference, test):
        if not hasattr(argument, 'intersection'):
            raise TypeError('reference and test should be sets')
    if not reference:
        return None
    matched = reference.intersection(test)
    return float(len(matched)) / len(reference)
def f_measure(reference, test, alpha=0.5):
    """
    Return the f-measure of ``test`` against ``reference``: the weighted
    harmonic mean 1/(alpha/p + (1-alpha)/r) of the precision *p* and
    recall *r*, where:

    - *p* = card(``reference`` intersection ``test``)/card(``test``)
    - *r* = card(``reference`` intersection ``test``)/card(``reference``)

    Returns None when either set is empty, and 0 when either precision or
    recall is zero.

    :type reference: set
    :param reference: A set of reference values.
    :type test: set
    :param test: A set of values to compare against the reference set.
    :rtype: float or None
    """
    prec = precision(reference, test)
    rec = recall(reference, test)
    if prec is None or rec is None:
        return None
    if prec == 0 or rec == 0:
        return 0
    denominator = alpha / prec + (1 - alpha) / rec
    return 1.0 / denominator
def log_likelihood(reference, test):
    """
    Return the average log likelihood of the values in ``reference``
    under the corresponding probability distributions in ``test``.

    :param reference: A list of reference values
    :type reference: list
    :param test: A list of probability distributions over values to
        compare against the corresponding reference values.
    :type test: list(ProbDistI)
    :raise ValueError: If the two lists differ in length.
    """
    if len(reference) != len(test):
        raise ValueError("Lists must have the same length.")
    # Average of dist.logprob(val) over all (value, distribution) pairs.
    pairs = izip(reference, test)
    total_likelihood = sum(dist.logprob(val) for (val, dist) in pairs)
    return total_likelihood / len(reference)
def approxrand(a, b, **kwargs):
    """
    Returns an approximate significance level between two lists of
    independently generated test values.
    Approximate randomization calculates significance by randomly drawing
    from a sample of the possible permutations. At the limit of the number
    of possible permutations, the significance level is exact. The
    approximate significance level is the sample mean number of times the
    statistic of the permutated lists varies from the actual statistic of
    the unpermuted argument lists.
    :return: a tuple containing an approximate significance level, the count
             of the number of times the pseudo-statistic varied from the
             actual statistic, and the number of shuffles
    :rtype: tuple
    :param a: a list of test values
    :type a: list
    :param b: another list of independently generated test values
    :type b: list

    Keyword arguments: ``shuffles`` (int, default 999), ``statistic``
    (callable on a list, default: the mean), ``verbose`` (bool).
    """
    shuffles = kwargs.get('shuffles', 999)
    # there's no point in trying to shuffle beyond all possible permutations
    # NOTE(review): the reduce computes (len(a)+len(b))!, i.e. the count of
    # all orderings of the pooled sample.
    shuffles = \
        min(shuffles, reduce(operator.mul, xrange(1, len(a) + len(b) + 1)))
    stat = kwargs.get('statistic', lambda lst: float(sum(lst)) / len(lst))
    verbose = kwargs.get('verbose', False)
    if verbose:
        print('shuffles: %d' % shuffles)
    actual_stat = fabs(stat(a) - stat(b))
    if verbose:
        print('actual statistic: %f' % actual_stat)
        print('-' * 60)
    # Tiny nonzero start value keeps the significance strictly positive.
    c = 1e-100
    # Pool both samples; permutations are realized by shuffling the indices
    # and splitting them back into two pseudo-samples of the original sizes.
    lst = LazyConcatenation([a, b])
    indices = list(range(len(a) + len(b)))
    for i in xrange(shuffles):
        if verbose and i % 10 == 0:
            print('shuffle: %d' % i)
        shuffle(indices)
        pseudo_stat_a = stat(LazyMap(lambda i: lst[i], indices[:len(a)]))
        pseudo_stat_b = stat(LazyMap(lambda i: lst[i], indices[len(a):]))
        pseudo_stat = fabs(pseudo_stat_a - pseudo_stat_b)
        if pseudo_stat >= actual_stat:
            c += 1
        if verbose and i % 10 == 0:
            print('pseudo-statistic: %f' % pseudo_stat)
            print('significance: %f' % (float(c + 1) / (i + 1)))
            print('-' * 60)
    # +1 smoothing so the reported level is never exactly zero.
    significance = float(c + 1) / (shuffles + 1)
    if verbose:
        print('significance: %f' % significance)
        if betai:
            for phi in [0.01, 0.05, 0.10, 0.15, 0.25, 0.50]:
                print("prob(phi<=%f): %f" % (phi, betai(c, shuffles, phi)))
    return (significance, c, shuffles)
def demo():
    """Demonstrate accuracy, precision, recall and f-measure on a small
    part-of-speech tagging example."""
    rule = '-' * 75
    print(rule)
    reference = 'DET NN VB DET JJ NN NN IN DET NN'.split()
    test = 'DET VB VB DET NN NN NN IN DET NN'.split()
    print('Reference =', reference)
    print('Test =', test)
    print('Accuracy:', accuracy(reference, test))
    print(rule)
    reference_set = set(reference)
    test_set = set(test)
    print('Reference =', reference_set)
    print('Test = ', test_set)
    print('Precision:', precision(reference_set, test_set))
    print(' Recall:', recall(reference_set, test_set))
    print('F-Measure:', f_measure(reference_set, test_set))
    print(rule)
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
|
{
"content_hash": "99a275c6af9fe207db2108f717513b19",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 88,
"avg_line_length": 34.4027149321267,
"alnum_prop": 0.6410627383927398,
"repo_name": "bbengfort/TextBlob",
"id": "3ec544ab7e37cf2b2147ef5ae76b6e30fe931b39",
"size": "7858",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "textblob/nltk/metrics/scores.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
# Imports gradient definitions.
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
def _clip(params, ids, max_norm):
  """Helper function for _embedding_lookup_and_transform.
  This function optionally clips embeddings to an l2-norm of max_norm.
  Args:
    params: A `Tensor` of embeddings retrieved by `gather`.
    ids: The `ids` argument that was passed to `gather`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value.
  Returns:
    A `Tensor` with the same type as `params`.
  """
  # No clipping requested: pass the embeddings through untouched.
  if max_norm is None:
    return params

  def _static_or_dynamic_rank(tensor):
    """Returns (rank, is_static) where rank is an int or an int Tensor."""
    static_rank = ops.convert_to_tensor(tensor).get_shape().ndims
    if static_rank:
      return static_rank, True
    return array_ops.rank(tensor), False

  # The embedding axes are those of params beyond the ids axes; clip the
  # l2-norm over exactly those trailing axes.
  ids_rank, ids_static = _static_or_dynamic_rank(ids)
  params_rank, params_static = _static_or_dynamic_rank(params)
  if ids_static and params_static:
    clip_axes = list(range(ids_rank, params_rank))
  else:
    clip_axes = math_ops.range(ids_rank, params_rank)
  return clip_ops.clip_by_norm(params, max_norm, axes=clip_axes)
def _embedding_lookup_and_transform(params,
                                    ids,
                                    partition_strategy="mod",
                                    name=None,
                                    max_norm=None,
                                    transform_fn=None):
  """Helper function for embedding_lookup and _compute_sampled_logits.
  This function is a generalization of embedding_lookup that optionally
  applies a caller-specified transformation to each embedding. This is
  done through the `transform_fn` argument. If provided, the function is
  applied to each partitioned tensor of retrieved embeddings, colocated
  with the embeddings. This function will be called with a single `Tensor`
  argument of the same type as the `params` tensor and should return a
  `Tensor`. The shape of the argument will be the same as `params` except
  for the size of the first dimension. The first dimension of the result's
  shape must be the same size as the argument's.
  Args:
    params: See embedding_lookup.
    ids: See embedding_lookup.
    partition_strategy: See embedding_lookup.
    name: See embedding_lookup.
    max_norm: See embedding_lookup.
    transform_fn: An optional function to apply to each retrieved embedding.
      If max_norm is provided, transform_fn is applied to the norm-limited
      embeddings.
  Returns:
    See embedding_lookup for details.
  Raises:
    ValueError: If `params` is empty.
  """
  if params is None or params in ((), []):
    raise ValueError("Need at least one param")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)  # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  with ops.name_scope(name, "embedding_lookup", params + [ids]) as name:
    # NOTE: this local `np` is the partition count, not numpy.
    np = len(params)  # Number of partitions
    # Preserve the resource variable status to avoid accidental dense reads.
    if not any(
        isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
      params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
    ids = ops.convert_to_tensor(ids, name="ids")
    if np == 1 and (not transform_fn or ids.get_shape().ndims == 1):
      # Fast path: single partition and no need to flatten ids.
      with ops.colocate_with(params[0]):
        result = _clip(array_ops.gather(params[0], ids, name=name),
                       ids, max_norm)
        if transform_fn:
          result = transform_fn(result)
      # Make sure the final result does not have colocation constraints on the
      # params. Similar to the case np > 1 where parallel_dynamic_stitch is
      # outside the scope of all with ops.colocate_with(params[p]).
      return array_ops.identity(result)
    else:
      # Flatten the ids. There are two cases where we need to do this.
      # - There is more than one params tensor.
      # - There is a transform_fn and ids is not statically known to be 1-D.
      #   We must flatten in this case because transform_fn expects a flat
      #   tensor of embeddings.
      flat_ids = array_ops.reshape(ids, [-1])
      original_indices = math_ops.range(array_ops.size(flat_ids))
      # Create p_assignments and set new_ids depending on the strategy.
      if partition_strategy == "mod":
        p_assignments = flat_ids % np
        new_ids = flat_ids // np
      elif partition_strategy == "div":
        # Compute num_total_ids as the sum of dim-0 of params, then assign to
        # partitions based on a constant number of ids per partition. Optimize
        # if we already know the full shape statically.
        dim_0_size = tensor_shape.Dimension(tensor_shape.dimension_value(
            params[0].get_shape()[0]))
        for p in xrange(1, np):
          dim_0_size += tensor_shape.Dimension(tensor_shape.dimension_value(
              params[p].get_shape()[0]))
        if dim_0_size.value:
          num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
        else:
          # Fall back to a runtime sum of the partition sizes, computing each
          # size colocated with its partition.
          dim_0_sizes = []
          for p in xrange(np):
            param_p_dim = tensor_shape.dimension_value(params[p].get_shape()[0])
            if param_p_dim is not None:
              dim_0_sizes.append(param_p_dim)
            else:
              with ops.colocate_with(params[p]):
                dim_0_sizes.append(array_ops.shape(params[p])[0])
          num_total_ids = math_ops.reduce_sum(
              math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
        # The first `extras` partitions hold one extra id each.
        ids_per_partition = num_total_ids // np
        extras = num_total_ids % np
        p_assignments = math_ops.maximum(
            flat_ids // (ids_per_partition + 1),
            (flat_ids - extras) // ids_per_partition)
        # Emulate a conditional using a boolean indicator tensor
        new_ids = array_ops.where(p_assignments < extras,
                                  flat_ids % (ids_per_partition + 1),
                                  (flat_ids - extras) % ids_per_partition)
      else:
        raise ValueError("Unrecognized partition strategy: " +
                         partition_strategy)
      # Cast partition assignments to int32 for use in dynamic_partition.
      # There really should not be more than 2^32 partitions.
      p_assignments = math_ops.cast(p_assignments, dtypes.int32)
      # Partition list of ids based on assignments into np separate lists
      gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
      # Similarly, partition the original indices.
      pindices = data_flow_ops.dynamic_partition(original_indices,
                                                 p_assignments, np)
      # Do np separate lookups, finding embeddings for plist[p] in params[p]
      partitioned_result = []
      for p in xrange(np):
        pids = gather_ids[p]
        with ops.colocate_with(params[p]):
          result = array_ops.gather(params[p], pids)
          if transform_fn:
            # If transform_fn is provided, the clip_by_norm precedes
            # the transform and hence must be co-located. See below
            # for the counterpart if transform_fn is not provided.
            result = transform_fn(_clip(result, pids, max_norm))
          partitioned_result.append(result)
      # Stitch these back together
      ret = data_flow_ops.parallel_dynamic_stitch(
          pindices, partitioned_result, name=name)
      # Determine the static element shape.
      if transform_fn is None:
        element_shape_s = params[0].get_shape()[1:]
        for p in params[1:]:
          element_shape_s = element_shape_s.merge_with(p.get_shape()[1:])
      else:
        element_shape_s = ret.get_shape()[1:]
      # Compute the dynamic element shape.
      if element_shape_s.is_fully_defined():
        element_shape_d = element_shape_s
      elif transform_fn is None:
        # It's important that we compute params[0].shape on the right device
        # to avoid data motion.
        with ops.colocate_with(params[0]):
          params_shape = array_ops.shape(params[0])
        element_shape_d = params_shape[1:]
      else:
        element_shape_d = array_ops.shape(ret)[1:]
      # Reshape to reverse the flattening of ids.
      ret = array_ops.reshape(ret,
                              array_ops.concat(
                                  [array_ops.shape(ids), element_shape_d], 0))
      # Normally the reshape is sufficient, but setting shape explicitly
      # teaches shape inference that params[1:].get_shape() matters
      # (in the case that transform_fn is None).
      ret.set_shape(ids.get_shape().concatenate(element_shape_s))
      if not transform_fn:
        # If transform_fn was provided, the clip_by_norm was done above.
        ret = _clip(ret, ids, max_norm)
      return ret
@tf_export(v1=["nn.embedding_lookup"])
def embedding_lookup(
    params,
    ids,
    partition_strategy="mod",
    name=None,
    validate_indices=True,  # pylint: disable=unused-argument
    max_norm=None):
  """Looks up `ids` in a list of embedding tensors.

  Thin wrapper around `_embedding_lookup_and_transform` with no post-lookup
  transform. `params` is interpreted as a partitioning of a large embedding
  tensor: a single tensor, a list of tensors sharded along dimension 0, or a
  `PartitionedVariable` as returned by `tf.get_variable()` with a partitioner.

  With `len(params) > 1`, each id in `ids` is routed to a shard according to
  `partition_strategy`:

  * `"mod"`: id `i` goes to partition `i % len(params)`. For example, 13 ids
    over 5 partitions: `[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`.
  * `"div"`: ids are assigned contiguously. For example, 13 ids over 5
    partitions: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.

  In either strategy, when the id space does not divide evenly, each of the
  first `(max_id + 1) % len(params)` partitions receives one more id.

  The looked-up rows are stitched into a dense tensor of shape
  `shape(ids) + shape(params)[1:]`.

  Args:
    params: A single embedding tensor, a list of P sharded tensors agreeing on
      every dimension but the first, or a `PartitionedVariable` partitioned
      along dimension 0. Each element must be appropriately sized for the
      given `partition_strategy`.
    ids: An `int32` or `int64` `Tensor` containing the ids to look up.
    partition_strategy: `"mod"` (default) or `"div"`; only relevant when
      `len(params) > 1`.
    name: A name for the operation (optional).
    validate_indices: DEPRECATED and ignored. On CPU, out-of-range indices are
      always validated; on GPU they yield safe but unspecified behavior,
      which may include raising an error.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value.

  Returns:
    A `Tensor` with the same type as the tensors in `params`.

  Raises:
    ValueError: If `params` is empty.
  """
  # No transform_fn: a plain lookup (with optional norm clipping).
  return _embedding_lookup_and_transform(
      params=params,
      ids=ids,
      partition_strategy=partition_strategy,
      name=name,
      max_norm=max_norm,
      transform_fn=None)
@tf_export("nn.embedding_lookup", v1=[])
def embedding_lookup_v2(
    params,
    ids,
    max_norm=None,
    name=None):
  """Looks up `ids` in a list of embedding tensors.

  This performs parallel lookups on the list of tensors in `params`; it is a
  generalization of `tf.gather`, where `params` is interpreted as a
  partitioning of a large embedding tensor. `params` may also be a
  `PartitionedVariable` as returned by `tf.get_variable()` with a partitioner.

  If `len(params) > 1`, each element `id` of `ids` is partitioned between the
  elements of `params`. If the id space does not evenly divide the number of
  partitions, each of the first `(max_id + 1) % len(params)` partitions is
  assigned one more id.

  The partition strategy is always `"div"` currently: ids are assigned to
  partitions contiguously. For instance, 13 ids are split across 5 partitions
  as: `[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`.

  The results are concatenated into a dense tensor of shape
  `shape(ids) + shape(params)[1:]`.

  Args:
    params: A single tensor representing the complete embedding tensor, a list
      of P tensors all of same shape except for the first dimension
      (sharded embedding tensors), or a `PartitionedVariable` created by
      partitioning along dimension 0. Each element must be appropriately
      sized for the 'div' partition strategy.
    ids: A `Tensor` with type `int32` or `int64` containing the ids to be
      looked up in `params`.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params`.

  Raises:
    ValueError: If `params` is empty.
  """
  # Delegate to the v1 API, pinning the strategy to "div".
  return embedding_lookup(
      params=params,
      ids=ids,
      partition_strategy="div",
      name=name,
      max_norm=max_norm)
@tf_export(v1=["nn.embedding_lookup_sparse"])
def embedding_lookup_sparse(params,
                            sp_ids,
                            sp_weights,
                            partition_strategy="mod",
                            name=None,
                            combiner=None,
                            max_norm=None):
  """Computes embeddings for the given ids and weights.
  This op assumes that there is at least one id for each row in the dense tensor
  represented by sp_ids (i.e. there are no rows with empty features), and that
  all the indices of sp_ids are in canonical row-major order.
  It also assumes that all id values lie in the range [0, p0), where p0
  is the sum of the size of params along dimension 0.
  Args:
    params: A single tensor representing the complete embedding tensor,
      or a list of P tensors all of same shape except for the first dimension,
      representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
      and M is arbitrary.
    sp_weights: either a `SparseTensor` of float / double weights, or `None` to
      indicate all weights should be taken to be 1. If specified, `sp_weights`
      must have exactly the same shape and indices as `sp_ids`.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: Optional name for the op.
    combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
      and "sum" are supported.
      "sum" computes the weighted sum of the embedding results for each row.
      "mean" is the weighted sum divided by the total weight.
      "sqrtn" is the weighted sum divided by the square root of the sum of the
      squares of the weights.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value, before combining.
  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.
    In other words, if
    `shape(combined params) = [p0, p1, ..., pm]`
    and
    `shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]`
    then
    `shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`.
    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
    ```python
    [0, 0]: id 1, weight 2.0
    [0, 1]: id 3, weight 0.5
    [1, 0]: id 0, weight 1.0
    [2, 3]: id 1, weight 3.0
    ```
    with `combiner`="mean", then the output will be a 3x20 matrix where
    ```python
    output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
    output[1, :] = (params[0, :] * 1.0) / 1.0
    output[2, :] = (params[1, :] * 3.0) / 3.0
    ```
  Raises:
    TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
      neither `None` nor `SparseTensor`.
    ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
  # Normalize `params` to a flat Python list of shards.
  if isinstance(params, variables.PartitionedVariable):
    params = list(params) # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sp_ids, sparse_tensor.SparseTensor):
    raise TypeError("sp_ids must be SparseTensor")
  ignore_weights = sp_weights is None
  if not ignore_weights:
    if not isinstance(sp_weights, sparse_tensor.SparseTensor):
      raise TypeError("sp_weights must be either None or SparseTensor")
    # Statically check that ids and weights line up entry-for-entry.
    sp_ids.values.get_shape().assert_is_compatible_with(
        sp_weights.values.get_shape())
    sp_ids.indices.get_shape().assert_is_compatible_with(
        sp_weights.indices.get_shape())
    sp_ids.dense_shape.get_shape().assert_is_compatible_with(
        sp_weights.dense_shape.get_shape())
    # TODO(yleon): Add enhanced node assertions to verify that sp_ids and
    # sp_weights have equal indices and shapes.
  with ops.name_scope(name, "embedding_lookup_sparse",
                      params + [sp_ids]) as name:
    # The first coordinate of each sparse index is the row (segment) the
    # entry belongs to; the segment ops below reduce within these rows.
    segment_ids = sp_ids.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    ids = sp_ids.values
    # De-duplicate ids so each embedding row is gathered at most once;
    # `idx` maps every original entry back to its row in `embeddings`.
    ids, idx = array_ops.unique(ids)
    embeddings = embedding_lookup(
        params, ids, partition_strategy=partition_strategy, max_norm=max_norm)
    # Accumulate in float32 when the embeddings are half precision.
    if embeddings.dtype in (dtypes.float16, dtypes.bfloat16):
      embeddings = math_ops.to_float(embeddings)
    if not ignore_weights:
      weights = sp_weights.values
      if weights.dtype != embeddings.dtype:
        weights = math_ops.cast(weights, embeddings.dtype)
      # Undo the de-duplication so weights apply to each original entry.
      embeddings = array_ops.gather(embeddings, idx)
      # Reshape weights to allow broadcast
      ones = array_ops.fill(
          array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
      bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
                                             0)
      orig_weights_shape = weights.get_shape()
      weights = array_ops.reshape(weights, bcast_weights_shape)
      # Set the weight shape, since after reshaping to bcast_weights_shape,
      # the shape becomes None.
      if embeddings.get_shape().ndims is not None:
        weights.set_shape(
            orig_weights_shape.concatenate(
                [1 for _ in range(embeddings.get_shape().ndims - 1)]))
      embeddings *= weights
      if combiner == "sum":
        embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
      elif combiner == "mean":
        embeddings = math_ops.segment_sum(embeddings, segment_ids)
        weight_sum = math_ops.segment_sum(weights, segment_ids)
        embeddings = math_ops.div(embeddings, weight_sum, name=name)
      elif combiner == "sqrtn":
        embeddings = math_ops.segment_sum(embeddings, segment_ids)
        weights_squared = math_ops.pow(weights, 2)
        weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
        weight_sum_sqrt = math_ops.sqrt(weight_sum)
        embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
      else:
        assert False, "Unrecognized combiner"
    else:
      # No weights: the sparse segment ops fuse the gather-by-idx and the
      # per-row reduction into a single op.
      assert idx is not None
      if combiner == "sum":
        embeddings = math_ops.sparse_segment_sum(
            embeddings, idx, segment_ids, name=name)
      elif combiner == "mean":
        embeddings = math_ops.sparse_segment_mean(
            embeddings, idx, segment_ids, name=name)
      elif combiner == "sqrtn":
        embeddings = math_ops.sparse_segment_sqrt_n(
            embeddings, idx, segment_ids, name=name)
      else:
        assert False, "Unrecognized combiner"
    return embeddings
@tf_export("nn.embedding_lookup_sparse", v1=[])
def embedding_lookup_sparse_v2(params,
                               sp_ids,
                               sp_weights,
                               combiner=None,
                               max_norm=None,
                               name=None):
  """Computes embeddings for the given ids and weights.

  This op assumes that there is at least one id for each row in the dense
  tensor represented by sp_ids (i.e. there are no rows with empty features),
  that all the indices of sp_ids are in canonical row-major order, and that
  all id values lie in the range [0, p0), where p0 is the sum of the size of
  params along dimension 0. The partition strategy used for the underlying
  lookup is always `"div"`.

  Args:
    params: A single tensor representing the complete embedding tensor, a
      list of P tensors all of same shape except for the first dimension
      (sharded embedding tensors), or a `PartitionedVariable` created by
      partitioning along dimension 0. Each element must be appropriately
      sized for ``"div"`` `partition_strategy`.
    sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
      and M is arbitrary.
    sp_weights: either a `SparseTensor` of float / double weights, or `None`
      to indicate all weights should be taken to be 1. If specified,
      `sp_weights` must have exactly the same shape and indices as `sp_ids`.
    combiner: A string specifying the reduction op: "sum" is the weighted sum
      of the embedding results for each row; "mean" is the weighted sum
      divided by the total weight; "sqrtn" is the weighted sum divided by the
      square root of the sum of the squares of the weights.
    max_norm: If not `None`, each embedding is clipped if its l2-norm is
      larger than this value, before combining.
    name: Optional name for the op.

  Returns:
    A dense tensor of the combined embeddings: for each row of `sp_ids`, the
    embeddings of all ids in that row are looked up, multiplied by their
    weights, and combined as specified. If
    `shape(combined params) = [p0, p1, ..., pm]` and
    `shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]` then
    `shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`.

    For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are

    ```python
    [0, 0]: id 1, weight 2.0
    [0, 1]: id 3, weight 0.5
    [1, 0]: id 0, weight 1.0
    [2, 3]: id 1, weight 3.0
    ```

    with `combiner`="mean", then the output will be a 3x20 matrix where

    ```python
    output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
    output[1, :] = (params[0, :] * 1.0) / 1.0
    output[2, :] = (params[1, :] * 3.0) / 3.0
    ```

  Raises:
    TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
      neither `None` nor `SparseTensor`.
    ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum"}.
  """
  # Delegate to the v1 API with the strategy pinned to "div".
  return embedding_lookup_sparse(
      params,
      sp_ids,
      sp_weights,
      partition_strategy="div",
      name=name,
      combiner=combiner,
      max_norm=max_norm)
@tf_export("nn.safe_embedding_lookup_sparse", v1=[])
def safe_embedding_lookup_sparse_v2(embedding_weights,
                                    sparse_ids,
                                    sparse_weights=None,
                                    combiner="mean",
                                    default_id=None,
                                    max_norm=None,
                                    name=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embeddings in `embedding_weights` must all share every
  dimension but the first; the first dimension may vary because the
  vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
  may also be a `PartitionedVariable` as returned by `tf.get_variable()`
  with a partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any
  IDs with non-positive weight. For an entry with no features, the embedding
  vector for `default_id` is returned, or the 0-vector if `default_id` is
  not supplied. The ids and weights may be multi-dimensional; embeddings are
  always aggregated along the last dimension.

  Note: the underlying lookup uses the "div" partition strategy. Support for
  other partition strategies will be added later.

  Args:
    embedding_weights: A list of `P` float `Tensor`s or values representing
      partitioned embedding `Tensor`s, or a `PartitionedVariable` created by
      partitioning along dimension 0. The total unpartitioned shape should
      be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size and
      `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
      float weights corresponding to `sparse_ids`, or `None` if all weights
      are be assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_id: The id to use for an entry with no features.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm
      before combining.
    name: A name for this operation (optional).

  Returns:
    Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  # Delegate to the v1 API, pinning the partition strategy to "div".
  return safe_embedding_lookup_sparse(
      embedding_weights,
      sparse_ids,
      sparse_weights=sparse_weights,
      combiner=combiner,
      default_id=default_id,
      name=name,
      partition_strategy="div",
      max_norm=max_norm)
@tf_export(v1=["nn.safe_embedding_lookup_sparse"])
def safe_embedding_lookup_sparse(embedding_weights,
                                 sparse_ids,
                                 sparse_weights=None,
                                 combiner='mean',
                                 default_id=None,
                                 name=None,
                                 partition_strategy='div',
                                 max_norm=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.
  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
  may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
  partitioner.
  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
  The ids and weights may be multi-dimensional. Embeddings are always aggregated
  along the last dimension.
  Args:
    embedding_weights: A list of `P` float `Tensor`s or values representing
      partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
      created by partitioning along dimension 0. The total unpartitioned
      shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
      vocab size and `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
      float weights corresponding to `sparse_ids`, or `None` if all weights
      are be assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy.
      Currently `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
      combining.
  Returns:
    Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  if embedding_weights is None:
    raise ValueError('Missing embedding_weights %s.' % embedding_weights)
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights) # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError('Missing embedding_weights %s.' % embedding_weights)
  dtype = sparse_weights.dtype if sparse_weights is not None else None
  # Convert raw values to tensors matching the weights' dtype; resource
  # variables with a compatible dtype are kept as-is to avoid forcing a read.
  embedding_weights = [
      w if (isinstance(w, resource_variable_ops.ResourceVariable)
            and dtype in (None, w.dtype))
      else ops.convert_to_tensor(w, dtype=dtype)
      for w in embedding_weights
  ]
  with ops.name_scope(name, 'embedding_lookup',
                      embedding_weights + [sparse_ids,
                                           sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    original_rank_dim = tensor_shape.dimension_value(
        sparse_ids.dense_shape.get_shape()[0])
    # Fall back to the dynamic rank when the static rank is unknown.
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim is None
        else original_rank_dim)
    # Collapse all leading dimensions into one: [d_0 * ... * d_{n-1}, d_n].
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)])
    if sparse_weights is not None:
      # Rebuild the weights on the reshaped indices so they stay aligned.
      sparse_weights = sparse_tensor.SparseTensor(
          sparse_ids.indices,
          sparse_weights.values, sparse_ids.dense_shape)
    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
    if combiner != 'sum':
      sparse_ids, sparse_weights = _prune_invalid_weights(
          sparse_ids, sparse_weights)
    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
                                                                 default_id or
                                                                 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
    result = embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        name=None if default_id is None else scope,
        max_norm=max_norm)
    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops.stack([1, array_ops.shape(result)[1]]))
      # Zero out rows that had no features (they were filled with dummy id 0).
      result = array_ops.where(is_row_empty,
                               array_ops.zeros_like(result),
                               result,
                               name=scope)
    # Reshape back from linear ids back into higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    # Teach shape inference the static rank of the restored result.
    final_result.set_shape(tensor_shape.unknown_shape(
        (tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate(
            result.get_shape()[1:]))
    return final_result
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Drop entries with a negative id from both ids and weights.

  The retain mask is computed from the id values. When weights are present,
  the mask is and-ed with an all-true tensor shaped like the weights' values
  (a no-op on the mask values, presumably kept for shape validation --
  TODO confirm), and the same mask prunes both tensors.
  """
  keep_mask = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is None:
    return sparse_ops.sparse_retain(sparse_ids, keep_mask), None
  keep_mask = math_ops.logical_and(
      keep_mask,
      array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
  pruned_ids = sparse_ops.sparse_retain(sparse_ids, keep_mask)
  pruned_weights = sparse_ops.sparse_retain(sparse_weights, keep_mask)
  return pruned_ids, pruned_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Drop entries with non-positive weight (<= 0) from ids and weights.

  No-op when `sparse_weights` is None.
  """
  if sparse_weights is None:
    return sparse_ids, sparse_weights
  positive_mask = math_ops.greater(sparse_weights.values, 0)
  return (sparse_ops.sparse_retain(sparse_ids, positive_mask),
          sparse_ops.sparse_retain(sparse_weights, positive_mask))
|
{
"content_hash": "ce17952e8db01e1022cebb94311b1595",
"timestamp": "",
"source": "github",
"line_count": 832,
"max_line_length": 81,
"avg_line_length": 42.60216346153846,
"alnum_prop": 0.6440682747919312,
"repo_name": "ageron/tensorflow",
"id": "55f9a9ea0ba699befb0b01dd9d94cc7057abec04",
"size": "36134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/embedding_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
from django.db.models import CharField
from django.db.models.functions import Upper
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class UpperTests(TestCase):
    """Tests for the `Upper` database function."""

    def test_basic(self):
        """Upper works both as an annotation and in an UPDATE expression."""
        Author.objects.create(name="John Smith", alias="smithj")
        Author.objects.create(name="Rhonda")
        annotated = Author.objects.annotate(upper_name=Upper("name"))
        self.assertQuerysetEqual(
            annotated.order_by("name"),
            ["JOHN SMITH", "RHONDA"],
            lambda a: a.upper_name,
        )
        # Persist the upper-cased names, then verify annotation and stored
        # value now agree.
        Author.objects.update(name=Upper("name"))
        self.assertQuerysetEqual(
            annotated.order_by("name"),
            [
                ("JOHN SMITH", "JOHN SMITH"),
                ("RHONDA", "RHONDA"),
            ],
            lambda a: (a.upper_name, a.name),
        )

    def test_transform(self):
        """Upper registered as a lookup enables `name__upper__exact` filters."""
        with register_lookup(CharField, Upper):
            Author.objects.create(name="John Smith", alias="smithj")
            Author.objects.create(name="Rhonda")
            matches = Author.objects.filter(name__upper__exact="JOHN SMITH")
            self.assertQuerysetEqual(
                matches.order_by("name"),
                ["John Smith"],
                lambda a: a.name,
            )
|
{
"content_hash": "06a73f105bef054caac75b1dfa8c3461",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 32.395348837209305,
"alnum_prop": 0.5384063173007897,
"repo_name": "MarkusH/django",
"id": "728ea8226504a6e69074f8f85233d4ef6158452a",
"size": "1393",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tests/db_functions/text/test_upper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "89800"
},
{
"name": "HTML",
"bytes": "238228"
},
{
"name": "JavaScript",
"bytes": "147868"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "16079477"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "392"
}
],
"symlink_target": ""
}
|
'''
Created on May 20, 2015
This module implements a simple crawler that can start from a given url and collect all urls
and linked content iteratively. You have to specify the number of pages to download.
Use python3 crawler.py --help for help.
If you want to use it as an API look at the Crawler class.
@author: Daniil Sorokin<sorokin@ukp.informatik.tu-darmstadt.de>
'''
from urllib.request import urlopen
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import mysouputils
from identify_language import LangIdentifier
import codecs,time,re,argparse,os
import random
from collections import OrderedDict
# Parameters
my_encoding = "utf-8"  # Encoding used for every file the crawler writes.
should_log = False  # If True, print per-URL progress to stdout.
data_folder = "crawler-data/"  # Root folder for all crawler output.
save_content_to = data_folder + "content/"  # Downloaded pages (html/ and text/ subfolders).
save_logs_to = data_folder + "logs/"  # CSV logs of visited and extracted links.
class PriorityQueue:
    ''' Describes a queue where elements are sorted according to a score.
    If sort parameter is set to False the queue functions as a simple Set.
    '''
    def __init__(self, sort_by_score=False):
        # Maps element -> score. Insertion order is preserved, so with
        # sorting disabled the queue behaves FIFO.
        self._l_scores = OrderedDict()
        self._queue = []
        self._sort = sort_by_score

    def add_all(self, elements, score=None):
        ''' Add elements with the given score; re-adding an element updates
        its score. '''
        self._l_scores.update([(element, score) for element in elements])
        self._queue = sorted(self._l_scores.keys(), key = lambda x: self._l_scores[x]) if self._sort else list(self._l_scores.keys())

    def update(self, tuples):
        ''' Add (element, score) tuples back into the queue. '''
        self._l_scores.update(tuples)
        self._queue = sorted(self._l_scores.keys(), key = lambda x: self._l_scores[x]) if self._sort else list(self._l_scores.keys())

    def pop(self, rand_item = False):
        ''' Remove and return an (element, score) tuple: the head of the
        queue, or a uniformly random element if rand_item is True. '''
        if rand_item:
            element = random.choice(self._queue)
            # BUG FIX: the randomly chosen element must also be removed from
            # the queue list; previously it stayed in self._queue and a later
            # pop could return it again, raising KeyError on the score dict.
            self._queue.remove(element)
        else:
            element = self._queue.pop(0)
        score = self._l_scores[element]
        del self._l_scores[element]
        return (element, score)

    def __len__(self):
        return len(self._queue)
class Crawler:
    ''' Main class that runs a crawler. There are only two public methods:
    run() and add_to_queue(). The latter should be used to add the starting link
    before calling run(). '''
    def __init__(self, server_lock=0.1, language=None):
        '''Create a crawler.

        server_lock -- minimal interval (in sec.) between two requests to
                       the same server.
        language -- if given, pages are scored with a language identifier
                    and links from pages in that language are preferred.
        '''
        self._servers = {} # Servers that were already visited, each mapped to a timestamp of the last visit
        self._visited = set() # Set of visited URLs
        self._log_extracted_counter = 1
        self._failed_access_counter = 0
        self._server_lock = server_lock
        if language: # Load language identifier and substitute queue for a priority queue if there is a language given
            self._lang_focus = language
            self._langid = LangIdentifier("lang_id_data/langs/")
            self._queue = PriorityQueue(sort_by_score=True)
        else:
            self._lang_focus = None
            self._langid = None
            self._queue = PriorityQueue()
        # Create the output directory layout on first use.
        if not os.path.exists(data_folder): os.makedirs(data_folder)
        if not os.path.exists(save_content_to): os.makedirs(save_content_to)
        if not os.path.exists(save_content_to + "html/"): os.makedirs(save_content_to + "html/")
        if not os.path.exists(save_content_to + "text/"): os.makedirs(save_content_to + "text/")
        if not os.path.exists(save_logs_to): os.makedirs(save_logs_to)
        self._content_folder = save_content_to
        self._log_visited = codecs.open(save_logs_to + "visited_links.log", "a", encoding=my_encoding)
        self._log_visited.write("id,url,urls-extracted\n")
        self._log_extracted = codecs.open(save_logs_to + "extracted_links.log", "a", encoding=my_encoding)
        self._log_extracted.write("id,url,source-url\n")
    def run(self, limit, url_prefix_filter=None, url_filters=None, random_item = False):
        '''Crawl until `limit` pages have been visited or the queue is empty.

        url_prefix_filter -- keep only urls starting with this prefix.
        url_filters -- drop urls whose path contains any of these substrings.
        random_item -- pick a random queue element instead of breadth-first.
        '''
        self._print_stat()
        while len(self._visited) < limit and len(self._queue) > 0:
            current_url = self._get_from_queue(random_item)
            if should_log: print("{},{},".format(len(self._visited), current_url), end="")
            soup = self._openUrl(current_url)
            if soup:
                urls = mysouputils.get_urls_from_soup(soup)
                urls = mysouputils.canonize_and_filter_urls(current_url, urls)
                if url_prefix_filter:
                    urls = [url for url in urls if url.startswith(url_prefix_filter)]
                if url_filters:
                    urls = [url for url in urls if not any(url_filter in urlparse(url).path for url_filter in url_filters)]
                if self._lang_focus:
                    # BUG FIX: score against the requested focus language;
                    # previously the language code was hard-coded to "de",
                    # ignoring the `language` constructor argument.
                    l_score = self._langid.get_score(mysouputils.get_content_from_soup(soup), self._lang_focus)
                    self.add_to_queue(urls, l_score)
                else:
                    self.add_to_queue(urls)
                if should_log: print("{}".format(len(urls)))
                self._log(current_url, urls)
                url_id = len(self._visited)
                self._save_content(soup, current_url, url_id)
            if len(self._visited) % 100 == 0: self._print_stat()
    def add_to_queue(self, urls, score=0.0):
        '''Add not-yet-visited urls to the queue with the given score.'''
        self._queue.add_all([url for url in urls if url not in self._visited], score)
    def _get_from_queue(self, random_item=False):
        '''Pop the next url, respecting the per-server access interval.

        Urls whose server was contacted less than `self._server_lock` seconds
        ago are set aside (at most 100) and put back into the queue afterwards.
        '''
        url = self._queue.pop(random_item)
        server = urlparse(url[0]).netloc
        collected = []
        while len(self._queue) > 0 and len(collected) < 100 and server in self._servers and time.time() - self._servers[server] < self._server_lock:
            if should_log: print("Locked: {}, try another".format(server))
            collected.append(url)
            url = self._queue.pop(random_item)
            server = urlparse(url[0]).netloc
        self._servers[server] = time.time()
        self._queue.update(collected)
        return url[0]
    def _openUrl(self, url):
        '''Download a url and parse it; return a soup, or None on failure.'''
        self._visited.add(url)
        try:
            with urlopen(url, timeout=5) as d:
                soup = BeautifulSoup(d.read())
                return soup
        # A bare `except:` would also swallow SystemExit/KeyboardInterrupt;
        # catching Exception keeps the crawler interruptible with Ctrl-C.
        except Exception:
            self._failed_access_counter += 1
            return None
    def _print_stat(self):
        '''Print a one-line progress summary.'''
        print("No_visited: {}, Queue_size: {}, Failed access: {}".format(len(self._visited), len(self._queue), self._failed_access_counter))
    def _log(self, visited_url, extracted_urls):
        '''Append the visited url and its extracted links to the CSV logs.'''
        try:
            self._log_visited.write("{},{},{}\n".format(len(self._visited), visited_url, len(extracted_urls)))
            for url in extracted_urls:
                self._log_extracted.write("{},{},{}\n".format(self._log_extracted_counter, url, visited_url))
                self._log_extracted_counter += 1
        except Exception: # Logging is best-effort; never abort the crawl.
            print("Can't log. Problem: " + visited_url)
    def _save_content(self, soup, url, url_id, save_text=True):
        '''Save the raw html and, optionally, the extracted text of a page.'''
        try:
            str_url = re.sub("\W","_", urlparse(url).netloc + "_" + str(url_id))
            with codecs.open(self._content_folder + "html/" + str_url, "w", encoding=my_encoding) as f:
                f.write(str(soup))
            if save_text:
                with codecs.open(self._content_folder + "text/" + str_url, "w", encoding=my_encoding) as f:
                    f.write(mysouputils.get_content_from_soup(soup))
        except Exception: # Saving is best-effort; never abort the crawl.
            print("Can't save content. Problem: " + url)
if __name__ == '__main__':
    # Command-line entry point: crawl params.n pages starting at params.start.
    started_at = time.perf_counter()
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', type=int, help="Number of pages to download.", default=3000)
    parser.add_argument('-s', type=float, help="Access interval to the same server (in sec.).", default=0.1)
    parser.add_argument('-l', type=str, default=None,
                        help="Language to focus on. Web pages in this language will be preferred.")
    parser.add_argument('-p', type=str, default=None,
                        help="Prefix to filter out urls in the queue. Only urls that start with the given prefix will be considered.")
    parser.add_argument('-f', type=str, nargs="*", default=None,
                        help="Filter out urls that contain given symbols in the path (not server name).")
    parser.add_argument('-r', action='store_true', help="Always choose a random link from the queue, otherwise breadth search.")
    parser.add_argument('start', type=str, help="Start url.")
    params = parser.parse_args()
    crawler = Crawler(params.s, language=params.l)
    crawler.add_to_queue([params.start], 1.0)
    crawler.run(params.n, params.p, params.f, params.r)
    finished_at = time.perf_counter()
    print("Elapsed time: " + str(finished_at - started_at))
|
{
"content_hash": "3bd9d359ac39bae090ca2cd731555805",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 151,
"avg_line_length": 46.29100529100529,
"alnum_prop": 0.5920676648759858,
"repo_name": "daniilsorokin/Web-Mining-Exercises",
"id": "b140a11778177dbb4f54cb4bafed2f96ef9ef361",
"size": "8749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73422"
},
{
"name": "Shell",
"bytes": "2308"
}
],
"symlink_target": ""
}
|
"""Test KNX scene."""
from homeassistant.components.knx.const import KNX_ADDRESS
from homeassistant.components.knx.schema import SceneSchema
from homeassistant.const import CONF_ENTITY_CATEGORY, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_registry import (
async_get_registry as async_get_entity_registry,
)
from .conftest import KNXTestKit
async def test_activate_knx_scene(hass: HomeAssistant, knx: KNXTestKit):
    """Test KNX scene."""
    # Configure one scene entity bound to group address 1/1/1 with
    # scene number 24 and a diagnostic entity category.
    await knx.setup_integration(
        {
            SceneSchema.PLATFORM: [
                {
                    CONF_NAME: "test",
                    SceneSchema.CONF_SCENE_NUMBER: 24,
                    KNX_ADDRESS: "1/1/1",
                    CONF_ENTITY_CATEGORY: EntityCategory.DIAGNOSTIC,
                },
            ]
        }
    )
    # Exactly one entity (the scene) should have been created.
    assert len(hass.states.async_all()) == 1
    registry = await async_get_entity_registry(hass)
    entity = registry.async_get("scene.test")
    assert entity.entity_category is EntityCategory.DIAGNOSTIC
    # unique_id is "<group address>_<scene number>".
    assert entity.unique_id == "1/1/1_24"
    await hass.services.async_call(
        "scene", "turn_on", {"entity_id": "scene.test"}, blocking=True
    )
    # assert scene was called on bus
    # 0x17 == 23 — scene numbers are presumably sent zero-based on the bus.
    await knx.assert_write("1/1/1", (0x17,))
|
{
"content_hash": "326bd3e766a76b39271fad47eda79718",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 32.853658536585364,
"alnum_prop": 0.6473645137342242,
"repo_name": "home-assistant/home-assistant",
"id": "37e4ac12728aea715e6dde0caade3714011dc2ae",
"size": "1347",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/knx/test_scene.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
# Module-level logger shared by the plugin.
log = logging.getLogger('duplicates')
class Duplicates(object):
    """
    Take action on entries with duplicate field values

    Example::

      duplicates:
        field: <field name>
        action: [accept|reject]
    """

    schema = {
        'type': 'object',
        'properties': {'field': {'type': 'string'}, 'action': {'enum': ['accept', 'reject']}},
        'required': ['field', 'action'],
        'additionalProperties': False,
    }

    def on_task_filter(self, task, config):
        """Accept or reject every entry whose field value appears on another entry."""
        field_name = config['field']
        chosen_action = config['action']
        for candidate in task.entries:
            candidate_value = candidate.get(field_name)
            if candidate_value is None:
                # Entries without the field can never collide.
                continue
            for other in task.entries:
                if candidate == other:
                    continue
                if other.get(field_name) == candidate_value:
                    msg = 'Field {} value {} equals on {} and {}'.format(
                        field_name, candidate_value, candidate['title'], other['title']
                    )
                    if chosen_action == 'accept':
                        candidate.accept(msg)
                    else:
                        candidate.reject(msg)
@event('plugin.register')
def register_plugin():
    """Register the plugin under the config name 'duplicates' (api_ver=2)."""
    plugin.register(Duplicates, 'duplicates', api_ver=2)
|
{
"content_hash": "808eb4cbfe10530da590e53710000551",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 94,
"avg_line_length": 29.29787234042553,
"alnum_prop": 0.5374001452432825,
"repo_name": "gazpachoking/Flexget",
"id": "3390dd5035222f680aad81597089600eb3e1b5e7",
"size": "1377",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/plugins/filter/duplicates.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "36113"
},
{
"name": "JavaScript",
"bytes": "133743"
},
{
"name": "Python",
"bytes": "1494170"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
from django.conf import settings
from django.core.cache import cache
from django.core.validators import ValidationError
from django.utils import translation
import mock
from nose.tools import eq_, assert_raises, raises
from amo.utils import (cache_ns_key, escape_all, find_language,
LocalFileStorage, no_translation, resize_image,
rm_local_tmp_dir, slugify, slug_validator, to_language)
from product_details import product_details
# Greek sample string used throughout the module to exercise non-ASCII input.
u = u'Ελληνικά'
def test_slug_validator():
    """Unicode slugs validate; dotted, spaced and slashed inputs raise."""
    eq_(slug_validator(u.lower()), None)
    eq_(slug_validator('-'.join([u.lower(), u.lower()])), None)
    for bad_slug in ('234.add', 'a a a', 'tags/'):
        assert_raises(ValidationError, slug_validator, bad_slug)
def test_slugify():
    """slugify output matches expectations and itself passes slug validation."""
    double = '-'.join([u, u])
    spaced = ' - '.join([u, u])

    def verify(raw, expected):
        eq_(slugify(raw), expected)
        slug_validator(slugify(raw))

    cases = [('xx x - "#$@ x', 'xx-x-x'),
             (u'Bän...g (bang)', u'bäng-bang'),
             (u, u.lower()),
             (double, double.lower()),
             (spaced, double.lower()),
             (' a ', 'a'),
             ('tags/', 'tags'),
             ('holy_wars', 'holy_wars'),
             # I don't really care what slugify returns. Just don't crash.
             (u'x荿', u'x\u837f'),
             (u'ϧ蒬蓣', u'\u03e7\u84ac\u84e3'),
             (u'¿x', u'x'),
             ]
    for raw, expected in cases:
        yield verify, raw, expected
def test_resize_image():
    """resize_image must refuse to write over its own source file."""
    # src and dst shouldn't be the same.
    assert_raises(Exception, resize_image, 't', 't', 'z')
def test_resize_transparency():
    """Resizing a transparent PNG produces the expected reference image."""
    src = os.path.join(settings.ROOT, 'apps', 'amo', 'tests',
                       'images', 'transparent.png')
    dest = tempfile.mkstemp(dir=settings.TMP_PATH)[1]
    expected = src.replace('.png', '-expected.png')
    try:
        resize_image(src, dest, (32, 32), remove_src=False, locally=True)
        # Byte-for-byte comparison against the checked-in expected image.
        with open(dest) as dfh, open(expected) as efh:
            assert dfh.read() == efh.read()
    finally:
        if os.path.exists(dest):
            os.remove(dest)
def test_to_language():
    """to_language normalizes various locale spellings."""
    cases = (('en-us', 'en-US'),
             ('en_US', 'en-US'),
             ('en_us', 'en-US'),
             ('FR', 'fr'),
             ('el', 'el'))

    def verify(raw, expected):
        eq_(to_language(raw), expected)

    for raw, expected in cases:
        yield verify, raw, expected
def test_find_language():
    """find_language maps locale spellings onto known languages."""
    cases = (('en-us', 'en-US'),
             ('en_US', 'en-US'),
             ('en', 'en-US'),
             ('cy', 'cy'),  # A hidden language.
             ('FR', 'fr'),
             ('es-ES', None),  # We don't go from specific to generic.
             ('xxx', None))

    def verify(raw, expected):
        eq_(find_language(raw), expected)

    for raw, expected in cases:
        yield verify, raw, expected
def test_spotcheck():
    """Check a couple product-details files to make sure they're available."""
    # Greek must resolve both to its English and native names.
    languages = product_details.languages
    eq_(languages['el']['English'], 'Greek')
    eq_(languages['el']['native'], u'Ελληνικά')
    # A known release date from the firefox history data set.
    eq_(product_details.firefox_history_major_releases['1.0'], '2004-11-09')
def test_no_translation():
    """
    `no_translation` provides a context where only the default
    language is active.
    """
    lang = translation.get_language()
    translation.activate('pt-br')
    with no_translation():
        # Inside the bare context the settings default is active.
        eq_(translation.get_language(), settings.LANGUAGE_CODE)
    # The previously-active language is restored on exit.
    eq_(translation.get_language(), 'pt-br')
    with no_translation('es'):
        # An explicit language argument overrides the default.
        eq_(translation.get_language(), 'es')
    eq_(translation.get_language(), 'pt-br')
    translation.activate(lang)
class TestLocalFileStorage(unittest.TestCase):
    """Tests for LocalFileStorage against a throw-away temp directory."""
    def setUp(self):
        self.tmp = tempfile.mkdtemp()
        self.stor = LocalFileStorage()
    def tearDown(self):
        rm_local_tmp_dir(self.tmp)
    def test_read_write(self):
        """Round-trip a small string through the storage."""
        fn = os.path.join(self.tmp, 'somefile.txt')
        with self.stor.open(fn, 'w') as fd:
            fd.write('stuff')
        with self.stor.open(fn, 'r') as fd:
            eq_(fd.read(), 'stuff')
    def test_non_ascii_filename(self):
        """Non-ASCII file names are handled."""
        fn = os.path.join(self.tmp, u'Ivan Krsti\u0107.txt')
        with self.stor.open(fn, 'w') as fd:
            fd.write('stuff')
        with self.stor.open(fn, 'r') as fd:
            eq_(fd.read(), 'stuff')
    def test_non_ascii_content(self):
        """UTF-8 encoded content round-trips unchanged."""
        fn = os.path.join(self.tmp, 'somefile.txt')
        with self.stor.open(fn, 'w') as fd:
            fd.write(u'Ivan Krsti\u0107.txt'.encode('utf8'))
        with self.stor.open(fn, 'r') as fd:
            eq_(fd.read().decode('utf8'), u'Ivan Krsti\u0107.txt')
    def test_make_file_dirs(self):
        """Opening a file for writing creates missing parent directories."""
        dp = os.path.join(self.tmp, 'path', 'to')
        self.stor.open(os.path.join(dp, 'file.txt'), 'w').close()
        assert os.path.exists(self.stor.path(dp)), (
            'Directory not created: %r' % dp)
    def test_do_not_make_file_dirs_when_reading(self):
        """Opening for reading must not create directories."""
        fpath = os.path.join(self.tmp, 'file.txt')
        with open(fpath, 'w') as fp:
            fp.write('content')
        # Make sure this doesn't raise an exception.
        self.stor.open(fpath, 'r').close()
    def test_make_dirs_only_once(self):
        """A second write into an existing directory must not fail."""
        dp = os.path.join(self.tmp, 'path', 'to')
        with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fd:
            fd.write('stuff')
        # Make sure it doesn't try to make the dir twice
        with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fd:
            fd.write('stuff')
        with self.stor.open(os.path.join(dp, 'file.txt'), 'r') as fd:
            eq_(fd.read(), 'stuff')
    def test_delete_empty_dir(self):
        """delete() removes an empty directory."""
        dp = os.path.join(self.tmp, 'path')
        os.mkdir(dp)
        self.stor.delete(dp)
        eq_(os.path.exists(dp), False)
    @raises(OSError)
    def test_cannot_delete_non_empty_dir(self):
        """delete() on a non-empty directory raises OSError."""
        dp = os.path.join(self.tmp, 'path')
        with self.stor.open(os.path.join(dp, 'file.txt'), 'w') as fp:
            fp.write('stuff')
        self.stor.delete(dp)
    def test_delete_file(self):
        """delete() removes the file but keeps its parent directory."""
        dp = os.path.join(self.tmp, 'path')
        fn = os.path.join(dp, 'file.txt')
        with self.stor.open(fn, 'w') as fp:
            fp.write('stuff')
        self.stor.delete(fn)
        eq_(os.path.exists(fn), False)
        eq_(os.path.exists(dp), True)
class TestCacheNamespaces(unittest.TestCase):
    """Tests for cache_ns_key namespace-versioned cache keys."""
    def setUp(self):
        cache.clear()
        self.namespace = 'redis-is-dead'
    @mock.patch('amo.utils.epoch')
    def test_no_preexisting_key(self, epoch_mock):
        """A fresh namespace key is derived from the current epoch."""
        epoch_mock.return_value = 123456
        eq_(cache_ns_key(self.namespace), '123456:ns:%s' % self.namespace)
    @mock.patch('amo.utils.epoch')
    def test_no_preexisting_key_incr(self, epoch_mock):
        """increment=True on a missing key still yields the epoch-based key."""
        epoch_mock.return_value = 123456
        eq_(cache_ns_key(self.namespace, increment=True),
            '123456:ns:%s' % self.namespace)
    @mock.patch('amo.utils.epoch')
    def test_key_incr(self, epoch_mock):
        """increment=True bumps the stored namespace version by one."""
        epoch_mock.return_value = 123456
        cache_ns_key(self.namespace)  # Sets ns to 123456
        ns_key = cache_ns_key(self.namespace, increment=True)
        expected = '123457:ns:%s' % self.namespace
        eq_(ns_key, expected)
        eq_(cache_ns_key(self.namespace), expected)
def test_escape_all():
    """escape_all HTML-escapes markup and leaves plain unicode untouched."""
    double = '-'.join([u, u])
    spaced = ' - '.join([u, u])

    def verify(raw, expected):
        eq_(escape_all(raw), expected)

    # All I ask: Don't crash me, bro.
    cases = [
        ('<script>alert("BALL SO HARD")</script>',
         '&lt;script&gt;alert("BALL SO HARD")&lt;/script&gt;'),
        (u'Bän...g (bang)', u'Bän...g (bang)'),
        (u, u),
        (double, double),
        (spaced, spaced),
        (u'x荿', u'x\u837f'),
        (u'ϧ蒬蓣', u'\u03e7\u0383\u84ac\u84e3'),
        (u'¿x', u'¿x'),
    ]
    for raw, expected in cases:
        yield verify, raw, expected
|
{
"content_hash": "487450449e5c6d5c545da9c60f3dddcb",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 78,
"avg_line_length": 31.63855421686747,
"alnum_prop": 0.5548362528560549,
"repo_name": "wagnerand/zamboni",
"id": "08ff6bb279fd7a01695f7ecca0a764c541087bc5",
"size": "7941",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "apps/amo/tests/test_amo_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "884290"
},
{
"name": "JavaScript",
"bytes": "1677558"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6284101"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
}
|
import os
import pandas as pd
import requests
from datetime import datetime
from furl import furl
# Base URL of the SQuaSH REST API; override via the SQUASH_API_URL environment
# variable (defaults to a local development server).
SQUASH_API_URL = os.environ.get('SQUASH_API_URL',
                                'http://localhost:8000/dashboard/api/')
def get_endpoint_urls():
    """
    Lookup API endpoint URLs

    Returns the mapping of endpoint names to URLs served at the API root.
    Raises requests.HTTPError on a non-2xx response.
    """
    response = requests.get(SQUASH_API_URL)
    response.raise_for_status()
    return response.json()
def get_data(endpoint, params=None):
    """Return data as a dict from an API endpoint.

    *endpoint* is a name looked up via get_endpoint_urls(); *params* are
    forwarded as query-string arguments.
    """
    endpoints = get_endpoint_urls()
    # e.g. http://localhost:8000/AMx?ci_id=1&ci_dataset=cfht&metric=AM1
    response = requests.get(endpoints[endpoint], params=params)
    response.raise_for_status()
    return response.json()
def get_data_as_pandas_df(endpoint, params=None):
    """
    Return data as a pandas dataframe from an API endpoint.

    The JSON payload is loaded with the keys as the index and transposed,
    so the keys become the dataframe columns.
    """
    payload = get_data(endpoint, params)
    return pd.DataFrame.from_dict(payload, orient='index').transpose()
def get_datasets(default=None):
    """Get the list of dataset names from the API plus a default value.

    Returns
    -------
    dict with keys:
    datasets : list
        list of dataset names
    default : str
        the API default, overridden by *default* when it names a known dataset
    """
    datasets = get_data('datasets')
    selected = get_data('defaults')['ci_dataset']
    if default and default in datasets:
        selected = default
    return {'datasets': datasets, 'default': selected}
def get_metrics(default=None):
    """Get the list of metric names from the API plus a default value.

    Returns
    -------
    dict with keys:
    metrics : list
        list of metric names
    default : str
        the API default, overridden by *default* when it names a known metric
    """
    payload = get_data('metrics')
    names = [entry['metric'] for entry in payload['results']]
    selected = get_data('defaults')['metric']
    if default and default in names:
        selected = default
    return {'metrics': names, 'default': selected}
def get_value(specs, name):
    """Helper to unpack metric specification values.

    Parameters
    ----------
    specs: list
        a list of dicts, each with keys ``name`` and ``value``
    name: str
        the spec name

    Return
    ------
    value: float or None
        value of the first matching spec if it exists, None otherwise
    """
    return next((spec['value'] for spec in specs if spec['name'] == name), None)
def get_specs(name):
    """Get a metric's unit, description and specification thresholds by name.

    Parameters
    ----------
    name: str
        a valid metric name

    Returns
    -------
    dict with keys:
    unit : str
        metric unit
    description : str
        metric description
    minimum, design, stretch : float or None
        specification thresholds, None when not defined
    """
    payload = get_data('metrics')
    unit = ''
    description = ''
    spec_list = []
    minimum = design = stretch = None
    for metric in payload['results']:
        if metric['metric'] == name:
            unit = metric['unit']
            description = metric['description']
            # NOTE(review): eval() on API-supplied data is dangerous if the
            # API is not fully trusted — consider ast.literal_eval instead.
            spec_list = eval(str(metric['specs']))
            break
    if spec_list:
        minimum = get_value(spec_list, 'minimum')
        design = get_value(spec_list, 'design')
        stretch = get_value(spec_list, 'stretch')
    return {'unit': unit, 'description': description,
            'minimum': minimum, 'design': design, 'stretch': stretch}
def get_url_args(doc, defaults=None):
    """Return url args recovered from the django_full_path cookie in the
    bokeh request header.

    Starts from the API defaults; *defaults* (if given) override those, and
    valid url query arguments override both. The bokeh app name is taken
    from the second segment of the url path.
    """
    args = get_data('defaults')
    # overwrite api default values
    if defaults:
        for key in defaults:
            args[key] = defaults[key]
    request = doc().session_context.request
    if request and 'django_full_path' in request.cookies:
        full_path = request.cookies['django_full_path'].value
        query_args = furl(full_path).args
        for key in query_args:
            # overwrite default values with those passed as url args,
            # making sure the url arg (key) is valid
            if key in args:
                args[key] = query_args[key]
        # the bokeh app name is the second segment of the url path
        args['bokeh_app'] = furl(full_path).path.segments[1]
    return args
# TODO: these functions are used by the monitor app and need refactoring
def get_initial_page(page_size, num_pages, window):
    """Return the first result page to fetch for the requested time window.

    Each measurement is assumed to be CI_TIME_INTERVAL hours apart, so one
    page of *page_size* results spans page_size * CI_TIME_INTERVAL hours.
    'weeks' keeps one week, 'months' at most three months, 'years' at most
    one year; any other window keeps everything. The result is clamped to 1
    when the window exceeds the available pages.
    """
    CI_TIME_INTERVAL = 8
    hours_per_page = page_size * CI_TIME_INTERVAL
    window_hours = {
        'weeks': 24 * 7,
        'months': 24 * 30 * 3,
        'years': 24 * 365,
    }
    if window in window_hours:
        first_page = num_pages - int(window_hours[window] / hours_per_page)
    else:
        # everything
        first_page = 1
    # Make sure we have enough pages for the input time window
    return max(first_page, 1)
def get_meas_by_dataset_and_metric(selected_dataset, selected_metric, window):
    """ Get measurements for a given dataset and metric from the measurements
    api endpoint

    Parameters
    ----------
    selected_dataset : str
        the current selected dataset
    selected_metric : str
        the current selected metric
    window : str
        time window passed to get_initial_page() ('weeks', 'months',
        'years' or anything else for everything)

    Returns
    -------
    dict with keys:
    ci_ids : list
        list of job ids from the CI system
    dates : list
        list of datetimes for each job measurement
    values : list
        list of measurement values
    ci_urls : list
        list of URLs for the jobs in the CI system
    names : list
        per-measurement lists of changed package names
    git_urls : list
        per-measurement lists of links to the package commits
    """
    api = get_endpoint_urls()
    # http://localhost:8000/dashboard/api/measurements/?job__ci_dataset=cfht&metric=AM1
    r = requests.get(api['measurements'],
                     params={'job__ci_dataset': selected_dataset,
                             'metric': selected_metric})
    r.raise_for_status()
    results = r.json()
    # results are paginated, walk through each page
    # TODO: figure out how to retrieve the number of pages in DRF
    count = results['count']
    page_size = len(results['results'])
    measurements = []
    if page_size > 0:
        # ceiling integer
        num_pages = int(count/page_size) + (count % page_size > 0)
        initial_page = get_initial_page(page_size, num_pages, window)
        for page in range(initial_page, num_pages + 1):
            r = requests.get(
                api['measurements'],
                params={'job__ci_dataset': selected_dataset,
                        'metric': selected_metric,
                        'page': page})
            r.raise_for_status()
            measurements.extend(r.json()['results'])
    ci_ids = [int(m['ci_id']) for m in measurements]
    # 2016-08-10T05:22:37.700146Z
    # after DM-7517 jobs return is sorted by date and the same is done for
    # the measurements
    dates = [datetime.strptime(m['date'], '%Y-%m-%dT%H:%M:%S.%fZ')
             for m in measurements]
    values = [m['value'] for m in measurements]
    ci_urls = [m['ci_url'] for m in measurements]
    packages = [m['changed_packages'] for m in measurements]
    # list of package names, name is the first element in the tuple
    names = []
    for i, sublist in enumerate(packages):
        names.append([])
        for package in sublist:
            names[i].append(package[0])
    # list of git urls, git package commit sha and base url are the second and
    # third elements in the tuple
    git_urls = []
    for i, sublist in enumerate(packages):
        git_urls.append([])
        for package in sublist:
            # BUG FIX: str.strip('.git') strips any trailing '.', 'g', 'i'
            # or 't' characters (e.g. 'repo_git.git' -> 'repo_'); remove the
            # '.git' suffix explicitly instead.
            base_url = package[2]
            if base_url.endswith('.git'):
                base_url = base_url[:-len('.git')]
            git_urls[i].append("{}/commit/{}".format(base_url, package[1]))
    return {'ci_ids': ci_ids, 'dates': dates, 'values': values,
            'ci_urls': ci_urls, 'names': names, 'git_urls': git_urls}
|
{
"content_hash": "b96508c08c0fa4f4a3c679f47c673b97",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 87,
"avg_line_length": 26.5,
"alnum_prop": 0.5880302848215359,
"repo_name": "lsst-sqre/qa-dashboard",
"id": "f711e50db3b1e0ae05786cda0ee212109cd501ae",
"size": "8321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "squash/dashboard/viz/api_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "133"
},
{
"name": "HTML",
"bytes": "8535"
},
{
"name": "Makefile",
"bytes": "98"
},
{
"name": "Python",
"bytes": "75162"
},
{
"name": "Shell",
"bytes": "338"
}
],
"symlink_target": ""
}
|
import codecs
import collections
import copy
import csv
import datetime
import logging
import os
import re
import shutil
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
from king_phisher import archive
from king_phisher import ipaddress
from king_phisher import serializers
from king_phisher import utilities
from king_phisher.errors import KingPhisherInputValidationError
from boltons import iterutils
import dateutil.tz
import geojson
from smoke_zephyr.utilities import escape_single_quote
from smoke_zephyr.utilities import unescape_single_quote
import xlsxwriter
# Public API of this module.
__all__ = (
	'campaign_to_xml',
	'convert_value',
	'liststore_export',
	'liststore_to_csv',
	'liststore_to_xlsx_worksheet',
	'message_data_to_kpm'
)
# Mapping of message-config keys to the member names used for their files
# inside a KPM archive.
KPM_ARCHIVE_FILES = {
	'attachment_file': 'message_attachment.bin',
	'target_file': 'target_file.csv'
}
# Matches {{ inline_image('path') }} template tags with a single- or
# double-quoted (possibly escaped) path as group 1.
KPM_INLINE_IMAGE_REGEXP = re.compile(r"""{{\s*inline_image\(\s*(('(?:[^'\\]|\\.)+')|("(?:[^"\\]|\\.)+"))\s*\)\s*}}""")
logger = logging.getLogger('KingPhisher.Client.export')
# Per-worksheet rendering options for XLSX exports (column widths and title).
XLSXWorksheetOptions = collections.namedtuple('XLSXWorksheetOptions', ('column_widths', 'title'))
def message_template_to_kpm(template):
	"""Rewrite inline_image() tags in *template* to use base file names and
	collect the original file paths for archiving.

	Returns a (template, files) tuple where *files* lists the referenced
	file paths in order of appearance.
	"""
	referenced_files = []
	position = 0
	while True:
		match = KPM_INLINE_IMAGE_REGEXP.search(template[position:])
		if not match:
			break
		file_path = unescape_single_quote(match.group(1)[1:-1])
		referenced_files.append(file_path)
		file_name = os.path.basename(file_path)
		start = position + match.start()
		end = position + match.end()
		replacement = "{{{{ inline_image('{0}') }}}}".format(escape_single_quote(file_name))
		template = template[:start] + replacement + template[end:]
		# Resume searching just after the rewritten tag.
		position = start + len(replacement)
	return template, referenced_files
def message_template_from_kpm(template, files):
	"""Rewrite inline_image() tags in *template* so that archived base file
	names point back to the extracted file paths in *files*.

	Tags referencing unknown file names are left untouched.
	"""
	path_by_name = dict(zip(map(os.path.basename, files), files))
	position = 0
	while True:
		match = KPM_INLINE_IMAGE_REGEXP.search(template[position:])
		if not match:
			break
		file_name = unescape_single_quote(match.group(1)[1:-1])
		file_path = path_by_name.get(file_name)
		start = position + match.start()
		end = position + match.end()
		if not file_path:
			# Unknown attachment; keep the tag as-is and continue after it.
			position = end
			continue
		replacement = "{{{{ inline_image('{0}') }}}}".format(escape_single_quote(file_path))
		template = template[:start] + replacement + template[end:]
		position = start + len(replacement)
	return template
def convert_value(table_name, key, value):
	"""
	Perform any conversions necessary to neatly display the data in XML format.

	:param str table_name: The table name that the key and value pair are from.
	:param str key: The data key.
	:param value: The data value to convert.
	:return: The converted value.
	:rtype: str
	"""
	# datetime values are rendered in ISO-8601; everything else except None
	# is stringified. table_name and key are part of the call contract but
	# unused by the current conversion rules.
	if isinstance(value, datetime.datetime):
		value = value.isoformat()
	return None if value is None else str(value)
def campaign_to_xml(rpc, campaign_id, xml_file, encoding='utf-8'):
	"""
	Load all information for a particular campaign and dump it to an XML file.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str xml_file: The destination file for the XML data.
	:param str encoding: The encoding to use for strings.
	"""
	tzutc = dateutil.tz.tzutc()
	root = ET.Element('king_phisher')
	# Generate export metadata
	metadata = ET.SubElement(root, 'metadata')
	serializers.to_elementtree_subelement(
		metadata,
		'timestamp',
		datetime.datetime.utcnow().replace(tzinfo=tzutc),
		attrib={'utc': 'true'}
	)
	serializers.to_elementtree_subelement(metadata, 'version', '1.3')
	campaign = ET.SubElement(root, 'campaign')
	campaign_info = rpc.remote_table_row('campaigns', campaign_id)
	# Campaign attributes become direct children of the <campaign> element;
	# datetimes are made timezone-aware (UTC) before serialization.
	for key, value in campaign_info._asdict().items():
		if isinstance(value, datetime.datetime):
			value = value.replace(tzinfo=tzutc)
		serializers.to_elementtree_subelement(campaign, key, value)
	# Tables with a campaign_id field
	for table_name in ('landing_pages', 'messages', 'visits', 'credentials', 'deaddrop_deployments', 'deaddrop_connections'):
		table_element = ET.SubElement(campaign, table_name)
		for table_row in rpc.remote_table(table_name, query_filter={'campaign_id': campaign_id}):
			# Each row element is named after the singular of the table name.
			table_row_element = ET.SubElement(table_element, table_name[:-1])
			for key, value in table_row._asdict().items():
				if isinstance(value, datetime.datetime):
					value = value.replace(tzinfo=tzutc)
				serializers.to_elementtree_subelement(table_row_element, key, value)
	# Pretty-print through minidom for a human-readable export file.
	document = minidom.parseString(ET.tostring(root))
	with open(xml_file, 'wb') as file_h:
		file_h.write(document.toprettyxml(indent='  ', encoding=encoding))
def campaign_credentials_to_msf_txt(rpc, campaign_id, target_file):
	"""
	Export credentials into a format that can easily be used with Metasploit's
	USERPASS_FILE option (one 'username password' pair per line).

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str target_file: The destination file for the credential data.
	"""
	query = {'campaign_id': campaign_id}
	with open(target_file, 'w') as file_h:
		for cred in rpc.remote_table('credentials', query_filter=query):
			file_h.write("{0} {1}\n".format(cred.username, cred.password))
def campaign_visits_to_geojson(rpc, campaign_id, geojson_file):
	"""
	Export the geo location information for all the visits of a campaign into
	the `GeoJSON <http://geojson.org/>`_ format.

	:param rpc: The connected RPC instance to load the information with.
	:type rpc: :py:class:`.KingPhisherRPCClient`
	:param campaign_id: The ID of the campaign to load the information for.
	:param str geojson_file: The destination file for the GeoJSON data.
	"""
	ips_for_georesolution = {}
	ip_counter = collections.Counter()
	for visit in rpc.remote_table('visits', query_filter={'campaign_id': campaign_id}):
		ip_counter.update((visit.visitor_ip,))
		visitor_ip = ipaddress.ip_address(visit.visitor_ip)
		# Only public IPv4 addresses are geo-resolved.
		if not isinstance(visitor_ip, ipaddress.IPv4Address):
			continue
		if visitor_ip.is_loopback or visitor_ip.is_private:
			continue
		# Keep the earliest first_visit time seen for each address.
		if not visitor_ip in ips_for_georesolution:
			ips_for_georesolution[visitor_ip] = visit.first_visit
		elif ips_for_georesolution[visitor_ip] > visit.first_visit:
			ips_for_georesolution[visitor_ip] = visit.first_visit
	# Sort addresses by earliest visit, then resolve in batches of 50 to
	# bound the size of each RPC call.
	ips_for_georesolution = [ip for (ip, _) in sorted(ips_for_georesolution.items(), key=lambda x: x[1])]
	locations = {}
	for ip_addresses in iterutils.chunked(ips_for_georesolution, 50):
		locations.update(rpc.geoip_lookup_multi(ip_addresses))
	points = []
	for ip, location in locations.items():
		# Skip locations without usable (non-zero) coordinates.
		if not (location.coordinates and location.coordinates[0] and location.coordinates[1]):
			continue
		points.append(geojson.Feature(geometry=location, properties={'count': ip_counter[ip], 'ip-address': ip}))
	feature_collection = geojson.FeatureCollection(points)
	with open(geojson_file, 'w') as file_h:
		serializers.JSON.dump(feature_collection, file_h, pretty=True)
def message_data_from_kpm(target_file, dest_dir, encoding='utf-8'):
	"""
	Retrieve the stored details describing a message from a previously exported
	file.

	:param str target_file: The file to load as a message archive.
	:param str dest_dir: The directory to extract data and attachment files to.
	:param str encoding: The encoding to use for strings.
	:return: The restored details from the message config.
	:rtype: dict
	:raises KingPhisherInputValidationError: If the archive is malformed or
		inconsistent with its message configuration.
	"""
	if not archive.is_archive(target_file):
		logger.warning('the file is not recognized as a valid archive')
		raise KingPhisherInputValidationError('file is not in the correct format')
	kpm = archive.ArchiveFile(target_file, 'r')
	attachment_member_names = [n for n in kpm.file_names if n.startswith('attachments' + os.path.sep)]
	attachments = []
	# message_config.json is the archive manifest; without it nothing else
	# can be interpreted.
	if not kpm.has_file('message_config.json'):
		logger.warning('the kpm archive is missing the message_config.json file')
		raise KingPhisherInputValidationError('data is missing from the message archive')
	message_config = kpm.get_data('message_config.json')
	message_config = message_config.decode(encoding)
	message_config = serializers.JSON.loads(message_config)
	if attachment_member_names:
		# Extract attachment members into dest_dir/attachments, keeping only
		# their base names.
		attachment_dir = os.path.join(dest_dir, 'attachments')
		if not os.path.isdir(attachment_dir):
			os.mkdir(attachment_dir)
		for file_name in attachment_member_names:
			arcfile_h = kpm.get_file(file_name)
			file_path = os.path.join(attachment_dir, os.path.basename(file_name))
			with open(file_path, 'wb') as file_h:
				shutil.copyfileobj(arcfile_h, file_h)
			attachments.append(file_path)
		logger.debug("extracted {0} attachment file{1} from the archive".format(len(attachments), 's' if len(attachments) > 1 else ''))
	for config_name, file_name in KPM_ARCHIVE_FILES.items():
		if not file_name in kpm.file_names:
			# A missing member is only an error when the config references it.
			if config_name in message_config:
				logger.warning("the kpm archive is missing the {0} file".format(file_name))
				raise KingPhisherInputValidationError('data is missing from the message archive')
			continue
		if not message_config.get(config_name):
			logger.warning("the kpm message configuration is missing the {0} setting".format(config_name))
			raise KingPhisherInputValidationError('data is missing from the message archive')
		arcfile_h = kpm.get_file(file_name)
		file_path = os.path.join(dest_dir, os.path.basename(message_config[config_name]))
		with open(file_path, 'wb') as file_h:
			shutil.copyfileobj(arcfile_h, file_h)
		# Re-point the config entry at the extracted location.
		message_config[config_name] = file_path
	if 'message_content.html' in kpm.file_names:
		if not 'html_file' in message_config:
			logger.warning('the kpm message configuration is missing the html_file setting')
			raise KingPhisherInputValidationError('data is missing from the message archive')
		arcfile_h = kpm.get_file('message_content.html')
		file_path = os.path.join(dest_dir, os.path.basename(message_config['html_file']))
		with open(file_path, 'wb') as file_h:
			# Rewrite inline_image() tags to reference the extracted
			# attachment paths before saving the html content.
			file_h.write(message_template_from_kpm(arcfile_h.read().decode(encoding), attachments).encode(encoding))
		message_config['html_file'] = file_path
	elif 'html_file' in message_config:
		logger.warning('the kpm archive is missing the message_content.html file')
		raise KingPhisherInputValidationError('data is missing from the message archive')
	kpm.close()
	return message_config
def message_data_to_kpm(message_config, target_file, encoding='utf-8'):
	"""
	Save details describing a message to the target file.

	:param dict message_config: The message details from the client configuration.
	:param str target_file: The file to write the data to.
	:param str encoding: The encoding to use for strings.
	"""
	# Work on a copy: file paths in the config are rewritten to base names
	# before being stored in the archive.
	message_config = copy.copy(message_config)
	kpm = archive.ArchiveFile(target_file, 'w')
	for config_name, file_name in KPM_ARCHIVE_FILES.items():
		if os.access(message_config.get(config_name, ''), os.R_OK):
			kpm.add_file(file_name, message_config[config_name])
			message_config[config_name] = os.path.basename(message_config[config_name])
			continue
		# Unreadable (or missing) file: drop the setting rather than store a
		# dangling reference.
		if len(message_config.get(config_name, '')):
			logger.info("the specified {0} '{1}' is not readable, the setting will be removed".format(config_name, message_config[config_name]))
		if config_name in message_config:
			del message_config[config_name]
	if os.access(message_config.get('html_file', ''), os.R_OK):
		with codecs.open(message_config['html_file'], 'r', encoding=encoding) as file_h:
			template = file_h.read()
		message_config['html_file'] = os.path.basename(message_config['html_file'])
		# Rewrite inline_image() tags to base names and archive the images
		# they reference under attachments/.
		template, attachments = message_template_to_kpm(template)
		logger.debug("identified {0} attachment file{1} to be archived".format(len(attachments), 's' if len(attachments) > 1 else ''))
		kpm.add_data('message_content.html', template)
		for attachment in attachments:
			if os.access(attachment, os.R_OK):
				kpm.add_file(os.path.join('attachments', os.path.basename(attachment)), attachment)
	else:
		if len(message_config.get('html_file', '')):
			logger.info("the specified html_file '{0}' is not readable, the setting will be removed".format(message_config['html_file']))
		if 'html_file' in message_config:
			del message_config['html_file']
	kpm.add_data('message_config.json', serializers.JSON.dumps(message_config, pretty=True))
	kpm.close()
	return
def _split_columns(columns):
    """
    Resolve a column-id-to-name mapping into two parallel tuples: the display
    names and the store column ids. An :py:class:`collections.OrderedDict`
    keeps its insertion order; any other mapping is ordered by sorted id.
    """
    if isinstance(columns, collections.OrderedDict):
        ordered_ids = list(columns.keys())
    else:
        ordered_ids = sorted(columns.keys())
    names = tuple(columns[column_id] for column_id in ordered_ids)
    return names, tuple(ordered_ids)
def liststore_export(store, columns, cb_write, cb_write_args, row_offset=0, write_columns=True):
    """
    Walk a list store and hand every row to an arbitrary callback so the data
    can be exported to different formats. The callback is invoked as::

        cb_write(row, column_values, *cb_write_args)

    :param store: The store to export the information from.
    :type store: :py:class:`Gtk.ListStore`
    :param dict columns: A dictionary mapping store column ids to the value names.
    :param function cb_write: The callback function to be called for each row of data.
    :param tuple cb_write_args: Additional arguments to pass to *cb_write*.
    :param int row_offset: A modifier value to add to the row numbers passed to *cb_write*.
    :param bool write_columns: Write the column names to the export.
    :return: The number of rows that were written.
    :rtype: int
    """
    # resolve the column order: insertion order for an OrderedDict, sorted ids otherwise
    if isinstance(columns, collections.OrderedDict):
        ordered_ids = tuple(columns.keys())
    else:
        ordered_ids = tuple(sorted(columns.keys()))
    header_values = tuple(columns[column_id] for column_id in ordered_ids)
    if write_columns:
        cb_write(0, header_values, *cb_write_args)
    rows_written = 0
    cursor = store.get_iter_first()
    while cursor:
        row_values = (store.get_value(cursor, column_id) for column_id in ordered_ids)
        cb_write(rows_written + 1 + row_offset, row_values, *cb_write_args)
        rows_written += 1
        cursor = store.iter_next(cursor)
    return rows_written
def _csv_write(row, columns, writer):
    """Emit one exported row to *writer*; *row* is unused for CSV output."""
    writer.writerow(list(columns))
def liststore_to_csv(store, target_file, columns):
    """
    Write the contents of a :py:class:`Gtk.ListStore` to a csv file.

    :param store: The store to export the information from.
    :type store: :py:class:`Gtk.ListStore`
    :param str target_file: The destination file for the CSV data.
    :param dict columns: A dictionary mapping store column ids to the value names.
    :return: The number of rows that were written.
    :rtype: int
    """
    # a context manager guarantees the handle is closed even when the export
    # raises part-way through (the original leaked it on error)
    with open(target_file, 'w') as target_file_h:
        writer = csv.writer(target_file_h, quoting=csv.QUOTE_ALL)
        rows = liststore_export(store, columns, _csv_write, (writer,))
    return rows
def _xlsx_write(row, columns, worksheet, row_format=None):
    """Write *columns* into worksheet row *row*, one cell per value."""
    col_index = 0
    for cell_value in columns:
        worksheet.write(row, col_index, cell_value, row_format)
        col_index += 1
def liststore_to_xlsx_worksheet(store, worksheet, columns, title_format, xlsx_options=None):
    """
    Write the contents of a :py:class:`Gtk.ListStore` to an XLSX worksheet.

    :param store: The store to export the information from.
    :type store: :py:class:`Gtk.ListStore`
    :param worksheet: The destination sheet for the store's data.
    :type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet`
    :param dict columns: A dictionary mapping store column ids to the value names.
    :param title_format: The format used for the merged title row.
    :type title_format: :py:class:`xlsxwriter.format.Format`
    :param xlsx_options: A collection of additional options for formatting the Excel Worksheet.
    :type xlsx_options: :py:class:`.XLSXWorksheetOptions`
    :return: The number of rows that were written.
    :rtype: int
    """
    utilities.assert_arg_type(worksheet, xlsxwriter.worksheet.Worksheet, 2)
    utilities.assert_arg_type(columns, dict, 3)
    utilities.assert_arg_type(title_format, xlsxwriter.format.Format, 4)
    # NOTE(review): this check runs even when xlsx_options is None, so
    # assert_arg_type presumably tolerates None — confirm.
    utilities.assert_arg_type(xlsx_options, XLSXWorksheetOptions, 5)
    if xlsx_options is None:
        # no explicit widths provided; give every column a uniform width
        worksheet.set_column(0, len(columns), 30)
    else:
        for column, width in enumerate(xlsx_options.column_widths):
            worksheet.set_column(column, column, width)
    column_names, _ = _split_columns(columns)
    if xlsx_options is None:
        start_row = 0
    else:
        # reserve the rows above the table for the merged title banner
        start_row = 2
        worksheet.merge_range(0, 0, 0, len(column_names) - 1, xlsx_options.title, title_format)
    # headers come from add_table() below, so the exporter skips them
    row_count = liststore_export(store, columns, _xlsx_write, (worksheet,), row_offset=start_row, write_columns=False)
    options = {
        'columns': list({'header': column_name} for column_name in column_names),
        'style': 'Table Style Medium 1'
    }
    worksheet.add_table(start_row, 0, row_count + start_row, len(column_names) - 1, options=options)
    # keep the header row (and title rows, if any) visible while scrolling
    worksheet.freeze_panes(1 + start_row, 0)
    return row_count
|
{
"content_hash": "8b3a9906a515963d181b80f300470d91",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 135,
"avg_line_length": 40.75369458128079,
"alnum_prop": 0.7331076997461622,
"repo_name": "hdemeyer/king-phisher",
"id": "5bd26482c667798702793fa012827049855d3d5d",
"size": "18128",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "king_phisher/client/export.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33168"
},
{
"name": "HTML",
"bytes": "552"
},
{
"name": "JavaScript",
"bytes": "1328"
},
{
"name": "Jupyter Notebook",
"bytes": "11394"
},
{
"name": "Mako",
"bytes": "574"
},
{
"name": "Python",
"bytes": "966857"
},
{
"name": "Ruby",
"bytes": "7629"
}
],
"symlink_target": ""
}
|
#Redacted information as requested.
#It does so by placing the data into a pandas dataframe and processing the data such that data types
# are correct and anomalous domains are removed.
#Of interest: since there was not much data - nor many features - natural language processing of
# the domain names was also conducted, checking whether each domain name had tokens contained
# in a created dictionary (of the most common tokens). It is assumed that a domain name is related to the
# type of website it represents, and thus offers insights into the amount spent for that domain.
#
#The various terms will be explained as if one were present at an online auction (one for buying the space/time to
# show your online ad)
#
#advertiser: identity of advertiser
#domain: website url (any given auction)
#num_seen: number of impressions seen (seeing an item at the auction)
#num_avail: number of impressions typically available at that domain. Not usually filled out.
#num_served: number of impressions served (number of items bought/won at the auction)
#num_visible: number of impressions actually visible on website (a percentage of num_served)
#total_spent: the amount of money spent at an auction
#total_ecp: "An estimate of a bid that is likely to win the impression from a given publisher based on
# observing historical bids." Calculated using AppNexus.
#Importing all necessary packages
import csv
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from wordsegment import segment
import nltk
from sklearn.model_selection import KFold, StratifiedKFold, cross_val_score, GridSearchCV, train_test_split
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, ElasticNetCV, LassoCV, LassoLarsCV
from sklearn.linear_model import Lasso, BayesianRidge
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
import xgboost as xgb
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.callbacks import ModelCheckpoint, EarlyStopping
#----------------------------------------------------------
"""Processing data and moving it into a pandas dataframe"""
#----------------------------------------------------------
#Reading data into list. Replace file path with custom path.
with open('custom_path.csv', 'r') as f:
    reader = csv.reader(f, dialect='excel', delimiter='\t')
    lst = list(reader)
#Removing tab-delimited characters.
# NOTE(review): csv.reader was already given delimiter='\t', so each row
# should arrive pre-split; splitting line[0] again suggests the source file
# may quote whole rows — confirm against the raw export.
data = []
for line in lst:
    temp = line[0].split('\t')
    data.append(temp)
#Reading data into a pandas dataframe.
# First row of the file holds the column headers.
headers = data.pop(0)
df = pd.DataFrame(data, columns=headers)
#----------------------------------------------------------
"""Processing dataframe datatypes and dropping irrelevant features"""
#----------------------------------------------------------
#df.dtypes shows that all elements are strings.
#Change numeric features to numeric and round floats to closest cent.
numeric_cols = ['num_seen', 'num_avail', 'num_served', 'num_visible', 'total_spent', 'total_ecp']
for col in numeric_cols:
    # errors='coerce' turns unparseable entries into NaN instead of raising
    df[col] = pd.to_numeric(df[col], errors='coerce')
df.total_spent = df.total_spent.round(decimals=2)
df.total_ecp = df.total_ecp.round(decimals=2)
#only advertiser for this is _______. Dropping since it doesn't add any additional information
df = df.drop('advertiser', axis=1)
#num_avail has 127/20899 (0.6%) of entries filled out. Dropping feature.
df = df.drop('num_avail', axis=1)
#----------------------------------------------------------
""" Dropping anomalous domains and cleaning domain name strings to tokenize.
Also taking top 160 most common tokens and making new dataframe 'data'
Finally adding numerical features from df to 'data' """
#----------------------------------------------------------
#Get rid of anomalous domains (domains that dont have served impressions nor a coherent domain name)
df.drop(df.index[[0,1,2,3,12,13,14,15,16,17,27,70,71, -1, -2, -3]], inplace=True)
#Getting rid of quotation marks in domain names
df.loc[df.domain == "'cbssports.com'", 'domain'] = "cbssports.com"
df.loc[df.domain == "'imdb.com'", 'domain'] = "imdb.com"
df.loc[df.domain == "-1.latineuro.com", 'domain'] = "latineuro.com"
#Get rid of signs in front of domain names, and generally cleaning up the names
#Getting rid of .com endings and using word segmentation on the domain names
domains = df.domain.tolist()
words = []
for i in range(0, len(domains)):
    # strip leading '.'/'$' artifacts, tracker prefixes, and the '.com' suffix
    if domains[i].startswith(('.', '$')):
        domains[i] = domains[i][1:]
    if domains[i].startswith('&referrer='):
        domains[i] = domains[i][10:]
    if domains[i].endswith('.com'):
        domains[i] = domains[i][:-4]
    words.append(segment(domains[i]))
# BUG FIX: df.drop() above left gaps in df's index, so assigning
# pd.Series(domains) (which carries a fresh 0..n-1 index) aligned by label,
# scrambling values and leaving NaNs. Assigning the plain list writes
# positionally, matching each cleaned name to its own row.
df['domain'] = domains
#Take top 160 features, with the cutoff being that the token is present in about at least 40 domain-names
wordlist = []
for blurbs in words:
    wordlist.extend(blurbs)
wordlist = nltk.FreqDist(wordlist)
mostcommon = wordlist.most_common(160)
common_words = []
for terms in mostcommon:
    common_words.append(terms[0])
# One boolean feature per common token: does the domain's segmentation contain it?
features = []
for i in range(0, len(words)): #for each set of words from each domainname
    word = set(words[i])
    temp = {}
    for feat in common_words: #for each token from the word_features
        temp[feat] = (feat in word)
    features.append(dict(temp))
data = pd.DataFrame(features)
# BUG FIX: align by position (.values) — 'data' has a fresh 0..n-1 index while
# df's index still has the gaps left by df.drop(), so the original label-aligned
# assignments shuffled rows and introduced NaNs.
data['num_seen'] = df['num_seen'].values
data['served'] = df['num_served'].values
data['num_visible'] = df['num_visible'].values
data['total_ecp'] = df['total_ecp'].values
data['total_spent'] = df['total_spent'].values
#Dropping any domains that don't have any served impressions.
#In hindsight, for larger datasets, it would be wiser to drop these before tokenizing, but for
# a preliminary analysis, it should be sufficient.
data = data[data.total_ecp != 0]
data = data[pd.notnull(data['total_ecp'])]
#----------------------------------------------------------
""" Applying regression techniques to create a predictive model..."""
#----------------------------------------------------------
# NOTE(review): the two assignments below were redacted at the source and are
# not valid Python as written; X (feature matrix) and Y (target vector) must
# be restored before this script can run.
X = #Redacted information as requested.
Y = #Redacted information as requested.
#We split our data to have unseen data to test against later.
# Could have just done cross-validation on the whole set, but this is better practice.
X_train, X_test, Y_train, Y_test = train_test_split(X,Y, train_size=.8)
#Since not all of our token features should be important, trying out lasso and elasticNet first
#Also trying ridge, SVR, ensembles, and boosting.
#Spot-checking not shown.
#Lasso
# Grid-search the regularization strength with 10-fold CV, then refit a final
# Lasso with alpha=1 and score it on the held-out test split.
alphas=[0.3, 0.1, 0.03, 0.001, 0.003, 0.0001,1,2,3,4,5,6,7,8,9,10,11,12]
lasso = Lasso(random_state=17)
lasso_model = GridSearchCV(estimator=lasso, param_grid=dict(alpha=alphas), cv=10)
lasso_model.fit(X_train, Y_train)
#print(lasso_model.best_score_)
#print(lasso_model.best_estimator_.alpha)
#print(lasso_model.best_estimator_)
lasso = Lasso(alpha = 1, random_state=17).fit(X_train, Y_train)
score_lasso = lasso.score(X_test, Y_test)
#Elastic Net
# ElasticNetCV tunes alpha and the L1/L2 mixing ratio via its own internal
# cross-validation, so no GridSearchCV wrapper is needed here.
ENSTest = ElasticNetCV(alphas=[0.0001,0.0003, 0.0005, 0.001, 0.03, 0.01, 0.3, 0.1, 3, 1, 10, 30], l1_ratio=[.01, 0.3, .1, .3, .5, .9, .99], max_iter=5000, random_state=3).fit(X_train, Y_train)
score_EN = ENSTest.score(X_test, Y_test)
#Ridge
# Same pattern as Lasso: grid-search alpha, then refit with alpha=15 and
# score on the held-out split.
alphas=[0.1,0.001,0.0001,1,2,3,4,5,6,7,8,9,10,11,12,15]
ridge = Ridge(random_state=2)
ridge_model = GridSearchCV(estimator=ridge, param_grid=dict(alpha=alphas))
ridge_model.fit(X_train, Y_train)
#print(ridge_model.best_score_)
#print(ridge_model.best_estimator_.alpha)
#print(ridge_model.best_estimator_)
ridge = Ridge(alpha = 15, random_state=2).fit(X_train, Y_train)
score_ridge = ridge.score(X_test, Y_test)
#Random Forest
random_forest = RandomForestRegressor(n_estimators=2900, random_state=11).fit(X_train, Y_train)
score_forest = random_forest.score(X_test, Y_test)
#Gradient Boosting
# Huber loss makes boosting less sensitive to outliers in the target.
GBest = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05, max_depth=3, max_features='sqrt',
                                  min_samples_leaf=15, min_samples_split=10, loss='huber', random_state=5).fit(X_train, Y_train)
score_GBest = GBest.score(X_test, Y_test)
#XGBoosting
parameters= {'max_depth': [2,4,6,7,8], 'n_estimators': [50,100,200], 'learning_rate': [0.05,0.1,0.3]}
# BUG FIX: the original rebound the name ``xgb`` to the estimator instance,
# which made the later ``xgb.XGBRegressor`` calls (below and in predict())
# raise AttributeError. Keep the estimator under its own name so the module
# stays importable.
xgb_estimator = xgb.XGBRegressor(seed=7)
xgb_model = GridSearchCV(xgb_estimator, parameters, n_jobs=1, cv=10)
xgb_model.fit(X_train, Y_train)
#print(ridge_model.best_estimator_). Note: increased n_estimators.
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.05, seed=7) #the params were tuned using xgb.cv
model_xgb.fit(X_train, Y_train)
score_xgb = model_xgb.score(X_test, Y_test)
#Neural Network (Fully Connected)
#Inputs need to be numpy arrays
temp_data = data.values
# NOTE(review): this treats column 0 as the target and the rest as inputs,
# but 'data' was built from token-feature dicts with the numeric columns
# (including total_spent) appended last, so column 0 looks like a token
# feature rather than the spend target — confirm the intended target here.
train = temp_data[:,1:].astype(float)
features = temp_data[:,0]
#Making fully connected neural network. Could implement regularization with dropout, but seems to not be needed
# as acc and val_acc are similar
nnmodel = Sequential()
nnmodel.add(Dense(80, input_dim=164, kernel_initializer='normal', activation='relu'))
nnmodel.add(Dense(20, kernel_initializer='normal', activation='relu'))
nnmodel.add(Dense(1, kernel_initializer='normal'))
# Compile model
# NOTE(review): 'accuracy' is a classification metric; for this MSE regression
# objective a metric such as MAE would be more meaningful — confirm.
nnmodel.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
#Checkpointing
filepath='C:/Users/Michael Kang/Desktop/RockerboxRegressionBest.hdf5'
#Early = EarlyStopping(monitor='val_acc', min_delta=0, patience=2, verbose=2, mode='auto')
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
#Plot learning curves
nn_model_history = nnmodel.fit(train, features, validation_split=0.25, epochs=100, batch_size=10, callbacks=callbacks_list)
plt.plot(nn_model_history.history['acc'])
plt.plot(nn_model_history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
#Since just getting an estimate, just getting final value since graph shows it to have mostly stabilized.
nn_score = nn_model_history.history['acc'][-1:][0]
#Just making a nice visual to compare accuracy.
# One row per model with its held-out R^2 (or, for the NN, final training
# accuracy), sorted best-first.
models = pd.DataFrame({
    'Model': ['Lasso', 'Elastic Net', 'Ridge', 'Random Forest', 'Gradient Boosting', 'XGB', 'Neural Network'],
    'Score': [score_lasso, score_EN, score_ridge, score_forest, score_GBest, score_xgb, nn_score]})
print(models.sort_values(by='Score', ascending=False))
#combining the results of XGBoost, the neural network, and Random Forests, which had the best results.
#Run this last with X_provided being a processed and tokenized domain. Outputs predicted total_spent.
def predict(X_provided):
    """Average the random-forest, XGBoost, and neural-network predictions for
    *X_provided* (a processed, tokenized feature matrix) into one estimate of
    total_spent. Relies on module-level X, Y, and the trained nnmodel.
    """
    random_forest = RandomForestRegressor(n_estimators=2900, random_state=11).fit(X, Y)
    pred_random_forest = random_forest.predict(X_provided)
    # BUG FIX: the network has a single output, so its predictions have shape
    # (n, 1); np.argmax over axis=1 of a one-column array is always 0, which
    # silently discarded the network's output. Flatten to a 1-D vector instead.
    pred_neural = nnmodel.predict(X_provided).flatten()
    xgbost = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.05, seed=7).fit(X, Y)
    pred_xgb = xgbost.predict(X_provided)
    return (pred_random_forest + pred_xgb + pred_neural)/3
|
{
"content_hash": "b8f7e245f9c88f230d23bdc6fa52e390",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 192,
"avg_line_length": 43.778625954198475,
"alnum_prop": 0.6750653879686138,
"repo_name": "MichaelMKKang/Projects",
"id": "1b4ddf0bcb9992c08aabca5f3076120bf88b954b",
"size": "11470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Online_Marketing_Analysis/dataset_regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76850"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.optim as optim
from utils.helpers import Experience
class Agent(object):
    """Abstract base class for RL agents (dqn / a3c / acer).

    Copies all configuration and hyperparameters from ``args`` onto the
    instance and provides model/ICM save/load helpers. Concrete agents must
    implement the ``_forward``/``_backward``/``_eval_model``/``fit_model``/
    ``test_model`` template methods.
    """
    def __init__(self, args, env_prototype, model_prototype, memory_prototype=None):
        # logging
        self.logger = args.logger
        # prototypes for env & model & memory
        self.env_prototype = env_prototype # NOTE: instantiated in fit_model() of inherited Agents
        self.env_params = args.env_params
        self.model_prototype = model_prototype # NOTE: instantiated in fit_model() of inherited Agents
        self.model_params = args.model_params
        self.memory_prototype = memory_prototype # NOTE: instantiated in __init__() of inherited Agents (dqn needs, a3c doesn't so only pass in None)
        self.memory_params = args.memory_params
        # params
        self.model_name = args.model_name # NOTE: will save the current model to model_name
        self.model_file = args.model_file # NOTE: will load pretrained model_file if not None
        # icm: intrinsic curiosity module settings; only populated when enabled
        self.icm = args.icm if hasattr(args, "icm") else False
        if self.icm:
            self.icm_inv_lr = args.icm_inv_lr
            self.icm_fwd_lr = args.icm_fwd_lr
            self.icm_inv_model_prototype = args.icm_inv_model
            self.icm_fwd_model_prototype = args.icm_fwd_model
            self.icm_inv_model = None # initialize in subclasses
            self.icm_fwd_model = None # initialize in subclasses
            self.icm_inv_model_name = args.icm_inv_model_name
            self.icm_inv_model_file = args.icm_inv_model_file
            self.icm_fwd_model_name = args.icm_fwd_model_name
            self.icm_fwd_model_file = args.icm_fwd_model_file
        self.render = args.render
        self.visualize = args.visualize
        if self.visualize:
            self.vis = args.vis
            self.refs = args.refs
        self.save_best = args.save_best
        self.icm_save_best = args.icm_save_best
        if self.save_best:
            self.best_step = None # NOTE: achieves best_reward at this step
            self.best_reward = None # NOTE: only save a new model if achieves higher reward
        if self.icm_save_best:
            # best-so-far bookkeeping for the two ICM models (lower loss is better)
            self.best_icm_inv_step = None
            self.best_icm_inv_loss = None
            self.best_icm_fwd_step = None
            self.best_icm_fwd_loss = None
        self.hist_len = args.hist_len
        self.hidden_dim = args.model_params.hidden_dim
        self.use_cuda = args.use_cuda
        self.dtype = args.dtype
        # agent_params
        # criteria and optimizer
        self.value_criteria = args.value_criteria
        self.optim = args.optim
        # hyperparameters
        self.steps = args.steps
        self.early_stop = args.early_stop
        self.gamma = args.gamma
        self.clip_grad = args.clip_grad
        self.lr = args.lr
        self.lr_decay = args.lr_decay
        self.weight_decay = args.weight_decay
        self.eval_freq = args.eval_freq
        self.eval_steps = args.eval_steps
        self.prog_freq = args.prog_freq
        self.test_nepisodes = args.test_nepisodes
        # agent-type specific hyperparameters
        if args.agent_type == "dqn":
            self.enable_double_dqn = args.enable_double_dqn
            self.enable_dueling = args.enable_dueling
            self.dueling_type = args.dueling_type
            self.learn_start = args.learn_start
            self.batch_size = args.batch_size
            self.valid_size = args.valid_size
            self.eps_start = args.eps_start
            self.eps_end = args.eps_end
            self.eps_eval = args.eps_eval
            self.eps_decay = args.eps_decay
            self.target_model_update = args.target_model_update
            self.action_repetition = args.action_repetition
            self.memory_interval = args.memory_interval
            self.train_interval = args.train_interval
        elif args.agent_type == "a3c":
            self.enable_log_at_train_step = args.enable_log_at_train_step
            self.enable_lstm = args.enable_lstm
            self.enable_continuous = args.enable_continuous
            self.num_processes = args.num_processes
            self.rollout_steps = args.rollout_steps
            self.tau = args.tau
            self.beta = args.beta
            self.icm_plus_reward = args.icm_plus_reward
            self.icm_beta = args.icm_beta
            self.icm_fwd_wt = args.icm_fwd_wt
        elif args.agent_type == "acer":
            self.enable_bias_correction = args.enable_bias_correction
            self.enable_1st_order_trpo = args.enable_1st_order_trpo
            self.enable_log_at_train_step = args.enable_log_at_train_step
            self.enable_lstm = args.enable_lstm
            self.enable_continuous = args.enable_continuous
            self.num_processes = args.num_processes
            self.replay_ratio = args.replay_ratio
            self.replay_start = args.replay_start
            self.batch_size = args.batch_size
            self.valid_size = args.valid_size
            self.clip_trace = args.clip_trace
            self.clip_1st_order_trpo = args.clip_1st_order_trpo
            self.avg_model_decay = args.avg_model_decay
            self.rollout_steps = args.rollout_steps
            self.tau = args.tau
            self.beta = args.beta

    def _reset_experience(self):
        """Reset the agent's one-step transition buffer to an empty Experience."""
        self.experience = Experience(state0 = None,
                                     action = None,
                                     reward = None,
                                     state1 = None,
                                     terminal1 = False)

    def _load_model(self, model_file):
        """Load pretrained weights from *model_file* into self.model, if given."""
        if model_file:
            # NOTE(review): the log lines reference self.model_file while the
            # weights are loaded from the model_file argument — confirm both
            # always refer to the same path.
            self.logger.warning("Loading Model: " + self.model_file + " ...")
            self.model.load_state_dict(torch.load(model_file))
            self.logger.warning("Loaded Model: " + self.model_file + " ...")
        else:
            self.logger.warning("No Pretrained Model. Will Train From Scratch.")

    def _save_model(self, step, curr_reward):
        """Persist self.model; in save_best mode only when curr_reward is a new best,
        otherwise to a per-step file name."""
        self.logger.warning("Saving Model @ Step: " + str(step) + ": " + self.model_name + " ...")
        if self.save_best:
            if self.best_step is None:
                # first call: seed the best-so-far values
                self.best_step = step
                self.best_reward = curr_reward
            if curr_reward >= self.best_reward:
                # new best: record it and overwrite the saved weights
                self.best_step = step
                self.best_reward = curr_reward
                torch.save(self.model.state_dict(), self.model_name)
            self.logger.warning("Saved Model @ Step: " + str(step) + ": " + self.model_name + ". {Best Step: " + str(self.best_step) + " | Best Reward: " + str(self.best_reward) + "}")
        else:
            # not tracking a best model: save a snapshot suffixed with the step
            idx = self.model_name.index('.pth')
            step_name = self.model_name[:idx] + '_step_' + str(step) + '.pth'
            torch.save(self.model.state_dict(), step_name)
            self.logger.warning("Saved Model @ Step: " + str(step) + ": " + self.model_name + ".")

    # TODO: used everywhere where needed? (a3c.py)
    def _load_icm_models(self, inv_model_file, fwd_model_file):
        """Load pretrained ICM inverse/forward model weights; both files are
        required, otherwise training starts from scratch."""
        if inv_model_file and fwd_model_file:
            self.logger.warning("Loading ICM Inverse Model: " + inv_model_file + " ...")
            self.icm_inv_model.load_state_dict(torch.load(inv_model_file))
            self.logger.warning("Loaded ICM Inverse Model: " + inv_model_file + " ...")
            self.logger.warning("Loading ICM Forward Model: " + fwd_model_file + " ...")
            self.icm_fwd_model.load_state_dict(torch.load(fwd_model_file))
            self.logger.warning("Loaded ICM Forward Model: " + fwd_model_file + " ...")
        else:
            self.logger.warning("No Pretrained ICM Models. Will Train From Scratch.")

    # TODO: used everywhere where needed? (a3c_single_process.py)
    def _save_icm_models(self, step, curr_inv_loss, curr_fwd_loss):
        """Persist the ICM inverse and forward models, mirroring _save_model():
        in icm_save_best mode only on a new lowest loss, else per-step files."""
        self.logger.warning("Saving ICM Inverse Model @ Step: " + str(step) + ": "
                            + self.icm_inv_model_name + " ...")
        if self.icm_save_best:
            if self.best_icm_inv_step is None:
                self.best_icm_inv_step = step
                self.best_icm_inv_loss = curr_inv_loss
            if curr_inv_loss < self.best_icm_inv_loss:
                # lower inverse-model loss is better
                self.best_icm_inv_step = step
                self.best_icm_inv_loss = curr_inv_loss
                torch.save(self.icm_inv_model.state_dict(), self.icm_inv_model_name)
            self.logger.warning(
                "Saved ICM Inverse Model @ Step: " + str(step) + ": " + self.icm_inv_model_name + ". {Best Step: " + str(
                    self.best_icm_inv_step) + " | Best Loss: " + str(self.best_icm_inv_loss) + "}")
            self.logger.warning("Saving ICM Forward Model @ Step: " + str(step) + ": "
                                + self.icm_fwd_model_name + " ...")
            if self.best_icm_fwd_step is None:
                self.best_icm_fwd_step = step
                self.best_icm_fwd_loss = curr_fwd_loss
            if curr_fwd_loss < self.best_icm_fwd_loss:
                # lower forward-model loss is better
                self.best_icm_fwd_step = step
                self.best_icm_fwd_loss = curr_fwd_loss
                torch.save(self.icm_fwd_model.state_dict(), self.icm_fwd_model_name)
            self.logger.warning(
                "Saved ICM Forward Model @ Step: " + str(step) + ": " + self.icm_fwd_model_name + ". {Best Step: " + str(
                    self.best_icm_fwd_step) + " | Best Loss: " + str(self.best_icm_fwd_loss) + "}")
        else:
            # not tracking best models: save step-suffixed snapshots of both
            idx = self.icm_inv_model_name.index('.pth')
            inv_step_name = self.icm_inv_model_name[:idx] + '_step_' + str(step) + '.pth'
            torch.save(self.icm_inv_model.state_dict(), inv_step_name)
            self.logger.warning("Saved ICM Inverse Model @ Step: " + str(step) + ": " + self.icm_inv_model_name + ".")
            self.logger.warning("Saving ICM Forward Model @ Step: " + str(step) + ": " + self.icm_fwd_model_name + " ...")
            idx = self.icm_fwd_model_name.index('.pth')
            fwd_step_name = self.icm_fwd_model_name[:idx] + '_step_' + str(step) + '.pth'
            torch.save(self.icm_fwd_model.state_dict(), fwd_step_name)
            self.logger.warning("Saved ICM Forward Model @ Step: " + str(step) + ": " + self.icm_fwd_model_name + ".")

    def _forward(self, observation):
        """Select an action for *observation*; implemented by subclasses."""
        raise NotImplementedError("not implemented in base calss")

    def _backward(self, reward, terminal):
        """Consume a transition's reward/terminal flag; implemented by subclasses."""
        raise NotImplementedError("not implemented in base calss")

    def _eval_model(self): # evaluation during training
        raise NotImplementedError("not implemented in base calss")

    def fit_model(self): # training
        raise NotImplementedError("not implemented in base calss")

    def test_model(self): # testing pre-trained models
        raise NotImplementedError("not implemented in base calss")
|
{
"content_hash": "4de02b0b00af56236526c8e9109374f2",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 188,
"avg_line_length": 48.63436123348018,
"alnum_prop": 0.5750905797101449,
"repo_name": "AlekseyZhelo/pytorch-rl",
"id": "bb19ac593674c5a628b8b0c2d82ceb41e11a6a26",
"size": "11040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "343662"
},
{
"name": "Shell",
"bytes": "5413"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
from cms.utils.i18n import get_language_tuple
class Migration(migrations.Migration):
    """Initial migration: creates the InheritPagePlaceholder plugin model."""

    # Require django-cms's first migration so cms.CMSPlugin / cms.Page exist.
    dependencies = [
        ('cms', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='InheritPagePlaceholder',
            fields=[
                # parent link to the base CMSPlugin row (multi-table inheritance)
                ('cmsplugin_ptr', models.OneToOneField(serialize=False, parent_link=True, auto_created=True, to='cms.CMSPlugin', primary_key=True)),
                # both fields are optional: empty language/page fall back to
                # the current page per the help_text
                ('from_language', models.CharField(help_text='Optional: the language of the plugins you want', blank=True, max_length=5, choices=get_language_tuple(), verbose_name='language', null=True)),
                ('from_page', models.ForeignKey(help_text='Choose a page to include its plugins into this placeholder, empty will choose current page', blank=True, to='cms.Page', null=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
|
{
"content_hash": "dc3e59fc21679dca8bfacf842de5ec2e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 204,
"avg_line_length": 39.65384615384615,
"alnum_prop": 0.6139670223084384,
"repo_name": "bittner/djangocms-inherit",
"id": "2fdfe2240594cb61006ee5f213df5ca30973f795",
"size": "1055",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "djangocms_inherit/migrations_django/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "66"
},
{
"name": "Python",
"bytes": "17780"
}
],
"symlink_target": ""
}
|
import dictconfig
import logging
import os

# get the right settings module
settingmodule = os.environ.get('DJANGO_SETTINGS_MODULE', 'settings_local')
# Strip the leading package name when the env var points inside a checkout
# (e.g. "zamboni.settings_local" -> "settings_local").
if settingmodule.startswith(('zamboni', # typical git clone destination
                             'workspace', # Jenkins
                             'project', # vagrant VM
                             'freddo')):
    settingmodule = settingmodule.split('.', 1)[1]

import sys # noqa

import MySQLdb as mysql # noqa
import sqlalchemy.pool as pool # noqa
from django.utils import importlib # noqa

# Imported late on purpose: the module name is only known after the
# environment inspection above.
settings = importlib.import_module(settingmodule)

from mkt.constants.payments import ( # noqa
    CONTRIB_CHARGEBACK, CONTRIB_NO_CHARGE,
    CONTRIB_PURCHASE, CONTRIB_REFUND)

from lib.log_settings_base import formatters, handlers # noqa
def getconn():
    """Open a fresh MySQL connection using the SERVICES_DATABASE settings."""
    cfg = settings.SERVICES_DATABASE
    return mysql.connect(
        host=cfg['HOST'],
        user=cfg['USER'],
        passwd=cfg['PASSWORD'],
        db=cfg['NAME'])
# Shared SQLAlchemy connection pool: 5 persistent connections, up to 10
# overflow connections, each recycled after 300 seconds.
mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5, recycle=300)
def log_configure():
    """Explicitly apply the syslog logging configuration; call this once."""
    config = {'version': 1, 'filters': {}, 'root': {}}
    config['formatters'] = {'prod': formatters['prod']}
    config['handlers'] = {'syslog': handlers['syslog']}
    config['loggers'] = {
        'z': {'handlers': ['syslog'], 'level': logging.INFO},
    }
    # This configuration is applied at import time in verify.py, so leave any
    # already-configured loggers alone when this module is imported into the
    # marketplace Django app.
    config['disable_existing_loggers'] = False
    dictconfig.dictConfig(config)
def log_exception(data):
    # Logs the current exception (with traceback) plus contextual *data* to
    # the 'z.receipt' logger.
    # NOTE(review): logger.exception() records at ERROR level; the original
    # comment claimed INFO-level logging — confirm the intended level.
    typ, value, discard = sys.exc_info()
    error_log = logging.getLogger('z.receipt')
    error_log.exception(u'Type: %s, %s. Data: %s' % (typ, value, data))
def log_info(msg):
    """Log *msg* at INFO level to the receipt logger."""
    logging.getLogger('z.receipt').info(msg)
|
{
"content_hash": "17282e4670172f708ca5e03e747e9e1b",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 31.044117647058822,
"alnum_prop": 0.6295594504973946,
"repo_name": "clouserw/zamboni",
"id": "ab3f0bd6bb6db835bcbe906a791c9ba7888a3a85",
"size": "2111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357623"
},
{
"name": "HTML",
"bytes": "2134650"
},
{
"name": "JavaScript",
"bytes": "532610"
},
{
"name": "Makefile",
"bytes": "4172"
},
{
"name": "Python",
"bytes": "3908875"
},
{
"name": "SQLPL",
"bytes": "98"
},
{
"name": "Shell",
"bytes": "10972"
},
{
"name": "Smarty",
"bytes": "1369"
}
],
"symlink_target": ""
}
|
import datetime
import logging
import webapp2
from cosmopolite.lib import models
from cosmopolite.lib import utils
import config
class CleanupPollingInstances(webapp2.RequestHandler):
    """Cron handler that deletes polling instances that have gone stale."""

    @utils.local_namespace
    def get(self):
        # An instance that has not polled within the last minute is stale.
        cutoff = datetime.datetime.now() - datetime.timedelta(minutes=1)
        query = (
            models.Instance.all()
            .filter('polling =', True)
            .filter('last_poll <', cutoff))
        for instance in query:
            instance.Delete()
# WSGI routing: expose the cleanup handler at its cron URL.
app = webapp2.WSGIApplication([
    (config.URL_PREFIX + '/cron/cleanup_polling_instances', CleanupPollingInstances),
])
|
{
"content_hash": "21984eca9a398e712df11bac46de4e96",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 83,
"avg_line_length": 22.73076923076923,
"alnum_prop": 0.7106598984771574,
"repo_name": "flamingcowtv/cosmopolite",
"id": "32baa05fea847b015540fcf4e08eda33c85a4233",
"size": "1169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "48884"
},
{
"name": "CSS",
"bytes": "4853"
},
{
"name": "HTML",
"bytes": "3891"
},
{
"name": "JavaScript",
"bytes": "81915"
},
{
"name": "Makefile",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "37075"
},
{
"name": "Shell",
"bytes": "1024"
}
],
"symlink_target": ""
}
|
import imp
import os.path
import sys
import unittest
from mojom.generate import module as mojom
from mojom.generate import translate
from mojom.parse import ast
class TranslateTest(unittest.TestCase):
  """Tests the mojom AST -> mojom.Module translation in |translate|."""

  def testSimpleArray(self):
    """Tests a simple int32[]."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("int32[]"), "a:i32")

  def testAssociativeArray(self):
    """Tests a simple uint8{string}."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("uint8{string}"), "m[s][u8]")

  def testLeftToRightAssociativeArray(self):
    """Makes sure that parsing is done from right to left on the internal kinds
    in the presence of an associative array."""
    # pylint: disable=W0212
    self.assertEqual(translate._MapKind("uint8[]{string}"), "m[s][a:u8]")

  def testTranslateSimpleUnions(self):
    """Makes sure that a simple union is translated correctly."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "SomeUnion", None,
            ast.UnionBody([
                ast.UnionField("a", None, None, "int32"),
                ast.UnionField("b", None, None, "string")
            ]))
    ])
    translation = translate.OrderedModule(tree, "mojom_tree", [])
    self.assertEqual(1, len(translation.unions))
    union = translation.unions[0]
    self.assertTrue(isinstance(union, mojom.Union))
    self.assertEqual("SomeUnion", union.mojom_name)
    self.assertEqual(2, len(union.fields))
    self.assertEqual("a", union.fields[0].mojom_name)
    self.assertEqual(mojom.INT32.spec, union.fields[0].kind.spec)
    self.assertEqual("b", union.fields[1].mojom_name)
    self.assertEqual(mojom.STRING.spec, union.fields[1].kind.spec)

  def testMapKindRaisesWithDuplicate(self):
    """Verifies _ElemsOfType() raises when passed two values with the same
    name."""
    methods = [
        ast.Method('dup', None, None, ast.ParameterList(), None),
        ast.Method('dup', None, None, ast.ParameterList(), None)
    ]
    with self.assertRaises(Exception):
      translate._ElemsOfType(methods, ast.Method, 'scope')

  def testAssociatedKinds(self):
    """Tests type spec translation of associated interfaces and requests."""
    # pylint: disable=W0212
    self.assertEqual(
        translate._MapKind("asso<SomeInterface>?"), "?asso:x:SomeInterface")
    self.assertEqual(translate._MapKind("rca<SomeInterface>?"),
                     "?rca:x:SomeInterface")

  def testSelfRecursiveUnions(self):
    """Verifies _UnionField() raises when a union is self-recursive."""
    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union("SomeUnion", None,
                  ast.UnionBody([ast.UnionField("a", None, None, "SomeUnion")]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])

    tree = ast.Mojom(None, ast.ImportList(), [
        ast.Union(
            "SomeUnion", None,
            ast.UnionBody([ast.UnionField("a", None, None, "SomeUnion?")]))
    ])
    with self.assertRaises(Exception):
      translate.OrderedModule(tree, "mojom_tree", [])
|
{
"content_hash": "8fb62c9828f86226fa2f363a39b1560a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 80,
"avg_line_length": 36.53488372093023,
"alnum_prop": 0.6543602800763845,
"repo_name": "scheib/chromium",
"id": "5c9300bec73ed367fb3bb3fc184a3cd4ff1487de",
"size": "3305",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mojo/public/tools/mojom/mojom/generate/translate_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import numpy as np
import os, sys, time
sys.path.append('../')
from commons.utils import logger
from commons import utils
from commons import dataloader
import evaluator
# parameter config area
para = {'dataPath': '../data/',  # data path
        'dataName': 'google-cluster-data',  # set the dataset name
        'dataType': 'cpu',  # data type: cpu or memory
        'dataSample': 'day-sample',  # choose 'day-sample', 'week-sample', or 'all'
        'outPath': 'result/',  # output path for results
        'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NNPRE', 'SNR'],  # delete where appropriate
        'samplingRate': np.arange(0.05, 0.06, 0.05),  # sampling rate
        'monitorSelection': 'topW-Update',  # monitor selection algorithm
        # select from 'random', 'topW', 'topW-Update', 'batch-selection'
        'trainingPeriod': 12,  # training time periods
        'saveTimeInfo': False,  # whether to keep track of the running time
        'saveLog': False,  # whether to save log into file
        'debugMode': False,  # whether to record the debug info
        'parallelMode': True  # whether to leverage multiprocessing for speedup
        }

startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('JGD: [Silvestri et al., ICDCS\'2015].')

# load the dataset
dataMatrix = dataloader.load(para)
# keep only the first 24 time slots (presumably one day of hourly samples
# -- TODO confirm against the dataset layout)
dataMatrix = dataMatrix[:,0:24]

# evaluate compressive monitoring algorithm
evaluator.execute(dataMatrix, para)

# fixed typo in log message: "Elaspsed" -> "Elapsed"
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')
|
{
"content_hash": "a8bbe7c1f23465413dd36a1a56e75499",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 104,
"avg_line_length": 42.75,
"alnum_prop": 0.6239766081871345,
"repo_name": "cuhk-cse/CoMonitor",
"id": "7f3b39d24eb001dec19614c52d21ea71457c826d",
"size": "1936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarks/JGD_icdcs15/run_googledata_cpu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56398"
}
],
"symlink_target": ""
}
|
from typing import Any
from django.conf import settings
from django.core.cache import cache
from django.core.management.base import BaseCommand
from django.db.models import F
from zerver.models import UserMessage
class Command(BaseCommand):
    help = """Script to mark all messages as unread."""

    def handle(self, *args: Any, **options: Any) -> None:
        # Destructive bulk rewrite; only permitted in development setups.
        assert settings.DEVELOPMENT
        # Clear the `read` bit on every UserMessage in a single UPDATE query.
        UserMessage.objects.all().update(flags=F('flags').bitand(~UserMessage.flags.read))
        # Flush the cache backend so stale flag state is not served.
        # NOTE(review): reaches into the private `_cache` attribute of the
        # Django cache wrapper -- presumably memcached; confirm backend.
        cache._cache.flush_all()
|
{
"content_hash": "26563935c3bf5962dcf075510e2ce94a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 90,
"avg_line_length": 30.58823529411765,
"alnum_prop": 0.7288461538461538,
"repo_name": "brainwane/zulip",
"id": "0e4029459894686a17f98a4729b44f3a7a71f85a",
"size": "520",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zilencer/management/commands/mark_all_messages_unread.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
"""Unit tests for traffic statistics stuff."""
import unittest
import grokapi.queries
class TestGrok(unittest.TestCase):
    """Test the Grok URL-building helpers."""

    def test_make_url(self):
        """Test _make_url()."""
        grok = grokapi.queries.Grok('fr')
        # Month is 1, not 01: integer literals with a leading zero are a
        # SyntaxError on Python 3.
        result = grok._make_url('France', 2013, 1)
        expected = 'http://stats.grok.se/json/fr/201301/France'
        self.assertEqual(result, expected)

    def test_make_url_latest(self):
        """test make_url_latest()."""
        grok = grokapi.queries.Grok('fr')
        result = grok._make_url_latest('France', 90)
        expected = 'http://stats.grok.se/json/fr/latest90/France'
        self.assertEqual(result, expected)

    def test_make_url_latest_with_wrong_value(self):
        """test make_url_latest() with a wrong value should raise a ValueError Exception."""
        grok = grokapi.queries.Grok('fr')
        with self.assertRaises(ValueError):
            grok._make_url_latest('France', 42)
class TestGrokOnline(unittest.TestCase):
    """Tests that hit the live stats.grok.se API (require network access)."""

    def test_get_latest_views(self):
        grok = grokapi.queries.Grok('fr')
        result = grok.get_latest_views('France', 90)
        self.assertIn(u'month', result)
        self.assertIn(u'rank', result)
        self.assertIn(u'daily_views', result)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(result[u'month'], u'latest90')
        self.assertIsInstance(result[u'rank'], int)
        self.assertIsInstance(result[u'daily_views'], dict)

    def test_get_views_for_month(self):
        grok = grokapi.queries.Grok('fr')
        title = 'France'
        result = grok.get_views_for_month(title, 2015, 1)
        self.assertIn(u'project', result)
        self.assertIn(u'title', result)
        self.assertEqual(result[u'title'], title)
        self.assertIn(u'daily_views', result)
        self.assertIsInstance(result[u'daily_views'], dict)
# Allow running this test module directly: python test_queries.py
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "801ff430afce669a383dd715f6ec9eb7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 92,
"avg_line_length": 33.964285714285715,
"alnum_prop": 0.6256572029442692,
"repo_name": "Commonists/Grokapi",
"id": "c4ecaf64c750d6a4847556e2534dee632f545323",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6793"
}
],
"symlink_target": ""
}
|
import nltk
from nltk.collocations import *
import re
import codecs
import logging
import os
class Collocator(object):
    """Finds bigram and trigram collocations in a token sequence using NLTK."""

    def __init__(self, words):
        """Loads the stop-word list shipped next to this module.

        Args:
            words: iterable of tokens to mine for collocations.

        Raises:
            IOError: if stopwords.txt cannot be opened.
        """
        self.words = words
        path = os.path.dirname(__file__)
        stopwords_path = os.path.join(path, 'stopwords.txt')
        try:
            # `with` guarantees the handle is closed; the original code
            # referenced `f` after a failed open(), raising NameError
            # instead of a meaningful error.
            with codecs.open(stopwords_path, 'r', 'utf-8') as f:
                stop_words = f.read()
        except IOError:
            logging.error('Can\'t open stopwords.txt file')
            raise
        self._stop_words = set(stop_words.split(', '))

    def find_collocations(self, freq_filter=10):
        """Returns the best bigram and trigram collocations in self.words.

        Args:
            freq_filter: minimum occurrence count for bigrams (trigrams use
                freq_filter - 1, floored at 2).

        Returns:
            A list: chi-squared-ranked bigram tuples followed by ranked
            trigram tuples.
        """
        bigram_measures = nltk.collocations.BigramAssocMeasures()
        trigram_measures = nltk.collocations.TrigramAssocMeasures()

        finder = BigramCollocationFinder.from_words(self.words)
        finder.apply_freq_filter(freq_filter)
        # Drop short words and stop words, then bigrams whose combined
        # length is under 10 characters.
        finder.apply_word_filter(lambda w: len(w) < 4 or w in self._stop_words)
        finder.apply_ngram_filter(lambda w1, w2: len(w1) + len(w2) < 10)
        best_bigrams = finder.nbest(bigram_measures.chi_sq, 100000)

        finder3 = TrigramCollocationFinder.from_words(self.words)
        finder3.apply_freq_filter(freq_filter - 1 if freq_filter > 2 else 2)
        finder3.apply_word_filter(lambda w: len(w) < 4 or w in self._stop_words)
        finder3.apply_ngram_filter(lambda w1, w2, w3: len(w1) + len(w2) + len(w3) < 13)
        best_trigrams = finder3.nbest(trigram_measures.chi_sq, 100000)

        return best_bigrams + best_trigrams
|
{
"content_hash": "bfe2e2d4548aa874472682266402b514",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 87,
"avg_line_length": 36.34146341463415,
"alnum_prop": 0.6201342281879194,
"repo_name": "yakxxx/memek",
"id": "ad3ec15e81b82ba96b93d92a77ca0d11a126115d",
"size": "1505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miner/collocator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25827"
}
],
"symlink_target": ""
}
|
""" Demo of PipelineDP with Spark.
For running:
1. Install Python and run on the command line `pip install pipeline-dp pyspark absl-py`
2. Run python python run_on_beam.py --input_file=<path to data.txt from 3> --output_file=<...>
"""
from absl import app
from absl import flags
import pyspark
import pipeline_dp
from pipeline_dp.private_spark import make_private
from pipeline_dp import SumParams
from common_utils import parse_partition
from common_utils import delete_if_exists
FLAGS = flags.FLAGS
# Command-line flags; both are marked required before app.run() below.
flags.DEFINE_string('input_file', None, 'The file with the movie view data')
flags.DEFINE_string('output_file', None, 'Output file')
def main(unused_argv):
    """Computes a DP per-movie sum of ratings on Spark and saves the result."""
    delete_if_exists(FLAGS.output_file)

    # Spark setup: one local worker thread so the input file loads as a
    # single partition.  Point the master at a real cluster (e.g. on a
    # cloud provider) for a truly distributed run.
    spark_conf = pyspark.SparkConf().setMaster("local[1]")
    spark_context = pyspark.SparkContext(conf=spark_conf)
    movie_views = spark_context.textFile(FLAGS.input_file).mapPartitions(
        parse_partition)

    # Privacy budget shared by every DP operation in this computation.
    budget_accountant = pipeline_dp.NaiveBudgetAccountant(
        total_epsilon=1, total_delta=1e-6)

    # Lift the plain RDD into PipelineDP's private wrapper, keyed by user.
    private_movie_views = make_private(
        movie_views, budget_accountant, lambda mv: mv.user_id)

    # Differentially private sum of ratings, grouped by movie.
    sum_params = SumParams(
        # Contribution bounding: at most two movies rated per user ...
        max_partitions_contributed=2,
        # ... and at most one rating per (user, movie) pair.
        max_contributions_per_partition=1,
        # Ratings are clipped to the [1, 5] range.
        min_value=1,
        max_value=5,
        # The aggregation key: we're grouping by movies.
        partition_extractor=lambda mv: mv.movie_id,
        # The value we're aggregating: the rating itself.
        value_extractor=lambda mv: mv.rating)
    dp_result = private_movie_views.sum(sum_params)

    budget_accountant.compute_budgets()

    # Save the results.
    dp_result.saveAsTextFile(FLAGS.output_file)
    return 0
# Both flags are mandatory; app.run() parses argv and invokes main().
if __name__ == '__main__':
    flags.mark_flag_as_required("input_file")
    flags.mark_flag_as_required("output_file")
    app.run(main)
|
{
"content_hash": "91092929a5fc110ebc500014c8d38eac",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 94,
"avg_line_length": 34.63013698630137,
"alnum_prop": 0.6550632911392406,
"repo_name": "OpenMined/PipelineDP",
"id": "8da4911b1d6bf5b89dde0982b987172a425aa14f",
"size": "3103",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/movie_view_ratings/run_on_spark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42478"
},
{
"name": "Makefile",
"bytes": "573"
},
{
"name": "Python",
"bytes": "671843"
}
],
"symlink_target": ""
}
|
"""
Project views loaded by configuration settings.
Use these views instead of calling the views directly, in order to allow for
settings override of the view class.
"""
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.projects.views import private
# Project Import Wizard
class ImportWizardView(SettingsOverrideObject):
    """Project import wizard view, swappable via PROJECT_IMPORT_VIEW."""
    _default_class = private.ImportWizardView
    _override_setting = 'PROJECT_IMPORT_VIEW'
# Project demo import
class ImportDemoView(SettingsOverrideObject):
    """Demo-project import view, swappable via PROJECT_IMPORT_DEMO_VIEW."""
    _default_class = private.ImportDemoView
    _override_setting = 'PROJECT_IMPORT_DEMO_VIEW'
|
{
"content_hash": "24b8b1785edd797e1609cb7399eab265",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.7934959349593496,
"repo_name": "rtfd/readthedocs.org",
"id": "ebae2d8b17313e94399d7e706d0fdac95876e197",
"size": "640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "readthedocs/projects/backends/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "66552"
},
{
"name": "Dockerfile",
"bytes": "205"
},
{
"name": "HTML",
"bytes": "196998"
},
{
"name": "JavaScript",
"bytes": "431128"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1821332"
},
{
"name": "Shell",
"bytes": "682"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from .rv import (probability, expectation, density, where, given, pspace, cdf,
sample, sample_iter, random_symbols, independent, dependent)
from sympy import sqrt, simplify
# Public API exported by `from sympy.stats import *`.
__all__ = ['P', 'E', 'density', 'where', 'given', 'sample', 'cdf', 'pspace',
        'sample_iter', 'variance', 'std', 'skewness', 'covariance',
        'dependent', 'independent', 'random_symbols', 'correlation',
        'moment', 'cmoment']
def moment(X, n, c=0, condition=None, **kwargs):
    """
    Compute the nth moment of a random expression about the point c,
    i.e. E((X - c)**n).  c defaults to 0 (raw moments).

    Examples
    ========

    >>> from sympy.stats import Die, moment, E
    >>> X = Die('X', 6)
    >>> moment(X, 1, 6)
    -5/2
    >>> moment(X, 2)
    91/6
    >>> moment(X, 1) == E(X)
    True
    """
    # A moment about c is just the expectation of the shifted power.
    shifted_power = (X - c)**n
    return expectation(shifted_power, condition, **kwargs)
def variance(X, condition=None, **kwargs):
    """
    Variance of a random expression, i.e. the expectation of (X-E(X))**2.

    Examples
    ========

    >>> from sympy.stats import Die, E, Bernoulli, variance
    >>> from sympy import simplify, Symbol
    >>> X = Die('X', 6)
    >>> p = Symbol('p')
    >>> B = Bernoulli('B', p, 1, 0)
    >>> variance(2*X)
    35/3
    >>> simplify(variance(B))
    p*(-p + 1)
    """
    # The variance is exactly the second central moment.
    second_central = cmoment(X, 2, condition, **kwargs)
    return second_central
def standard_deviation(X, condition=None, **kwargs):
    """
    Standard deviation of a random expression: the square root of the
    expectation of (X-E(X))**2.

    Examples
    ========

    >>> from sympy.stats import Bernoulli, std
    >>> from sympy import Symbol, simplify
    >>> p = Symbol('p')
    >>> B = Bernoulli('B', p, 1, 0)
    >>> simplify(std(B))
    sqrt(p*(-p + 1))
    """
    spread = variance(X, condition, **kwargs)
    return sqrt(spread)

# Conventional short alias used throughout sympy.stats.
std = standard_deviation
def covariance(X, Y, condition=None, **kwargs):
    """
    Covariance of two random expressions: the expectation that the two
    variables rise and fall together.

    Covariance(X,Y) = E( (X-E(X)) * (Y-E(Y)) )

    Examples
    ========

    >>> from sympy.stats import Exponential, covariance
    >>> from sympy import Symbol
    >>> rate = Symbol('lambda', positive=True, real=True, bounded = True)
    >>> X = Exponential('X', rate)
    >>> Y = Exponential('Y', rate)
    >>> covariance(X, X)
    lambda**(-2)
    >>> covariance(X, Y)
    0
    >>> covariance(X, Y + rate*X)
    1/lambda
    """
    # Center each variable about its mean, then take the expectation
    # of the product of the centered variables.
    mean_X = expectation(X, condition, **kwargs)
    mean_Y = expectation(Y, condition, **kwargs)
    return expectation((X - mean_X) * (Y - mean_Y), condition, **kwargs)
def correlation(X, Y, condition=None, **kwargs):
    """
    Correlation of two random expressions (Pearson's correlation
    coefficient): the normalized expectation that the two variables
    rise and fall together.

    Correlation(X,Y) = E( (X-E(X)) * (Y-E(Y)) / (sigma(X) * sigma(Y)) )

    Examples
    ========

    >>> from sympy.stats import Exponential, correlation
    >>> from sympy import Symbol
    >>> rate = Symbol('lambda', positive=True, real=True, bounded = True)
    >>> X = Exponential('X', rate)
    >>> Y = Exponential('Y', rate)
    >>> correlation(X, X)
    1
    >>> correlation(X, Y)
    0
    >>> correlation(X, Y + rate*X)
    1/sqrt(1 + lambda**(-2))
    """
    # Covariance normalized by the product of the standard deviations.
    sigma_X = std(X, condition, **kwargs)
    sigma_Y = std(Y, condition, **kwargs)
    return covariance(X, Y, condition, **kwargs)/(sigma_X * sigma_Y)
def cmoment(X, n, condition=None, **kwargs):
    """
    Return the nth central moment of a random expression about its mean,
    i.e. E((X - E(X))**n).

    Examples
    ========

    >>> from sympy.stats import Die, cmoment, variance
    >>> X = Die('X', 6)
    >>> cmoment(X, 3)
    0
    >>> cmoment(X, 2)
    35/12
    >>> cmoment(X, 2) == variance(X)
    True
    """
    # A central moment is a plain moment taken about the mean.
    center = expectation(X, condition, **kwargs)
    return moment(X, n, center, condition, **kwargs)
def smoment(X, n, condition=None, **kwargs):
    """
    Return the nth standardized moment of a random expression, i.e.
    E( ((X - mu)/sigma(X))**n ).

    Examples
    ========

    >>> from sympy.stats import skewness, Exponential, smoment
    >>> from sympy import Symbol
    >>> rate = Symbol('lambda', positive=True, real=True, bounded = True)
    >>> Y = Exponential('Y', rate)
    >>> smoment(Y, 4)
    9
    >>> smoment(Y, 4) == smoment(3*Y, 4)
    True
    >>> smoment(Y, 3) == skewness(Y)
    True
    """
    # Normalize the nth central moment by sigma**n.
    scale = std(X, condition, **kwargs)
    return (1/scale)**n*cmoment(X, n, condition, **kwargs)
def skewness(X, condition=None, **kwargs):
    """
    Measure of the asymmetry of the probability distribution.  A positive
    skew indicates that most of the values lie to the right of the mean.

    skewness(X) = E( ((X - E(X))/sigma)**3 )

    Examples
    ========

    >>> from sympy.stats import skewness, Exponential, Normal
    >>> from sympy import Symbol
    >>> X = Normal('X', 0, 1)
    >>> skewness(X)
    0
    >>> rate = Symbol('lambda', positive=True, real=True, bounded = True)
    >>> Y = Exponential('Y', rate)
    >>> skewness(Y)
    2
    """
    # Skewness is the third standardized moment.
    return smoment(X, 3, condition, **kwargs)
# Conventional short aliases: P for probability, E for expectation.
P = probability
E = expectation
|
{
"content_hash": "81d6aaf403ea65e03ef676ab6c85acaf",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 78,
"avg_line_length": 24.43867924528302,
"alnum_prop": 0.570546226597182,
"repo_name": "ojengwa/sympy",
"id": "3ac496ed3fd9dcd7a70877b80a65de0b9c35629c",
"size": "5181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/stats/rv_interface.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from flask.ext import restful
class Test(restful.Resource):
    """Trivial resource that answers GET with a static greeting payload."""

    def get(self):
        # Flask-RESTful serializes this dict to JSON for the response body.
        greeting = {'hello': 'world'}
        return greeting
|
{
"content_hash": "c979e24ee01ff85a972d728be09155c0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 19.166666666666668,
"alnum_prop": 0.6521739130434783,
"repo_name": "wtneal/example-web",
"id": "63701c420f72761dfe9e5b0628a65d4077ddd774",
"size": "115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "749"
}
],
"symlink_target": ""
}
|
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
# TODO(yuefengz): support in-graph replication.
class CollectiveAllReduceStrategy(distribute_lib.DistributionStrategy):
  """Distribution strategy that uses collective ops for all-reduce.

  It is similar to the MirroredStrategy but it uses collective ops for
  reduction.

  When `cluster_spec` is given by the `configure` method, it turns into the
  multi-worker version that works on multiple workers with between-graph
  replication.

  Note: `configure` will be called by higher-level APIs if running in
  distributed environment.
  """

  def __init__(self, num_gpus_per_worker=0):
    """Initializes the object.

    Args:
      num_gpus_per_worker: number of local GPUs or GPUs per worker, the default
        is 0 meaning CPU only.
    """
    # All real work is delegated to the extended object; this class is just
    # the public strategy wrapper around it.
    super(CollectiveAllReduceStrategy, self).__init__(
        CollectiveAllReduceExtended(self, num_gpus_per_worker))
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
  """Implementation of CollectiveAllReduceStrategy."""

  def __init__(self, container_strategy, num_gpus_per_worker):
    # NOTE(review): deliberately bypasses MirroredExtended.__init__ and calls
    # the grandparent directly; device/cross-device-ops setup happens below.
    distribute_lib.DistributionStrategyExtended.__init__(
        self, container_strategy)
    self._cross_device_ops = None
    self._num_gpus_per_worker = num_gpus_per_worker
    # Start in single-worker mode; _configure() may later switch this object
    # to multi-worker mode when a cluster_spec is supplied.
    self._initialize_local_worker(num_gpus_per_worker)
    assert isinstance(self._get_cross_device_ops(),
                      cross_device_ops_lib.CollectiveAllReduce)

  def _initialize_local_worker(self, num_gpus_per_worker):
    """Initializes the object for local training."""
    self._is_chief = True
    self._num_workers = 1

    # One replica per local GPU, or a single CPU replica when no GPUs.
    if num_gpus_per_worker:
      local_devices = tuple(
          "/device:GPU:%d" % i for i in range(num_gpus_per_worker)
      )
    else:
      local_devices = ("/device:CPU:0",)
    self._worker_device = device_util.canonicalize("/device:CPU:0")

    self._collective_keys = cross_device_utils.CollectiveKeys()
    self._initialize_local(local_devices)
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        num_workers=self._num_workers,
        num_gpus_per_worker=num_gpus_per_worker,
        collective_keys=self._collective_keys)

    # No cluster in local mode.
    self._cluster_spec = None
    self._task_type = None
    self._task_id = None

    logging.info("CollectiveAllReduceStrategy with local_devices = %r",
                 local_devices)

  def _initialize_multi_worker(self, num_gpus_per_worker, cluster_spec,
                               task_type, task_id):
    """Initializes the object for multi-worker training."""
    if task_type is None or task_id is None:
      raise ValueError("When `cluster_spec` is given, you must also specify "
                       "`task_type` and `task_id`")
    if task_type not in ("chief", "worker"):
      raise ValueError(
          "Unrecognized task_type: %r, valid task types are: \"chief\", "
          "\"worker\"." % task_type)
    cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
    self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
    if not self._num_workers:
      raise ValueError("No `worker` or `chief` tasks can be found in "
                       "`cluster_spec`.")

    self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
                                                task_id)

    self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
    # Replicas are placed on this worker's GPUs, or on the worker device
    # itself when running CPU-only.
    if num_gpus_per_worker:
      local_devices = tuple(
          "%s/device:GPU:%d" % (self._worker_device, i)
          for i in range(num_gpus_per_worker)
      )
    else:
      local_devices = (self._worker_device,)

    self._collective_keys = cross_device_utils.CollectiveKeys()
    self._initialize_local(local_devices)
    self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
        num_workers=self._num_workers,
        num_gpus_per_worker=num_gpus_per_worker,
        collective_keys=self._collective_keys)

    # Add a default device so that ops without specified devices will not end up
    # on other workers.
    self._default_device = "/job:%s/task:%d" % (task_type, task_id)

    self._cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
    self._task_type = task_type
    self._task_id = task_id

    logging.info(
        "Multi-worker CollectiveAllReduceStrategy with "
        "cluster_spec = %r, task_type = %r, task_id = %r, "
        "num_workers = %r, local_devices = %r", cluster_spec.as_dict(),
        task_type, task_id, self._num_workers, local_devices)

  def _create_variable(self, next_creator, *args, **kwargs):
    """Creates a mirrored variable whose initial value is broadcast from the
    chief worker's first device via collective ops."""
    colocate_with = kwargs.pop("colocate_with", None)
    devices = self._get_devices_from(colocate_with)
    # Total number of participants in the broadcast group, across workers.
    group_size = len(devices) * self._num_workers
    group_key = self._collective_keys.get_group_key(self._devices)

    def _real_mirrored_creator(devices, *args, **kwargs):
      """Creates one MirroredVariable on the current worker."""
      index = {}
      # The (graph-unique) variable name doubles as the collective
      # instance key so all workers agree on which broadcast is which.
      unique_var_name = ops.get_default_graph().unique_name(
          kwargs["name"], mark_as_used=False).rstrip("/")
      collective_instance_key = self._collective_keys.get_instance_key(
          key_id=unique_var_name)
      if "initial_value" not in kwargs:
        raise ValueError("Initial value must be specified.")
      initial_value = kwargs["initial_value"]
      if callable(initial_value):
        initial_value_fn = initial_value
      else:
        initial_value_fn = lambda: initial_value
      for i, d in enumerate(devices):
        with ops.device(d):
          if i > 0:
            # Give replicas meaningful distinct names:
            var0name = index[devices[0]].name.split(":")[0]
            # We append a / to variable names created on replicas with id > 0 to
            # ensure that we ignore the name scope and instead use the given
            # name as the absolute name of the variable.
            kwargs["name"] = "%s/replica_%d/" % (var0name, i)
          # The initial value fn makes sure variables all initialized to
          # same values. The first device of the chief worker will send their
          # variable values to other devices and other workers.
          def _overridden_initial_value_fn(device=d, index=i):  # pylint: disable=g-missing-docstring
            with ops.device(device):
              initial_value = initial_value_fn()
              assert not callable(initial_value)
              initial_value = ops.convert_to_tensor(initial_value)

              if self._is_chief and index == 0:
                # Chief's device 0 is the broadcast source; the send is
                # sequenced before returning the value.
                bcast_send = collective_ops.broadcast_send(
                    initial_value, initial_value.shape, initial_value.dtype,
                    group_size, group_key, collective_instance_key)
                with ops.control_dependencies([bcast_send]):
                  return array_ops.identity(initial_value)
              else:
                return collective_ops.broadcast_recv(
                    initial_value.shape, initial_value.dtype, group_size,
                    group_key, collective_instance_key)

          kwargs["initial_value"] = _overridden_initial_value_fn
          with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
            v = next_creator(*args, **kwargs)

          if i == 0:
            actual_var_name = v.name.split(":")[0]
            assert unique_var_name == actual_var_name, "%r vs %r" % (
                unique_var_name, actual_var_name)
          assert not isinstance(v, values.DistributedVariable)
          index[d] = v
      return index

    # pylint: disable=protected-access
    return mirrored_strategy._create_mirrored_variable(
        devices, _real_mirrored_creator, *args, **kwargs)

  def _distribute_dataset(self, dataset_fn):
    """Distributes the dataset to each local GPU."""
    # TODO(yuefengz): shard the dataset.
    return values.PerReplicaDataset(
        self._call_dataset_fn(dataset_fn), self._devices, True)

  def _make_dataset_iterator(self, dataset):
    # One (worker device, replica devices) pair: this worker only.
    worker_device_pairs = [(self._worker_device, self._devices)]
    return values.DatasetIterator(dataset, worker_device_pairs,
                                  self._num_replicas_in_sync)

  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    """Distributes the dataset to each local GPU."""
    if self._cluster_spec is None:
      input_pipeline_id = 0
    else:
      # This worker's index within the cluster selects its input shard.
      input_pipeline_id = multi_worker_util.id_in_cluster(
          self._cluster_spec, self._task_type, self._task_id)
    input_context = distribute_lib.InputContext(
        num_input_pipelines=self._num_workers,
        input_pipeline_id=input_pipeline_id,
        num_replicas_in_sync=self._num_replicas_in_sync)
    return values.InputFunctionIterator(
        input_fn, [(self._worker_device, self._devices)], [input_context])

  def _configure(self,
                 session_config=None,
                 cluster_spec=None,
                 task_type=None,
                 task_id=None):
    """Configures the object.

    Args:
      session_config: a `tf.ConfigProto`
      cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
        cluster configurations.
      task_type: the current task type, such as "worker".
      task_id: the current task id.

    Raises:
      ValueError: if `task_type` is not in the `cluster_spec`.
    """
    if not self._cluster_spec and cluster_spec:
      # If a `cluster_spec` is already passed in, do nothing here.
      # TODO(yuefengz): check `cluster_spec` is the same if this object has
      # already been initialized with a `cluster_spec`.
      self._initialize_multi_worker(self._num_gpus_per_worker, cluster_spec,
                                    task_type, task_id)
      assert isinstance(self._get_cross_device_ops(),
                        cross_device_ops_lib.CollectiveAllReduce)

    if session_config:
      session_config.CopyFrom(self._update_config_proto(session_config))

  def _update_config_proto(self, config_proto):
    """Returns a copy of config_proto tuned for collective all-reduce."""
    updated_config = copy.deepcopy(config_proto)
    # Enable the scoped allocator optimization for CollectiveOps. This
    # optimization converts many small all-reduces into fewer larger
    # all-reduces.
    rewrite_options = updated_config.graph_options.rewrite_options
    rewrite_options.scoped_allocator_optimization = (
        rewriter_config_pb2.RewriterConfig.ON)
    # We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
    # ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
    # clear and then append.
    del rewrite_options.scoped_allocator_opts.enable_op[:]
    rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")

    if not self._cluster_spec:
      return updated_config

    assert self._task_type
    assert self._task_id is not None

    # Collective group leader is needed for collective ops to coordinate
    # workers.
    if "chief" in self._cluster_spec.jobs:
      updated_config.experimental.collective_group_leader = (
          "/job:chief/replica:0/task:0")
    else:
      if "worker" not in self._cluster_spec.jobs:
        raise ValueError(
            "You must have `chief` or `worker` jobs in the `cluster_spec`.")
      updated_config.experimental.collective_group_leader = (
          "/job:worker/replica:0/task:0")

    # The device filters prevent communication between workers.
    del updated_config.device_filters[:]
    updated_config.device_filters.append(
        "/job:%s/task:%d" % (self._task_type, self._task_id))
    return updated_config

  @property
  def experimental_between_graph(self):
    # Multi-worker training here always uses between-graph replication.
    return True

  @property
  def experimental_should_init(self):
    return True

  @property
  def should_checkpoint(self):
    # Only the chief writes checkpoints.
    return self._is_chief

  @property
  def should_save_summary(self):
    # Only the chief writes summaries.
    return self._is_chief

  @property
  def _num_replicas_in_sync(self):
    return len(self._devices) * self._num_workers

  # TODO(priyag): Delete this once all strategies use global batch size.
  @property
  def _global_batch_size(self):
    return False
|
{
"content_hash": "6d26ceaa4054913e906f6e221fb0516b",
"timestamp": "",
"source": "github",
"line_count": 326,
"max_line_length": 101,
"avg_line_length": 39.855828220858896,
"alnum_prop": 0.6596628954052182,
"repo_name": "asimshankar/tensorflow",
"id": "346513dc586f208315fd777dc7ddfa500c82f0d7",
"size": "13682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/collective_all_reduce_strategy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4882"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "490070"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "52677142"
},
{
"name": "CMake",
"bytes": "207176"
},
{
"name": "Dockerfile",
"bytes": "39454"
},
{
"name": "Go",
"bytes": "1290930"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "890529"
},
{
"name": "Jupyter Notebook",
"bytes": "2618412"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "68402"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102518"
},
{
"name": "PHP",
"bytes": "5172"
},
{
"name": "Pascal",
"bytes": "221"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "43038983"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "497659"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: add the ``normalise_alphas`` flag to ``AlphaCorrOptions``."""
    dependencies = [
        ('basicviz', '0034_alphacorroptions'),
    ]
    operations = [
        migrations.AddField(
            model_name='alphacorroptions',
            name='normalise_alphas',
            # default=True is only used to populate existing rows;
            # preserve_default=False keeps it out of the field definition.
            field=models.BooleanField(default=True),
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "d7284b38d7167310a7b9c0e0b15c362e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 52,
"avg_line_length": 22.789473684210527,
"alnum_prop": 0.6096997690531177,
"repo_name": "sdrogers/ms2ldaviz",
"id": "062232047d8fd4a48999acf4c8fedd9e9fc0b685",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ms2ldaviz/basicviz/migrations/0035_alphacorroptions_normalise_alphas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "155389"
},
{
"name": "Dockerfile",
"bytes": "324"
},
{
"name": "HTML",
"bytes": "281089"
},
{
"name": "JavaScript",
"bytes": "564464"
},
{
"name": "Jupyter Notebook",
"bytes": "22354299"
},
{
"name": "Python",
"bytes": "897444"
},
{
"name": "Shell",
"bytes": "561"
}
],
"symlink_target": ""
}
|
from wallaby.pf.room import *
from wallaby.backends.couchdb import *
from wallaby.plugins.couchdb.document import *
from twisted.internet import defer
from datetime import date
class Allcontracts(Room):
    """Wallaby room that creates invoice documents for contracts that are due."""
    def __init__(self, name):
        Room.__init__(self, name)
        # React to the custom action fired when due invoices should be created.
        self.catch('Custom.In.CreateDueInvoices', self._createDueInvoices)
    @defer.inlineCallbacks
    def _createDueInvoices(self, action, payload):
        # For every contract whose next-invoice date is today or earlier:
        # clone the contract document into a new OPEN invoice covering the
        # elapsed billing period, then advance the contract's next-invoice
        # date by its billing interval.
        print "Create Due invoices"
        today = date.today()
        db = Database.getDatabase(None)
        try:
            # View keys start with [year, month, day]; the endkey limits the
            # result to contracts due up to and including today.
            rows = yield db.view('_design/couchapp/_view/allDueContracts', endkey=[[today.year, today.month, today.day], {}], inclusive_end=True)
            for row in rows:
                doc = CouchdbDocument(data=row['value'])
                # Clone the contract as the new invoice with a fresh id.
                newDoc = doc.clone()
                newDoc.resetDocumentID()
                newDoc.set('status', 'OPEN')
                # nextInvoice is stored as a [year, month, day] triple.
                nextInvoice = doc.get('contract.nextInvoice')
                y, m, d = nextInvoice[0], nextInvoice[1], nextInvoice[2]
                # Advance by the billing interval: M(onthly), Q(uarterly) or
                # J for yearly (presumably German "jaehrlich" -- confirm).
                if doc.get('interval')[0] == 'M':
                    m = m + 1
                elif doc.get('interval')[0] == 'Q':
                    m = m + 3
                elif doc.get('interval')[0] == 'J':
                    y = y + 1
                # Normalise a month overflow into the next year.
                # NOTE(review): the day-of-month is never adjusted, so e.g.
                # day 31 can roll into a shorter month -- verify downstream
                # consumers tolerate that.
                if m > 12:
                    m = m - 12
                    y = y + 1
                # The new invoice covers [old nextInvoice, new nextInvoice).
                newDoc.set('workPeriod.fromDate', doc.get('contract.nextInvoice'))
                doc.set('contract.nextInvoice', [y, m, d])
                newDoc.set('workPeriod.toDate', doc.get('contract.nextInvoice'))
                yield db.save(doc._data)
                yield db.save(newDoc._data)
        except Exception as e:
            # Best-effort: log and swallow so the room keeps running.
            print "allcontractsContext EXCEPTION", e
|
{
"content_hash": "10562b64b0295f0fc388f34df53db6d7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 145,
"avg_line_length": 33.62264150943396,
"alnum_prop": 0.5286195286195287,
"repo_name": "FreshXOpenSource/wallaby-app-crm",
"id": "0cb419526753cf5bbc6cad0a422feb5d53e89e32",
"size": "1888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wallaby/apps/crm/rooms/allcontracts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "11766"
},
{
"name": "Python",
"bytes": "41982"
}
],
"symlink_target": ""
}
|
"""Adds pre hook to data store queries to hide internal-only data.
Checks if the user has a google.com address, and hides data with the
internal_only property set if not.
"""
import webapp2
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import users
from google.appengine.datastore import datastore_pb
from dashboard.common import utils
# The list below contains all kinds that have an internal_only property.
# IMPORTANT: any new data types with internal_only properties must be added
# here in order to be restricted to internal users.
_INTERNAL_ONLY_KINDS = [
    'Bot',
    'TestMetadata',
    'Row',
    'Sheriff',
    'Anomaly',
    'TryJob',
    'TableConfig',
    'Histogram',
    'SparseDiagnostic',
]
# Permissions namespaces, returned by GetNamespace() below to partition
# datastore data between internal and external users.
EXTERNAL = 'externally_visible'
INTERNAL = 'internal_only'
def InstallHooks():
  """Installs datastore pre hook to add access checks to queries.

  This only needs to be called once, when doing config (currently in
  appengine_config.py).
  """
  # Run _DatastorePreHook before every datastore_v3 RPC is executed.
  apiproxy_stub_map.apiproxy.GetPreCallHooks().Push(
      '_DatastorePreHook', _DatastorePreHook, 'datastore_v3')
def SetPrivilegedRequest():
  """Marks the current request as privileged for its entire lifetime.

  This should ONLY be called for handlers that are restricted from end users
  by some other mechanism (IP whitelisting, admin-only pages), and should be
  set once per request, before accessing the data store.
  """
  webapp2.get_request().registry['privileged'] = True
def SetSinglePrivilegedRequest():
  """Marks the current request as privileged for a single query only.

  Call this ONLY from handlers that have verified privilege immediately
  before making a query; the flag is automatically cleared when the next
  query is made.
  """
  webapp2.get_request().registry['single_privileged'] = True
def CancelSinglePrivilegedRequest():
  """Revokes a previously granted single-query privilege."""
  webapp2.get_request().registry['single_privileged'] = False
def _IsServicingPrivilegedRequest():
  """Checks whether the current request should be treated as privileged."""
  try:
    request = webapp2.get_request()
  except AssertionError:
    # Raised when code runs outside a request context (e.g. unit tests).
    return False
  # Internal task/pipeline endpoints are always privileged.
  path = getattr(request, 'path', '')
  if path.startswith(('/mapreduce', '/_ah/queue/deferred', '/_ah/pipeline/')):
    return True
  registry = request.registry
  if registry.get('privileged', False):
    return True
  # A single-use privilege grant is consumed here.
  if registry.get('single_privileged', False):
    registry['single_privileged'] = False
    return True
  # Finally, fall back to the IP whitelist.
  whitelist = utils.GetIpWhitelist()
  if whitelist and hasattr(request, 'remote_addr'):
    return request.remote_addr in whitelist
  return False
def IsUnalteredQueryPermitted():
  """Checks if the current user is internal, or the request is privileged.

  "Internal users" are users whose email address belongs to a certain
  privileged domain; but some privileged requests, such as task queue tasks,
  are also considered privileged.

  Returns:
    True for users with google.com emails and privileged requests.
  """
  # Admins may use non-internal accounts (the default dev appserver login is
  # test@example.com), so they are accepted explicitly as well. Short-circuit
  # evaluation preserves the original check order.
  return (utils.IsInternalUser() or users.is_current_user_admin()
          or _IsServicingPrivilegedRequest())
def GetNamespace():
  """Returns the namespace prefix for the current user (internal or external)."""
  if IsUnalteredQueryPermitted():
    return INTERNAL
  return EXTERNAL
def _DatastorePreHook(service, call, request, _):
  """Adds a filter which checks whether to return internal data for queries.

  If the user is not privileged, we don't want to return any entities that
  have internal_only set to True. That is done here in a datastore hook.
  See: https://developers.google.com/appengine/articles/hooks

  Args:
    service: Service name, must be 'datastore_v3'.
    call: String representing function to call. One of 'Put', 'Get',
        'Delete', or 'RunQuery'.
    request: Request protobuf.
    _: Response protobuf (not used).
  """
  assert service == 'datastore_v3'
  # Only queries are filtered; writes/reads by key pass through unchanged.
  if call != 'RunQuery':
    return
  # Kinds without an internal_only property need no filtering.
  if request.kind() not in _INTERNAL_ONLY_KINDS:
    return
  if IsUnalteredQueryPermitted():
    return
  # Add a filter for internal_only == False, because the user is external.
  try:
    external_filter = request.filter_list().add()
  except AttributeError:
    # This is required to support proto1, which may be used by the unit tests.
    # Later, if we don't need to support proto1, then this can be removed.
    external_filter = request.add_filter()
  external_filter.set_op(datastore_pb.Query_Filter.EQUAL)
  new_property = external_filter.add_property()
  new_property.set_name('internal_only')
  new_property.mutable_value().set_booleanvalue(False)
  new_property.set_multiple(False)
|
{
"content_hash": "39c7704a5a0c743803bb52400591535e",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 80,
"avg_line_length": 32.38461538461539,
"alnum_prop": 0.7294140934283452,
"repo_name": "catapult-project/catapult-csm",
"id": "15aaaba6aabf02a0061c219278084416c2d98793",
"size": "5215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/dashboard/common/datastore_hooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
'''
@author: frank
'''
import sys, os, os.path
import cephagent
from zstacklib.utils import log
from zstacklib.utils import linux
import zstacklib.utils.iptables as iptables
# Pid file for the daemon; its directory is created on demand by
# prepare_pid_dir() before the daemon starts.
pidfile = '/var/run/zstack/ceph-backupstorage.pid'
log.configure_log('/var/log/zstack/ceph-backupstorage.log')
logger = log.get_logger(__name__)
def prepare_pid_dir(path):
    """Ensure the parent directory of *path* exists, creating it if needed."""
    parent_dir = os.path.dirname(path)
    if not os.path.isdir(parent_dir):
        os.makedirs(parent_dir)
def main():
    """Command-line entry point: start, stop or restart the ceph backup-storage daemon."""
    usage = 'usage: python -c "from cephbackupstorage import cdaemon; cdaemon.main()" start|stop|restart'
    if len(sys.argv) != 2 or not sys.argv[1] in ['start', 'stop', 'restart']:
        print usage
        sys.exit(1)
    global pidfile
    prepare_pid_dir(pidfile)
    try:
        # Open the agent's TCP port in the firewall before touching the daemon.
        iptc = iptables.from_iptables_save()
        iptc.add_rule('-A INPUT -p tcp -m tcp --dport 7761 -j ACCEPT')
        iptc.iptable_restore()
        cmd = sys.argv[1]
        agentdaemon = cephagent.CephDaemon(pidfile)
        if cmd == 'start':
            logger.debug('zstack-ceph-backupstorage starts')
            agentdaemon.start()
        elif cmd == 'stop':
            logger.debug('zstack-ceph-backupstorage stops')
            agentdaemon.stop()
        elif cmd == 'restart':
            logger.debug('zstack-ceph-backupstorage restarts')
            agentdaemon.restart()
        sys.exit(0)
    except Exception:
        # Log the full stacktrace and exit non-zero; there is no other
        # failure-reporting channel for this wrapper.
        logger.warning(linux.get_exception_stacktrace())
        sys.exit(1)
# Allow running the daemon control module directly as a script.
if __name__ == '__main__':
    main()
|
{
"content_hash": "78106b8a0114e958aed612408475741f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 105,
"avg_line_length": 30,
"alnum_prop": 0.5948717948717949,
"repo_name": "ghxandsky/zstack-utility",
"id": "4a2d3b5f6ddee16969e4ed443f625157772a29cb",
"size": "1560",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cephbackupstorage/cephbackupstorage/cdaemon.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "1147"
},
{
"name": "HTML",
"bytes": "4277"
},
{
"name": "Puppet",
"bytes": "10604"
},
{
"name": "Python",
"bytes": "1507292"
},
{
"name": "Shell",
"bytes": "188218"
}
],
"symlink_target": ""
}
|
import csv

# Put the full path to your CSV/Excel file here
MY_FILE = "2016Q2.csv"

# One dict per data row, keyed by the header fields.
parsed_data = []

# Use a context manager so the file handle is closed even on error
# (the original left the file open).
with open(MY_FILE) as opened_file:
    csv_data = csv.reader(opened_file, delimiter=',')

    # Skip over the first line and save the second as headers.
    next(csv_data)
    fields = next(csv_data)

    # Remove empty elements of the fields.
    fields = [item for item in fields if item != '']

    # Iterate over each row, zipping field -> value; BREAK when the growth
    # forecasts section is reached. Guard against completely empty rows,
    # which would otherwise raise IndexError on row[0].
    for row in csv_data:
        if row and "GROWTH" in row[0]:
            break
        parsed_data.append(dict(zip(fields, row)))

# The row preceding the GROWTH marker is empty -- drop it if present
# (guard avoids IndexError on an empty result).
if parsed_data:
    del parsed_data[-1]

print(len(parsed_data))
|
{
"content_hash": "674a73434a62d58c87faea1aee872740",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 27.482758620689655,
"alnum_prop": 0.6424090338770388,
"repo_name": "BadWizard/Inflation",
"id": "b0ef6ce00966b1eeceea44edaeb2f877648e5542",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DATA/temp/SPF/parse-SPF.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18"
},
{
"name": "Groff",
"bytes": "182493"
},
{
"name": "Jupyter Notebook",
"bytes": "1866663"
},
{
"name": "Python",
"bytes": "36618"
},
{
"name": "R",
"bytes": "9755"
},
{
"name": "Shell",
"bytes": "2033"
}
],
"symlink_target": ""
}
|
from .models import Model
|
{
"content_hash": "2a886a2174faec3b46dd7cf5fc327536",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 25,
"avg_line_length": 25,
"alnum_prop": 0.84,
"repo_name": "uetke/experimentor",
"id": "c5861bec602afe49128885336325c1f557e7d0e5",
"size": "25",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experimentor/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "194029"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import collections
import itertools
import logging
import sys
import gold_inexact_matching.iterative_parameter_optimizer\
as iterative_optimizer
from gold_inexact_matching import parameter_set
class LocalMinimaParameterOptimizer(
    iterative_optimizer.IterativeParameterOptimizer):
  """A ParameterOptimizer to find local minima.

  Works on any number of variable parameters and is faster than brute
  forcing, but not guaranteed to find all interesting parameter combinations.
  """
  # Lower bounds for the user-supplied per-parameter weights.
  MIN_EDGE_THRESHOLD_WEIGHT = 0
  MIN_MAX_DIFF_WEIGHT = MIN_DELTA_THRESHOLD_WEIGHT = 0
  def __init__(self, args, test_name):
    super().__init__(args, test_name)
    # These are (or will be) maps of ints to maps of ints to ints, i.e. a 2D
    # array containing ints, just using maps instead of lists. They hold the
    # most permissive value visited so far that resulted in a comparison failure
    # for a particular parameter given the other two parameters. These are used
    # to prune combinations we don't care about, similar to skipping
    # combinations that produce a higher weight than our smallest.
    # Delta -> Edge -> Max Diff
    self._permissive_max_diff_map = {}
    # Max Diff -> Edge -> Delta
    self._permissive_delta_map = {}
    # Max Diff -> Delta -> Edge
    self._permissive_edge_map = {}
  @classmethod
  def AddArguments(cls, parser):
    """Adds this optimizer's command line arguments to |parser|.

    Returns:
      The (common, sobel, fuzzy) argument groups from the parent class,
      extended with the weight arguments used by _GetWeight().
    """
    common_group, sobel_group, fuzzy_group = super(
        LocalMinimaParameterOptimizer, cls).AddArguments(parser)
    common_group.add_argument(
        '--use-bfs',
        action='store_true',
        default=False,
        help='Use a breadth-first search instead of a depth-first search. This '
        'will likely be significantly slower, but is more likely to find '
        'multiple local minima with the same weight.')
    sobel_group.add_argument(
        '--edge-threshold-weight',
        default=1,
        type=int,
        help='The weight associated with the edge threshold. Higher values '
        'will penalize a more permissive parameter value more harshly.')
    fuzzy_group.add_argument(
        '--max-diff-weight',
        default=3,
        type=int,
        help='The weight associated with the maximum number of different '
        'pixels. Higher values will penalize a more permissive parameter value '
        'more harshly.')
    fuzzy_group.add_argument(
        '--delta-threshold-weight',
        default=10,
        type=int,
        help='The weight associated with the per-channel delta sum. Higher '
        'values will penalize a more permissive parameter value more harshly.')
    return common_group, sobel_group, fuzzy_group
  def _VerifyArgs(self):
    """Asserts that the provided weights are within their allowed minimums."""
    super()._VerifyArgs()
    assert self._args.edge_threshold_weight >= self.MIN_EDGE_THRESHOLD_WEIGHT
    assert self._args.max_diff_weight >= self.MIN_MAX_DIFF_WEIGHT
    assert self._args.delta_threshold_weight >= self.MIN_DELTA_THRESHOLD_WEIGHT
  def _RunOptimizationImpl(self):
    """Searches from the most permissive parameters toward local minima.

    Performs a DFS (or BFS with --use-bfs) over the parameter space,
    recording every successful parameter combination that ties the smallest
    weight seen so far, and prints the winners.
    """
    visited_parameters = set()
    to_visit = collections.deque()
    smallest_weight = sys.maxsize
    smallest_parameters = []
    to_visit.append(self._GetMostPermissiveParameters())
    # Do a search, only considering adjacent parameters if:
    # 1. Their weight is less than or equal to the smallest found weight.
    # 2. They haven't been visited already.
    # 3. They are not guaranteed to fail based on previously tested parameters.
    # 4. The current parameters result in a successful comparison.
    while to_visit:
      current_parameters = None
      if self._args.use_bfs:
        current_parameters = to_visit.popleft()
      else:
        current_parameters = to_visit.pop()
      weight = self._GetWeight(current_parameters)
      if weight > smallest_weight:
        continue
      if current_parameters in visited_parameters:
        continue
      if self._ParametersAreGuaranteedToFail(current_parameters):
        visited_parameters.add(current_parameters)
        continue
      visited_parameters.add(current_parameters)
      success, _, _ = self._RunComparisonForParameters(current_parameters)
      if success:
        # Only successful nodes expand the frontier; failures prune it.
        for adjacent in self._AdjacentParameters(current_parameters):
          to_visit.append(adjacent)
        if smallest_weight == weight:
          logging.info('Found additional smallest parameter %s',
                       current_parameters)
          smallest_parameters.append(current_parameters)
        else:
          logging.info('Found new smallest parameter with weight %d: %s',
                       weight, current_parameters)
          smallest_weight = weight
          smallest_parameters = [current_parameters]
      else:
        self._UpdateMostPermissiveFailedParameters(current_parameters)
    print('Found %d parameter(s) with the smallest weight:' %
          len(smallest_parameters))
    for p in smallest_parameters:
      print(p)
  def _ParametersAreGuaranteedToFail(self, parameters):
    """Checks whether the given ParameterSet is guaranteed to fail.

    A ParameterSet is guaranteed to fail if we have already tried and failed
    with a similar ParameterSet that was more permissive. Specifically, if we
    have tried and failed with a ParameterSet with all but one parameters
    matching, and the non-matching parameter was more permissive than the
    current one.

    Args:
      parameters: The ParameterSet instance to check.

    Returns:
      True if |parameters| is guaranteed to fail based on previously tried
      parameters, otherwise False.
    """
    # For max_diff/delta, lower values are stricter; for edge_threshold,
    # higher values are stricter -- hence the inverted comparison below.
    permissive_max_diff = self._permissive_max_diff_map.get(
        parameters.delta_threshold, {}).get(parameters.edge_threshold, -1)
    if parameters.max_diff < permissive_max_diff:
      return True
    permissive_delta = self._permissive_delta_map.get(
        parameters.max_diff, {}).get(parameters.edge_threshold, -1)
    if parameters.delta_threshold < permissive_delta:
      return True
    permissive_edge = self._permissive_edge_map.get(
        parameters.max_diff, {}).get(parameters.delta_threshold, sys.maxsize)
    if parameters.edge_threshold > permissive_edge:
      return True
    return False
  def _UpdateMostPermissiveFailedParameters(self, parameters):
    """Updates the array of most permissive failed parameters.

    This is used in conjunction with _ParametersAreGuaranteedToFail to prune
    ParameterSets without having to actually test them. Values are updated if
    |parameters| shares two parameters with a previously failed ParameterSet,
    but |parameters|' third parameter is more permissive.

    Args:
      parameters: A ParameterSet to pull updated values from.
    """
    permissive_max_diff = self._permissive_max_diff_map.setdefault(
        parameters.delta_threshold, {}).get(parameters.edge_threshold, -1)
    permissive_max_diff = max(permissive_max_diff, parameters.max_diff)
    self._permissive_max_diff_map[parameters.delta_threshold][
        parameters.edge_threshold] = permissive_max_diff
    permissive_delta = self._permissive_delta_map.setdefault(
        parameters.max_diff, {}).get(parameters.edge_threshold, -1)
    permissive_delta = max(permissive_delta, parameters.delta_threshold)
    self._permissive_delta_map[parameters.max_diff][
        parameters.edge_threshold] = permissive_delta
    # min() here because lower edge thresholds are the permissive direction.
    permissive_edge = self._permissive_edge_map.setdefault(
        parameters.max_diff, {}).get(parameters.delta_threshold, sys.maxsize)
    permissive_edge = min(permissive_edge, parameters.edge_threshold)
    self._permissive_edge_map[parameters.max_diff][
        parameters.delta_threshold] = permissive_edge
  def _AdjacentParameters(self, starting_parameters):
    """Yields every distinct neighbor of |starting_parameters|.

    Each of the three parameters is varied one step down, kept, or varied
    one step up (clamped to its configured min/max); every resulting
    combination other than the starting point itself is yielded.
    """
    max_diff = starting_parameters.max_diff
    delta_threshold = starting_parameters.delta_threshold
    edge_threshold = starting_parameters.edge_threshold
    max_diff_step = self._args.max_diff_step
    delta_threshold_step = self._args.delta_threshold_step
    edge_threshold_step = self._args.edge_threshold_step
    max_diffs = [
        max(self._args.min_max_diff, max_diff - max_diff_step), max_diff,
        min(self._args.max_max_diff, max_diff + max_diff_step)
    ]
    delta_thresholds = [
        max(self._args.min_delta_threshold,
            delta_threshold - delta_threshold_step), delta_threshold,
        min(self._args.max_delta_threshold,
            delta_threshold + delta_threshold_step)
    ]
    edge_thresholds = [
        max(self._args.min_edge_threshold,
            edge_threshold - edge_threshold_step), edge_threshold,
        min(self._args.max_edge_threshold, edge_threshold + edge_threshold_step)
    ]
    for combo in itertools.product(max_diffs, delta_thresholds,
                                   edge_thresholds):
      adjacent = parameter_set.ParameterSet(combo[0], combo[1], combo[2])
      if adjacent != starting_parameters:
        yield adjacent
  def _GetWeight(self, parameters):
    """Returns the weight of |parameters|; lower weights are better.

    The edge threshold is inverted (MAX_EDGE_THRESHOLD - value) because a
    lower edge threshold is the more permissive direction for that parameter.
    """
    return (parameters.max_diff * self._args.max_diff_weight +
            parameters.delta_threshold * self._args.delta_threshold_weight +
            (self.MAX_EDGE_THRESHOLD - parameters.edge_threshold) *
            self._args.edge_threshold_weight)
|
{
"content_hash": "7804ad8a70372bff90a79eec474e2181",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 80,
"avg_line_length": 40.77777777777778,
"alnum_prop": 0.6898092643051771,
"repo_name": "scheib/chromium",
"id": "4389dc9619224a85be8091d5dc0defe72cd28c29",
"size": "9338",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "content/test/gpu/gold_inexact_matching/local_minima_parameter_optimizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import json
from itertools import ifilter
from django import forms
from django.core.exceptions import ObjectDoesNotExist,ValidationError
from django.forms.widgets import HiddenInput,TextInput
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from tablemanager.models import (Normalise,NormalTable,Normalise_NormalTable,Publish,
Publish_NormalTable,ForeignTable,Input,NormalTable,Workspace,DataSource,
PublishChannel,DatasourceType)
from borg_utils.form_fields import GroupedModelChoiceField,CachedModelChoiceField
from borg_utils.widgets import MultiWidgetLayout
from borg_utils.form_fields import GeoserverSettingForm,MetaTilingFactorField,GridSetField,BorgSelect
from borg_utils.forms import BorgModelForm
from django.template import Context, Template
class ForeignTableForm(BorgModelForm):
    """Model form for creating and editing ForeignTable instances."""

    def __init__(self, *args, **kwargs):
        super(ForeignTableForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and instance.pk:
            # An existing table can neither be renamed nor moved to another
            # server.
            self.fields['name'].widget.attrs['readonly'] = True
            # Unwrap RelatedFieldWidgetWrapper to drop the "+" (add) icon,
            # since the server field is read-only anyway.
            server_field = self.fields['server']
            server_field.widget = server_field.widget.widget
            server_field.widget.attrs['readonly'] = True

    class Meta:
        model = ForeignTable
        fields = "__all__"
        widgets = {
            'server': BorgSelect(),
        }
class DataSourceForm(BorgModelForm):
    """
    A form for DataSource Model

    Changing the "type" select re-submits the form with a hidden
    "_change_type" marker so change_type() can pre-populate the sql field
    for the newly chosen type.
    """
    # Mode id returned by get_mode() when the type field was changed.
    CHANGE_TYPE = 100
    def __init__(self, *args, **kwargs):
        super(DataSourceForm, self).__init__(*args, **kwargs)
        # Name and type are immutable once the data source has been saved.
        if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
            self.fields['name'].widget.attrs['readonly'] = True
            self.fields['type'].widget.attrs['readonly'] = True
    def get_mode(self,data):
        # NOTE(review): the tuple layout (mode id, handler name, two flags,
        # field subset) follows BorgModelForm.get_mode -- confirm flag
        # semantics against the base class.
        if data and "_change_type" in data:
            return (DataSourceForm.CHANGE_TYPE,"change_type",True,False,('name','type'))
        return super(DataSourceForm,self).get_mode(data)
    def change_type(self):
        # Pre-populate the sql field with a CREATE SERVER template for
        # database-backed sources; clear it for all other types.
        if self.instance.type == DatasourceType.DATABASE:
            self.data['sql'] = "CREATE SERVER {{self.name}} FOREIGN DATA WRAPPER oracle_fdw OPTIONS (dbserver '//<hostname>/<sid>');"
        else:
            self.data['sql'] = ""
    class Meta:
        model = DataSource
        fields = "__all__"
        widgets = {
            'type': BorgSelect(attrs={"onChange":"django.jQuery('#datasource_form').append(\"<input type='hidden' name='_change_type' value=''>\");django.jQuery('#datasource_form').submit()"}),
            'description': forms.TextInput(attrs={"style":"width:95%"})
        }
class InputForm(BorgModelForm):
    """
    A form for Input Model

    Changing the data source or foreign table re-submits the form with a
    hidden marker input ("_change_data_source" / "_change_foreign_table");
    get_mode() maps those markers to the handler methods below, which
    regenerate the dependent field values on the server side.
    """
    # Mode ids returned by get_mode().
    INSERT_FIELDS = 100
    CHANGE_DATA_SOURCE = 101
    # NOTE(review): CHANGE_FOREIGN_TABLE is defined but get_mode() returns
    # CHANGE_DATA_SOURCE for the "_change_foreign_table" marker -- confirm
    # whether that mode id mismatch is intentional.
    CHANGE_FOREIGN_TABLE = 102
    foreign_table = CachedModelChoiceField(queryset=ForeignTable.objects.all(),label_func=lambda table:table.name,required=False,choice_family="foreigntable",choice_name="foreigntable_options",
            widget=BorgSelect(attrs={"onChange":"$('#input_form').append(\"<input type='hidden' name='_change_foreign_table' value=''>\"); $('#input_form').submit()"}))
    def __init__(self, *args, **kwargs):
        super(InputForm, self).__init__(*args, **kwargs)
        #remote the "+" icon from html page because this will trigger onchange event and cause recusive submit html form to server
        self.fields['data_source'].widget = self.fields['data_source'].widget.widget
        # Existing inputs cannot change their name, data source or table.
        if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
            self.fields['name'].widget.attrs['readonly'] = True
            self.fields['data_source'].widget.attrs['readonly'] = True
            self.fields['foreign_table'].widget.attrs['readonly'] = True
    def get_mode(self,data):
        # Map hidden marker inputs to (mode id, handler name, flags, fields).
        if data and "_insert_fields" in data:
            return (InputForm.INSERT_FIELDS,"insert_fields",True,False,None)
        elif data and "_change_data_source" in data:
            return (InputForm.CHANGE_DATA_SOURCE,"change_data_source",True,False,('name','data_source'))
        elif data and "_change_foreign_table" in data:
            return (InputForm.CHANGE_DATA_SOURCE,"change_foreign_table",True,False,('name','data_source','foreign_table'))
        return super(InputForm,self).get_mode(data)
    def insert_fields(self):
        # Refresh the source text and rebuild the foreign-table choices for
        # the instance's current data source.
        self.data['source'] = self.instance.source
        self.fields['foreign_table'].queryset = ForeignTable.objects.filter(server=self.instance.data_source)
        self.fields['foreign_table'].choice_name = "foreigntable_options_{}".format(self.instance.data_source.name)
        self.fields['foreign_table'].widget.choices = self.fields['foreign_table'].choices
    def change_data_source(self):
        # Reset the source text according to the data source type; database
        # sources also need their foreign-table choices restricted.
        if not hasattr(self.instance,"data_source"):
            self.data['source'] = ""
        elif self.instance.data_source.type == DatasourceType.FILE_SYSTEM:
            self.data['source'] = self.instance.data_source.vrt
        elif self.instance.data_source.type == DatasourceType.DATABASE:
            self.fields['foreign_table'].queryset = ForeignTable.objects.filter(server=self.instance.data_source)
            self.fields['foreign_table'].choice_name = "foreigntable_options_{}".format(self.instance.data_source.name)
            self.fields['foreign_table'].widget.choices = self.fields['foreign_table'].choices
            self.data['source'] = ""
        else:
            self.data['source'] = ""
    def change_foreign_table(self):
        # Render the data source's vrt template against the instance to
        # produce the source text, then rebuild foreign-table choices.
        self.data['source'] = str(Template(self.instance.data_source.vrt).render(Context({'self':self.instance,'db':Input.DB_TEMPLATE_CONTEXT})))
        self.fields['foreign_table'].queryset = ForeignTable.objects.filter(server=self.instance.data_source)
        self.fields['foreign_table'].choice_name = "foreigntable_options_{}".format(self.instance.data_source.name)
        self.fields['foreign_table'].widget.choices = self.fields['foreign_table'].choices
    class Meta:
        model = Input
        fields = "__all__"
        widgets = {
            'data_source': BorgSelect(attrs={"onChange":"$('#input_form').append(\"<input type='hidden' name='_change_data_source' value=''>\"); $('#input_form').submit();"}),
        }
class NormalTableForm(BorgModelForm):
    """Model form for creating and editing NormalTable instances."""

    def __init__(self, *args, **kwargs):
        super(NormalTableForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and instance.pk:
            # Existing tables cannot be renamed.
            self.fields['name'].widget.attrs['readonly'] = True

    class Meta:
        model = NormalTable
        fields = "__all__"
class PublishChannelForm(BorgModelForm):
    """Model form for creating and editing PublishChannel instances."""

    def __init__(self, *args, **kwargs):
        super(PublishChannelForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and instance.pk:
            # Existing channels cannot be renamed.
            self.fields['name'].widget.attrs['readonly'] = True

    class Meta:
        model = PublishChannel
        fields = "__all__"
class WorkspaceForm(BorgModelForm):
    """Model form for creating and editing Workspace instances."""

    def __init__(self, *args, **kwargs):
        super(WorkspaceForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and instance.pk:
            # An existing workspace can neither be renamed nor moved to a
            # different publish channel.
            self.fields['name'].widget.attrs['readonly'] = True
            # Unwrap RelatedFieldWidgetWrapper to drop the "+" (add) icon,
            # since the channel field is read-only anyway.
            channel_field = self.fields['publish_channel']
            channel_field.widget = channel_field.widget.widget
            channel_field.widget.attrs['readonly'] = True

    class Meta:
        model = Workspace
        fields = "__all__"
        widgets = {
            'publish_channel': BorgSelect(),
        }
class NormaliseForm(BorgModelForm):
    """
    A form for Normalise Model

    "dependents" is not a direct model field: it is unpacked from the
    instance's Normalise_NormalTable relation rows in __init__() and packed
    back into them in _post_clean().
    """
    input_table = GroupedModelChoiceField('data_source',queryset=Input.objects.all(),required=True,choice_family="input",choice_name="input_options")
    dependents = forms.ModelMultipleChoiceField(queryset=NormalTable.objects.all(),required=False)
    output_table = forms.ModelChoiceField(queryset=NormalTable.objects.all(),required=False,widget=BorgSelect())
    def __init__(self, *args, **kwargs):
        kwargs['initial']=kwargs.get('initial',{})
        if 'instance' in kwargs and kwargs['instance']:
            # Pre-select the current output table, if one exists.
            try:
                kwargs['initial']['output_table']=kwargs['instance'].normaltable
            except ObjectDoesNotExist:
                pass
            # Flatten the relation rows into the initial "dependents" list.
            dependents = []
            for relation in (kwargs['instance'].relations):
                if relation:
                    for normal_table in relation.normal_tables:
                        if normal_table: dependents.append(normal_table)
            kwargs['initial']['dependents'] = dependents
        super(NormaliseForm, self).__init__(*args, **kwargs)
        # Name and output table are immutable after creation.
        if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
            self.fields['name'].widget.attrs['readonly'] = True
            self.fields['output_table'].widget.attrs['readonly'] = True
    def _post_clean(self):
        super(NormaliseForm,self)._post_clean()
        if self.errors:
            return
        if 'output_table' in self.cleaned_data:
            self.instance.normal_table = self.cleaned_data['output_table']
        else:
            self.instance.normal_table = None
        # Distribute the selected dependents (ordered by pk) across the
        # instance's fixed-size relation slots, clearing leftover slots.
        if 'dependents' in self.cleaned_data:
            sorted_dependents = self.cleaned_data['dependents'].order_by('pk')
        else:
            sorted_dependents = []
        pos = 0
        normal_table_pos = 0
        relation_index = 0
        length = len(sorted_dependents)
        for relation in (self.instance.relations):
            normal_table_pos = 0
            if pos < length:
                # Lazily create a relation row while dependents remain.
                if relation is None:
                    relation = Normalise_NormalTable()
                    self.instance.set_relation(relation_index,relation)
            if relation is not None:
                for normal_table in relation.normal_tables:
                    if pos < length:
                        relation.set_normal_table(normal_table_pos, sorted_dependents[pos])
                    elif relation:
                        relation.set_normal_table(normal_table_pos, None)
                    pos += 1
                    normal_table_pos += 1
            relation_index += 1
    class Meta:
        model = Normalise
        fields = ('name','input_table','dependents','output_table','sql')
class PublishForm(GeoserverSettingForm,BorgModelForm):
    """
    A form for normal table's Publish Model

    The create_cache_layer / *_cache_expire fields are geoserver settings
    (marked via setting_type) that GeoserverSettingForm moves between the
    form and the model. "dependents" is unpacked from / packed into the
    instance's Publish_NormalTable relation rows, mirroring NormaliseForm.
    """
    create_cache_layer = forms.BooleanField(required=False,label="create_cache_layer",initial=True)
    create_cache_layer.setting_type = "geoserver_setting"
    server_cache_expire = forms.IntegerField(label="server_cache_expire",min_value=0,required=False,initial=0,help_text="Expire server cache after n seconds (set to 0 to use source setting)")
    server_cache_expire.setting_type = "geoserver_setting"
    client_cache_expire = forms.IntegerField(label="client_cache_expire",min_value=0,required=False,initial=0,help_text="Expire client cache after n seconds (set to 0 to use source setting)")
    client_cache_expire.setting_type = "geoserver_setting"
    workspace = GroupedModelChoiceField('publish_channel',queryset=Workspace.objects.all(),required=True,choice_family="workspace",choice_name="workspace_choices",widget=BorgSelect())
    input_table = GroupedModelChoiceField('data_source',queryset=Input.objects.all(),required=False,choice_family="input",choice_name="input_options")
    dependents = forms.ModelMultipleChoiceField(queryset=NormalTable.objects.all(),required=False)
    def __init__(self, *args, **kwargs):
        kwargs['initial']=kwargs.get('initial',{})
        # Copy geoserver settings from the model into the initial data.
        self.get_setting_from_model(*args,**kwargs)
        if 'instance' in kwargs and kwargs['instance']:
            #populate the dependents field value from table data
            dependents = []
            for relation in (kwargs['instance'].relations):
                if relation:
                    for normal_table in relation.normal_tables:
                        if normal_table: dependents.append(normal_table)
            kwargs['initial']['dependents'] = dependents
        super(PublishForm, self).__init__(*args, **kwargs)
        # Name and workspace are immutable after creation.
        if 'instance' in kwargs and kwargs['instance'] and kwargs['instance'].pk:
            self.fields['name'].widget.attrs['readonly'] = True
            self.fields['workspace'].widget.attrs['readonly'] = True
    def _post_clean(self):
        super(PublishForm,self)._post_clean()
        if self.errors:
            return
        #populate the value of the relation columns
        # Distribute the selected dependents (ordered by pk) across the
        # instance's fixed-size relation slots, clearing leftover slots.
        if 'dependents' in self.cleaned_data:
            sorted_dependents = self.cleaned_data['dependents'].order_by('pk')
        else:
            sorted_dependents = []
        pos = 0
        normal_table_pos = 0
        relation_index = 0
        length = len(sorted_dependents)
        for relation in (self.instance.relations):
            normal_table_pos = 0
            if pos < length:
                # Lazily create a relation row while dependents remain.
                if relation is None:
                    relation = Publish_NormalTable()
                    self.instance.set_relation(relation_index,relation)
            if relation is not None:
                for normal_table in relation.normal_tables:
                    if pos < length:
                        relation.set_normal_table(normal_table_pos, sorted_dependents[pos])
                    elif relation:
                        relation.set_normal_table(normal_table_pos, None)
                    pos += 1
                    normal_table_pos += 1
            relation_index += 1
        # Only spatial publishes carry geoserver settings back to the model.
        if self.instance and self.instance.is_spatial:
            self.set_setting_to_model()
    class Meta:
        model = Publish
        fields = ('name','workspace','interval','status','input_table','dependents','priority','sql','create_extra_index_sql')
|
{
"content_hash": "1bf79a86fd4ce85db3a265fae6b4b87c",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 197,
"avg_line_length": 44.2111801242236,
"alnum_prop": 0.6262995223377353,
"repo_name": "parksandwildlife/borgcollector",
"id": "4aecefe825764240c385197d5ff10c3471109fb2",
"size": "14236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tablemanager/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9821"
},
{
"name": "JavaScript",
"bytes": "55"
},
{
"name": "Python",
"bytes": "724885"
}
],
"symlink_target": ""
}
|
import attr
@attr.s()
class UserParameters:
    """
    The user parameters entity
    """
    # NOTE(review): attr.ib(convert=...) is the legacy spelling; newer
    # releases of the attrs library renamed it to ``converter=`` and
    # eventually removed ``convert`` — verify against the pinned version.
    profile_name = attr.ib(convert=str, default="Default Profile")
    lang = attr.ib(convert=str, default="en")
    referential = attr.ib(convert=int, default=0)
    expert_mode = attr.ib(convert=bool, default=False)
    digits_after_comma = attr.ib(convert=int, default=2)
    maximized = attr.ib(convert=bool, default=False)
    notifications = attr.ib(convert=bool, default=True)
    enable_proxy = attr.ib(convert=bool, default=False)
    # proxy_type: meaning of the integer code is defined elsewhere — TODO confirm
    proxy_type = attr.ib(convert=int, default=0)
    proxy_address = attr.ib(convert=str, default="")
    proxy_port = attr.ib(convert=int, default=8080)
    proxy_user = attr.ib(convert=str, default="")
    proxy_password = attr.ib(convert=str, default="")
    dark_theme = attr.ib(convert=bool, default=False)
    def proxy(self):
        """Return the proxy URL string, or implicitly None when proxying
        is disabled.

        Credentials are embedded verbatim (no URL-escaping), so special
        characters in user/password would yield an invalid URL.
        """
        if self.enable_proxy is True:
            if self.proxy_user and self.proxy_password:
                return "http://{:}:{:}@{:}:{:}".format(self.proxy_user,
                                                       self.proxy_password,
                                                       self.proxy_address,
                                                       self.proxy_port)
            else:
                return "http://{0}:{1}".format(self.proxy_address, self.proxy_port)
|
{
"content_hash": "fb12f266ed219b4ac95821fddeb1c4da",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 83,
"avg_line_length": 42.4375,
"alnum_prop": 0.569219440353461,
"repo_name": "ucoin-io/cutecoin",
"id": "9bd51205ad63604cef636833f7991f448f4ae52e",
"size": "1358",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sakia/data/entities/user_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2475"
},
{
"name": "JavaScript",
"bytes": "1594"
},
{
"name": "PowerShell",
"bytes": "3111"
},
{
"name": "Python",
"bytes": "718811"
},
{
"name": "Shell",
"bytes": "3983"
}
],
"symlink_target": ""
}
|
from django import forms
# Site contact form: name, email and free-text message.
class ContactForm(forms.Form):
    """Collects a visitor's name, email address and message text."""

    contact_name = forms.CharField(required=True)
    contact_email = forms.EmailField(required=True)
    content = forms.CharField(
        required=True,
        widget=forms.Textarea
    )

    def __init__(self, *args, **kwargs):
        super(ContactForm, self).__init__(*args, **kwargs)
        # Localised field labels (strings kept exactly as shipped).
        field_labels = {
            'contact_name': "Ваше имя:",
            'contact_email': "Your email:",
            'content': "Что Вас интересует?",
        }
        for field_name, label_text in field_labels.items():
            self.fields[field_name].label = label_text
|
{
"content_hash": "79c2af29a373be4cfb096a9f59bcc5bd",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 60,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6336996336996337,
"repo_name": "skylifewww/pharmacognosy",
"id": "d4ad91322c5c9a78b0e21975f44a16e59af7d219",
"size": "595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pharmacognosy/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26540"
},
{
"name": "HTML",
"bytes": "101464"
},
{
"name": "JavaScript",
"bytes": "71414"
},
{
"name": "Makefile",
"bytes": "1487"
},
{
"name": "Nginx",
"bytes": "661"
},
{
"name": "Python",
"bytes": "92569"
}
],
"symlink_target": ""
}
|
""" Command Line Interface Module """
import optparse
import sys
import os
import requests
import json
import yaml
import getpass
import curses
import time
import signal
from outbit.parser import yacc
# Shared HTTP session (connection re-use across requests) and signal-state
# flags polled by the interactive shell; 1 means the key was pressed since
# the flag was last reset.
session = requests.Session()
sig_bg_pressed = 0
sig_kill_pressed = 0
# UNICODE
# Set the locale before curses is initialised so wide characters render.
import locale
locale.setlocale(locale.LC_ALL, '')
# catch ctrl-z
def sig_background(signum, frame):
    """SIGTSTP handler: record that ctrl-z was pressed (polled by the CLI)."""
    global sig_bg_pressed
    sig_bg_pressed = 1
# catch ctrl-c
def sig_kill(signum, frame):
    """SIGINT handler: record that ctrl-c was pressed (polled by the CLI)."""
    global sig_kill_pressed
    sig_kill_pressed = 1
# Install handlers at import time; both handlers only set flags, the
# running job-poll loop reacts to them.
# signal for ctrl-z
signal.signal(signal.SIGTSTP, sig_background)
# signal for ctrl-c
signal.signal(signal.SIGINT, sig_kill)
class Cli(object):
    """ outbit CLI

    Interactive (curses) or one-shot client for the outbit-api server.
    Settings come from, in priority order: command-line options,
    ~/.outbit.conf or /etc/outbit.conf (YAML), then hard defaults.
    """
    def __init__(self):
        """ Setup Arguments and Options for CLI """
        # Parse CLI Arguments
        parser = optparse.OptionParser()
        parser.add_option("-u", "--user", dest="user",
                          help="outbit username",
                          metavar="USER",
                          default=None)
        parser.add_option("-s", "--server", dest="server",
                          help="IP address or hostname of outbit-api server",
                          metavar="SERVER",
                          default=None)
        parser.add_option("-p", "--port", dest="port",
                          help="tcp port of outbit-api server",
                          metavar="PORT",
                          default=None)
        parser.add_option("-t", "--insecure", dest="is_secure",
                          help="Do Not Use SSL",
                          metavar="SECURE",
                          action="store_false",
                          default=True)
        parser.add_option("-k", "--no-check-certificates", dest="is_ssl_verify",
                          help="Ignore Unverified Certificate",
                          metavar="VERIFY",
                          action="store_false",
                          default=True)
        # Assign values from cli
        (options, args) = parser.parse_args()
        self.user = options.user
        self.server = options.server
        self.port = options.port
        self.is_secure = options.is_secure
        self.is_ssl_verify = options.is_ssl_verify
        self.interactive_mode = True
        self.noninteractive_commands = []
        self.password = None
        self.app_running = True
        # Do Not Display SSL Verify Warning to stderr
        requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
        # Non-Interactive Command Parsing
        # (positional arguments are treated as commands to run once, then exit)
        if len(args) > 0:
            self.interactive_mode = False
            for command in args:
                self.noninteractive_commands.append(command)
        # Assign values from conf
        # CLI options win over config values; home-dir config is read before
        # the system-wide one, and later files overwrite outbit_conf_obj.
        outbit_config_locations = [os.path.expanduser("~")+"/.outbit.conf", "/etc/outbit.conf"]
        outbit_conf_obj = {}
        for outbit_conf in outbit_config_locations:
            if os.path.isfile(outbit_conf):
                with open(outbit_conf, 'r') as stream:
                    try:
                        # NOTE(review): yaml.load without an explicit Loader can
                        # construct arbitrary objects; yaml.safe_load would be
                        # safer even for local config files.
                        outbit_conf_obj = yaml.load(stream)
                    except yaml.YAMLError as excep:
                        print("%s\n" % excep)
        if self.user is None and "user" in outbit_conf_obj:
            self.user = str(outbit_conf_obj["user"])
        if self.password is None and "password" in outbit_conf_obj:
            self.password = str(outbit_conf_obj["password"])
        if self.server is None and "server" in outbit_conf_obj:
            self.server = str(outbit_conf_obj["server"])
        if self.port is None and "port" in outbit_conf_obj:
            self.port = int(outbit_conf_obj["port"])
        if self.is_secure == True and "secure" in outbit_conf_obj:
            self.is_secure = bool(outbit_conf_obj["secure"])
        if self.is_ssl_verify == True and "ssl_verify" in outbit_conf_obj:
            self.is_ssl_verify = bool(outbit_conf_obj["ssl_verify"])
        # Assign Default values if they were not specified at the cli or in the conf
        if self.user is None:
            self.user = "superadmin"
        if self.server is None:
            self.server = "127.0.0.1"
        if self.port is None:
            self.port = 8088
        self.url = "%s://%s:%d" % ("https" if self.is_secure else "http", str(self.server), int(self.port))
        self.screen = None     # curses window, created in startshell()
        self.history = []      # shell command history, newest last
    def welcome(self):
        """ Welcome Message """
        self.screen.addstr("======================\n")
        self.screen.addstr("Welcome To outbit\n")
        if "pong" in self.action_ping():
            self.screen.addstr("Connected to Server %s\n" % self.url)
        else:
            print("Failed connecting to server %s\n" % self.url)
            self.exit(1)
        self.screen.addstr("======================\n")
    def login_prompt(self):
        """Prompt for credentials (up to 3 attempts) and force a password
        change when the well-known default credentials are in use."""
        auth_success = False
        default_username = "superadmin"
        default_pw = "superadmin"
        if self.user is None:
            # NOTE(review): raw_input is Python 2 only; on Python 3 this
            # raises NameError.
            self.user = raw_input("Username: ")
        for trycount in [1, 2, 3]:
            if self.password is None:
                self.password = getpass.getpass()
            if "pong" in self.action_ping():
                auth_success = True
                if self.user == default_username and self.password == default_pw:
                    for change_trycount in [1, 2, 3]:
                        print("Changing Password From Default")
                        new_password = getpass.getpass("Enter New Password: ")
                        new_password_repeat = getpass.getpass("Enter New Password Again: ")
                        if new_password == new_password_repeat:
                            # NOTE(review): `is not ""` is an identity test,
                            # not equality; `!= ""` was probably intended.
                            if self.action_changepw(self.user, new_password) is not "":
                                self.password = new_password
                                break
                break
            else:
                # wrong password: clear it so the next loop iteration re-prompts
                self.password = None
        if auth_success == False:
            print("Login Failed\n")
            self.exit(1)
    def exit(self, val):
        """Terminate the CLI with exit status *val*."""
        sys.exit(val)
        # NOTE(review): unreachable in production (sys.exit raises SystemExit);
        # only reached when sys.exit is patched out in tests.
        self.app_running = False # For unit testing
    def action_changepw(self, username, password):
        """Change *username*'s password server-side; returns the server's
        response string, or "" on transport failure."""
        data = self.run_action(self.get_action_from_command("users edit username='%s' password='%s'"
                                                            % (username, password)))
        if data is not None:
            return data["response"]
        else:
            return ""
    def action_ping(self):
        """Ping the server; returns the response text ("" on failure)."""
        data = self.run_action(self.get_action_from_command("ping"))
        if data is not None:
            return data["response"]
        else:
            return ""
    def is_action_quit(self, action):
        """True when the parsed word list is exactly `quit` or `exit`."""
        return len(action) == 1 and ( action[0] == "quit" or action[0] == "exit" )
    def action_quit(self):
        """Print a goodbye message and terminate with status 0."""
        self.screen.addstr(" Goodbye!\n")
        self.exit(0)
    def run_action(self, actionjson):
        """POST an action dict to the server using HTTP basic auth; return
        the decoded JSON response, or None on a non-200 status."""
        r = session.post(self.url, verify=self.is_ssl_verify, headers={'Content-Type': 'application/json'},
                         auth=(self.user, self.password), data=json.dumps(actionjson))
        if r.status_code == requests.codes.ok:
            return json.loads(r.text)
        else:
            return None
    def get_action_from_command(self, line):
        """Parse a command line into {'category','action','options'} using
        the yacc grammar (which communicates via module globals)."""
        if line is not None and len(line) > 0:
            # Reset Parser Variables
            yacc.parser_category = None
            yacc.parser_action = None
            yacc.parser_options = None
            yacc.parser_error = None
            # Parse line input
            yacc.parser.parse(line)
            # Return Action Object
            return {'category': yacc.parser_category, "action": yacc.parser_action, "options": yacc.parser_options}
        else:
            return {'category': None, "action": None, "options": None}
    def startshell(self, arg):
        """Interactive curses shell: line editing, arrow-key history,
        ctrl-u clear and ctrl-r reverse-i-search."""
        self.screen = curses.initscr()
        self.welcome()
        curses.curs_set(1)
        self.screen.addstr("outbit> ")
        self.screen.keypad(1)
        self.screen.scrollok(1)
        # left and right key
        # cursor_offset: 0 = cursor at end of line; negative = chars before end
        cursor_offset = 0
        # ctrl-u
        history_index = 0
        # ctrl-r
        search_mode = False
        last_match = None
        line = ""
        while self.app_running:
            s = self.screen.getch()
            # Ascii
            if s >= 32 and s <= 126:
                if cursor_offset >= 0:
                    # cursor at end of line
                    line += chr(s)
                    self.screen.addstr(chr(s))
                elif cursor_offset <= len(line)*-1:
                    # cursor at beginning of line
                    line = chr(s) + line
                    self.screen.insstr(chr(s))
                    (y, x) = self.screen.getyx()
                    self.screen.move(y, x+1)
                else:
                    # cursor in the middle of the line
                    line = line[:len(line)+cursor_offset] + chr(s) + line[len(line)+cursor_offset:]
                    self.screen.insstr(chr(s))
                    (y, x) = self.screen.getyx()
                    self.screen.move(y, x+1)
                if search_mode:
                    # incremental reverse search: newest history entry that
                    # contains the typed substring
                    # NOTE(review): `y` is only assigned in the non-end-of-line
                    # edit branches above — verify it is always bound here.
                    match = None
                    for item in reversed(self.history):
                        if line in item:
                            match = item
                            break
                    if match is None:
                        self.screen.addstr(y, 0, "(reverse-i-search)`':")
                        self.screen.addstr(y, len("(reverse-i-search)`':"), line)
                        self.screen.clrtoeol()
                    else:
                        (y, x) = self.screen.getyx()
                        self.screen.addstr(y, 0, "(reverse-i-search)`':")
                        self.screen.addstr(y, len("(reverse-i-search)`':"), match)
                        self.screen.clrtoeol()
                        last_match = match
                history_index = 0
            # Finished With Line Input
            elif s == ord("\n"):
                (y, x) = self.screen.getyx()
                self.screen.move(y, len("outbit> ")+len(line))
                self.screen.addstr("\n")
                if search_mode:
                    # NOTE(review): `match` is only bound after a printable key
                    # was typed in search mode; Enter straight after ctrl-r
                    # would raise NameError.
                    if match is not None:
                        result = self.shell_parse_line(match)
                        self.screen.addstr(result)
                else:
                    result = self.shell_parse_line(line)
                    if result is not None:
                        self.screen.addstr(result.encode("UTF-8"))
                self.screen.addstr("\noutbit> ")
                line = ""
                history_index = 0
                cursor_offset = 0
                search_mode = False
            # Backspace
            elif s == curses.KEY_BACKSPACE or s == 127 or s == curses.erasechar():
                (y, x) = self.screen.getyx()
                if len(line) > 0 and x > len("outbit> "):
                    line = line[:len(line)+cursor_offset-1] + line[len(line)+cursor_offset:]
                    self.screen.delch(y, x-1)
                history_index = 0
            # Ctrl-u, clear line
            elif s == 21:
                (y, x) = self.screen.getyx()
                self.screen.addstr(y, 0, "outbit> ")
                self.screen.clrtoeol()
                line = ""
                history_index = 0
            # Ctrl-r, search
            elif s == 18:
                search_mode = True
                (y, x) = self.screen.getyx()
                self.screen.addstr(y, 0, "(reverse-i-search)`':")
                self.screen.clrtoeol()
                line = ""
                history_index = 0
            elif s == curses.KEY_UP:
                if len(self.history) < 1:
                    # prevent divide by zero when history is 0
                    continue
                history_index += 1
                cursor_offset = 0
                (y, x) = self.screen.getyx()
                self.screen.addstr(y, 0, "outbit> ")
                self.screen.addstr(y, len("outbit> "), self.history[-(history_index%len(self.history))])
                self.screen.clrtoeol()
                line = self.history[-(history_index%len(self.history))]
            elif s == curses.KEY_DOWN:
                if len(self.history) < 1:
                    # prevent divide by zero when history is 0
                    continue
                history_index -= 1
                cursor_offset = 0
                (y, x) = self.screen.getyx()
                self.screen.addstr(y, 0, "outbit> ")
                self.screen.addstr(y, len("outbit> "), self.history[-(history_index%len(self.history))])
                self.screen.clrtoeol()
                line = self.history[-(history_index%len(self.history))]
            elif s == curses.KEY_LEFT:
                if cursor_offset > len(line)*-1:
                    cursor_offset -= 1
                    (y, x) = self.screen.getyx()
                    self.screen.move(y, x-1)
            elif s == curses.KEY_RIGHT:
                if cursor_offset < 0:
                    cursor_offset += 1
                    (y, x) = self.screen.getyx()
                    self.screen.move(y, x+1)
            else:
                # unrecognised key: just reset editing state
                #self.screen.ddstr("Out of range: %d" % s)
                history_index = 0
                cursor_offset = 0
        curses.endwin()
    def blocking_get_response_queued_job(self, queue_id):
        """Poll a queued job every 5s until it finishes, echoing incremental
        output; ctrl-z backgrounds it, ctrl-c kills it (via signal flags)."""
        global sig_bg_pressed
        global sig_kill_pressed
        sig_bg_pressed = 0 # Reset ctrl-z state
        sig_kill_pressed = 0 # reset ctrl-c state
        data = {"response": " "}
        last_response = ""
        self.screen.addstr("\nJob is running with id=%s. Press ctrl-z to background job.\n" % str(queue_id))
        self.screen.refresh()
        while sig_bg_pressed == 0:
            if sig_kill_pressed == 1:
                # Kill job
                data = self.run_action(self.get_action_from_command("jobs kill id=%s" % str(queue_id)))
                self.screen.addstr(data["response"])
                self.screen.refresh()
                break
            else:
                data = self.run_action(self.get_action_from_command("jobs status id=%s" % str(queue_id)))
                # NOTE(review): if the request failed, data is None and the
                # data["response"] below would raise TypeError.
                if data is None or "finished" not in data or data["finished"] == True:
                    # Its finished, print the last update
                    updatestr = data["response"].replace(last_response, "")
                    self.screen.addstr(updatestr)
                    self.screen.refresh()
                    return "" # prints no update since everything was already printed to the screen
                if "response" in data and "exit_code" in data and data["exit_code"] != 0:
                    # Error happend
                    return data["response"] # prints error string
                updatestr = data["response"].replace(last_response, "")
                self.screen.addstr(updatestr)
                self.screen.refresh()
                last_response = data["response"]
                time.sleep(5)
        return "" # no update
    def shell_parse_line(self, line):
        """Execute one shell line: quit/exit locally, everything else is
        parsed and sent to the server; returns the text to display."""
        line = line.strip()
        # Return nothing for an empty line
        if len(line) <= 0:
            return("")
        action = line.split()
        if self.is_action_quit(action):
            # outbit> quit
            # outbit> exit
            self.action_quit()
        else:
            # Server Side Handles Command Response
            # outbit> [category ..] action [option1=something ..]
            if line is not None and len(line) > 0:
                self.history.append(line)
            actionjson = self.get_action_from_command(line)
            if yacc.parser_error is None:
                data = self.run_action(actionjson)
            else:
                data = {"response": yacc.parser_error}
            if data is not None:
                if "response" in data:
                    return data["response"]
                elif "queue_id" in data:
                    # long-running job: poll until it completes
                    return self.blocking_get_response_queued_job(data["queue_id"])
                else:
                    return("outbit - Invalid Response From server\n")
            else:
                return("outbit - Failed To Get Response From Server\n")
    def run(self):
        """ EntryPoint Of Application """
        self.login_prompt()
        if self.interactive_mode:
            curses.wrapper(self.startshell)
        else:
            for command in self.noninteractive_commands:
                print(self.shell_parse_line(command))
|
{
"content_hash": "de4b4acdaec2ae57c1a0f8b27422470f",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 115,
"avg_line_length": 38.82943925233645,
"alnum_prop": 0.50033094650701,
"repo_name": "starboarder2001/outbit",
"id": "8cd024a5c7e4cdfb0b65dbaed4f063b284638294",
"size": "16619",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "lib/outbit/cli/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1682"
},
{
"name": "HTML",
"bytes": "3411"
},
{
"name": "JavaScript",
"bytes": "6974"
},
{
"name": "Python",
"bytes": "136498"
},
{
"name": "Shell",
"bytes": "380"
}
],
"symlink_target": ""
}
|
'''This is the main zarkov event server. It uses gevent for a fast event loop in
a single thread. It should typically be invoked with the zarkov-server script.'''
import sys
import time
import logging
from datetime import datetime, timedelta
import bson
import gevent
from gevent_zeromq import zmq
from zarkov import model
from zarkov import util
log = logging.getLogger(__name__)
class Server(object):
    """ZeroMQ event server: greenlets pull BSON/JSON messages off PULL
    sockets into a bounded queue; the main loop dispatches each decoded
    document by its '$command' key."""
    def __init__(self, context, options, j):
        self.context = context    # shared zmq context
        self._options = options
        self._j = j               # journal callable used to persist events
        self.handlers = {
            'event_noval':self._handle_event_noval, # log an event with no validation
            None:self._handle_event} # log an event with validation
        if options.publish_bind_address:
            # optional PUB socket for broadcasting aggregation completions
            self._pub_sock = context.socket(zmq.PUB)
            self._pub_sock.bind(options.publish_bind_address)
        else:
            self._pub_sock = None
    def serve_forever(self):
        """Spawn listener greenlets and run the dispatch loop forever."""
        q = gevent.queue.Queue(2**10)
        def bson_server():
            # Wait on messages (raw BSON straight onto the queue)
            s_bson = self.context.socket(zmq.PULL)
            s_bson.bind(self._options.bson_bind_address)
            while True:
                q.put(s_bson.recv())
        def json_server():
            # Wait on messages (JSON converted to BSON before queueing)
            s_json = self.context.socket(zmq.PULL)
            s_json.bind(self._options.json_bind_address)
            while True:
                msg = s_json.recv()
                q.put(util.bson_from_json(msg))
        if self._options.bson_bind_address:
            gevent.spawn(bson_server)
        if self._options.json_bind_address:
            gevent.spawn(json_server)
        if self._options.incremental:
            gevent.spawn(self._g_aggregate)
        def resource_print():
            # periodically log combined max RSS of this process and children
            import resource
            while True:
                rc = resource.getrusage(resource.RUSAGE_SELF)
                rs = resource.getrusage(resource.RUSAGE_CHILDREN)
                log.info('Server rss %s', rc.ru_maxrss + rs.ru_maxrss)
                gevent.sleep(10)
        gevent.spawn(resource_print)
        log.info('Starting main server on %s / %s',
                 self._options.bson_bind_address, self._options.json_bind_address)
        while True:
            try:
                msg = q.get()
                obj = bson.BSON(msg).decode()
                command = obj.pop('$command', None)
                command_handler = self.handlers.get(command, None)
                if command_handler is None:
                    log.warning('Unknown command %r', command)
                else:
                    command_handler(obj)
            except KeyboardInterrupt:
                raise
            except:
                # deliberate catch-all: one bad message must not stop serving
                log.exception('Error in message loop')
    def _handle_event(self, obj):
        '''Validate event, save to journal (and mongo)'''
        ev = model.event(obj).make(obj)
        self._j(ev)
    def _handle_event_noval(self, obj):
        '''Save to journal (and mongo)'''
        self._j(obj)
    def _g_aggregate(self):
        '''Execute all defined AggDef aggregations.

        Loops forever at ~5s intervals, running every realtime AggDef's
        incremental aggregation and publishing an 'agg_complete' message
        on the PUB socket when new documents were produced.'''
        zmr = self.context.socket(zmq.REQ)
        zmr.connect(self._options.zmr['req_uri'])
        sess =model.orm_session
        next_agg = datetime.utcnow()
        while True:
            try:
                now = datetime.utcnow()
                if now < next_agg:
                    gevent.sleep((next_agg-now).seconds)
                next_agg = datetime.utcnow() + timedelta(seconds=5)
                aggs = model.AggDef.query.find(dict(realtime=True)).all()
                if not aggs: continue
                begin = time.time()
                total_new = 0
                for ad in aggs:
                    new_docs = ad.incremental(zmr, self._options.zmr['event_limit'])
                    if new_docs and self._pub_sock:
                        self._pub_sock.send(bson.BSON.encode({
                            '$command':'agg_complete',
                            'name': ad.name,
                            'docs':new_docs}))
                    total_new += new_docs
                sess.flush()
                sess.close()
                if total_new:
                    log.info('%d aggregations complete in %.2fs', len(aggs),
                             time.time() - begin)
            except:
                # deliberate catch-all: an aggregation failure must not kill the loop
                log.exception('Error in aggregate')
def show_aggs():
    '''Log the contents of every AggDef's output collection.

    Debugging helper: for each aggregation, logs one line per output
    document, then closes the ORM session.'''
    aggs = model.AggDef.query.find().all()
    for agg in aggs:
        log.info('Agg %s:', agg.name)
        for d in agg.collection.find():
            try:
                log.info('%s: %s', d['_id'].isoformat(' '), d['value'])
            except Exception:
                # _id may not have isoformat() or keys may be missing;
                # fall back to dumping the raw document. Narrowed from a
                # bare `except:` which also swallowed KeyboardInterrupt.
                log.info('%s', d)
    model.orm_session.close()
|
{
"content_hash": "40f8f78ca0fbe209cd5c8772527653a2",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 85,
"avg_line_length": 35.721804511278194,
"alnum_prop": 0.5207324773731846,
"repo_name": "joeywen/zarkov",
"id": "0282e61effce69621c62a038e31bb4d42e3d3b4e",
"size": "4751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zarkov/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import re
from sqlalchemy.ext.hybrid import hybrid_property
from indico.core.db import db
from indico.util.string import format_repr
class Location(db.Model):
    """A room-booking location: groups rooms and defines how their names
    are rendered."""
    __tablename__ = 'locations'
    # name is unique among non-deleted locations only (partial index)
    __table_args__ = (db.Index(None, 'name', unique=True, postgresql_where=db.text('NOT is_deleted')),
                      {'schema': 'roombooking'})
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    name = db.Column(
        db.String,
        nullable=False,
    )
    #: URL template for the location's map; empty string when unset
    map_url_template = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    #: Stored in Postgres format() syntax (e.g. '%1$s/%2$s-%3$s'); exposed
    #: in Python str.format style via the ``room_name_format`` hybrid property
    _room_name_format = db.Column(
        'room_name_format',
        db.String,
        nullable=False,
        default='%1$s/%2$s-%3$s'
    )
    #: soft-delete flag; deleted locations are excluded from the unique index
    is_deleted = db.Column(
        db.Boolean,
        nullable=False,
        default=False,
    )
    #: The format used to display room names (with placeholders)
    @hybrid_property
    def room_name_format(self):
        """Translate Postgres' format syntax (e.g. `%1$s/%2$s-%3$s`) to Python's."""
        # positional placeholder %N$s maps to the N-th name in this list
        placeholders = ['building', 'floor', 'number', 'site']
        return re.sub(
            r'%(\d)\$s',
            lambda m: '{%s}' % placeholders[int(m.group(1)) - 1],
            self._room_name_format
        )
    @room_name_format.expression
    def room_name_format(cls):
        # at SQL level, expose the raw Postgres-syntax column
        return cls._room_name_format
    @room_name_format.setter
    def room_name_format(self, value):
        # inverse of the getter: substitute the named placeholders back
        # into Postgres positional syntax before storing
        self._room_name_format = value.format(
            building='%1$s',
            floor='%2$s',
            number='%3$s',
            site='%4$s'
        )
    rooms = db.relationship(
        'Room',
        back_populates='location',
        cascade='all, delete-orphan',
        primaryjoin='(Room.location_id == Location.id) & ~Room.is_deleted',
        lazy=True
    )
    # relationship backrefs:
    # - breaks (Break.own_venue)
    # - contributions (Contribution.own_venue)
    # - events (Event.own_venue)
    # - session_blocks (SessionBlock.own_venue)
    # - sessions (Session.own_venue)
    def __repr__(self):
        return format_repr(self, 'id', 'name', is_deleted=False)
|
{
"content_hash": "3f6e0371ea2d099cfb4bc22dc8844601",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 102,
"avg_line_length": 27.025316455696203,
"alnum_prop": 0.5569086651053864,
"repo_name": "DirkHoffmann/indico",
"id": "0370d5e57fc4128ae118bfd28c315406f7789075",
"size": "2349",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/modules/rb/models/locations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33249"
},
{
"name": "HTML",
"bytes": "1398354"
},
{
"name": "JavaScript",
"bytes": "2295843"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5426206"
},
{
"name": "SCSS",
"bytes": "496904"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
"""SQL routines for full-text indexing."""
from ggrc import db
from ggrc.fulltext import Indexer
class SqlIndexer(Indexer):
  """Indexer backend that stores full-text records as rows of
  ``self.record_type`` in a SQL table."""
  def records_generator(self, record):
    """Yield one DB row per non-None (property, subproperty) pair of *record*."""
    for prop, value in record.properties.items():
      for subproperty, content in value.items():
        if content is not None:
          yield self.record_type(
              key=record.key,
              type=record.type,
              context_id=record.context_id,
              tags=record.tags,
              property=prop,
              # ``unicode``: this module targets Python 2
              subproperty=unicode(subproperty),
              content=unicode(content),
          )
  def create_record(self, record, commit=True):
    """Insert index rows for *record*; commit unless commit=False."""
    for db_record in self.records_generator(record):
      db.session.add(db_record)
    if commit:
      db.session.commit()
  def update_record(self, record, commit=True):
    """Replace index rows for exactly the properties present on *record*."""
    # remove the obsolete index entries
    if record.properties:
      db.session.query(self.record_type).filter(
          self.record_type.key == record.key,
          self.record_type.type == record.type,
          self.record_type.property.in_(list(record.properties.keys())),
      ).delete(synchronize_session="fetch")
    # add new index entries
    self.create_record(record, commit=commit)
  def delete_record(self, key, type, commit=True):
    """Delete all index rows for one object.

    ``type`` shadows the builtin, but the name is part of the public API."""
    db.session.query(self.record_type).filter(
        self.record_type.key == key,
        self.record_type.type == type).delete()
    if commit:
      db.session.commit()
  def delete_all_records(self, commit=True):
    """Wipe the entire index table."""
    db.session.query(self.record_type).delete()
    if commit:
      db.session.commit()
  def delete_records_by_type(self, type, commit=True):
    """Delete every index row of the given object type."""
    db.session.query(self.record_type).filter(
        self.record_type.type == type).delete()
    if commit:
      db.session.commit()
|
{
"content_hash": "02c42f5e909f1ddcf4a4fc774785d86a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 72,
"avg_line_length": 32.25454545454546,
"alnum_prop": 0.6364148816234498,
"repo_name": "AleksNeStu/ggrc-core",
"id": "fed13c33f802ba143498b059c990463dc76ec918",
"size": "1887",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/0.10-Raspberry",
"path": "src/ggrc/fulltext/sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "221201"
},
{
"name": "HTML",
"bytes": "1055542"
},
{
"name": "JavaScript",
"bytes": "1872353"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2700938"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
}
|
# Deliberately dangerous sample: exec() of an arbitrary string. The argument
# is not even valid Python — judging by the repo metadata this file appears
# to be a bandit linter fixture meant to be flagged, not executed.
exec("do evil")
|
{
"content_hash": "2a5eb0d5588dbf43f0dcc2b653a6df55",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 15,
"avg_line_length": 16,
"alnum_prop": 0.625,
"repo_name": "pombredanne/bandit",
"id": "17ac83a2731212f5957f0bad0d8eceebe1aa5b3a",
"size": "16",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/exec-py3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "476344"
},
{
"name": "Shell",
"bytes": "1286"
}
],
"symlink_target": ""
}
|
import random
from string import ascii_lowercase
from django.db import models
from django import forms
from staging.generators import BaseGenerator
class Generator(BaseGenerator):
    """Staging data generator that fills CharFields with random full names
    drawn from bundled first/last name word lists."""
    name = 'Random full name'
    slug = 'random-full-name'
    # model field types this generator can populate
    for_fields = [models.CharField]
    options_form = None
    def __init__(self):
        # names handed out so far; used to enforce uniqueness
        self.generated = []
        self.first_names = self._get_first_names()
        self.last_names = self._get_last_names()
    def save(self, obj, field, form_data):
        """Assign a random name (unique if the field requires it) to *obj*."""
        if field.unique:
            setattr(obj, field.name, self._generate_unique())
        else:
            setattr(obj, field.name, self._generate())
    def _generate(self):
        """Return a random 'First Last' string."""
        return '%s %s' % (random.choice(self.first_names), random.choice(self.last_names))
    def _generate_unique(self):
        # Tries up to 10000 times; if every attempt collides it falls
        # through and implicitly returns None.
        for _ in range(10000):
            value = self._generate()
            if value not in self.generated:
                self.generated.append(value)
                return value
    def _get_first_names(self):
        # rel_path presumably resolves paths relative to this module — see
        # BaseGenerator.rel_path to confirm
        with open(self.rel_path('_first_names.txt'), 'r') as f:
            return f.read().split()
    def _get_last_names(self):
        with open(self.rel_path('_last_names.txt'), 'r') as f:
            return f.read().split()
|
{
"content_hash": "5ded135ddfefd8f06fb98b311e2c4b90",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 90,
"avg_line_length": 30.609756097560975,
"alnum_prop": 0.600796812749004,
"repo_name": "code-on/django-staging",
"id": "cb45d4e11f1a252c289d92afd25635ff3298489a",
"size": "1255",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "staging/generators/random_full_name.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "130"
},
{
"name": "HTML",
"bytes": "7937"
},
{
"name": "JavaScript",
"bytes": "14926"
},
{
"name": "Python",
"bytes": "45267"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.