"""
Creates an Azure serverless function.
"""
import json
import os
from django.conf import settings
from common.methods import set_progress, generate_string_from_template
from infrastructure.models import CustomField
def create_custom_fields_as_needed():
CustomField.objects.get_or_create(
name='azure_function_name', type='STR',
defaults={'label': 'Azure function name', 'description': 'Name of a deployed azure function', 'show_as_attribute': True}
)
CustomField.objects.get_or_create(
name='resource_group_name', type='STR',
defaults={'label': 'Azure Resource Group', 'description': 'Used by the Azure blueprints',
'show_as_attribute': True}
)
def run(job, **kwargs):
resource = kwargs.get('resource')
function_name = '{{ function_name }}'
storage_account_name = function_name + "storageaccount"
file_location = "{{ file_location }}"
if file_location.startswith(settings.MEDIA_URL):
set_progress("Converting relative URL to filesystem path")
file_location = file_location.replace(settings.MEDIA_URL, settings.MEDIA_ROOT)
create_custom_fields_as_needed()
#check if function name is already in use
function_name_check = "az functionapp list"
val = os.popen(function_name_check).read()
function_name_check_response = json.loads(val)
used_names = []
for function in function_name_check_response:
used_names.append(function['name'])
if function_name in used_names:
response = "{0} function name is already in use. Please use a different one.".format(function_name)
return "failure", response, ""
#create a resource group for the function
resource_group_name = function_name + "-resource-group"
resource_group_create = 'az group create --name ' + resource_group_name + ' --location westeurope'
os.system(resource_group_create)
#check if storage name is already in use, create a function storage
name_check = "az storage account check-name --name {0}".format(storage_account_name)
name_check_response = json.loads(os.popen(name_check).read())
if name_check_response['nameAvailable']:
create_storage_command = "az storage account create --name {0} --location westeurope --resource-group {1} --sku Standard_LRS".format(storage_account_name, resource_group_name)
os.system(create_storage_command)
else:
return "failure", '{0}'.format(name_check_response['reason']), ""
#create the azure function
create_function_command = "az functionapp create --name " + function_name + " --storage-account " + storage_account_name + " --consumption-plan-location westeurope --resource-group " + resource_group_name
try:
create_function_check = json.loads(os.popen(create_function_command).read())
except Exception as e:
return 'failure', 'the function app could not be created', '{0}'.format(e)
if create_function_check['name'] == function_name:
set_progress('The function app has been successfully created')
else:
return 'failure', 'The app could not be created', ''
resource.name = function_name
resource.resource_group_name = resource_group_name
resource.save()
fxn = "az functionapp deployment source config-zip -g {0} -n {1} --src {2}".format(resource_group_name, function_name, file_location)
json.loads(os.popen(fxn).read())
return 'success', 'The function has successfully been created.', ''
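# Illustrative only: the sequence of az CLI calls this plugin shells out to,
# assuming function_name is "myfn" (the names below are example values):
#   az functionapp list
#   az group create --name myfn-resource-group --location westeurope
#   az storage account check-name --name myfnstorageaccount
#   az storage account create --name myfnstorageaccount --location westeurope --resource-group myfn-resource-group --sku Standard_LRS
#   az functionapp create --name myfn --storage-account myfnstorageaccount --consumption-plan-location westeurope --resource-group myfn-resource-group
#   az functionapp deployment source config-zip -g myfn-resource-group -n myfn --src <zip path>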
#
# Copyright © 2012–2022 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.apps import AppConfig
class AddonsConfig(AppConfig):
name = "weblate.addons"
label = "addons"
verbose_name = "Add-ons"
ult.gc_thresh1 = 1"
self.assertTrue(moduletests.src.arpcache.detect())
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output", side_effect=subprocess.CalledProcessError(
"1", "test", "/etc/sysctl.d/55-arp-gc_thresh1.conf: no such file or directory"))
def test_fix_cpe(self, check_output_mock):
with contextlib.redirect_stdout(self.output):
self.assertRaises(subprocess.CalledProcessError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] 'sysctl -w net.ipv4.neigh.default.gc_thresh1=0' failed for running system\n"))
self.assertTrue(check_output_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="stuff"))
def test_fix_exists_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"something else\n"))
def test_fix_sudo_true(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[True])
@mock.patch("moduletests.src.arpcache.open", mock.mock_open(read_data="net.ipv4.neigh.default.gc_thresh1 = 0\n"
"net.ipv4.neigh.default.gc_thresh1 = 0\n"))
def test_fix_sudo_true_found_twice(self, check_output_mock, exists_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.fix(self.config_file_path))
self.assertTrue(self.output.getvalue().endswith(
"[FIXED] set net.ipv4.neigh.default.gc_thresh1=0 for running system\n"
"[FIXED] net.ipv4.neigh.default.gc_thresh1=0 in /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
@mock.patch("subprocess.check_output")
@mock.patch("moduletests.src.arpcache.os.path.exists", side_effect=[False])
@mock.patch("moduletests.src.arpcache.open", side_effect=IOError)
def test_fix_writefail(self, open_mock, exists_mock, check_output_mock):
check_output_mock.return_value = "True"
with contextlib.redirect_stdout(self.output):
self.assertRaises(IOError, moduletests.src.arpcache.fix, self.config_file_path)
self.assertTrue(check_output_mock.called)
self.assertTrue(exists_mock.called)
self.assertTrue(open_mock.called)
self.assertTrue(self.output.getvalue().endswith(
"[UNFIXED] Failed to write config to /etc/sysctl.d/55-arp-gc_thresh1.conf\n"))
@mock.patch("moduletests | .src.arpcache.detect", return_value=False)
def test_run_success(self, detect_mock):
with contextlib.redirect_stdout(self.output):
self.assertTrue(moduletests.src.arpcache.run())
self.assertTrue(self.output.getvalue().endswith("Determining if aggressive ARP caching is enabled\n"
"[SUCCESS] Aggressive arp caching is disabled.\n"))
self.assertTrue(detect_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
def test_run_no_remediate(self, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": False,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
moduletests.src.arpcache.run()
self.assertTrue("[UNFIXED] Remediation impossible without sudo and --remediate.\n"
"-- Running as root/sudo: True\n"
"-- Required --remediate flag specified: False\n"
"[FAILURE] Aggressive arp caching is enabled."
in self.output.getvalue())
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=True)
@mock.patch("moduletests.src.arpcache.backup", return_value=True)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
@mock.patch("moduletests.src.arpcache.restore", return_value=True)
def test_run_failure_isfile(self, restore_mock, fix_mock, backup_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": {self.config_file_path: "/some/path"},
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"This can cause issues communicating with instances in the same subnet"
in self.output.getvalue())
self.assertTrue(restore_mock.called)
self.assertTrue(fix_mock.called)
self.assertTrue(backup_mock.called)
self.assertTrue(isfile_mock.called)
self.assertTrue(detect_mock.called)
self.assertTrue(config_mock.called)
@mock.patch("moduletests.src.arpcache.get_config_dict")
@mock.patch("moduletests.src.arpcache.detect", return_value=True)
@mock.patch("moduletests.src.arpcache.os.path.isfile", return_value=False)
@mock.patch("moduletests.src.arpcache.fix", return_value=True)
def test_run_failure(self, fix_mock, isfile_mock, detect_mock, config_mock):
config_mock.return_value = {"BACKUP_DIR": "/var/tmp/ec2rl",
"LOG_DIR": "/var/tmp/ec2rl",
"BACKED_FILES": dict(),
"REMEDIATE": True,
"SUDO": True}
with contextlib.redirect_stdout(self.output):
self.assertFalse(moduletests.src.arpcache.run())
self.assertTrue("Determining if aggressive ARP caching is enabled\n"
"[FAILURE] Aggressive arp caching is enabled. "
"Thi |
from .. import scalar_measures
import numpy
from numpy.testing import assert_array_almost_equal
def test_fractional_anisotropy(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
fa = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
ev = numpy.linalg.eigvalsh(tt)
mn = ev.mean()
fa[i] = numpy.sqrt(1.5 * ((ev - mn) ** 2).sum() / (ev ** 2).sum())
assert_array_almost_equal(fa, scalar_measures.fractional_anisotropy(tensors))
def test_volume_fraction(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
vf = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
ev = numpy.linalg.eigvalsh(tt)
mn = ev.mean()
vf[i] = 1 - ev.prod() / (mn ** 3)
assert_array_almost_equal(vf, scalar_measures.volume_fraction(tensors))
def test_tensor_determinant(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
dt = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
dt[i] = numpy.linalg.det(tt)
assert_array_almost_equal(dt, scalar_measures.tensor_det(tensors))
def test_tensor_traces(N=10, random=numpy.random.RandomState(0)):
tensors = random.randn(N, 3, 3)
res = numpy.empty(N)
for i, t in enumerate(tensors):
tt = numpy.dot(t, t.T)
tensors[i] = tt
res[i] = numpy.trace(tt)
assert_array_almost_equal(res, scalar_measures.tensor_trace(tensors))
def test_tensor_contraction(N=10, random=numpy.random.RandomState(0)):
tensors1 = random.randn(N, 3, 3)
tensors2 = random.randn(N, 3, 3)
res = numpy.empty(N)
for i in range(N):
t1 = tensors1[i]
t2 = tensors2[i]
tt1 = numpy.dot(t1, t1.T)
tt2 = numpy.dot(t2, t2.T)
tensors1[i] = tt1
tensors2[i] = tt2
res[i] = numpy.trace(numpy.dot(tt1, tt2.T))
assert_array_almost_equal(res, scalar_measures.tensor_contraction(tensors1, tensors2))
self, type_):
return self.visit_SMALLINT(type_)
def visit_RAW(self, type_):
return "RAW(%(length)s)" % {'length' : type_.length}
def visit_ROWID(self, type_):
return "ROWID"
class OracleCompiler(compiler.SQLCompiler):
"""Oracle compiler modifies the lexical structure of Select
statements to work under non-ANSI configured Oracle databases, if
the use_ansi flag is False.
"""
compound_keywords = util.update_copy(
compiler.SQLCompiler.compound_keywords,
{
expression.CompoundSelect.EXCEPT : 'MINUS'
}
)
def __init__(self, *args, **kwargs):
super(OracleCompiler, self).__init__(*args, **kwargs)
self.__wheres = {}
self._quoted_bind_names = {}
def visit_mod(self, binary, **kw):
return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right))
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_char_length_func(self, fn, **kw):
return "LENGTH" + self.function_argspec(fn, **kw)
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right))
def get_select_hint_text(self, byfroms):
return " ".join(
"/*+ %s */" % text for table, text in byfroms.items()
)
def function_argspec(self, fn, **kw):
if len(fn.clauses) > 0:
return compiler.SQLCompiler.function_argspec(self, fn, **kw)
else:
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms, and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
"""
return " FROM DUAL"
def visit_join(self, join, **kwargs):
if self.dialect.use_ansi:
return compiler.SQLCompiler.visit_join(self, join, **kwargs)
else:
kwargs['asfrom'] = True
return self.process(join.left, **kwargs) + \
", " + self.process(join.right, **kwargs)
def _get_nonansi_join_whereclause(self, froms):
clauses = []
def visit_join(join):
if join.isouter:
def visit_binary(binary):
if binary.operator == sql_operators.eq:
if binary.left.table is join.right:
binary.left = _OuterJoinColumn(binary.left)
elif binary.right.table is join.right:
binary.right = _OuterJoinColumn(binary.right)
clauses.append(visitors.cloned_traverse(join.onclause, {},
{'binary':visit_binary}))
else:
clauses.append(join.onclause)
for j in join.left, join.right:
if isinstance(j, expression.Join):
visit_join(j)
for f in froms:
if isinstance(f, expression.Join):
visit_join(f)
if not clauses:
return None
else:
return sql.and_(*clauses)
def visit_outer_join_column(self, vc):
return self.process(vc.column) + "(+)"
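# Illustrative sketch of what the non-ANSI join path above produces for an
# outer join (table/column names are assumptions, not from this module):
#   ANSI form:     SELECT ... FROM a LEFT OUTER JOIN b ON a.id = b.a_id
#   non-ANSI form: SELECT ... FROM a, b WHERE a.id = b.a_id(+)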
def visit_sequence(self, seq):
return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval"
def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
"""Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??"""
if asfrom or ashint:
alias_name = isinstance(alias.name, expression._generated_label) and \
self._truncated_identifier("alias", alias.name) or alias.name
if ashint:
return alias_name
elif asfrom:
return self.process(alias.original, asfrom=asfrom, **kwargs) + \
" " + self.preparer.format_alias(alias, alias_name)
else:
return self.process(alias.original, **kwargs)
def returning_clause(self, stmt, returning_cols):
def create_out_param(col, i):
bindparam = sql.outparam("ret_%d" % i, type_=col.type)
self.binds[bindparam.key] = bindparam
return self.bindparam_string(self._truncate_bindparam(bindparam))
columnlist = list(expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in columnlist]
binds = [create_out_param(c, i) for i, c in enumerate(columnlist)]
return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
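# Illustrative example of the clause returning_clause() emits (table and
# column names are assumptions): RETURNING mytable.id, mytable.name INTO :ret_0, :ret_1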
def _TODO_visit_compound_select(self, select):
"""Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle."""
pass
def visit_select(self, select, **kwargs):
"""Look for ``LIMIT`` and OFFSET in a select statement, and if
so tries to wrap it in a subquery with ``rownum`` criterion.
"""
if not getattr(select, '_oracle_visit', None):
if not self.dialect.use_ansi:
if self.stack and 'from' in self.stack[-1]:
existingfroms = self.stack[-1]['from']
else:
existingfroms = None
froms = select._get_display_froms(existingfroms)
whereclause = self._get_nonansi_join_whereclause(froms)
if whereclause is not None:
select = select.where(whereclause)
select._oracle_visit = True
if select._limit is not None or select._offset is not None:
# See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html
#
# Generalized form of an Oracle pagination query:
# select ... from (
# select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from (
# select distinct ... where ... order by ...
# ) where ROWNUM <= :limit+:offset
# ) where ora_rn > :offset
# Outer select and "ROWNUM as ora_rn" can be dropped if limit=0
# TODO: use annotations instead of clone + attr set ?
select = select._generate()
select._oracle_visit = True
# Wrap the middle select and add the hint
limitselect = sql.select([c for c in select.c])
if select._limit and self.dialect.optimize_limits:
limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit)
limitselect._oracle_visit = True
limitselect._is_wrapper = True
# If needed, add the limiting clause
if select._limit is not None:
max_row = select._limit
if select._offset is not None:
max_row += select._offset
if not self.dialect.use_binds_for_limits:
max_row = sql.literal_column("%d" % max_row)
limitselect.append_whereclause(
sql.literal_column("ROWNUM")<=max_row)
# If needed, add the ora_rn, and wrap again with offset.
if select._offset is None:
limitselect.for_update = select.for_update
select = limitselect
else:
limitselect = limitselect.column(
sql.literal_column("ROWNUM").label("ora_rn"))
limitselect._oracle_visit = True
limitselect._is_wrapper = True
offsetselect = sql.select(
[c for c in limitselect.c if c.key!='ora_rn'])
offsetselect._oracle_visit = True
offsetselect._is_wrapper = True
offset_value = select._offset
if not self.dialect.use_binds_for_limits:
offset_value = sql.literal_column("%d"
import logging
import traceback
import numpy as np
from eemeter.structures import EnergyTrace
logger = logging.getLogger(__name__)
class SplitModeledEnergyTrace(object):
''' Light wrapper around models applicable to a single trace which
fits and predicts multiple models for different segments.
Parameters
----------
trace : eemeter.structures.EnergyTrace
Trace to be modeled.
formatter : eemeter.modeling.formatter.Formatter
Formatter to prep trace data for modeling.
model_mapping : dict
Items of this dictionary map `modeling_period_label` s to models
modeling_period_set : eemeter.structures.ModelingPeriodSet
The set of modeling periods over which models should be applicable.
'''
def __init__(self, trace, formatter, model_mapping, modeling_period_set):
self.trace = trace
self.formatter = formatter
self.model_mapping = model_mapping
self.modeling_period_set = modeling_period_set
self.fit_outputs = {}
def __repr__(self):
return (
"SplitModeledEnergyTrace(trace={}, formatter={},"
" model_mapping={}, modeling_period_set={})"
.format(self.trace, self.formatter, self.model_mapping,
self.modeling_period_set)
)
def fit(self, weather_source):
''' Fit all models associated with this trace.
Parameters
----------
weather_source : eemeter.weather.ISDWeatherSource
Weather source to use in creating covariate data.
'''
for modeling_period_label, modeling_period in \
self.modeling_period_set.iter_modeling_periods():
filtered_data = self._filter_by_modeling_period(
self.trace, modeling_period)
filtered_trace = EnergyTrace(
self.trace.interpretation, data=filtered_data,
unit=self.trace.unit)
model = self.model_mapping[modeling_period_label]
try:
input_data = self.formatter.create_input(
filtered_trace, weather_source)
except Exception:
logger.warn(
'For trace "{}" and modeling_period "{}", was not'
' able to format input data for {}.'
.format(self.trace.interpretation, modeling_period_label,
model)
)
self.fit_outputs[modeling_period_label] = {
"status": "FAILURE",
"traceback": traceback.format_exc(),
"start_date": None,
"end_date": None,
"rows": No | ne,
}
continue
else:
input_description = self.formatter.describe_input(input_data)
outputs = {
"start_date": input_description.get('start_date'),
"end_date": input_description.get('end_date'),
"n_rows": input_description.get('n_rows'),
}
try:
outputs.update(model.fit(input_data))
except Exception:
logger.warn(
'For trace "{}" and modeling_period "{}", {} was not'
' able to fit using input data: {}'
.format(self.trace.interpretation, modeling_period_label,
model, input_data)
)
outputs.update({
"status": "FAILURE",
"traceback": traceback.format_exc(),
})
else:
logger.info(
'Successfully fitted {} to formatted input data for'
' trace "{}" and modeling_period "{}".'
.format(model, self.trace.interpretation,
modeling_period_label)
)
outputs.update({"status": "SUCCESS"})
self.fit_outputs[modeling_period_label] = outputs
return self.fit_outputs
def predict(self, modeling_period_label, demand_fixture_data,
params=None):
''' Predict for any one of the modeling_periods associated with this
trace. Light wrapper around :code:`model.predict()` method.
Parameters
----------
modeling_period_label : str
Modeling period indicating which model to use in making the
prediction.
demand_fixture_data : object
Data (formatted by :code:`self.formatter`) over which prediction
should be made.
params : object, default None
Fitted parameters for the model. If :code:`None`, use parameters
found when :code:`.fit()` method was called.
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
logger.warn(
'Skipping prediction for modeling_period "{}" because'
' model fit failed.'.format(modeling_period_label)
)
return None
if params is None:
params = outputs["model_params"]
return self.model_mapping[modeling_period_label].predict(
demand_fixture_data, params)
def compute_derivative(self, modeling_period_label, derivative_callable,
**kwargs):
''' Compute a modeling derivative for this modeling period.
Parameters
----------
modeling_period_label : str
Label for modeling period for which derivative should be computed.
derivative_callable : callable
Callable which can be used as follows:
.. code-block:: python
>>> derivative_callable(formatter, model, **kwargs)
**kwargs
Arbitrary keyword arguments to be passed to the derivative callable
'''
outputs = self.fit_outputs[modeling_period_label]
if outputs["status"] == "FAILURE":
return None
model = self.model_mapping[modeling_period_label]
try:
derivative = derivative_callable(self.formatter, model, **kwargs)
except Exception:
logger.exception("Derivative computation failed.")
return None
return derivative
@staticmethod
def _filter_by_modeling_period(trace, modeling_period):
start = modeling_period.start_date
end = modeling_period.end_date
if start is None:
if end is None:
filtered_df = trace.data.copy()
else:
filtered_df = trace.data[:end].copy()
else:
if end is None:
filtered_df = trace.data[start:].copy()
else:
filtered_df = trace.data[start:end].copy()
# require NaN last data point as cap
if filtered_df.shape[0] > 0:
filtered_df.value.iloc[-1] = np.nan
filtered_df.estimated.iloc[-1] = False
return filtered_df
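# A minimal usage sketch, assuming a formatter, models, and a
# modeling_period_set compatible with eemeter's interfaces (the variable
# names below are illustrative, not part of this module):
#
#     smet = SplitModeledEnergyTrace(
#         trace, formatter,
#         {"baseline": baseline_model, "reporting": reporting_model},
#         modeling_period_set)
#     outputs = smet.fit(weather_source)
#     prediction = smet.predict("baseline", demand_fixture_data)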
"""For seeding individual ops based on a graph-level seed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
_DEFAULT_GRAPH_SEED = 87654321
def get_seed(op_seed):
"""Returns the local seeds an operation should use given an op-specific seed.
Given operation-specific seed, `op_seed`, this helper function returns two
seeds derived from graph-level and op-level seeds. Many random operations
internally use the two seeds to allow the user to change the seed globally for a
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed).
Args:
op_seed: integer.
Returns:
A tuple of two integers that should be used for the local seed of this
operation.
"""
graph_seed = ops.get_default_graph().seed
if graph_seed is not None:
if op_seed is not None:
return graph_seed, op_seed
else:
return graph_seed, ops.get_default_graph()._last_id
else:
if op_seed is not None:
return _DEFAULT_GRAPH_SEED, op_seed
else:
return None, None
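# A summary sketch of the four cases get_seed() distinguishes; the constant
# 87654321 is _DEFAULT_GRAPH_SEED defined above:
#   graph seed set,  op seed set   -> (graph_seed, op_seed)
#   graph seed set,  op seed None  -> (graph_seed, graph's last op id)
#   graph seed None, op seed set   -> (_DEFAULT_GRAPH_SEED, op_seed)
#   graph seed None, op seed None  -> (None, None), i.e. nondeterministic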
def set_random_seed(seed):
"""Sets the graph-level random | seed.
Operations that rely on a random seed actually derive it from two seeds:
the graph-level and operation-level seeds. This sets the graph-level seed.
Its interactions with operation-level seeds is as follows:
1. If neither the graph-level nor the operation seed is set:
A random seed is used for this op.
2. If the graph-level seed is set, but the operation seed is not:
The system deterministically picks an operation seed in conjunction
with the graph-level seed so that it gets a unique random sequence.
3. If the graph-level seed is not set, but the operation seed is set:
A default graph-level seed and the specified operation seed are used to
determine the random sequence.
4. If both the graph-level and the operation seed are set:
Both seeds are used in conjunction to determine the random sequence.
To illustrate the user-visible effects, consider these examples:
To generate different sequences across sessions, set neither
graph-level nor op-level seeds:
```python
a = tf.random_uniform([1])
b = tf.random_normal([1])
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A3'
print sess2.run(a) # generates 'A4'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To generate the same repeatable sequence for an op across sessions, set the
seed for the op:
```python
a = tf.random_uniform([1], seed=1)
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequence of values for 'a', but different sequences of values for 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B3'
print sess2.run(b) # generates 'B4'
```
To make the random sequences generated by all ops be repeatable across
sessions, set a graph-level seed:
```python
tf.set_random_seed(1234)
a = tf.random_uniform([1])
b = tf.random_normal([1])
# Repeatedly running this block with the same graph will generate the same
# sequences of 'a' and 'b'.
print "Session 1"
with tf.Session() as sess1:
print sess1.run(a) # generates 'A1'
print sess1.run(a) # generates 'A2'
print sess1.run(b) # generates 'B1'
print sess1.run(b) # generates 'B2'
print "Session 2"
with tf.Session() as sess2:
print sess2.run(a) # generates 'A1'
print sess2.run(a) # generates 'A2'
print sess2.run(b) # generates 'B1'
print sess2.run(b) # generates 'B2'
```
Args:
seed: integer.
"""
ops.get_default_graph().seed = seed
def address(self, data):
self.irc.send(self.privmsg("512 Shaw Court #105, Severn, MD 21144"))
from django.db.models.fields import related
def _is_many_to_many_relation(field):
"""Check if a field specified a many-to-many relationship as defined by django.
This is the case if the field is an instance of the ManyToManyDescriptor as generated
by the django framework
Args:
field (django.db.models.fields): The field to check
Returns:
bool: true if the field is a many-to-many relationship
"""
return isinstance(field, related.ManyToManyDescriptor)
def _is_one_to_one_relation(field):
"""Check if a field specified a one-to-one relationship as defined by django.
This is the case if the field is an instance of the ForwardManyToOne as generated
by the django framework
Args:
field (django.db.models.fields): The field to check
Returns:
bool: true if the field is a one-to-one relationship
"""
return isinstance(field, related.ForwardManyToOneDescriptor)
def _get_prefetchable_fields(serializer):
"""Get the fields that are prefetchable according to the serializer description.
Method mainly used for automatic schema generation.
Args:
serializer (Serializer): The serializer whose model fields are inspected.
"""
def _is_field_prefetchable(field):
return _is_one_to_one_relation(field) or _is_many_to_many_relation(field)
meta = getattr(serializer, "Meta", None)
if meta is None:
return []
model = getattr(meta, "model", None)
if model is None:
return []
fields = []
for field_name in dir(model):
field = getattr(model, field_name)
if _is_field_prefetchable(field):
# ManyToMany relationship can be reverse
if hasattr(field, 'reverse') and field.reverse:
fields.append((field_name, field.field.model))
else:
fields.append((field_name, field.field.related_model))
return fields
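# A hedged usage sketch (serializer and queryset names are assumptions): the
# (field_name, model) pairs returned above can feed prefetch_related() to
# avoid N+1 queries, e.g.
#
#     for field_name, related_model in _get_prefetchable_fields(MySerializer):
#         queryset = queryset.prefetch_related(field_name)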
# context.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = helpers.get_names_of_node(if_stmt_test)
element_names = helpers.get_names_of_node(element)
str_element_names = [str(e) for e in element_names]
if any(str(i) in str_element_names for i in if_names):
for if_name in if_names:
definitions = self.goto_definitions(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch evaluation %s.', if_stmt)
# There's only a certain amount of branches
# Jedi can evaluate, otherwise it will take too
# long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][str(if_name)] = set([definition])
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[str(if_name)] = definitions
if len(name_dicts) > 1:
result = set()
for name_dict in name_dicts:
with helpers.predefine_names(context, if_stmt, name_dict):
result |= self._eval_element_not_cached(context, element)
return result
else:
return self._eval_element_if_evaluated(context, element)
else:
if predefined_if_name_dict:
return self._eval_element_not_cached(context, element)
else:
return self._eval_element_if_evaluated(context, element)
def _eval_element_if_evaluated(self, context, element):
"""
TODO This function is temporary: Merge with eval_element.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return self._eval_element_not_cached(context, element)
return self._eval_element_cached(context, element)
@memoize_default(default=set(), evaluator_is_first_arg=True)
def _eval_element_cached(self, context, element):
return self._eval_element_not_cached(context, element)
@debug.increase_indent
def _eval_element_not_cached(self, context, element):
debug.dbg('eval_element %s@%s', element, element.start_pos)
types = set()
typ = element.type
if typ in ('name', 'number', 'string', 'atom'):
types = self.eval_atom(context, element)
elif typ == 'keyword':
# For False/True/None
if element.value in ('False', 'True', 'None'):
types.add(compiled.builtin_from_name(self, element.value))
# else: print e.g. could be evaluated like this in Python 2.7
elif typ == 'lambda':
types = set([er.FunctionContext(self, context, element)])
elif typ == 'expr_stmt':
types = self.eval_statement(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
if not (first_child.type == 'keyword' and first_child.value == 'await'):
types = self.eval_atom(context, first_child)
for trailer in element.children[1:]:
if trailer == '**': # has a power operation.
right = self.eval_element(context, element.children[2])
types = set(precedence.calculate(self, context, types, trailer, right))
break
types = self.eval_trailer(context, types, trailer)
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
types = set([iterable.SequenceLiteralContext(self, context, element)])
elif typ in ('not_test', 'factor'):
types = self.eval_element(context, element.children[-1])
for operator in element.children[:-1]:
types = set(precedence.factor_calculate(self, types, operator))
elif typ == 'test':
# `x if foo else y` case.
types = (self.eval_element(context, element.children[0]) |
self.eval_element(context, element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not evaluated.
assert element.value == '...'
types = set([compiled.create(self, Ellipsis)])
elif typ == 'dotted_name':
types = self.eval_atom(context, element.children[0])
for next_name in element.children[2::2]:
# TODO add search_global=True?
types = unite(
typ.py__getattribute__(next_name, name_context=context)
for typ in types
)
elif typ == 'eval_input':
types = self._eval_element_not_cached(context, element.children[0])
elif typ == 'annassign':
types = pep0484._evaluate_for_annotation(context, element.children[1])
else:
types = precedence.calculate_children(self, context, element.children)
debug.dbg('eval_element result %s', types)
return types
def eval_atom(self, context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
if atom.type == 'name':
# This is the first global lookup.
stmt = atom.get_definition()
if stmt.type == 'comp_for':
stmt = tree.search_ancestor(stmt, ('expr_stmt', 'lambda', 'funcdef', 'classdef'))
if stmt is None or stmt.type != 'expr_stmt':
# We only need to adjust the start_pos for statements, because
# there the name cannot be used.
stmt = atom
return context.py__getattribute__(
name_or_str=atom,
position=stmt.start_pos,
search_global=True
)
elif isinstance(atom, tree.Literal):
return set([compiled.create(self, atom.eval())])
else:
c = atom.children
if c[0].type == 'string':
# Will be one string.
types = self.eval_atom(context, c[0])
for string in c[1:]:
right = self.eval_atom(context, string)
types = precedence.calculate(self, context, types, '+', right)
return types
# Parentheses without commas are not tuples.
elif c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp' and
len(c[1].children) > 1):
return self.eval_element(context, c[1])
try:
comp_for
in.features:
log.error("Feature unknown: '%s'" % sub)
sys.exit(1)
## Check output folder
if not options["output_folder"]:
options["output_folder"] = path.join(os.getcwd(), path.basename(options["input_folder"]))
if path.isdir(options["output_folder"]):
# output folder exist, probable disrupted job
if not options["continue"] and not options["overwrite"]:
log.error("Output folder '%s' exist. Previous run? use --continue to continue, or --overwrite to start over." % options["output_folder"])
sys.exit(1)
elif options["overwrite"]:
shutil.rmtree(options["output_folder"])
os.makedirs(options["output_folder"])
elif options["reset_preprocesses"]:
bgcjsonpath = path.join(options["output_folder"], "bgcjson")
if path.exists(bgcjsonpath):
shutil.rmtree(bgcjsonpath)
else:
os.makedirs(options["output_folder"])
## Parse gbks
## TODO: multi-threading?
log.info("Started preprocessing input files..")
utils.print_progress(0, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1)
for i, filename in enumerate(input_files):
filepath = path.join(options["input_folder"], filename)
if not (path.exists(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(filepath)))):
bgc = parser.parse_gbk(filepath)
if bgc is not None:
utils.save_bgcjson(bgc, options["output_folder"])
utils.print_progress(i + 1, len(input_files), prefix='Preprocessing input GBKs..', suffix='', decimals=1, bar_length=100)
log.info("Finished preprocessing input files..")
## Do feature extraction
# step 1: make folder structure & index file
feature_folder = utils.create_feature_folder(input_files, options["output_folder"])
# step 2: traverse FE modules and run algorithms, then save the results
feature_extraction_plugins = []
for plugin in utils.load_plugins("feature_extraction"):
if ("features" not in options) or (plugin.name in [feature["name"] for feature in options["features"]]):
feature_extraction_plugins.append(plugin)
# calculate features
options["feature_values"] = {}
if options["features_scope"] == "pair":
log.info("Started feature extraction for all BGC pairs..")
nrcomb = len(input_files) * (len(input_files) - 1) / 2
count = 0
utils.print_progress(0, nrcomb, prefix='Feature extraction..', suffix='', decimals=1)
for i, fn1 in enumerate(input_files):
for j, fn2 in enumerate(input_files):
if i < j:
bgc1 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn1)))
bgc2 = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn2)))
for plugin in feature_extraction_plugins:
if plugin.name not in options["feature_values"]:
options["feature_values"][plugin.name] = {}
results = plugin.calculate(bgc1, bgc2)
options["feature_values"][plugin.name]["%d+%d" % (i, j)] = [float(result) for result in results]
count += 1
utils.print_progress(count, nrcomb, prefix='Feature extraction..', suffix='', decimals=1)
elif options["features_scope"] == "single":
log.info("Started feature extraction for all BGCs..")
count = 0
utils.print_progress(0, len(input_files), prefix='Feature extraction..', suffix='', decimals=1)
for i, fn in enumerate(input_files):
bgc = parser.parse_bgcjson(path.join(options["output_folder"], "bgcjson", "%s.bgcjson" % utils.get_bgc_name(fn)))
for plugin in feature_extraction_plugins:
if plugin.name not in options["feature_values"]:
options["feature_values"][plugin.name] = {}
results = plugin.calculate(bgc)
options["feature_values"][plugin.name]["%d" % (i)] = [float(result) for result in results]
count += 1
utils.print_progress(count, len(input_files), prefix='Feature extraction..', suffix='', decimals=1)
else:
log.error("Invalid features scope: '%s'" % options["features_scope"])
sys.exit(1)
## Load features & value matrix
features_rows = []
if options["features_scope"] == "pair":
for i, fn1 in enumerate(input_files):
for j, fn2 in enumerate(input_files):
if i < j:
features_rows.append([i, j])
elif options["features_scope"] == "single":
for i in xrange(0, len(input_files)):
features_rows.append([i])
else:
log.error("Invalid features scope: '%s'" % options["features_scope"])
sys.exit(1)
if "features_columns" not in options:
options["features_columns"] = []
for feature in options["features"]:
for sub in feature["subs"]:
options["features_columns"].append("%s.%s" % (feature["name"], sub))
features_matrix = {}
for row_ids in ["+".join([str(row_id) for row_id in row_ids]) for row_ids in features_rows]:
row = [None] * len(options["features_columns"])
for plugin in feature_extraction_plugins:
plugin_folder = path.join(feature_folder, plugin.name)
values = options["feature_values"][plugin.name][row_ids]
if (len(values) != len(plugin.features)):
# technically impossible to reach this, unless output from calculate != # of results expected
log.error("...")
sys.exit(1)
else:
for n, col in enumerate(plugin.features):
colname = ("%s.%s" % (plugin.name, col))
if colname in options["features_columns"]:
row[options["features_columns"].index(colname)] = values[n]
features_matrix[row_ids] = row
## Execute algorithms & save results
if options["mode"] == "train":
## Fetch feature & values training matrix
training_matrix = []
training_target = []
training_rownames = []
if options["features_scope"] == "pair":
for i, idx1 in enumerate(options["train_set"]):
for j, idx2 in enumerate(options["train_set"]):
if idx1 < idx2:
training_matrix.append(features_matrix["%d+%d" % (idx1, idx2)])
training_rownames.append("%s+%s" % (utils.get_bgc_name(input_files[idx1]), utils.get_bgc_name(input_files[idx2])))
if options["algo_mode"] == "classification":
class1 = options["single_values"][idx1].split(",")
class2 = options["single_values"][idx2].split(",")
training_target.append(int(len(set(class1) & set(class2)) > 0))
elif options["algo_mode"] == "regression":
training_target.append(float(options["train_pair_values"][i][j]))
elif options["features_scope"] == "single":
for idx in options["train_set"]:
training_matrix.append(features_matrix["%d" % (idx)])
training_rownames.append("%s" % (utils.get_bgc_name(input_files[idx1])))
training_target.append(options["single_values"][idx])
training_matrix = np.array(training_matrix)
training_target = np.array(training_target)
## Fetch feature & values testing matrix
testing_matrix = []
testing_target = []
testing_rownames = []
if options["features_scope"] == "pair":
for i, idx1 in enumerate(options["test_set"]):
for j, idx2 in enumerate(options["test_set"]):
if idx1 < idx2:
testing_matrix.append(features_matrix[
s
self.flowId = getNextFlowId()
if (q != None):
self.flow.append(q)
if (operator != None):
self.flow.append(operator)
#debugLog('lib', 'new HoneDataFlow', self.flow)
def __rshift__(self, other):
#debugLog('lib', 'In rshift of HoneDataFlow', 'self', self.flow, 'other', \
# other.flow)
self.flow = self.flow + other.flow
return self
def addSubFlow(self, x):
self.subFlows.append(x)
def printDataFlow(self):
buf = StringIO()
print >>buf, 'flow id: ',self.flowId
if (isinstance(self.flow[0], HoneQuerySerialized)):
print >>buf, 'Select:',self.flow[0].se
print >>buf, 'From:',self.flow[0].ft
print >>buf, 'Where:',self.flow[0].wh
print >>buf, 'Groupby:',self.flow[0].gp
print >>buf, 'Every:',self.flow[0].ev
print >>buf, 'Aggregate:',self.flow[0].agg
print >>buf, self.flow[1:]
else:
print >>buf, self.flow
print >>buf, '\n'
ret = buf.getvalue()
buf.close()
for subFlow in self.subFlows:
ret += subFlow.printDataFlow()
return ret
def getFlowCriterion(self):
return self.flow[0].wh
''' query part '''
class HoneQuery:
def __init__(self,var,ft,wh,gp,every,agg,compose):
self.complete = False
self.var = var
self.ft = ft
self.wh = wh
self.gp = gp
self.every = every
self.agg = agg
self.compose = compose
def __rshift__(self, other):
HoneQuerySyntaxCheck(self)
#debugLog('lib', 'new HoneQuery instance created', self.printQuery())
return self.convertToHoneDataFlow() >> other
def __mul__(self, other):
otherName = other.__class__.__name__
if otherName=='HoneQuery':
return other.compose(self)
else:
raise Exception('HoneQuery cannot compose with %s' % otherName)
def printQuery(self):
ret = StringIO()
print >>ret, 'HoneQuery Select:',self.var
print >>ret, 'HoneQuery From:',self.ft
print >>ret, 'HoneQuery Where:',self.wh
print >>ret, 'HoneQuery Groupby:',self.gp
print >>ret, 'HoneQuery Every:',self.every
print >>ret, 'HoneQuery Aggregate:',self.agg
return ret.getvalue()
def convertToHoneDataFlow(self):
query = HoneQuerySerialized()
query.se = self.var
query.ft = self.ft
query.wh = self.wh
query.gp = self.gp
query.ev = self.every
query.agg = self.agg
return HoneDataFlow(query, None)
def Select(x):
def compose(q):
if q.var == None:
q.var = []
q.var = q.var+x
return q
agg = None
for i in range(0,len(x)):
if (type(x[i]) == type(tuple())):
if (agg == None):
agg = []
agg.append(x[i])
x[i] = x[i][0]
return HoneQuery(x,None,None,None,1000,agg,compose)
def From(ft):
def compose(q):
q.ft = ft
return q
return HoneQuery(None,ft,None,None,None,None,compose)
def Where(wh):
def compose(q):
if q.wh == None:
q.wh = []
q.wh = q.wh + wh
return q
return HoneQuery(None,None,wh,None,None,None,compose)
def Groupby(gp):
def compose(q):
if q.gp == None:
q.gp = []
q.gp = q.gp + gp
return q
return HoneQuery(None,None,None,gp,None,None,compose)
def Every(every):
def compose(q):
q.every = every
return q
return HoneQuery(None,None,None,None,every,None,compose)
def HoneQuerySyntaxCheck(q):
#debugLog('lib', 'syntax check of query', q.printQuery())
varOnlySupportEqualInWhere = ['app', 'srcIP', 'dstIP', 'srcPort', 'dstPort']
if q.var is None:
raise Exception('HoneQuery must at least have a Select')
if q.ft is None:
raise Exception('HoneQuery must have a From table')
if not hone_rts.HoneTableTypes.has_key(q.ft):
raise Exception('HoneQuery: No such From Table {}'.format(q.ft))
varName = []
for typ in q.var:
varName.append(typ)
if not (q.wh is None):
for (typ, op, value) in q.wh:
if not typ in varName:
raise Exception('HoneQuery: Where of not-Selected columns')
if (typ in varOnlySupportEqualInWhere) and (not (op == '==')):
raise Exception('Var {} only support == in Where clause'.format(typ))
if not (q.gp is None):
for typ in q.gp:
if not typ in varName:
raise Exception('HoneQuery: Groupby of not-Selected columns')
for typ in varName:
if not (typ in hone_rts.HoneTableTypes[q.ft]):
raise Exception('HoneQuery No type {} in Table {}'.format(typ, q.ft))
if q.agg is not None:
for (typ, op) in q.agg:
if not op in ['max', 'min', 'sum', 'avg']:
raise Exception('Only max, min, sum, avg are supported in Select {}'.format(typ))
if (q.ft == 'AppStatus'):
if 'app' not in varName:
#debugLog('lib', 'syntax check', q.printQuery())
raise Exception('Must Select \'app\' in AppStatus table')
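# A hedged usage sketch (column names other than 'app' are assumptions and
# must exist in hone_rts.HoneTableTypes for the chosen table):
#
#     q = Select(['app', ('BytesOut', 'sum')]) * From('AppStatus') * Every(2000)
#     flow = q >> MapStream(someUserFunc) >> Print()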
''' operator part '''
def MapStreamSet(f):
if (isinstance(f,HoneDataFlow)):
return HoneDataFlow(None,['MapStreamSet'] + f.flow[0])
else:
return HoneDataFlow(None,['MapStreamSet', f.__name__])
def MapStream(f):
if (isinstance(f,HoneDataFlow)):
return HoneDataFlow(None,['MapStream'] + f.flow[0])
else:
return HoneDataFlow(None,['MapStream', f.__name__])
def MapList(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['MapList'] + f.flow[0])
else:
return HoneDataFlow(None,['MapList', f.__name__])
def FilterStreamSet(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterStreamSet'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterStreamSet', f.__name__])
def FilterStream(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterStream'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterStream', f.__name__])
def FilterList(f):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['FilterList'] + f.flow[0])
else:
return HoneDataFlow(None,['FilterList', f.__name__])
def ReduceStreamSet(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceStreamSet', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceStreamSet', init, f.__name__])
def ReduceStream(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceStream', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceStream', init, f.__name__])
def ReduceList(f, init):
if isinstance(f,HoneDataFlow):
return HoneDataFlow(None,['ReduceList', init] + f.flow[0])
else:
return HoneDataFlow(None,['ReduceList', init, f.__name__])
def MergeHosts():
return HoneDataFlow(None,['MergeHosts'])
def MergeStreams(stream1, stream2):
if isinstance(stream1, HoneQuery):
stream1 = stream1.convertToHoneDataFlow()
if isinstance(stream2, HoneQuery):
stream2 = stream2.convertToHoneDataFlow()
operator = ['MergeStreams']
stream1.addSubFlow(stream2)
operator.append(stream2.flowId)
stream1.flow.append(operator)
return stream1
def MergeStreamsForSet(stream1, stream2):
if isinstance(stream1, HoneQuery):
stream1 = stream1.convertToHoneDataFlow()
if isinstance(stream2, HoneQuery):
stream2 = stream2.convertToHoneDataFlow()
operator = ['MergeStreamsForSet']
stream1.addSubFlow(stream2)
operator.append(stream2.flowId)
stream1.flow.append(operator)
return stream1
def Print(f=None):
if f:
return HoneDataFlow(None, ['Print', f.__name__])
else:
return HoneDataFlow(None, ['Print'])
def RegisterPolicy(f=None):
return HoneDataFlow(None, ['RegisterPolicy
----
DONE *. clip_to_rect is inclusive on lower end and exclusive on upper end.
DONE *. clip_to_rect behaves intelligently under scaled ctm.
DONE *. clip_to_rect intersects input rect with the existing clipping rect.
DONE *. current rectangular clipping path is saved/restored to the stack when
save_state/restore_state are called.
DONE *. clip_to_rect clears current path.
DONE *. clip_to_rect raises NotImplementedError under a rotated ctm.
clip_to_rects() tests
---------------------
DONE *. Test that clip_to_rects raises not implemented, or whatever.
"""
import unittest
from numpy import array, transpose
import nose
from kiva.agg import GraphicsContextArray
import kiva
from test_utils import Utils
class ClipToRectTestCase(unittest.TestCase, Utils):
#------------------------------------------------------------------------
# Simple Clipping to a single rectangle.
#------------------------------------------------------------------------
def clip_to_rect_helper(self, desired, scale, clip_rects):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_args -- passed in as *clip_args to clip_to_rect.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
if isinstance(clip_rects, tuple):
gc.clip_to_rect(*clip_rects)
else:
for rect in clip_rects:
gc.clip_to_rect(*rect)
gc.rect(0, 0, 4, 4)
# These settings allow the fastest path.
gc.set_fill_color((0.0, 0.0, 0.0)) # black
gc.fill_path()
# test a single color channel
actual = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired, actual)
def test_clip_to_rect_simple(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple2(self):
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 0, 255, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 1, 1)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_negative(self):
desired = array([[255, 255, 255, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255],
[ 0, 0, 0, 255]])
clip_rect = (-1, -1, 4, 4)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple3(self):
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.49, 2.49)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple4(self):
desired = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
clip_rect = (1, 1, 2.5, 2.5)
self.clip_to_rect_helper(desired, 1, clip_rect)
def test_clip_to_rect_simple5(self):
# This tests clipping with a larger rectangle
desired = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
clip_rects = [(1, 1, 2, 2), (0, 0, 4, 4)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_empty_clip_region(self):
# This tests when the clipping region is clipped down to nothing.
desired = array([[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255],
[255, 255, 255, 255]])
clip_rects = [(1,1,4,4), (3,3,1,1), (1,1,1,1)]
self.clip_to_rect_helper(desired, 1, clip_rects)
def test_clip_to_rect_scaled(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 0, 0, 0, 0, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2, 2)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_clip_to_rect_scaled2(self):
desired = array([[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 0, 0, 0, 0, 0, 255],
[255, 255, 255, 255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255]])
clip_rect = (1, 1, 2.25, 2.25)
self.clip_to_rect_helper(desired, 2.0, clip_rect)
def test_save_restore_clip_state(self):
desired1 = array([[255, 255, 255, 255],
[255, 0, 0, 255],
[255, 0, 0, 255],
[255, 255, 255, 255]])
desired2 = array([[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 0, 0, 0],
[255, 255, 255, 255]])
gc = GraphicsContextArray((4,4), pix_format="rgb24")
gc.clear((1.0, 1.0, 1.0))
gc.set_fill_color((0.0, 0.0, 0.0))
gc.clip_to_rect(1, 1, 3, 3)
gc.save_state()
gc.clip_to_rect(1, 1, 2, 2)
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual1 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired1, actual1)
gc.restore_state()
gc.rect(0, 0, 4, 4)
gc.fill_path()
actual2 = gc.bmp_array[:,:,0]
self.assertRavelEqual(desired2, actual2)
def test_clip_to_rect_rotated(self):
# FIXME: test skipped
# This test raises an exception currently because the
# underlying library doesn't handle clipping to a rotated
# rectangle. For now, we catch the case with an
# exception, so that people can't screw up. In the future,
# we should actually support this functionality.
raise nose.SkipTest
gc = GraphicsContextArray((1,1), pix_format="rgb24")
gc.rotate_ctm(1.0)
self.failUnlessRaises(NotImplementedError,
gc.clip_to_rect, 0, 0, 1, 1)
#------------------------------------------------------------------------
# Successive Clipping of multiple rectangles.
#------------------------------------------------------------------------
def successive_clip_helper(self, desired, scale,
clip_rect1, clip_rect2):
""" desired -- 2D array with a single channels expected byte pattern.
scale -- used in scale_ctm() to change the ctm.
clip_rect1 -- 1st clipping path.
clip_rect2 -- 2nd clipping path.
"""
shp = tuple(transpose(desired.shape))
gc = GraphicsContextArray(shp, pix_format="rgb24")
gc.scale_ctm(scale, scale)
# clear background to white values (255, 255, 255)
gc.clear((1.0, 1.0, 1.0))
gc.clip_to_
"""The tests for the Splunk component."""
import unittest
from unittest import mock
from homeassistant.setup import setup_component
import homeassistant.components.splunk as splunk
from homeassistant.const import STATE_ON, STATE_OFF, EVENT_STATE_CHANGED
from tests.common import get_test_home_assistant
class TestSplunk(unittest.TestCase):
"""Test the Splunk component."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_setup_config_full(self):
"""Test setup with all data."""
config = {
'splunk': {
'host': 'host',
'port': 123,
'token': 'secret',
'ssl': 'False',
'name': 'hostname',
}
}
self.hass.bus.listen = mock.MagicMock()
self.assertTrue(setup_component(self.hass, splunk.DOMAIN, config))
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
self.hass.bus.listen.call_args_list[0][0][0])
def test_setup_config_defaults(self):
"""Test setup with defaults."""
config = {
'splunk': {
'host': 'host',
'token': 'secret',
}
}
self.hass.bus.listen = mock.MagicMock()
self.assertTrue(setup_component(self.hass, splunk.DOMAIN, config))
self.assertTrue(self.hass.bus.listen.called)
self.assertEqual(EVENT_STATE_CHANGED,
self.hass.bus.listen.call_args_list[0][0][0])
def _setup(self, mock_requests):
"""Test the setup."""
self.mock_post = mock_requests.post
self.mock_request_exception = Exception
mock_requests.exceptions.RequestException = self.mock_request_exception
config = {
'splunk': {
'host': 'host',
'token': 'secret',
'port': 8088,
}
}
self.hass.bus.listen = mock.MagicMock()
setup_component(self.hass, splunk.DOMAIN, config)
self.handler_method = self.hass.bus.listen.call_args_list[0][0][1]
@mock.patch.object(splunk, 'requests')
@mock.patch('json.dumps')
def test_event_listener(self, mock_dump, mock_requests):
"""Test event listener."""
mock_dump.side_effect = lambda x: x
self._setup(mock_requests)
valid = {'1': 1,
'1.0': 1.0,
STATE_ON: 1,
STATE_OFF: 0,
'foo': 'foo',
}
for in_, out in valid.items():
state = mock.MagicMock(state=in_,
domain='fake',
object_id='entity',
attributes={})
event = mock.MagicMock(data={'new_state': state}, time_fired=12345)
body = [{
'domain': 'fake',
'entity_id': 'entity',
'attributes': {},
'time': '12345',
'value': out,
'host': 'HASS',
}]
            payload = {'host': 'http://host:8088/services/collector/event',
'event': body}
self.handler_method(event)
self.assertEqual(self.mock_post.call_count, 1)
self.assertEqual(
self.mock_post.call_args,
mock.call(
payload['host'], data=payload,
                    headers={'Authorization': 'Splunk secret'},
timeout=10
)
)
self.mock_post.reset_mock()
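        # For reference, a minimal sketch of the call the assertions above
        # expect the component to make for each state change (json.dumps is
        # mocked to the identity here, so ``data`` is the raw payload dict):
        #
        #   requests.post('http://host:8088/services/collector/event',
        #                 data={'host': <collector URL>, 'event': body},
        #                 headers={'Authorization': 'Splunk secret'},
        #                 timeout=10)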
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ParameterGroup(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self.name = None
self.description = None
self.engine = None
self._current_param = None
def __repr__(self):
return 'ParameterGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Parameter':
if self._current_param:
self[self._current_param.name] = self._current_param
self._current_param = Parameter(self)
return self._current_param
def endElement(self, name, value, connection):
if name == 'DBParameterGroupName':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'Engine':
self.engine = value
else:
setattr(self, name, value)
def modifiable(self):
mod = []
for key in self:
p = self[key]
if p.is_modifiable:
mod.append(p)
return mod
def get_params(self):
pg = self.connection.get_all_dbparameters(self.name)
self.update(pg)
def add_param(self, name, value, apply_method):
param = Parameter()
param.name = name
param.value = value
param.apply_method = apply_method
        self[name] = param  # ParameterGroup is a dict keyed by parameter name
class Parameter(object):
"""
Represents a RDS Parameter
"""
ValidTypes = {'integer' : int,
'string' : str,
'boolean' : bool}
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
self.type = str
self.source = None
self.is_modifiable = True
self.description = None
self.apply_method = None
self.allowed_values = None
def __repr__(self):
return 'Parameter:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ParameterName':
self.name = value
elif name == 'ParameterValue':
self._value = value
elif name == 'DataType':
if value in self.ValidTypes:
self.type = value
elif name == 'Source':
if value in self.ValidSources:
self.source = value
elif name == 'IsModifiable':
if value.lower() == 'true':
self.is_modifiable = True
else:
self.is_modifiable = False
elif name == 'Description':
self.description = value
elif name == 'ApplyType':
if value in self.ValidApplyTypes:
self.apply_type = value
elif name == 'AllowedValues':
self.allowed_values = value
else:
setattr(self, name, value)
def merge(self, d, i):
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
if self._value is not None:
d[prefix+'ParameterValue'] = self._value
        if self.apply_method:
d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
        if not isinstance(value, (str, unicode)):
raise ValueError, 'value must be of type str'
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
raise ValueError, 'value must be in %s' % self.allowed_values
self._value = value
def _set_integer_value(self, value):
if isinstance(value, str) or isinstance(value, unicode):
            value = int(value)
if isinstance(value, int) or isinstance(value, long):
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
raise ValueError, 'range is %s' % self.allowed_values
self._value = value
else:
raise ValueError, 'value must be integer'
def _set_boolean_value(self, value):
if isinstance(value, bool):
self._value = value
elif isinstance(value, str) or isinstance(value, unicode):
if value.lower() == 'true':
self._value = True
else:
self._value = False
else:
raise ValueError, 'value must be boolean'
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
elif self.type == 'integer':
self._set_integer_value(value)
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
raise TypeError, 'unknown type (%s)' % self.type
def get_value(self):
        if self._value is None:
return self._value
if self.type == 'string':
return self._value
elif self.type == 'integer':
if not isinstance(self._value, int) and not isinstance(self._value, long):
self._set_integer_value(self._value)
return self._value
elif self.type == 'boolean':
if not isinstance(self._value, bool):
self._set_boolean_value(self._value)
return self._value
else:
raise TypeError, 'unknown type (%s)' % self.type
value = property(get_value, set_value, 'The value of the parameter')
def apply(self, immediate=False):
if immediate:
self.apply_method = 'immediate'
else:
self.apply_method = 'pending-reboot'
self.group.connection.modify_parameter_group(self.group.name, [self])
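# A minimal usage sketch (hypothetical group and parameter names; assumes a
# live RDS connection, which this example does not create for you):
#
#   >>> import boto
#   >>> conn = boto.connect_rds()
#   >>> pg = conn.get_all_dbparameters('mygroup')
#   >>> param = pg['max_connections']
#   >>> param.value = '100'          # coerced and validated by set_value()
#   >>> param.apply(immediate=True)  # modify_parameter_group, ApplyMethod='immediate'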
  initially None.
"""
global _threadPool, _threadPoolLock
_threadPoolLock.acquire()
try:
oldThreadPool = _threadPool
_threadPool = threadPool
finally:
_threadPoolLock.release()
return oldThreadPool
def getDefaultThreadPool():
"""Get the current default thread pool for new tasks.
If no default thread pool exists then one will be created automatically.
"""
global _threadPool, _threadPoolLock
if _threadPool is None:
import cake.threadpool
processorCount = cake.threadpool.getProcessorCount()
_threadPoolLock.acquire()
try:
if _threadPool is None:
_threadPool = cake.threadpool.ThreadPool(numWorkers=processorCount)
finally:
_threadPoolLock.release()
return _threadPool
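# Usage sketch: the default pool is created lazily with one worker per
# processor. A caller can also install its own pool up front via the setter
# above (assumed here to be named setDefaultThreadPool; it returns the pool
# it replaces):
#
#   import cake.threadpool
#   oldPool = setDefaultThreadPool(cake.threadpool.ThreadPool(numWorkers=4))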
class TaskError(Exception):
"""An exception type raised by the L{Task} class.
"""
pass
def _makeTasks(value):
if value is None:
return []
elif isinstance(value, Task):
return [value]
else:
return list(value)
class Task(object):
"""An operation that is performed on a background thread.
"""
class State(object):
"""A class that represents the state of a L{Task}.
"""
NEW = "new"
"""The task is in an uninitialised state."""
WAITING_FOR_START = "waiting for start"
"""The task is waiting to be started."""
RUNNING = "running"
"""The task is running."""
WAITING_FOR_COMPLETE = "waiting for complete"
"""The task is waiting to complete."""
SUCCEEDED = "succeeded"
"""The task has succeeded."""
FAILED = "failed"
"""The task has failed."""
_current = threading.local()
def __init__(self, func=None):
"""Construct a task given a function.
@param func: The function this task should run.
@type func: any callable
"""
self._func = func
self._immediate = None
self._threadPool = None
self._required = False
self._parent = Task.getCurrent()
self._state = Task.State.NEW
self._lock = threading.Lock()
self._startAfterCount = 0
self._startAfterFailures = False
self._startAfterDependencies = None
self._completeAfterCount = 0
self._completeAfterFailures = False
self._completeAfterDependencies = None
self._callbacks = []
@staticmethod
def getCurrent():
"""Get the currently executing task.
@return: The currently executing Task or None if no current task.
@rtype: Task or None
"""
return getattr(Task._current, "value", None)
@property
def state(self):
"""Get the state of this task.
"""
return self._state
@property
def parent(self):
"""Get the parent of this task.
The parent task is the task that created this task.
"""
return self._parent
@property
def required(self):
"""True if this task is required to execute, False if it
has not yet been required to execute.
"""
return self._required
@property
def started(self):
"""True if this task has been started.
    A task is started if start(), startAfter(), lazyStart(),
lazyStartAfter() or cancel() has been called on it.
"""
return self._state is not Task.State.NEW
@property
def completed(self):
"""True if this task has finished execution or has been cancelled.
"""
s = self._state
return s is Task.State.SUCCEEDED or s is Task.State.FAILED
@property
def succeeded(self):
"""T | rue if this task successfully finished execution.
"""
return self._state is Task.State.SUCCEEDED
@property
def failed(self):
"""True if this task failed or was cancelled.
"""
return self._state is Task.State.FAILED
@property
def result(self):
"""If the task has completed successfully then holds the
return value of the task, otherwise raises AttributeError.
"""
if self.succeeded:
task = self
while isinstance(task._result, Task):
task = task._result
return task._result
else:
raise AttributeError("result only available on successful tasks")
def lazyStart(self, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
A 'required' task is a task that is started eagerly using L{start()} or L{startAfter()}
or a task that is a dependency of a 'required' task.
If no other required tasks have this task as a dependency then this task will never
be executed. i.e. it is a lazy task.
"""
self._start(other=None, immediate=False, required=False, threadPool=threadPool)
def lazyStartAfter(self, other, threadPool=None):
"""Start this task only if required as a dependency of another 'required' task.
But do not start this task until the 'other' tasks have completed.
If any of the other tasks complete with failure then this task will complete
with failure without being executed.
"""
self._start(other=other, immediate=False, required=False, threadPool=threadPool)
def start(self, immediate=False, threadPool=None):
"""Start this task now.
@param immediate: If True the task is pushed ahead of any other (waiting)
tasks on the task queue.
@type immediate: bool
@param threadPool: If specified then the task will be queued up to be
executed on the specified thread-pool. If not specified then the task
will be queued for execution on the default thread-pool.
@type threadPool: L{ThreadPool} or C{None}
@raise TaskError: If this task has already been started or
cancelled.
"""
self._start(other=None, immediate=immediate, required=True, threadPool=threadPool)
def startAfter(self, other, immediate=False, threadPool=None):
"""Start this task after other tasks have completed.
This task is cancelled (transition to Task.State.FAILED state) if any of the
other tasks fail.
@param other: The task or a list of tasks to start after.
@type other: L{Task} or C{list}(L{Task})
@param immediate: If True the task is pushed ahead of any other (waiting)
tasks on the task queue.
@type immediate: bool
@param threadPool: An optional thread pool to start this task on.
If not specified then the task is queued to the default thread-pool.
@type threadPool: L{ThreadPool} or None
@raise TaskError: If this task has already been started or
cancelled.
"""
self._start(other=other, immediate=immediate, required=True, threadPool=threadPool)
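  # A minimal dependency-chaining sketch (hypothetical callables):
  #
  #   fetch = Task(downloadFunc)
  #   parse = Task(parseFunc)
  #   parse.startAfter(fetch)   # 'parse' waits for 'fetch'
  #   fetch.start()
  #
  # If 'fetch' completes with failure, 'parse' transitions straight to
  # FAILED without executing parseFunc.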
def _start(self, other, immediate, required, threadPool):
immediate = bool(immediate)
required = bool(required)
otherTasks = _makeTasks(other)
if threadPool is None:
threadPool = getDefaultThreadPool()
self._lock.acquire()
try:
if self._state is not Task.State.NEW:
raise TaskError("task already started")
self._state = Task.State.WAITING_FOR_START
self._startAfterCount = len(otherTasks) + 1
self._immediate = immediate
self._threadPool = threadPool
if required:
self._required = True
else:
required = self._required
if required:
completeAfterDependencies = self._completeAfterDependencies
self._completeAfterDependencies = None
else:
self._startAfterDependencies = otherTasks
finally:
self._lock.release()
if required:
for t in otherTasks:
t._require()
t.addCallback(lambda t=t: self._startAfterCallback(t))
if completeAfterDependencies:
for t in completeAfterDependencies:
t._require()
t.addCallback(lambda t=t: self._completeAfterCallback(t))
self._startAfterCallback(self)
def _require(self):
"""Flag this task as required.
If this task was started with a call to lazyStart/lazyStartAfter()
and has not yet been required by some other Task then this will
    cause this task and all of its dependencies to become required.
"""
if self.required:
return
startAfterDependencies = None
completeAfterDependencies = None
self._lock.acquire()
try:
alreadyRequired = self.required
if not alreadyRequired:
startAf |
# -*- coding: utf-8 -*-
#
# web_container.py
#
# Copyright © 2016-2017 Antergos
#
# This file is part of whither.
#
# whither is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# whither is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with whither; If not, see <http://www.gnu.org/licenses/>.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import network
ALIAS = 'os-floating-ip-pools'
authorize = extensions.os_compute_authorizer(ALIAS)
def _translate_floating_ip_view(pool_name):
return {
'name': pool_name,
}
def _translate_floating_ip_pools_view(pools):
return {
'floating_ip_pools': [_translate_floating_ip_view(pool_name)
for pool_name in pools]
}
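# For illustration: given pools ['public', 'private'], the helpers above
# produce {'floating_ip_pools': [{'name': 'public'}, {'name': 'private'}]}.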
class FloatingIPPoolsController(wsgi.Controller):
"""The Floating IP Pool API controller for the OpenStack API."""
def __init__(self):
self.network_api = network.API(skip_policy_check=True)
super(FloatingIPPoolsController, self).__init__()
@extensions.expected_errors(())
def index(self, req):
"""Return a list of pools."""
context = req.environ['nova.context']
authorize(context)
pools = self.network_api.get_floating_ip_pools(context)
return _translate_floating_ip_pools_view(pools)
class FloatingIpPools(extensions.V21APIExtensionBase):
"""Floating IPs support."""
name = "FloatingIpPools"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
FloatingIPPoolsController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V21APIExtensionBase and the extension
will not be loaded without it.
"""
return []
,
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
if return_keys:
return_dict = {}
for k in return_keys:
return_dict[k] = 1
# print "returndict=",return_dict
mysearchresult = collection.find(
query, return_dict).skip(skip).limit(limit).sort(
sortkey, DESCENDING)
else:
mysearchresult = collection.find(query).skip(
skip).limit(limit).sort(sortkey, DESCENDING)
# response_dict['num_results']=int(mysearchresult.count(with_limit_and_skip=False))
response_dict['code'] = 200
response_dict['type'] = "search-results"
for d in mysearchresult:
d['id'] = d['_id'].__str__()
del d['_id']
l.append(d)
response_dict['results'] = l
except Exception:
print("Error reading from Mongo")
print(str(sys.exc_info()))
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def delete_mongo(database_name, collection_name,
query={}, just_one=False):
"""delete from mongo helper"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
mysearchresult = collection.remove(query, just_one)
response_dict['code'] = 200
response_dict['type'] = "remove-confirmation"
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['num_results'] = 0
response_dict['code'] = 500
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
def write_mongo(document, database_name,
collection_name, update=False):
"""Write a document to the collection. Return a response_dict containing
the written record. Method functions as both insert or update based on update
parameter"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
# Cast the query to integers
        # if settings.CAST_STRINGS_TO_INTEGERS:
# query = cast_number_strings_to_integers(query)
potential_key_found = False
existing_transaction_id = None
existing_mongo_id = None
# enforce non-repudiation constraint on create
# if document.has_key("transaction_id"):
# existing_transaction_id = collection.find_one({'transaction_id':document['transaction_id']})
# if existing_transaction_id:
# potential_key_found = True
if "id" in document:
document["_id"] = ObjectId(document["id"])
del document["id"]
if "_id" in document:
existing_mongo_id = collection.find_one({'_id': document['_id']})
if existing_mongo_id:
potential_key_found = True
        if not update and potential_key_found:
            # 409 Conflict
response_dict['code'] = 409
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict[
'message'] = "Perhaps you meant to perform an update instead?"
response_dict['errors'] = [
"Conflict. This transaction_id has already been created.", ]
return response_dict
elif update and potential_key_found: # this is an update
# set kwargs _id to the existing_id to force to overwrite existing
# document
# if existing_transaction_id:
#
# document['_id'] = ObjectId(existing_transaction_id['_id'])
# document['history']=True
# history_collection_name = "%s_history" % str(collection_name)
# history_collection = db[str(history_collection_name)]
#
# history_object = existing_transaction_id
# history_object['historical_id'] = existing_transaction_id['_id']
# del history_object['_id']
# #now write the record to the historical collection
# written_object = history_collection.insert(history_object)
if existing_mongo_id:
document['_id'] = ObjectId(existing_mongo_id['_id'])
document['history'] = True
history_collection_name = "%s_history" % str(collection_name)
history_collection = db[str(history_collection_name)]
# print history_collection
# print existing_mongo_id
history_object = existing_mongo_id
                history_object['historical_id'] = existing_mongo_id['_id']
del history_object['_id']
# print history_object
# now write the record to the historical collection
written_object = history_collection.insert(history_object)
# update the record
            myobjectid = collection.save(document)
else:
# this is new so perform an insert.
myobjectid = collection.insert(document)
# now fetch the record we just wrote so that we write it back to the
# DB.
myobject = collection.find_one({'_id': myobjectid})
response_dict['code'] = 200
response_dict['type'] = "write-results"
myobject['id'] = myobject['_id'].__str__()
del myobject['_id']
l.append(myobject)
response_dict['results'] = l
except Exception:
# print "Error reading from Mongo"
# print str(sys.exc_info())
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
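# A minimal usage sketch (hypothetical names; assumes a reachable mongod):
#
#   result = write_mongo({'foo': 'bar'}, 'mydb', 'mycol')        # insert
#   doc = result['results'][0]                                   # has 'id'
#   doc['foo'] = 'baz'
#   result = write_mongo(doc, 'mydb', 'mycol', update=True)      # update;
#   # the prior version is first copied into 'mycol_history'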
def bulk_csv_import_mongo(csvfile, database_name, collection_name,
delete_collection_before_import=False):
"""return a response_dict with a list of search results"""
"""method can be insert or update"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mconnection = MongoClient(
mongodb_client_url, document_class=OrderedDict)
db = mconnection[database_name]
collection = db[collection_name]
if delete_collection_before_import:
myobjectid = collection.remove({})
# open the csv file.
csvhandle = csv.reader(open(csvfile._get_path(), 'rb'), delimiter=',')
rowindex = 0
errors = 0
error_list = []
success = 0
for row in csvhandle:
if rowindex == 0:
column_headers = row
cleaned_headers = []
for c in column_headers:
c = c.replace(".", "")
c = c.replace("$", "-")
c = c.replace(" ", "_")
cleaned_headers.append(c)
else:
record = OrderedDict(zip(cleaned_headers, row))
                # If there are no values, skip the key/value pair.
kwargs = OrderedDict()
# Only populate fields that are not blank.
                for k, v in reco
import re
import csv
from urllib import parse
import lxml.html
from pupa.scrape import Person, Scraper
class NoDetails(Exception):
pass
SESSION_NUMBERS = {
'2011': '62nd',
'2013': '63rd',
'2015': '64th',
'2017': '65th',
}
class MTPersonScraper(Scraper):
def url_xpath(self, url):
# Montana's legislator page was returning valid content with 500
# code as of 1/9/2013. Previous discussions with them after similar
# incidents in the past suggest some external part of their stack
# is having some issue and the error is bubbling up to the ret code.
self.raise_errors = False
html = self.get(url).text
doc = lxml.html.fromstring(html)
self.raise_errors = True
return doc
def scrape(self, chamber=None, session=None):
if not session:
session = max(SESSION_NUMBERS.keys())
session_number = SESSION_NUMBERS[session]
chambers = [chamber] if chamber else ['upper', 'lower']
for chamber in chambers:
url = 'http://leg.mt.gov/content/sessions/{}/{}{}Members.txt'.format(
session_number, session, 'Senate' if chamber == 'upper' else 'House'
)
yield from self.scrape_legislators(url, chamber=chamber)
def scrape_legislators(self, url, chamber):
data = self.get(url).text
data = data.replace('"""', '"') # weird triple quotes
data = data.splitlines()
fieldnames = ['last_name', 'first_name', 'party', 'district',
'address', 'city', 'state', 'zip']
csv_parser = csv.DictReader(data, fieldnames)
district_leg_urls = self._district_legislator_dict()
# Toss the row headers.
next(csv_parser)
for entry in csv_parser:
if not entry:
                continue
# District.
district = entry['district']
            hd_or_sd, district = district.split()
# Party.
party_letter = entry['party']
party = {'D': 'Democratic', 'R': 'Republican'}[party_letter]
# Get full name properly capped.
fullname = '%s %s' % (entry['first_name'].capitalize(),
entry['last_name'].capitalize())
# Get any info at the legislator's detail_url.
detail_url = district_leg_urls[hd_or_sd][district]
# Get the office.
address = '\n'.join([
entry['address'],
'%s, %s %s' % (entry['city'].title(), entry['state'], entry['zip'])
])
try:
deets = self._scrape_details(detail_url)
except NoDetails:
self.logger.warning("No details found at %r" % detail_url)
continue
legislator = Person(name=fullname, primary_org=chamber, district=district,
party=party, image=entry.get('photo_url', ''))
legislator.add_source(detail_url)
legislator.add_source(url)
legislator.add_link(detail_url)
legislator.add_contact_detail(type='address', value=address, note='District Office')
phone = deets.get('phone')
fax = deets.get('fax')
email = deets.get('email')
if phone:
legislator.add_contact_detail(type='voice', value=phone, note='District Office')
if fax:
legislator.add_contact_detail(type='fax', value=fax, note='District Office')
if email:
legislator.add_contact_detail(type='email', value=email, note='District Office')
yield legislator
def _district_legislator_dict(self):
'''Create a mapping of districts to the legislator who represents
each district in each house.
Used to get properly capitalized names in the legislator scraper.
'''
res = {'HD': {}, 'SD': {}}
url = 'http://leg.mt.gov/css/find%20a%20legislator.asp'
# Get base url.
parts = parse.urlparse(url)
        parts = parts._replace(path='')
baseurl = parts.geturl()
# Go the find-a-legislator page.
doc = self.url_xpath(url)
doc.make_links_absolute(baseurl)
# Get the link to the current member roster.
url = doc.xpath('//a[contains(@href, "roster.asp")]/@href')[0]
# Fetch it.
self.raise_errors = False
html = self.get(url).text
doc = lxml.html.fromstring(html)
self.raise_errors = True
# Get the new baseurl, like 'http://leg.mt.gov/css/Sessions/62nd/'
parts = parse.urlparse(url)
path, _, _ = parts.path.rpartition('/')
        parts = parts._replace(path=path)
baseurl = parts.geturl()
doc.make_links_absolute(baseurl)
table = doc.xpath('//table[@name="Legislators"]')[0]
for tr in table.xpath('tr'):
td1, td2 = tr.xpath('td')
# Skip header rows and retired legislators
if not td2.text_content().strip() or 'Resigned' in tr.text_content():
continue
# Get link to the member's page.
detail_url = td1.xpath('h4/a/@href')[0]
# Get the members district so we can match the
# profile page with its csv record.
house, district = td2.text_content().split()
res[house][district] = detail_url
return res
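    # For illustration, the returned mapping is keyed by chamber prefix and
    # district number (URLs hypothetical):
    #
    #   {'HD': {'1': 'http://leg.mt.gov/...', '2': '...'},
    #    'SD': {'1': 'http://leg.mt.gov/...', ...}}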
def _scrape_details(self, url):
'''Scrape the member's bio page.
Things available but not currently scraped are office address,
and waaay too much contact info, including personal email, phone.
'''
doc = self.url_xpath(url)
# Get base url.
parts = parse.urlparse(url)
        parts = parts._replace(path='')
baseurl = parts.geturl()
doc.make_links_absolute(baseurl)
xpath = '//img[contains(@src, "legislator")]/@src'
try:
photo_url = doc.xpath(xpath).pop()
except IndexError:
raise NoDetails('No details found at %r' % url)
details = {'photo_url': photo_url}
        # Parse address.
        elements = list(doc.xpath('//b[contains(., "Address")]/..')[0])
        # MT's website currently has a typo that places the "address"
        # heading inline with the "Information Office" phone number.
        # This hack temporarily makes things work.
        elements = elements[3:]
chunks = []
for br in elements:
chunks.extend(filter(None, [br.text, br.tail]))
# As far as I can tell, MT legislators don't have capital offices.
for line in chunks[2:]:
if not line.strip():
continue
            key = None
            for k in ('ph', 'fax'):
                if k in line.lower():
                    key = {'ph': 'phone'}.get(k, k)
                    break
            number = re.search(r'\(\d{3}\) \d{3}\-\d{4}', line)
            if number:
                number = number.group()
                if key:
                    # Used to set this on the office.
                    details[key] = number
try:
email = doc.xpath('//b[contains(., "Email")]/..')[0]
except IndexError:
pass
else:
if email:
html = lxml.html.tostring(email.getparent()).decode()
match = re.search(r'[a-zA-Z0-9\.\_\%\+\-]+@\w+\.[a-z]+', html)
if match:
details['email'] = match.group()
return details
import copy
import pytest
from peek.line import InvalidIpAddressException, Line, InvalidStatusException
# 127.0.0.1 - - [01/Jan/1970:00:00:01 +0000] "GET / HTTP/1.1" 200 193 "-" "Python"
test_line_contents = {
'ip_address': '127.0.0.1',
'timestamp': '[01/Jan/1970:00:00:01 +0000]',
'verb': 'GET',
'path': '/',
'status': '200',
'size': '193',
'referrer': '-',
'user_agent': 'Python'
}
def get_updated_line_contents(updates=None):
test_contents = copy.deepcopy(test_line_contents)
if updates is not None:
test_contents.update(updates)
return test_contents
test_line = Line(line_contents=test_line_contents)
class TestLineInstantiation:
@pytest.mark.parametrize('expected,actual', [
('127.0.0.1', test_line.ip_address),
(1, test_line.timestamp),
('GET', test_line.verb),
('/', test_line.path),
        (200, test_line.status),
(193, test_line.byte_count),
('-', test_line.referrer),
('Python', test_line.user_agent)
])
def test_retrieval(self, expected, actual):
assert expected == actual
class TestLineExceptions:
def test_passing_invalid_ip_address_throws_exception(self):
with pytest.raises(InvalidIpAddressException):
            Line(line_contents=get_updated_line_contents({'ip_address': 'foobar'}))
def test_passing_non_parseable_status_throws_exception(self):
with pytest.raises(InvalidStatusException):
Line(line_contents=get_updated_line_contents({'status': 'foobar'}))
from flask import request, render_template
from flask.ext.login import current_user, login_user
from mysite.weibo import Client
from mysite import app, db
from mysite.models import Wuser, User
from . import weibo
@weibo.route('/oauthreturn')
def oauthreturn():
code = request.args.get('code', '')
if code:
client = Client(app.config['API_KEY'], app.config['API_SECRET'], app.config['REDIRECT_URI'])
client.set_code(code)
uid = client.token['uid']
profile = client.get('users/show', access_token=client.access_token, uid=uid)
wuser = Wuser.query.filter_by(uid=uid).first()
if wuser:
login_user(wuser.user)
else:
user = User()
wuser = Wuser(uid=uid)
wuser.user = user
db.session.add(user)
login_user(user)
wuser.update_access_token(client.token['access_token'])
wuser.update_profile(profile)
db.session.add(wuser)
db.session.commit()
    return render_template("weibo/profile.html", wuser=wuser)
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import versionutils
from oslo_utils import timeutils
import webob.exc
from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api import xmlutil
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('volume', 'services')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('services')
elem = xmlutil.SubTemplateElement(root, 'service', selector='services')
elem.set('binary')
elem.set('host')
elem.set('zone')
elem.set('status')
elem.set('state')
        elem.set('updated_at')
elem.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServicesUpdateTemplate(xmlutil.TemplateBuilder):
def construct(self):
# TODO(uni): template elements of 'host', 'service' and 'disabled'
# should be deprecated to make ServicesUpdateTemplate consistent
# with ServicesIndexTemplate. Still keeping it here for API
# compatibility sake.
root = xmlutil.TemplateElement('host')
root.set('host')
root.set('service')
root.set('disabled')
root.set('binary')
root.set('status')
root.set('disabled_reason')
return xmlutil.MasterTemplate(root, 1)
class ServiceController(wsgi.Controller):
def __init__(self, ext_mgr=None):
self.ext_mgr = ext_mgr
super(ServiceController, self).__init__()
@wsgi.serializers(xml=ServicesIndexTemplate)
def index(self, req):
"""Return a list of all running services.
Filter by host & service name.
"""
context = req.environ['cinder.context']
authorize(context, action='index')
detailed = self.ext_mgr.is_loaded('os-extended-services')
now = timeutils.utcnow(with_timezone=True)
services = objects.ServiceList.get_all(context)
host = ''
if 'host' in req.GET:
host = req.GET['host']
service = ''
if 'service' in req.GET:
service = req.GET['service']
versionutils.report_deprecated_feature(LOG, _(
"Query by service parameter is deprecated. "
"Please use binary parameter instead."))
binary = ''
if 'binary' in req.GET:
binary = req.GET['binary']
if host:
services = [s for s in services if s.host == host]
# NOTE(uni): deprecating service request key, binary takes precedence
binary_key = binary or service
if binary_key:
services = [s for s in services if s.binary == binary_key]
svcs = []
for svc in services:
updated_at = svc.updated_at
delta = now - (svc.updated_at or svc.created_at)
delta_sec = delta.total_seconds()
if svc.modified_at:
delta_mod = now - svc.modified_at
if abs(delta_sec) >= abs(delta_mod.total_seconds()):
updated_at = svc.modified_at
alive = abs(delta_sec) <= CONF.service_down_time
art = (alive and "up") or "down"
active = 'enabled'
if svc.disabled:
active = 'disabled'
ret_fields = {'binary': svc.binary, 'host': svc.host,
'zone': svc.availability_zone,
'status': active, 'state': art,
'updated_at': timeutils.normalize_time(updated_at)}
if detailed:
ret_fields['disabled_reason'] = svc.disabled_reason
            svcs.append(ret_fields)
return {'services': svcs}
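    # Illustrative shape of the index() response (values hypothetical;
    # 'disabled_reason' appears only when os-extended-services is loaded):
    #
    #   {'services': [{'binary': 'cinder-scheduler', 'host': 'node1',
    #                  'zone': 'nova', 'status': 'enabled', 'state': 'up',
    #                  'updated_at': <datetime>}]}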
def _is_valid_as_reason(self, reason):
if not reason:
return False
try:
utils.check_string_length(reason.strip(), 'Disabled reason',
min_length=1, max_length=255)
except exception.InvalidInput:
return False
return True
@wsgi.serializers(xml=ServicesUpdateTemplate)
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['cinder.context']
authorize(context, action='update')
ext_loaded = self.ext_mgr.is_loaded('os-extended-services')
ret_val = {}
if id == "enable":
disabled = False
status = "enabled"
if ext_loaded:
ret_val['disabled_reason'] = None
elif (id == "disable" or
(id == "disable-log-reason" and ext_loaded)):
disabled = True
status = "disabled"
else:
raise webob.exc.HTTPNotFound(explanation=_("Unknown action"))
try:
host = body['host']
except (TypeError, KeyError):
msg = _("Missing required element 'host' in request body.")
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled'] = disabled
if id == "disable-log-reason" and ext_loaded:
reason = body.get('disabled_reason')
if not self._is_valid_as_reason(reason):
msg = _('Disabled reason contains invalid characters '
'or is too long')
raise webob.exc.HTTPBadRequest(explanation=msg)
ret_val['disabled_reason'] = reason
# NOTE(uni): deprecating service request key, binary takes precedence
# Still keeping service key here for API compatibility sake.
service = body.get('service', '')
binary = body.get('binary', '')
binary_key = binary or service
if not binary_key:
raise webob.exc.HTTPBadRequest()
try:
svc = objects.Service.get_by_args(context, host, binary_key)
if not svc:
raise webob.exc.HTTPNotFound(explanation=_('Unknown service'))
svc.disabled = ret_val['disabled']
if 'disabled_reason' in ret_val:
svc.disabled_reason = ret_val['disabled_reason']
svc.save()
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound(explanation=_("service not found"))
ret_val.update({'host': host, 'service': service,
'binary': binary, 'status': status})
return ret_val
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
namespace = "http://docs.openstack.org/volume/ext/services/api/v2"
updated = "2012-10-28T00:00:00-00:00"
def get_resources(self):
resources = []
controller = ServiceController(self.ext_mgr)
resource = extensions.ResourceExtension('os-services', controller)
resources.append(resource)
return resources
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, subprocess, glob
script_dir = os.path.dirname(os.path.realpath(__file__))
publisher_command = os.environ.get("SIMPLE_COMMUNICATION_PUBLISHER_BIN")
if not publisher_command:
publisher_files = glob.glob(os.path.join(script_dir, "**/SimpleCommunicationPublisher*"), recursive=True)
publisher_command = next(iter(publisher_files), None)
assert publisher_command
subscriber_command = os.environ.get("SIMPLE_COMMUNICATION_SUBSCRIBER_BIN")
if not subscriber_command:
subscriber_files = glob.glob(os.path.join(script_dir, "**/SimpleCommunicationSubscriber*"), recursive=True)
subscriber_command = next(iter(subscriber_files), None)
assert subscriber_command
xml_file = os.environ.get("XML_FILE")
if xml_file:
real_xml_file = os.path.join(script_dir, xml_file)
else:
real_xml_file = os.path.join(script_dir, "liveliness_assertion.xml")
subscriber_proc = subprocess.Popen([subscriber_command, "--seed", str(os.getpid()), "--notexit",
"--xmlfile", real_xml_file])
publisher_proc = subprocess.Popen([publisher_command, "--seed", str(os.getpid()), "--exit_on_lost_liveliness",
"--xmlfile", real_xml_file], stdout=subprocess.PIPE)
while True:
line = publisher_proc.stdout.readline()
if line.strip().decode('utf-8').startswith('Publisher matched with subscriber '):
print("Subscriber matched.")
break
subscriber_proc.kill()
publisher_proc.communicate()
retvalue = publisher_proc.returncode
if retvalue != 0:
print("Test failed: " + str(retvalue))
else:
print("Test successed")
sys.exit(retvalue)
ntactList
self.groups = []
self.sub = sub
self.nickname = ""
self.avatar = None
self.show = ""
self.status = ""
self.url = ""
self.ptype = "unavailable"
def removeMe(self):
""" Destroys this object. Does not remove the contact from the server's list. """
self.contactList = None
self.avatar = None
def syncContactGrantedAuth(self):
""" Since last using the transport the user has been granted authorisation by this contact.
        Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "none":
self.sub = "to"
elif self.sub == "from":
self.sub = "both"
else:
return
self.updateRoster("subscribe")
def syncContactRemovedAuth(self):
""" Since last using the transport the user has been blocked by this | contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "to":
self.sub = "none"
elif self.sub == "both":
self.sub = "from"
else:
return
self.updateRoster("unsubscribed")
def syncUserGrantedAuth(self):
""" Since last using the transport the user has granted authorisation to this contact.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "none":
self.sub = "from"
elif self.sub == "to":
self.sub = "both"
else:
return
self.updateRoster("subscribe")
def syncUserRemovedAuth(self):
""" Since last using the transport the user has removed this contact's authorisation.
Call this to synchronise the user's Jabber list with their legacy list after logon. """
if self.sub == "from":
self.sub = "none"
elif self.sub == "both":
self.sub = "to"
else:
return
self.updateRoster("unsubscribe")
def syncGroups(self, groups, push=True):
""" Set the groups that this contact is in on the legacy service.
By default this pushes the groups out with a presence subscribed packet. """
self.groups = groups
if push: self.updateRoster("subscribed");
def contactGrantsAuth(self):
""" Live roster event """
if self.sub == "none":
self.sub = "to"
elif self.sub == "from":
self.sub = "both"
self.sendSub("subscribed")
self.sendPresence()
def contactRemovesAuth(self):
""" Live roster event """
if self.sub == "to":
self.sub = "none"
elif self.sub == "both":
self.sub = "from"
self.sendSub("unsubscribed")
def contactRequestsAuth(self):
""" Live roster event """
self.sendSub("subscribe")
def contactDerequestsAuth(self):
""" Live roster event """
self.sendSub("unsubscribe")
def jabberSubscriptionReceived(self, subtype):
""" Updates the subscription state internally and pushes the update to the legacy server """
if subtype == "subscribe":
if self.sub == "to" or self.sub == "both":
self.sendSub("subscribed")
self.contactList.legacyList.addContact(self.jid)
elif subtype == "subscribed":
if self.sub == "none":
self.sub = "from"
if self.sub == "to":
self.sub = "both"
self.contactList.legacyList.authContact(self.jid)
elif(subtype == "unsubscribe"):
if self.sub == "none" and self.sub == "from":
self.sendSub("unsubscribed")
if self.sub == "both":
self.sub = "from"
if self.sub == "to":
self.sub = "none"
self.contactList.legacyList.removeContact(self.jid)
elif(subtype == "unsubscribed"):
if self.sub == "both":
self.sub = "to"
if self.sub == "from":
self.sub = "none"
self.contactList.legacyList.deauthContact(self.jid)
def updateNickname(self, nickname, push=True):
try:
decodednickname = unicode(self.nickname, errors='replace')
except:
decodednickname = self.nickname
if decodednickname != "nickname":
self.nickname = nickname
# will re-remove this if it's removed from JEP-0172.
#self.sendNickname()
if push: self.sendPresence()
#n = Element((None, "nick"))
#n.attributes["xmlns"] = globals.NICK
#n.addContent(nickname)
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.NICK, "current", n)
def updatePresence(self, show, status, ptype, force=False, tojid=None, url=None):
updateFlag = (self.show != show or self.status != status or self.ptype != ptype or force)
self.show = show
self.status = status
self.ptype = ptype
self.url = url
if updateFlag:
self.sendPresence(tojid)
def updateAvatar(self, avatar=None, push=True):
if config.disableAvatars: return
if self.avatar == avatar: return
self.avatar = avatar
if push: self.sendPresence()
#if self.avatar and not config.disableAvatars and not config.disablePEPAvatars:
#avatarHash = self.avatar.getImageHash()
#avatarData = self.avatar.getImageData()
#inbuff = StringIO.StringIO(avatarData)
#img = Image.open(inbuff)
#d = Element((None, "data"))
#d.attributes["xmlns"] = globals.AVATARDATA
#d.addContent(base64.encodestring(avatarData).replace("\n",""))
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.AVATARDATA, avatarHash, d)
#m = Element((None, "metadata"))
#m.attributes["xmlns"] = globals.AVATARMETADATA
#mi = m.addElement("info")
#mi.attributes["id"] = avatarHash
#mi.attributes["type"] = "image/png"
#mi.attributes["bytes"] = str(len(avatarData))
#mi.attributes["height"] = str(img.size[0])
#mi.attributes["width"] = str(img.size[1])
#self.contactList.session.pytrans.pubsub.localPublish(self.jid, globals.AVATARMETADATA, avatarHash, m)
def sendSub(self, ptype):
self.contactList.session.sendPresence(to=self.contactList.session.jabberID, fro=self.jid, ptype=ptype)
def sendNickname(self, tojid=None):
if not tojid:
tojid=self.contactList.session.jabberID
if self.nickname:
el = Element((None, "message"))
el.attributes["to"] = tojid
el.attributes["from"] = self.jid
nick = el.addElement("nick")
nick.attributes["xmlns"] = globals.NICK
nick.addContent(self.nickname)
self.contactList.session.pytrans.send(el)
def sendPresence(self, tojid=None):
avatarHash = ""
if self.avatar and not config.disableAvatars:
avatarHash = self.avatar.getImageHash()
caps = Element((None, "c"))
caps.attributes["xmlns"] = globals.CAPS
caps.attributes["node"] = legacy.url + "/protocol/caps"
caps.attributes["ver"] = legacy.version
if not tojid:
tojid=self.contactList.session.jabberID
self.contactList.session.sendPresence(to=tojid, fro=self.jid, ptype=self.ptype, show=self.show, status=self.status, avatarHash=avatarHash, nickname=self.nickname, payload=[caps], url=self.url)
def updateRoster(self, ptype):
self.contactList.session.sendRosterImport(jid=self.jid, ptype=ptype, sub=self.sub, groups=self.groups)
def fillvCard(self, vCard, jid):
if self.nickname:
NICKNAME = vCard.addElement("NICKNAME")
NICKNAME.addContent(self.nickname)
if self.avatar and not config.disableAvatars and not config.disableVCardAvatars:
PHOTO = self.avatar.makePhotoElement()
vCard.addChild(PHOTO)
user = jid.split('@')[0]
return self.contactList.session.legacycon.jabberVCardRequest(vCard, user)
class ContactList:
""" Represents the Jabber contact list """
def __init__(self, session):
LogEvent(INFO, session.jabberID)
self.session = session
self.contacts = {}
def removeMe(self):
""" Cleanly removes the object """
LogEvent(INFO, self.session.jabberID)
for jid in self.contacts:
self.contacts[jid].updatePresence("", "", "unavailable")
self.contacts[jid].removeMe()
self.contacts = {}
self.session = None
self.legacyList = None
def resendLists(self, tojid=None):
for jid in self.contacts:
if self.contacts[jid].status != "unavailable":
self.contacts[jid].sendPresence(tojid)
LogEvent(INFO, self.session.jabberID)
def createContact(self, jid, sub):
""" Creates a contact object. Use this to initialise the contact list
Returns a Contact object which you can call sync* methods on to synchronise
the user's legacy contact list with their Jabber list """
LogEvent(INFO, self.session.jabberID)
c = Contact(jid, sub, self)
self.contacts[jid] = c
return c
def getContact(self, jid):
""" Finds the contact. If one doesn't |
_server(self, server_id, **kwargs):
"""Starts a stopped server and changes its status to ACTIVE.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#start-server-os-start-action
"""
return self.action(server_id, 'os-start', **kwargs)
def attach_volume(self, server_id, **kwargs):
"""Attaches a volume to a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#attach-a-volume-to-an-instance
"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.post('servers/%s/os-volume_attachments' % server_id,
post_body)
body = json.loads(body)
self.validate_response(schema.attach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
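    # Usage sketch (hypothetical IDs): the volumeAttachment body is built
    # from the kwargs, so a typical call is
    #
    #   client.attach_volume('server-uuid', volumeId='vol-uuid', device='/dev/vdb')
    #
    # which POSTs {"volumeAttachment": {"volumeId": "...", "device": "..."}}.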
def update_attached_volume(self, server_id, attachment_id, **kwargs):
"""Swaps a volume attached to an instance for another volume"""
post_body = json.dumps({'volumeAttachment': kwargs})
resp, body = self.put('servers/%s/os-volume_attachments/%s' %
(server_id, attachment_id),
post_body)
self.validate_response(schema.update_attached_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def detach_volume(self, server_id, volume_id): # noqa
"""Detaches a volume from a server instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#detach-a-volume-from-an-instance
"""
resp, body = self.delete('servers/%s/os-volume_attachments/%s' %
(server_id, volume_id))
self.validate_response(schema.detach_volume, resp, body)
return rest_client.ResponseBody(resp, body)
def show_volume_attachment(self, server_id, volume_id):
"""Return details about the given volume attachment.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#show-a-detail-of-a-volume-attachment
"""
resp, body = self.get('servers/%s/os-volume_attachments/%s' % (
server_id, volume_id))
body = json.loads(body)
self.validate_response(schema.show_volume_attachment, resp, body)
return rest_client.ResponseBody(resp, body)
def list_volume_attachments(self, server_id):
"""Returns the list of volume attachments for a given instance.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#list-volume-attachments-for-an-instance
"""
resp, body = self.get('servers/%s/os-volume_attachments' % (
server_id))
body = json.loads(body)
self.validate_response(schema.list_volume_attachments, resp, body)
        return rest_client.ResponseBody(resp, body)
def add_security_group(self, server_id, **kwargs):
"""Add a security group to the server.
        For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#add-security-group-to-a-server-addsecuritygroup-action
"""
return self.action(server_id, 'addSecurityGroup', **kwargs)
def remove_security_group(self, server_id, **kwargs):
"""Remove a security group from the server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#remove-security-group-from-a-server-removesecuritygroup-action
"""
return self.action(server_id, 'removeSecurityGroup', **kwargs)
def live_migrate_server(self, server_id, **kwargs):
"""This should be called with administrator privileges.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#live-migrate-server-os-migratelive-action
"""
return self.action(server_id, 'os-migrateLive', **kwargs)
def migrate_server(self, server_id, **kwargs):
"""Migrate a server to a new host.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#migrate-server-migrate-action
"""
return self.action(server_id, 'migrate', **kwargs)
def lock_server(self, server_id, **kwargs):
"""Lock the given server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#lock-server-lock-action
"""
return self.action(server_id, 'lock', **kwargs)
def unlock_server(self, server_id, **kwargs):
"""UNlock the given server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unlock-server-unlock-action
"""
return self.action(server_id, 'unlock', **kwargs)
def suspend_server(self, server_id, **kwargs):
"""Suspend the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#suspend-server-suspend-action
"""
return self.action(server_id, 'suspend', **kwargs)
def resume_server(self, server_id, **kwargs):
"""Un-suspend the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#resume-suspended-server-resume-action
"""
return self.action(server_id, 'resume', **kwargs)
def pause_server(self, server_id, **kwargs):
"""Pause the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#pause-server-pause-action
"""
return self.action(server_id, 'pause', **kwargs)
def unpause_server(self, server_id, **kwargs):
"""Un-pause the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unpause-server-unpause-action
"""
return self.action(server_id, 'unpause', **kwargs)
def reset_state(self, server_id, **kwargs):
"""Reset the state of a server to active/error.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#reset-server-state-os-resetstate-action
"""
return self.action(server_id, 'os-resetState', **kwargs)
def shelve_server(self, server_id, **kwargs):
"""Shelve the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#shelve-server-shelve-action
"""
return self.action(server_id, 'shelve', **kwargs)
def unshelve_server(self, server_id, **kwargs):
"""Un-shelve the provided server.
For a full list of available parameters, please refer to the official
API reference:
https://developer.openstack.org/api-ref/compute/#unshelve-restore-shelved-server-unshelve-action
"""
return self.action(server_id, 'unshelve', **kwargs)
def shelve_offload_server(self, server_id, **kwargs):
"""Shelve-offload the provided server.
        For a full list of available parameters, please refer to the
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('article', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='comments',
name='comments_date',
            field=models.DateTimeField(default=datetime.datetime(2015, 2, 17, 12, 54, 47, 78000, tzinfo=utc)),
preserve_default=False,
),
]
|
NAME='gif'
GCC_LIST=['gif']
RROR",
4: "WARNING",
5: "NOTICE",
6: "INFORMATIONAL",
7: "DEBUG",
}
MY_TZ = os.environ.get('CATCHER_TZ', 'NOT_SET')
TZ_INFO = pytz.timezone(MY_TZ) if MY_TZ != 'NOT_SET' else None
def __init__(self, broker_uri=BROKER_URI, broker_queue=BROKER_QUEUE,
hosts_file=None, mongo_backend=None,
etl_backend=ETL, msg_limit=100,
# leaving it open to use kombu to buffer messages
store_uri=BROKER_URI,
store_queue=LOGSTASH_QUEUE):
if hosts_file is not None:
self.KNOWN_HOSTS = KnownHosts(filename=hosts_file)
self.broker_uri = broker_uri
self.broker_queue = broker_queue
self.store_uri = store_uri
self.store_queue = store_queue
self.mongo_backend = mongo_backend
self.etl_backend = etl_backend
self.keep_running = False
self.msg_limit = msg_limit
@classmethod
def split_alert_message(cls, data):
t = ''
msg = data
end = data.find('>')
start = data.find('<')
if len(data) < end+1:
return '', msg
if start == 0 and end > 0 and end < 10:
t = data[start+1:end]
if not t.isdigit():
return '', data
else:
msg = data[end+1:]
return t, msg
@classmethod
def calculate_msg_type(cls, data):
t, msg = cls.split_alert_message(data)
if len(t) == 0:
return "UNKNOWN"
v = int(t, 10)
if v > 7:
v &= 0x7
return cls.SYSLOG_MSG_TYPE[v]
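    # PRI-parsing sketch: a frame like '<13>hello' carries
    # priority = facility * 8 + severity, so 13 & 0x7 == 5 maps to NOTICE
    # (both helpers are classmethods, called here on the enclosing class):
    #
    #   >>> split_alert_message('<13>hello')
    #   ('13', 'hello')
    #   >>> calculate_msg_type('<13>hello')
    #   'NOTICE'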
@classmethod
    def format_timestamp(cls, tstamp):
        if cls.TZ_INFO is not None:
            local_tz = cls.TZ_INFO.localize(tstamp, is_dst=None)
utc_tz = local_tz.astimezone(pytz.utc)
return str(utc_tz.strftime("%Y-%m-%dT%H:%M:%S") +\
".%03d" % (tstamp.microsecond / 1000) + "Z")
return str(tstamp.strftime("%Y-%m-%dT%H:%M:%S") +\
".%03d" % (tstamp.microsecond / 1000))
@classmethod
def get_base_json(cls, syslog_msg, syslog_server_ip,
catcher_name, catcher_host, catcher_tz):
r = {'source': "syslog", 'raw': syslog_msg,
'type': 'json',
             '_id': sha256(syslog_msg).hexdigest(),
'@timestamp': cls.format_timestamp(datetime.now()),
'@version': "1",
'message': "transformed syslog",
'path': '',
'tags': [],
'catcher_tz': catcher_tz,
             'catcher_host': catcher_host,
'catcher_name': catcher_name
}
t, msg = cls.split_alert_message(syslog_msg)
r['syslog_level'] = cls.calculate_msg_type(syslog_msg)
r['syslog_msg'] = msg
r['syslog_tag'] = t
r['syslog_server'] = cls.resolve_host(syslog_server_ip)
r['syslog_server_ip'] = syslog_server_ip
r['syslog_catcher'] = catcher_name
return r
@classmethod
def resolve_host(cls, ip_host):
return cls.KNOWN_HOSTS.resolve_host(ip_host)
def process_message(self, syslog_msg,
syslog_server_ip,
catcher_name, catcher_host, catcher_tz):
m = "Extracting and converting msg from %s msg (syslog: %s)" % (syslog_server_ip, catcher_name)
logging.debug(m)
r = self.get_base_json(syslog_msg, syslog_server_ip,
catcher_name, catcher_host, catcher_tz)
sm = {}
try:
result = self.etl_backend.syslog_et(syslog_msg)
sm.update(result.get('rule_results', result))
if 'rule_name' in result:
sm['rule_name'] = result.get('rule_name')
sm['tags'] = []
if sm.get('syslog_level', None) is not None:
sm['tags'].append(sm['syslog_level'])
if sm.get('rule_name', None) is not None:
sm['tags'].append(sm['rule_name'])
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
r.update(sm)
return r
def extract_message_components(self, msg_dict):
syslog_msg = msg_dict.get('syslog_msg', '')
syslog_server_ip = msg_dict.get('syslog_server_ip', '')
catcher_host = msg_dict.get('catcher_host', '')
catcher_name = msg_dict.get('catcher_name', '')
catcher_tz = msg_dict.get('catcher_tz', str(get_localzone()))
return self.process_message(syslog_msg,
syslog_server_ip,
catcher_name, catcher_host, catcher_tz)
def process_and_report(self, incoming_msg):
logging.debug("Processing and report syslog_msg")
message = incoming_msg
if isinstance(incoming_msg, str):
try:
message = json.loads(incoming_msg)
except:
message = {}
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
raise
etl_data = self.extract_message_components(message)
syslog_msg = etl_data['raw']
self.store_results(syslog_msg, etl_data)
return etl_data
def _read_messages(self, uri, queue, callback=None, cnt=1):
msgs = []
read_all = False
if cnt < 1:
read_all = True
try:
logging.debug("Reading the messages")
with Connection(uri) as conn:
q = conn.SimpleQueue(queue)
while cnt > 0 or read_all:
                    cnt -= 1
try:
message = q.get(block=False)
if callback is not None:
data = callback(message.payload)
msgs.append(data)
logging.debug("made it here 2")
logging.debug(data)
message.ack()
except Queue.Empty:
logging.debug("%s queue is empty" % queue)
break
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
logging.debug("Successfully read %d messages" % len(msgs))
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
logging.debug("Failed to read message")
return msgs
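    # Usage sketch for feeding _read_messages() during testing -- kombu's
    # SimpleQueue API as used above; the broker URI and queue name are
    # assumptions:
    #
    #     from kombu import Connection
    #     with Connection('amqp://guest:guest@localhost//') as conn:
    #         conn.SimpleQueue('syslog').put(
    #             {'syslog_msg': '<13>test', 'syslog_server_ip': '127.0.0.1'})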
def store_mongo(self, syslog_msg, etl_data):
if self.mongo_backend is not None:
m = "Sending results to mongo"
logging.debug(m)
raw_insert, json_insert = self.mongo_backend.insert(
syslog_msg,
etl_data)
if not raw_insert:
logging.debug("Failed to insert the raw syslog information in mongo")
if not json_insert:
logging.debug("Failed to insert the processed syslog information in mongo")
def store_kombu(self, etl_data):
logging.debug("Storing message in logstash queue")
try:
with Connection(self.store_uri) as conn:
q = conn.SimpleQueue(self.store_queue)
q.put(etl_data)
q.close()
logging.debug("Storing message in logstash success")
except:
tb = traceback.format_exc()
logging.debug("[XXX] Error: "+tb)
logging.debug("Storing message in logstash queue failed")
def store_results(self, syslog_msg, etl_data):
self.store_mongo(syslog_msg, etl_data)
self.store_kombu(etl_data)
def read_messages(self):
msgs = self._read_messages(self.broker_uri, self.broker_queue,
cnt=self.msg_limit,
callback=self.process_and_report)
return msgs
def serve_forever(self, poll_interval=1.0):
self.keep_running = True
# -*- coding: utf-'8' "-*-"
import base64
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
_inherit = 'payment.acquirer'
def _get_adyen_urls(self, cr, uid, environment, context=None):
""" Adyen URLs
- yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
"""
return {
'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
providers.append(['adyen', 'Adyen'])
return providers
_columns = {
'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
}
def _adyen_generate_merchant_sig(self, acquirer, inout, values):
""" Generate the shasign for incoming or outgoing communications.
:param browse acquirer: the payment.acquirer browse record. It should
                                have the skin HMAC key configured (adyen_skin_hmac_key)
:param string inout: 'in' (openerp contacting ogone) or 'out' (adyen
contacting openerp). In this last case only some
fields should be contained (see e-Commerce basic)
:param dict values: transaction values
:return string: shasign
"""
assert inout in ('in', 'out')
assert acquirer.provider == 'adyen'
if inout == 'in':
keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
else:
keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()
def get_value(key):
if values.get(key):
return values[key]
return ''
sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
key = acquirer.adyen_skin_hmac_key.encode('ascii')
return base64.b64encode(hmac.new(key, sign, sha1).digest())
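    # The signature above boils down to base64(HMAC-SHA1(skin_key, v1+v2+...))
    # over the ordered field values. An equivalent sketch with illustrative
    # values only:
    #
    #     import base64, hmac
    #     from hashlib import sha1
    #     sig = base64.b64encode(
    #         hmac.new(b'skin-key', b'1000EURREF001...', sha1).digest())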
def adyen_form_generate_values(self, cr, uid, id, values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
# tmp
import datetime
from dateutil import relativedelta
tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
values.update({
'merchantReference': values['reference'],
'paymentAmount': '%d' % int(float_round(values['amount'], 2) * 100),
'currencyCode': values['currency'] and values['currency'].name or '',
'shipBeforeDate': tmp_date,
'skinCode': acquirer.adyen_skin_code,
'merchantAccount': acquirer.adyen_merchant_account,
'shopperLocale': values.get('partner_lang'),
'sessionValidity': tmp_date,
'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
'merchantReturnData': json.dumps({'return_url': '%s' % values.pop('return_url')}) if values.get('return_url') else False,
'merchantSig': self._adyen_generate_merchant_sig(acquirer, 'in', values),
})
return values
def adyen_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
_inherit = 'payment.transaction'
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, pspReference = data.get('merchantReference'), data.get('pspReference')
if not reference or not pspReference:
error_msg = _('Adyen: received data with missing reference (%s) or missing pspReference (%s)') % (reference, pspReference)
_logger.info(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use pspReference ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
            error_msg = _('Adyen: received data for reference %s') % (reference)
if not tx_ids:
error_msg += _('; no order found')
else:
                error_msg += _('; multiple orders found')
_logger.info(error_msg)
raise ValidationError(error_msg)
tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)
# verify shasign
        shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
if shasign_check != data.get('merchantSig'):
error_msg = _('Adyen: invalid merchantSig, received %s, computed %s') % (data.get('merchantSig'), shasign_check)
_logger.warning(error_msg)
raise ValidationError(error_msg)
return tx
def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
# reference at acquirer: pspReference
if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
# seller
if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
# result
if not data.get('authResult'):
invalid_parameters.append(('authResult', data.get('authResult'), 'something'))
return invalid_parameters
def _adyen_form_validate(self, cr, uid, tx, data, context=None):
status = data.get('authResult', 'PENDING')
if status == 'AUTHORISED':
tx.write({
'state': 'done',
'acquirer_reference': data.get('pspReference'),
# 'date_validate': data.get('payment_date', fields.datetime.now()),
# 'paypal_txn_type': data.get('express_checkout')
})
return True
elif status == 'PENDING':
tx.write({
'state': 'pending',
'acquirer_reference': data.get('pspReference'),
})
return True
else:
error = _('Adyen: feedback error')
_logger.info(error)
tx.write({
'state': 'error',
'state_message': error
})
return False
import json
class JSON_RPCError(Exception):
""" Base class for JSON-RPC errors. """
def to_json(self):
return json.dumps({
'code': self.code,
            'message': self.__doc__,
})
class ParseError(JSON_RPCError):
""" Invalid JSON was received by the server. An error occurred on the
server while parsing the JSON text.
"""
code = -32700
class InvalidRequestError(JSON_RPCError):
""" The JSON sent is not a valid Request object. """
code = -32600
class MethodNotFoundError(JSON_RPCError):
""" The method does not exist / is not available. | """
code = -32601
class InvalidParamsError(JSON_RPCError):
""" Invalid methods parameter(s). """
code = -32602
class InternalError(JSON_RPCError):
""" Internal JSON-RPC error. """
code = -32603
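if __name__ == '__main__':
    # Minimal usage sketch: each error serializes its spec-defined code and
    # uses its docstring as the message.
    try:
        raise MethodNotFoundError()
    except JSON_RPCError as error:
        print(error.to_json())  # {"code": -32601, "message": "..."}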
#!/usr/bin/env python
from distutils.core import setup
setup(name='Card-Magic',
version='1.0',
description='The best card and decks ever',
author='Juan Carlos Ferrer',
author_email='juan.carlos@micronixsolutions.com',
packages=['cardmagic', 'cardmagic.tests'],
package_data = {
'cardmagic': [
'translations/en/LC_MESSAGES/*',
'translations/es/LC_MESSAGES/*'],
      },
)
"""
********************************************************************************
Learn Python the Hard Way Third Edition, by
Zed A. Shaw
ISBN: 978-0321884916
********************************************************************************
"""
import random
from urllib import urlopen
import sys
#debug = "DEBUG: "
WORD_URL = "http://learncodethehardway.org/words.txt"
WORDS = []
PHRASES = {
"class %%%(%%%):":
"Make a class named %%% t | hat is-a %%%.",
"class %%%(object):\n\tdef __init__(self, ***)":
"class %%% has-a __init__ that takes self and *** parameters.",
"class %%%(object):\n\tdef ***(self,@@@)":
"class %%% has-a function named *** that takes self and @@@ parameters.",
"*** = %%%()":
"Set *** to an instance of class %%%.",
"***.***(@@@)":
"From *** get the *** function, and call it with parameters self, @@@.",
"***.*** = '** | *'":
"From *** get the *** attribute and set it to '***'."
}
# do they want to drill phrases first
PHRASE_FIRST = False
if len(sys.argv) == 2 and sys.argv[1] == "english":
PHRASE_FIRST = True
#print debug + "0"
# load up the words from the website
#for word in urlopen(WORD_URL).readlines():
# once downloaded, just open the file locally:
for word in open('words.txt').readlines():
WORDS.append(word.strip())
#print debug + word
def convert(snippet, phrase):
class_names = [w.capitalize() for w in
random.sample(WORDS, snippet.count("%%%"))]
other_names = random.sample(WORDS, snippet.count("***"))
results = []
param_names = []
for i in range(0, snippet.count("@@@")):
param_count = random.randint(1,3)
param_names.append(', '.join(random.sample(WORDS, param_count)))
for sentence in snippet, phrase:
result = sentence[:]
#fake class names
for word in class_names:
result = result.replace("%%%", word, 1)
#fake other names
for word in other_names:
result = result.replace("***", word, 1)
#fake parameter lists
for word in param_names:
result = result.replace("@@@", word, 1)
results.append(result)
return results
# keep going until EOF
try:
while True:
snippets = PHRASES.keys()
#print debug + "3"
random.shuffle(snippets)
for snippet in snippets:
#print debug + "4"
phrase = PHRASES[snippet]
question, answer = convert(snippet, phrase)
if PHRASE_FIRST:
question, answer = answer, question
print question
raw_input("> ")
print "ANSWER: %s\n\n" % answer
except EOFError:
print "\nBye"
from collections import OrderedDict
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = OrderedDict(
(
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
(
"{{company_limited_prefix}}{{last_name}}{{company_suffix}} {{company_limited_suffix}}",
0.2,
),
("{{company_limited_prefix}}{{last_name}} {{company_limited_suffix}}", 0.2),
("{{company_prefix}}{{last_name}}", 0.2),
("{{company_prefix}}{{last_name}}{{company_suffix}}", 0.2),
("{{last_name}}{{company_suffix}}", 0.1),
("{{nonprofit_prefix}}{{last_name}}", 0.1),
("{{last_name}}-{{last_name}}", 0.05),
("{{last_name}}และ{{last_name}}", 0.05),
("{{company_limited_prefix}}{{last_name}}", 0.01),
)
)
company_prefixes = OrderedDict(
(
("ห้างหุ้นส่วนจำกัด ", 0.3),
("หจก.", 0.2),
("บจก.", 0.1),
("บมจ.", 0.1),
("ห้างหุ้นส่วนสามัญ ", 0.1),
("หสน.", 0.01),
)
)
nonprofit_prefixes = OrderedDict(
(
("สมาคม", 0.4),
("มูลนิธิ", 0.3),
("ชมรม", 0.2),
("สหภาพแรงงาน", 0.1),
)
)
company_suffixes = (
"และเพื่อน",
"และบุตร",
"แอนด์ซันส์",
"กรุ๊ป",
"การช่าง",
"ก่อสร้าง",
"บริการ",
"เซอร์วิส",
"กลการ",
"ซัพพลาย",
"คอมมิวนิเคชั่น",
"พืชผล",
"เอเยนซี",
"เอ็นจิเนียริ่ง",
"คอนสตรัคชั่น",
"วิศวกรรม",
"วิศวการ",
"คอมพิวเตอร์",
"พานิช",
"ขนส่ง",
"เฟอนิชชิ่ง",
"เฟอร์นิเจอร์",
"อุตสาหกรรม",
"เอนเตอรไพรส์",
| "จิวเวลรี่",
"อะไหล่ยนต์",
"ภาพยนตร์",
"ยานยนต์",
"เทรดดิ้ง",
"การค้า",
"แลบ",
"เคมิคอล",
"อิมปอร์ตเอ็กซปอร์ต",
"อินเตอร์เนชั่นแนล",
"บรรจุภัณฑ์",
"แพคกิ้ง",
| "มอเตอร์",
"โอสถ",
"การบัญชี",
"สโตร์",
)
company_limited_prefixes = OrderedDict(
(
("บริษัท ", 0.95),
("ธนาคาร", 0.03),
("บริษัทหลักทรัพย์ ", 0.005),
("กองทุนรวม", 0.005),
)
)
company_limited_suffixes = OrderedDict(
(
("จำกัด", 0.85),
("จำกัด (มหาชน)", 0.15),
)
)
def company_prefix(self) -> str:
"""
:example: 'ห้างหุ้นส่วนจำกัด'
"""
return self.random_element(self.company_prefixes)
def company_limited_prefix(self) -> str:
"""
:example: 'บริษัท'
"""
return self.random_element(self.company_limited_prefixes)
def company_limited_suffix(self) -> str:
"""
:example: 'จำกัด'
"""
return self.random_element(self.company_limited_suffixes)
def nonprofit_prefix(self) -> str:
"""
:example: 'มูลนิธิ'
"""
return self.random_element(self.nonprofit_prefixes)
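# Usage sketch, assuming this module is registered as faker's th_TH company
# provider (requires the faker package; outputs are random):
#
#     from faker import Faker
#     fake = Faker('th_TH')
#     fake.company()           # e.g. 'บริษัท ใจดี จำกัด'
#     fake.nonprofit_prefix()  # e.g. 'มูลนิธิ'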
"""Support for raspihats board binary sensors."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE_CLASS,
CONF_NAME,
DEVICE_DEFAULT_NAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import (
CONF_BOARD,
CONF_CHANNELS,
CONF_I2C_HATS,
CONF_INDEX,
CONF_INVERT_LOGIC,
DOMAIN,
I2C_HAT_NAMES,
I2C_HATS_MANAGER,
I2CHatsException,
I2CHatsManager,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_INVERT_LOGIC = False
DEFAULT_DEVICE_CLASS = None
_CHANNELS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_INDEX): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS): cv.string,
}
]
)
_I2C_HATS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_BOARD): vol.In(I2C_HAT_NAMES),
vol.Required(CONF_ADDRESS): vol.Coerce(int),
vol.Required(CONF_CHANNELS): _CHANNELS_SCHEMA,
}
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_I2C_HATS): _I2C_HATS_SCHEMA}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the raspihats binary_sensor devices."""
I2CHatBinarySensor.I2C_HATS_MANAGER = hass.data[DOMAIN][I2C_HATS_MANAGER]
binary_sensors = []
i2c_hat_configs = config.get(CONF_I2C_HATS, [])
for i2c_hat_config in i2c_hat_configs:
address = i2c_hat_config[CONF_ADDRESS]
board = i2c_hat_config[CONF_BOARD]
try:
assert I2CHatBinarySensor.I2C_HATS_MANAGER
I2CHatBinarySensor.I2C_HATS_MANAGER.register_board(board, address)
for channel_config in i2c_hat_config[CONF_CHANNELS]:
binary_sensors.append(
I2CHatBinarySensor(
address,
channel_config[CONF_INDEX],
channel_config[CONF_NAME],
channel_config[CONF_INVERT_LOGIC],
channel_config[CONF_DEVICE_CLASS],
)
)
except I2CHatsException as ex:
_LOGGER.error(
"Failed to register %s I2CHat@%s %s", board, hex(address), str(ex)
)
add_entities(binary_sensors)
class I2CHatBinarySensor(BinarySensorEntity):
"""Representation of a binary sensor that uses a I2C-HAT digital input."""
I2C_HATS_MANAGER: I2CHatsManager | None = None
def __init__(self, address, channel, name, invert_logic, device_class):
"""Initialize the raspihats sensor."""
self._address = address
self._channel = channel
self._name = name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
self._device_class = device_class
self._state = self.I2C_HATS_MANAGER.read_di(self._address, self._channel)
def online_callback():
"""Call fired when board is online."""
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_online_callback(
self._address, self._channel, online_callback
)
def edge_callback(state):
"""Read digital input state."""
self._state = state
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_di_callback(
self._address, self._channel, edge_callback
)
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def name(self):
"""Return the name of this sensor."""
return self._name
@property
def should_poll(self):
"""No polling needed for this sensor."""
return False
@property
def is_on(self):
"""Return the state of this sensor."""
return self._state != self._invert_logic
__author__ = 'alberto'
import time
from functools import wraps
from config import logger
def measure_time(func):
"""
    Decorator that reports the execution time.
"""
@wraps(func)
def wrapper(*args, **kwargs):
logger.info("Running %s", func.__name__)
start = time.time()
result = func(*args, **kwargs)
end = time.time()
logger.info("Execution time: %s", end - start)
return result
    return wrapper
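if __name__ == '__main__':
    # Minimal usage sketch of the decorator above; the decorated function is
    # hypothetical, and the timing is emitted through the configured logger.
    @measure_time
    def busy_wait(n):
        return sum(range(n))
    busy_wait(10 ** 6)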
import os
import sys
import time
import pickle
import threading
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
# required to reserve changed attributes
from pandaserver.taskbuffer import JobSpec
from pandaserver.taskbuffer import FileSpec
JobSpec.reserveChangedState = True
FileSpec.reserveChangedState = True
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
_logger = PandaLogger().getLogger('TaskBufferInterface')
# method class
class TaskBufferMethod:
def __init__(self,methodName,commDict,childlock,comLock,resLock):
self.methodName = methodName
self.childlock = childlock
self.commDict = commDict
self.comLock = comLock
self.resLock = resLock
def __call__(self,*args,**kwargs):
log = LogWrapper(_logger, 'pid={} thr={} {}'.format(os.getpid(),
threading.current_thread().ident,
self.methodName))
log.debug('start')
# get lock among children
i = self.childlock.get()
        # make a dict to send to the master
self.commDict[i].update({'methodName': self.methodName,
'args': pickle.dumps(args),
                                 'kwargs': pickle.dumps(kwargs)})
# send notification to master
self.comLock[i].release()
# wait response
self.resLock[i].acquire()
res = pickle.loads(self.commDict[i]['res'])
statusCode = self.commDict[i]['stat']
        # release lock to children
self.childlock.put(i)
log.debug('end')
# return
if statusCode == 0:
return res
else:
errtype,errvalue = res
raise RuntimeError("{0}: {1} {2}".format(self.methodName,errtype.__name__,errvalue))
# child class
class TaskBufferInterfaceChild:
# constructor
def __init__(self,commDict,childlock,comLock,resLock):
self.childlock = childlock
self.commDict = commDict
self.comLock = comLock
self.resLock = resLock
# method emulation
def __getattr__(self,attrName):
return TaskBufferMethod(attrName,self.commDict,self.childlock,
self.comLock,self.resLock)
# master class
class TaskBufferInterface:
# constructor
def __init__(self):
# make manager to create shared objects
self.manager = multiprocessing.Manager()
# main loop
def run(self, taskBuffer, commDict, comLock, resLock, to_stop):
with ThreadPoolExecutor(max_workers=taskBuffer.get_num_connections()) as pool:
[pool.submit(self.thread_run, taskBuffer, commDict[i], comLock[i], resLock[i], to_stop) for i in commDict.keys()]
# main loop
def thread_run(self, taskBuffer, commDict, comLock, resLock, to_stop):
# main loop
while True:
# stop sign
if to_stop.value:
break
# wait for command
if not comLock.acquire(timeout=0.25):
continue
try:
# get command from child
methodName = commDict['methodName']
args = pickle.loads(commDict['args'])
kwargs = pickle.loads(commDict['kwargs'])
# execute
method = getattr(taskBuffer,methodName)
res = method(*args, **kwargs)
commDict['stat'] = 0
# set response
commDict['res'] = pickle.dumps(res)
except Exception:
res = sys.exc_info()[:2]
commDict['stat'] = 1
commDict['res'] = pickle.dumps(res)
# send response
resLock.release()
# launcher
def launch(self, taskBuffer):
# shared objects
self.childlock = multiprocessing.Queue()
self.commDict = dict()
self.comLock = dict()
self.resLock = dict()
self.to_stop = multiprocessing.Value('i', 0)
for i in range(taskBuffer.get_num_connections()):
self.childlock.put(i)
self.commDict[i] = self.manager.dict()
self.comLock[i] = multiprocessing.Semaphore(0)
self.resLock[i] = multiprocessing.Semaphore(0)
# run
self.process = multiprocessing.Process(target=self.run,
args=(taskBuffer,
self.commDict, self.comLock,
self.resLock, self.to_stop))
self.process.start()
# get interface for child
def getInterface(self):
return TaskBufferInterfaceChild(self.commDict, self.childlock, self.comLock, self.resLock)
# stop the loop
def stop(self):
with self.to_stop.get_lock():
self.to_stop.value = 1
while self.process.is_alive():
time.sleep(1)
# kill
def terminate(self):
self.process.terminate()
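# Lifecycle sketch (taskBuffer is assumed to expose get_num_connections()
# plus the methods proxied through TaskBufferMethod):
#
#     interface = TaskBufferInterface()
#     interface.launch(taskBuffer)      # start the master process
#     child = interface.getInterface()  # hand to a worker (sub)process
#     child.querySQL('SELECT 1')        # hypothetical proxied method call
#     interface.stop()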
# coding=utf-8
# This file is part of SickRage.
#
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from datetime import datetime
from feedparser.util import FeedParserDict
from hachoir_parser import createParser
import sickbeard
from sickbeard import logger
from sickbeard.classes import Proper, TorrentSearchResult
from sickbeard.common import Quality
from sickbeard.db import DBConnection
from sickrage.helper.common import try_int
from sickrage.helper.exceptions import ex
from sickrage.providers.GenericProvider import GenericProvider
from sickrage.show.Show import Show
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.ratio = None
self.provider_type = GenericProvider.TORRENT
def find_propers(self, search_date=None):
results = []
db = DBConnection()
placeholder = ','.join([str(x) for x in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_BEST])
sql_results = db.select(
'SELECT s.show_name, e.showid, e.season, e.episode, e.status, e.airdate'
' FROM tv_episodes AS e'
' INNER JOIN tv_shows AS s ON (e.showid = s.indexer_id)'
' WHERE e.airdate >= ' + str(search_date.toordinal()) +
' AND e.status IN (' + placeholder + ') and e.is_proper = 0'
)
for result in sql_results or []:
show = Show.find(sickbeard.showList, int(result[b'showid']))
if show:
episode = show.getEpisode(result[b'season'], result[b'episode'])
for term in self.proper_strings:
search_strings = self._get_episode_search_strings(episode, add_string=term)
for item in self.search(search_strings[0]):
title, url = self._get_title_and_url(item)
results.append(Proper(title, url, datetime.today(), show))
return results
def is_active(self):
return bool(sickbeard.USE_TORRENTS) and self.is_enabled()
@property
def _custom_trackers(self):
if not (sickbeard.TRACKERS_LIST and self.public):
return ''
return '&tr=' + '&tr='.join({x.strip() for x in sickbeard.TRACKERS_LIST.split(',') if x.strip()})
def _get_result(self, episodes):
return TorrentSearchResult(episodes)
def _get_size(self, item):
if isinstance(item, dict):
size = item.get('size', -1)
elif isinstance(item, (list, tuple)) and len(item) > 2:
size = item[2]
else:
size = -1
# Make sure we didn't select seeds/leechers by accident
if not size or size < 1024 * 1024:
size = -1
return try_int(size, -1)
def _get_storage_dir(self):
return sickbeard.TORRENT_DIR
def _get_title_and_url(self, item):
if isinstance(item, (dict, FeedParserDict)):
download_url = item.get('url', '')
title = item.get('title', '')
if not download_url:
download_url = item.get('link', '')
elif isinstance(item, (list, tuple)) and len(item) > 1:
download_url = item[1]
title = item[0]
else:
download_url = ''
title = ''
if title.endswith('DIAMOND'):
logger.log('Skipping DIAMOND release for mass fake releases.')
download_url = title = 'FAKERELEASE'
if download_url:
download_url = download_url.replace('&', '&')
if title:
title = title.replace(' ', '.')
return title, download_url
def _verify_download(self, file_name=None):
try:
parser = createParser(file_name)
if parser:
# pylint: disable=protected-access
# Access to a protected member of a client class
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except Exception:
pass
if mime_type == 'application/x-bittorrent':
return True
except Exception as e:
logger.log('Failed to validate torrent file: {0}'.format(ex(e)), logger.DEBUG)
logger.log('Result is not a valid torrent file', logger.DEBUG)
return False
def seed_ratio(self):
return self.ratio
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common interfaces and implementation."""
import abc
import collections
import six
def _fuss(tuplified_metadata):
return tuplified_metadata + (
(
'grpc.metadata_added_by_runtime',
'gRPC is allowed to add metadata in transmission and does so.',
),
)
FUSSED_EMPTY_METADATA = _fuss(())
def fuss_with_metadata(metadata):
if metadata is None:
return FUSSED_EMPTY_METADATA
else:
return _fuss(tuple(metadata))
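# For example, fussing a one-pair metadata tuple appends the runtime marker
# (values below are illustrative):
#
#     fuss_with_metadata((('key', 'value'),))
#     # (('key', 'value'),
#     #  ('grpc.metadata_added_by_runtime',
#     #   'gRPC is allowed to add metadata in transmission and does so.'))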
def rpc_names(service_descriptors):
rpc_names_to_descriptors = {}
for service_descriptor in service_descriptors:
for method_descriptor in service_descriptor.methods_by_name.values():
rpc_name = '/{}/{}'.format(
service_descriptor.full_name, method_descriptor.name)
rpc_names_to_descriptors[rpc_name] = method_descriptor
return rpc_names_to_descriptors
class ChannelRpcRead(
collections.namedtuple(
'ChannelRpcRead',
('response', 'trailing_metadata', 'code', 'details',))):
pass
class ChannelRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def initial_metadata(self):
raise NotImplementedError()
@abc.abstractmethod
def add_request(self, request):
raise NotImplementedError()
@abc.abstractmethod
def close_requests(self):
raise NotImplementedError()
@abc.abstractmethod
def take_response(self):
raise NotImplementedError()
@abc.abstractmethod
def cancel(self, code, details):
raise NotImplementedError()
@abc.abstractmethod
def termination(self):
raise NotImplementedError()
@abc.abstractmethod
def is_active(self):
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
raise NotImplementedError()
@abc.abstractmethod
def add_callback(self, callback):
raise NotImplementedError()
class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_rpc(
self, method_full_rpc_name, invocation_metadata, requests,
requests_closed, timeout):
raise NotImplementedError()
class ServerRpcRead(
collections.namedtuple('ServerRpcRead',
('request', 'requests_closed', 'terminated',))):
pass
REQUESTS_CLOSED = ServerRpcRead(None, True, False)
TERMINATED = ServerRpcRead(None, False, True)
class ServerRpcHandler(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def send_initial_metadata(self, initial_metadata):
raise NotImplementedError()
@abc.abstractmethod
def take_request(self):
raise NotImplementedError()
@abc.abstractmethod
def add_response(self, response):
raise NotImplementedError()
@abc.abstractmethod
def send_termination(self, trailing_metadata, code, details):
raise NotImplementedError()
@abc.abstractmethod
def add_termination_callback(self, callback):
raise NotImplementedError()
class Serverish(six.with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def invoke_unary_unary(
self, method_descriptor, handler, invocation_metadata, request,
deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_unary_stream(
self, method_descriptor, handler, invocation_metadata, request,
deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_unary(
self, method_descriptor, handler, invocation_metadata, deadline):
raise NotImplementedError()
@abc.abstractmethod
def invoke_stream_stream(
self, method_descriptor, handler, invocation_metadata, deadline):
raise NotImplementedError()
rs/src/main.rs")
PATH_LIBS_RS = join_path("data/sample-rs/src/libs.rs")
PATH_CODEACTION = join_path("data/sample-ts/src/codeAction.ts")
print(PATH_MAIN_RS)
def assertRetry(predicate, retry_max=100):
retry_delay = 0.1
retry_count = 0
while retry_count < retry_max:
if predicate():
return
else:
retry_count += 1
time.sleep(retry_delay)
assert predicate()
def getLanguageClientBuffers(nvim):
return [b for b in nvim.buffers if b.name.endswith("__LCNHover__")]
@pytest.fixture(scope="module")
def nvim() -> neovim.Nvim:
nvim = neovim.attach("socket", path=NVIM_LISTEN_ADDRESS)
time.sleep(1)
return nvim
@pytest.fixture(autouse=True)
def setup(nvim):
nvim.command("%bdelete!")
def test_textDocument_definition(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(10)
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_definition()
time.sleep(3)
assert nvim.current.window.cursor == [8, 3]
def test_textDocument_hover(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_hover()
time.sleep(1)
buf = getLanguageClientBuffers(nvim)[0]
expect = "fn greet() -> i32"
assert expect in "\n".join(buf)
def test_textDocument_rename(nvim):
nvim.command("edit! {}".format( | PATH_MAIN_RS))
time.sleep(1)
expect = [line.replace("greet", "hello") for line in nvim.current.buffer]
    nvim.funcs.cursor(3, 22)
nvim.funcs.LanguageClient_textDocument_rename({"newName": "hello"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_rename_multiple_oneline(nvim):
nvim.command("edit! {}".format(PATH_LIBS_RS))
time.sleep(1)
expect = [line.replace("a", "x") for line in nvim.current.buffer]
nvim.funcs.cursor(4, 13)
# TODO: Test case where new variable length is different.
nvim.funcs.LanguageClient_textDocument_rename({"newName": "x"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("bd!")
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_rename_multiple_files(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(17, 5)
expect = [line.replace("yo", "hello") for line in nvim.current.buffer]
nvim.funcs.LanguageClient_textDocument_rename({"newName": "hello"})
time.sleep(1)
assert nvim.current.buffer[:] == expect
nvim.command("bd!")
nvim.command("bd!")
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_textDocument_documentSymbol(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(1, 1)
nvim.funcs.LanguageClient_textDocument_documentSymbol()
time.sleep(1)
assert nvim.funcs.getloclist(0)
nvim.command("3lnext")
assert nvim.current.window.cursor != [1, 1]
def test_workspace_symbol(nvim):
nvim.command("edit! {}".format(PATH_LIBS_RS))
time.sleep(1)
nvim.funcs.cursor(1, 1)
nvim.funcs.LanguageClient_workspace_symbol()
time.sleep(1)
assert nvim.funcs.getloclist(0)
nvim.command("1lnext")
assert nvim.current.window.cursor == [8, 0]
def test_textDocument_references(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(8, 6)
nvim.funcs.LanguageClient_textDocument_references()
time.sleep(1)
expect = ["fn greet() -> i32 {", """println!("{}", greet());"""]
assert [location["text"]
for location in nvim.funcs.getloclist(0)] == expect
nvim.command("lnext")
assert nvim.current.window.cursor == [3, 19]
def test_textDocument_references_modified_buffer(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.funcs.cursor(8, 6)
nvim.input("iabc")
time.sleep(1)
nvim.funcs.LanguageClient_textDocument_references()
time.sleep(1)
assert nvim.current.window.cursor == [8, 3]
nvim.command("edit! {}".format(PATH_MAIN_RS))
def test_languageClient_registerServerCommands(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.command('let g:responses = []')
nvim.command("call LanguageClient_registerServerCommands("
"{'bash': ['bash']}, g:responses)")
time.sleep(1)
assert nvim.vars['responses'][0]['result'] is None
def test_languageClient_registerHandlers(nvim):
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
nvim.command('let g:responses = []')
nvim.command("call LanguageClient_registerHandlers("
"{'window/progress': 'HandleWindowProgress'}, g:responses)")
time.sleep(1)
assert nvim.vars['responses'][0]['result'] is None
# def test_languageClient_textDocument_codeAction(nvim):
# nvim.command("edit {}".format(PATH_CODEACTION))
# nvim.funcs.cursor(4, 14)
# assertRetry(lambda: len(nvim.funcs.getqflist()) == 1)
# nvim.funcs.LanguageClient_textDocument_codeAction()
# # Wait for fzf window showup.
# assertRetry(lambda:
# next((b for b in nvim.buffers
# if b.name.startswith('term://')), None) is not None)
# time.sleep(0.2)
# nvim.eval('feedkeys("\<CR>")')
# # Wait for fzf window dismiss.
# assertRetry(lambda:
# next((b for b in nvim.buffers
# if b.name.startswith('term://')), None) is None)
# assertRetry(lambda: len(nvim.funcs.getqflist()) == 0)
def _open_float_window(nvim):
nvim.funcs.cursor(3, 22)
pos = nvim.funcs.getpos('.')
nvim.funcs.LanguageClient_textDocument_hover()
time.sleep(1)
return pos
def test_textDocument_hover_float_window_closed_on_cursor_moved(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
buf = nvim.current.buffer
pos = _open_float_window(nvim)
float_buf = getLanguageClientBuffers(nvim)[0]
# Check if float window is open
float_winnr = nvim.funcs.bufwinnr(float_buf.number)
assert float_winnr > 0
# Check if cursor is not moved
assert buf.number == nvim.current.buffer.number
assert pos == nvim.funcs.getpos(".")
# Move cursor to left
nvim.funcs.cursor(13, 17)
# Check float window buffer was closed by CursorMoved
assert len(getLanguageClientBuffers(nvim)) == 0
def test_textDocument_hover_float_window_closed_on_entering_window(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
win_id = nvim.funcs.win_getid()
nvim.command("split")
try:
assert win_id != nvim.funcs.win_getid()
_open_float_window(nvim)
assert win_id != nvim.funcs.win_getid()
# Move to another window
nvim.funcs.win_gotoid(win_id)
assert win_id == nvim.funcs.win_getid()
# Check float window buffer was closed by BufEnter
assert len(getLanguageClientBuffers(nvim)) == 0
finally:
nvim.command("close!")
def test_textDocument_hover_float_window_closed_on_switching_to_buffer(nvim):
if not nvim.funcs.exists("*nvim_open_win"):
pytest.skip("Neovim 0.3.0 or earlier does not support floating window")
# Create a new buffer
nvim.command("enew!")
another_bufnr = nvim.current.buffer.number
try:
nvim.command("edit! {}".format(PATH_MAIN_RS))
time.sleep(1)
source_bufnr = nvim.current.buffer.number
_open_float_window(nvim)
float_buf = getLanguageClientBuffers(nvim)[0]
float_winnr = nvim.funcs.bufwinnr(float_buf.number)
assert float_winnr > 0
assert nvim.current.buffer.number == source_bufnr
# Move to another buffer within the same window
nvim.command("buffer {}".format(another_bufnr))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('applicants', '0010_auto_20151126_0525'),
]
operations = [
migrations.AddField(
model_name='applicant',
            name='number_of_missed_calls',
field=models.IntegerField(default=0),
),
]
"""
Author: Ali Hajimirza (ali@alihm.net)
Copyright Ali Hajimirza, free for use under MIT license.
"""
import csv
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from algorithm import EM
import argparse
def line_plot(data_arrays, xlabel, ylabel, labels, title, f):
"""
Plots a scatter chart.
Parameters
----------
data_arrays: 2d numpy array
Data to be plotted. This array consists of matrices of real values to be plotted.
Each row of this matrix will be plotted as a line on the graph.
xlabel: list of string
The list of categories on for the x axis labels. The length of this list should be equal to the
columns of the data_arrays.
yl | abel: string
The label on the y axis.
labels: list of string |
The labels for each category.
title: string
The title of the graph. Will be used as the name of the graph file.
dest: string, optional
Path to the directory to save the image
Returns
-------
None:
Saves the plot to the disk.
"""
plt.suptitle(title, fontsize=14)
plots = []
for data in data_arrays:
plot, = plt.plot(data)
plots.append(plot)
plt.legend(plots, labels, loc=2)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(f, format="png")
plt.clf()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Divides data into categories using the E-M algorithm')
parser.add_argument('data', type=argparse.FileType("rb"), help='CSV file of data input')
args = parser.parse_args()
# reading the file
with args.data as csvfile:
reader = csv.reader(csvfile)
input_list = np.array(map(lambda line: np.array(map(lambda i: float(i), line)), reader))
x_list = input_list[:,0]
e_matrix = input_list[:,1:]
mean_matrix = EM.simulate_E_M(x_list, e_matrix, 100)
        line_plot(mean_matrix, 'step', 'mean', ['Distribution 1', 'Distribution 2', 'Distribution 3'], 'E-M Learning', 'sample_result.png')
#!/usr/bin/env python
# Copyright 2019 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# [START storage_remove_bucket_default_owner]
from google.cloud import storage
def remove_bucket_default_owner(bucket_name, user_email):
"""Removes a user from the access control list of the given bucket's
default object access control list."""
# bucket_name = "your-bucket-name"
# user_email = "name@example.com"
storage_client = storage.Client()
    bucket = storage_client.bucket(bucket_name)
# Reload fetches the current ACL from Cloud Storage.
bucket.acl.reload()
# You can also use `group`, `domain`, `all_authenticated` and `all` to
# remove access for different types of entities.
bucket.default_object_acl.user(user_email).revoke_read()
bucket.default_object_acl.user(user_email).revoke_write()
bucket.default_object_acl.user(user_email).revoke_owner()
bucket.default_object_acl.save()
    print(
"Removed user {} from the default acl of bucket {}.".format(
user_email, bucket_name
)
)
# [END storage_remove_bucket_default_owner]
if __name__ == "__main__":
remove_bucket_default_owner(
bucket_name=sys.argv[1], user_email=sys.argv[2]
)
#!/usr/bin/env python
import os.path
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
description = 'Google Spreadsheets Python API'
long_description = """
{index}
License
-------
MIT
Download
========
"""
long_description = long_description.lstrip("\n").format(index=read('docs/index.txt'))
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
read('gspread/__init__.py'), re.MULTILINE).group(1)
setup(
name='gspread',
packages=['gspread'],
description=description,
long_description=long_description,
version=version,
author='Anton Burnashev',
author_email='fuss.here@gmail.com',
url='https://github.com/burnash/gspread',
keywords=['spreadsheets', 'google-spreadsheets'],
install_requires=['requests>=2.2.1'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Science/Research",
"Topic :: Office/Business :: Financial :: Spreadsheet",
| "Topic :: Software Develop | ment :: Libraries :: Python Modules"
],
license='MIT'
)
# Copyright 2014 Plexxi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class dataObject(object):
def __init__(self, data=None, version=0):
if data is None:
self.data = {}
else:
self.data = data
if version:
self.version = version
else:
self.version = int(bool(data))
def __str__(self):
return str(self.data)
class subData(object):
"""A piece of data that a data service is subscribed to.
Each data service in the cage can have its own instance of
this data; keep track of who published which instance.
"""
def __init__(self, key, dataindex, corrId, callback):
self.key = key
self.dataindex = dataindex
self.corrId = corrId
self.callback = callback
self.dataObjects = {}
# LOG.info(
# "*****New subdata: %s, %s, %s",
# key, dataindex, id(self.dataObjects))
def getSources(self):
return self.dataObjects.keys()
def update(self, sender, newdata):
self.dataObjects[sender] = newdata
def version(self, sender):
version = 0
if sender in self.dataObjects:
version = self.dataObjects[sender].version
return version
def getData(self, sender):
result = dataObject()
if sender in self.dataObjects:
LOG.info("subdata object: %s", self.dataObjects[sender])
result = self.dataObjects[sender]
return result
def getAllData(self):
result = {}
for sender in self.dataObjects:
result[sender] = self.dataObjects[sender]
return result
class pubData(object):
"""A piece of data that a data service is publishing.
Keep track of those data services that are subscribed.
"""
    def __init__(self, dataindex, args=None):
self.dataindex = dataindex
self.dataObject = dataObject()
self.subscribers = {}
self.requesters = {}
        self.args = args if args is not None else {}
def update(self, newdata):
version = self.dataObject.version + 1
self.dataObject = dataObject(newdata, version)
def get(self):
        return self.dataObject
def version(self):
return self.dataObject.version
def addsubscriber(self, sender, type, corrId):
if sender not in self.subscribers:
self.subscribers[sender] = {}
self.subscribers[sender]['type'] = type
self.subscribers[sender]['correlationId'] = corrId
def removesubscriber(self, sender):
        if sender in self.subscribers:
del self.subscribers[sender]
def getsubscribers(self, sender=""):
if sender:
if sender in self.subscribers:
return self.subscribers[sender]
else:
return []
else:
return self.subscribers
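if __name__ == '__main__':
    # Minimal sketch of the version bookkeeping above; the sender id and
    # dataindex values are hypothetical.
    pub = pubData('services')
    pub.update({'rows': [1, 2, 3]})
    assert pub.version() == 1
    sub = subData('key', 'services', 'corr-1', None)
    sub.update('publisher-1', pub.get())
    assert sub.version('publisher-1') == 1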
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '07e15fa469ba384c7693fd246905547c',
'info_dict': {
'id': '2189178',
'ext': 'flv',
'title': 'Zeichentrick 1',
'age_limit': 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
embed_page_url = self._search_regex(
r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
webpage, 'embed page')
webpage = self._download_webpage(
embed_page_url, video_id, note='downloading embed page')
# Get the video URL
m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
if m_playlist is not None:
playlist_url = m_playlist.group('playlist')
            playlist_page = self._download_webpage(playlist_url, video_id,
'Downloading playlist page')
m_levels = list(re.finditer(r'<level bitrate="(\d+?)" file="(.*?)"', playlist_page))
if len(m_levels) == 0:
raise ExtractorError('Unable to extract video url')
            videos = [(int(m.group(1)), m.group(2)) for m in m_levels]
(_, video_url) = sorted(videos)[0]
video_url = video_url.replace('%252F', '%2F')
else:
video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'player_url': embed_page_url,
'age_limit': age_limit,
}
# -*- coding: utf8 -*-
#
# Copyright (C) 2014 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import test_purchase_group_by_period
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ibtokin.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's i | nstalled and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
    execute_from_command_line(sys.argv)
# coding: latin1
## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.misc.tsttools import TestCase, main
from lino.apps.contacts.contacts_demo import startup
from lino.apps.contacts.contacts_tables import *
from lino.adamo.filters import NotEmpty
#from lino.apps.addrbook import demo
#from lino.apps.addrbook.tables import Partner
class Case(TestCase):
def test01(self):
db = startup()
s1 = ''
q = db.query(Contact,\
"name street city.name",
orderBy="name")
q.addColFilter('city',NotEmpty)
## for row in q:
        ## #print row[0]
## s1 += str(row[0]) + " "
## s1 += str(row[1]) + " "
## s1 += str(row[2]) + "\n"
## #print s1
## self.assertEqual(s1,"""\
## Arens None Eupen
## Ausdemwald None Aachen
## Bodard None Verviers
## Eesti Telefon Sõpruse pst. Tallinn
## Eierschal None Eupen
## Eierschal None Eupen
## Freitag None Eupen
## Girf OÜ Laki Tallinn
## Großmann None Eupen
## PAC Systems PGmbH Hütte Eupen
## Rumma & Ko OÜ Tartu mnt. Tallinn
## Saffre None Tallinn
## """)
s2 = ''
for row in q:
s2 += unicode(row.name) + " "
if row.street is not None:
s2 += unicode(row.street) + " "
s2 += unicode(row.city.name) + "\n"
#print s2
self.assertEquivalent(s2,u"""\
Andreas Arens Eupen
Anton Ausdemwald Aachen
Emil Eierschal Eupen
Erna Eierschal Eupen
Frédéric Freitag Eupen
Gerd Großmann Eupen
Hans Flott Bierstraße München
Henri Bodard Verviers
Kati Kask Tallinn
Kurtz & Büntig Bergstraße Eupen
Mets & puu OÜ Tartu mnt. Tallinn
Reisebüro Freitag Hütte Eupen
Tõnu Tamm Tallinn
""")
# some other cases (for example 80.py) would fail if run
# together with this case in one suite and if the following
# lines were not:
db.shutdown()
if __name__ == '__main__':
main()
try:
    from io import BytesIO
except ImportError:
    # Python 2
    from StringIO import StringIO as BytesIO
from string import Template
try:
import urllib.request as urllib2
except ImportError:
# Python 2
import urllib2
import urllib
try:
from PIL import Image
except ImportError:
# On some systems, PIL.Image is known as Image.
import Image
import ModestMaps
from ModestMaps.Core import Point, Coordinate
from . import Geography
# This import should happen inside getProviderByName(), but when testing
# on Mac OS X features are missing from output. Weird-ass C libraries...
try:
from . import Vector
except ImportError:
pass
# Already deprecated; provided for temporary backward-compatibility with
# old location of Mapnik provider. TODO: remove in next major version.
try:
from .Mapnik import ImageProvider as Mapnik
except ImportError:
pass
def getProviderByName(name):
""" Retrieve a provider object by name.
Raise an exception if the name doesn't work out.
"""
if name.lower() == 'mapnik':
from . import Mapnik
return Mapnik.ImageProvider
elif name.lower() == 'proxy':
return Proxy
elif name.lower() == 'url template':
return UrlTemplate
elif name.lower() == 'vector':
from . import Vector
return Vector.Provider
elif name.lower() == 'mbtiles':
from . import MBTiles
return MBTiles.Provider
elif name.lower() == 'mapnik grid':
from . import Mapnik
return Mapnik.GridProvider
elif name.lower() == 'sandwich':
from . import Sandwich
return Sandwich.Provider
raise Exception('Unknown provider name: "%s"' % name)
class Verbatim:
''' Wrapper for PIL.Image that saves raw input bytes if modes and formats match.
'''
def __init__(self, bytes):
self.buffer = BytesIO(bytes)
self.format = None
self._image = None
#
# Guess image format based on magic number, if possible.
# http://www.astro.keele.ac.uk/oldusers/rno/Computing/File_magic.html
#
magic = {
'\x89\x50\x4e\x47': 'PNG',
'\xff\xd8\xff\xe0': 'JPEG',
        '\x47\x49\x46\x38': 'GIF',
'\x4d\x4d\x00\x2a': 'TIFF',
'\x49\x49\x2a\x00': 'TIFF'
}
if bytes[:4] in magic:
self.format = magic[bytes[:4]]
else:
self.format = self.image().format
def image(self):
''' Return a guaranteed instance of PIL.Image.
'''
if self._image is None:
self._image = Image.open(self.buffer)
return self._image
def convert(self, mode):
if mode == self.image().mode:
return self
else:
return self.image().convert(mode)
def crop(self, bbox):
return self.image().crop(bbox)
def save(self, output, format):
if format == self.format:
            output.write(self.buffer.getvalue())
else:
self.image().save(output, format)
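# Usage sketch: Verbatim hands back the original bytes when the requested
# save format matches the detected one, skipping a PIL decode/re-encode
# round-trip (png_bytes and output are illustrative):
#
#     tile = Verbatim(png_bytes)
#     tile.save(output, 'PNG')   # raw bytes written unchanged
#     tile.save(output, 'JPEG')  # falls back to PIL re-encoding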
class Proxy:
" | "" Proxy provider, to pass through and cache tiles from other places.
This provider is identified by the name "proxy" in the TileStache config.
Additional arguments:
- url (optional)
URL template for remote tiles, for example:
"http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
- provider (optional)
Provider name string from Modest Maps built-ins.
See ModestMaps.builtinProviders.keys() for a list.
Example: "OPENSTREETMAP".
- timeout (optional)
Defines a timeout in seconds for the request.
If not defined, the global default timeout setting will be used.
Either url or provider is required. When both are present, url wins.
Example configuration:
{
"name": "proxy",
"url": "http://tile.openstreetmap.org/{Z}/{X}/{Y}.png"
}
"""
def __init__(self, layer, url=None, provider_name=None, timeout=None):
""" Initialize Proxy provider with layer and url.
"""
if url:
self.provider = ModestMaps.Providers.TemplatedMercatorProvider(url)
elif provider_name:
if provider_name in ModestMaps.builtinProviders:
self.provider = ModestMaps.builtinProviders[provider_name]()
else:
                raise Exception('Unknown Modest Maps provider: "%s"' % provider_name)
else:
raise Exception('Missing required url or provider parameter to Proxy provider')
self.timeout = timeout
@staticmethod
def prepareKeywordArgs(config_dict):
""" Convert configured parameters to keyword args for __init__().
"""
kwargs = dict()
if 'url' in config_dict:
kwargs['url'] = config_dict['url']
if 'provider' in config_dict:
kwargs['provider_name'] = config_dict['provider']
if 'timeout' in config_dict:
kwargs['timeout'] = config_dict['timeout']
return kwargs
def renderTile(self, width, height, srs, coord):
"""
"""
img = None
urls = self.provider.getTileUrls(coord)
# Tell urllib2 get proxies if set in the environment variables <protocol>_proxy
# see: https://docs.python.org/2/library/urllib2.html#urllib2.ProxyHandler
proxy_support = urllib2.ProxyHandler()
url_opener = urllib2.build_opener(proxy_support)
for url in urls:
body = url_opener.open(url, timeout=self.timeout).read()
tile = Verbatim(body)
if len(urls) == 1:
#
# if there is only one URL, don't bother
# with PIL's non-Porter-Duff alpha channeling.
#
return tile
elif img is None:
#
# for many URLs, paste them to a new image.
#
img = Image.new('RGBA', (width, height))
img.paste(tile, (0, 0), tile)
return img
class UrlTemplate:
""" Built-in URL Template provider. Proxies map images from WMS servers.
This provider is identified by the name "url template" in the TileStache config.
Additional arguments:
- template (required)
String with substitutions suitable for use in string.Template.
- referer (optional)
String to use in the "Referer" header when making HTTP requests.
- source projection (optional)
Projection to transform coordinates into before making request
- timeout (optional)
Defines a timeout in seconds for the request.
If not defined, the global default timeout setting will be used.
More on string substitutions:
- http://docs.python.org/library/string.html#template-strings
"""
def __init__(self, layer, template, referer=None, source_projection=None,
timeout=None):
""" Initialize a UrlTemplate provider with layer and template string.
http://docs.python.org/library/string.html#template-strings
"""
self.layer = layer
self.template = Template(template)
self.referer = referer
self.source_projection = source_projection
self.timeout = timeout
@staticmethod
def prepareKeywordArgs(config_dict):
""" Convert configured parameters to keyword args for __init__().
"""
kwargs = {'template': config_dict['template']}
if 'referer' in config_dict:
kwargs['referer'] = config_dict['referer']
if 'source projection' in config_dict:
kwargs['source_projection'] = Geography.getProjectionByName(config_dict['source projection'])
if 'timeout' in config_dict:
kwargs['timeout'] = config_dict['timeout']
return kwargs
    def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom):
        """ Render an area by substituting coordinates into the URL template.
        """
=None,
rheader=self.rheader)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
@classmethod
def write(cls,
repository_id=None,
resource_name=None,
transmission=None,
mode=None,
action=None,
result=None,
remote=False,
message=None):
"""
Writes a new entry to the log
@param repository_id: the repository record ID
@param resource_name: the resource name
@param transmission: transmission mode (IN, OUT or None)
@param mode: synchronization mode (PULL, PUSH or None)
@param action: action that triggers the log entry (if any)
@param result: the result of the transaction
(SUCCESS, WARNING, ERROR or FATAL)
@param remote: boolean, True if this is a remote error
@param message: clear text message
"""
if result not in (cls.SUCCESS, cls.WARNING, cls.ERROR, cls.FATAL):
result = cls.SUCCESS
if result == cls.SUCCESS:
# Can't be a remote error if it's not an error at all
remote = False
if transmission not in (cls.IN, cls.OUT):
transmission = cls.NONE
if mode not in (cls.PULL, cls.PUSH, cls.LOGIN, cls.REGISTER):
mode = cls.NONE
if not action:
action = cls.NONE
entry = {"timestmp": datetime.datetime.utcnow(),
"repository_id": repository_id,
"resource_name": resource_name,
"mode": "%s/%s" % (mode, transmission),
"action": action,
"result": result,
"remote": remote,
"message": message,
}
current.s3db[cls.TABLENAME].insert(**entry)
# -------------------------------------------------------------------------
@staticmethod
def rheader(r, **attr):
""" S3SyncLog resource header """
if r.id is None:
return DIV(current.T("Showing latest entries first"))
else:
return None
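    # Illustrative use of write() above (all values are examples only):
    #
    #     S3SyncLog.write(repository_id=repository.id,
    #                     resource_name="org_organisation",
    #                     transmission=S3SyncLog.IN,
    #                     mode=S3SyncLog.PULL,
    #                     result=S3SyncLog.SUCCESS,
    #                     message="Import completed")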
# =============================================================================
class S3SyncRepository(object):
    """ Class representing a peer repository """
def __init__(self, repository):
"""
Constructor
@param repository: the repository record (Row)
"""
# Logger and Config
self.log = S3SyncLog
self._config = None
# Identifier and name
self.id = repository.id
self.name = repository.name
# API type and import/export backend
self.apitype = repository.apitype
self.backend = repository.backend
# URL / Path
self.url = repository.url
self.path = repository.path
# Authentication
self.username = repository.username
self.password = repository.password
self.client_id = repository.client_id
self.client_secret = repository.client_secret
self.site_key = repository.site_key
self.refresh_token = repository.refresh_token
# Network
self.proxy = repository.proxy
# Processing Options
self.accept_push = repository.accept_push
self.synchronise_uuids = repository.synchronise_uuids
self.keep_source = repository.keep_source
# Instantiate Adapter
import sync_adapter
api = sync_adapter.__dict__.get(self.apitype)
if api:
adapter = api.S3SyncAdapter(self)
else:
adapter = S3SyncBaseAdapter(self)
self.adapter = adapter
# -------------------------------------------------------------------------
@property
def config(self):
""" Lazy access to the current sync config """
if self._config is None:
table = current.s3db.sync_config
row = current.db().select(table.ALL, limitby=(0, 1)).first()
                self._config = row
            return self._config
# -------------------------------------------------------------------------
def __getattr__(self, name):
"""
Delegate other attributes and methods to the adapter
@param name: the attribute/method
"""
return object.__getattribute__(self.adapter, name)
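    # Illustrative effect of the delegation above: given
    # repo = S3SyncRepository(row), a call such as repo.pull(task)
    # resolves to repo.adapter.pull(task).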
# =============================================================================
class S3SyncBaseAdapter(object):
"""
Sync Adapter (base class) - interface providing standard
synchronization methods for the respective repository type.
This class isn't meant to be instantiated or accessed directly,
but is normally accessed through the S3SyncRepository instance.
"""
def __init__(self, repository):
"""
Constructor
@param repository: the repository (S3Repository instance)
"""
self.repository = repository
self.log = repository.log
# -------------------------------------------------------------------------
# Methods to be implemented by subclasses:
# -------------------------------------------------------------------------
def register(self):
"""
Register this site at the peer repository
@return: True to indicate success, otherwise False
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def login(self):
"""
Login at the peer repository
@return: None if successful, otherwise the error
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def pull(self, task, onconflict=None):
"""
Fetch updates from the peer repository and import them
into the local database (active pull)
@param task: the synchronization task (sync_task Row)
@param onconflict: callback for automatic conflict resolution
@return: tuple (error, mtime), with error=None if successful,
else error=message, and mtime=modification timestamp
of the youngest record sent
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def push(self, task):
"""
Extract new updates from the local database and send
them to the peer repository (active push)
@param task: the synchronization task (sync_task Row)
@return: tuple (error, mtime), with error=None if successful,
else error=message, and mtime=modification timestamp
of the youngest record sent
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def send(self,
resource,
start=None,
limit=None,
msince=None,
filters=None,
mixed=False):
"""
Respond to an incoming pull from the peer repository
@param resource: the resource to be synchronized
@param start: index of the first record to send
@param limit: maximum number of records to send
@param msince: minimum modification date/time for records to send
@param filters: URL filters for record extraction
@param mixed: negotiate resource with peer (disregard resource)
@return: a dict {status, remote, message, response}, with:
- status....the outcome of the operation
- remote....whether the error was remote (or local)
- message...the log message
- response..the response to send to the peer
"""
raise NotImplementedError
    # -------------------------------------------------------------------------
"""
A test module which has a required module and a config
"""
TYPE = "Test"
NAME = "test_2"
REQUIRES = ["test_1"]
DEFAULTCONF = {'a': 1, 'b': 2}
def check(conf=DEFAULTCONF):
if None in REQUIRES:
return False
return True
def scan(filelist, conf=DEFAULTCONF):
results = []
result1, meta1 = REQUIRES[0]
result1 = dict(result1)
for fname in filelist:
if fname in result1:
results.append((fname, True))
else:
results.append((fname, fname))
metadata = {}
metadata["Name"] = NAME
metadata["Type"] = TYPE
metadata["Include"] = True
    return results, metadata
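# Note (based on MultiScanner's module convention, stated here as an
# assumption): the framework replaces each entry of REQUIRES with the
# (results, metadata) tuple produced by that module before calling scan(),
# which is why "result1, meta1 = REQUIRES[0]" unpacks cleanly above, and why
# check() tests for None entries (unmet requirements).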
|
),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Python, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import copy
import os
import re
import fileinput
import sys
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
EXTS_FILTER_PYTHON_PACKAGES = ('python -c "import %(ext_name)s"', "")
class EB_Python(ConfigureMake):
"""Support for building/installing Python
- default configure/build_step/make install works fine
To extend Python by adding extra packages there are two ways:
- list the packages in the exts_list, this will include the packages in this Python installation
    - create a separate easyblock, so the packages can be loaded with module load
e.g., you can include numpy and scipy in a default Python installation
but also provide newer updated numpy and scipy versions by creating a PythonPackage-derived easyblock for it.
"""
def prepare_for_extensions(self):
"""
Set default class and filter for Python packages
"""
# build and install additional packages with PythonPackage easyblock
self.cfg['exts_defaultclass'] = "PythonPackage"
self.cfg['exts_filter'] = EXTS_FILTER_PYTHON_PACKAGES
# don't pass down any build/install options that may have been specified
# 'make' options do not make sense for when building/installing Python libraries (usually via 'python setup.py')
msg = "Unsetting '%s' easyconfig parameter before building/installing extensions: %s"
for param in ['buildopts', 'installopts']:
if self.cfg[param]:
self.log.debug(msg, param, self.cfg[param])
self.cfg[param] = ''
def configure_step(self):
"""Set extra configure options."""
self.cfg.update('configopts', "--with-threads --enable-shared")
# Need to be careful to match the unicode settings to the underlying python
if sys.maxunicode == 1114111:
self.cfg.update('configopts', "--enable-unicode=ucs4")
elif sys.maxunicode == 65535:
self.cfg.update('configopts', "--enable-unicode=ucs2")
else:
raise EasyBuildError("Unknown maxunicode value for your python: %d" % sys.maxunicode)
modules_setup_dist = os.path.join(self.cfg['start_dir'], 'Modules', 'Setup.dist')
libreadline = get_software_root('libreadline')
if libreadline:
ncurses = get_software_root('ncurses')
if ncurses:
readline_libdir = get_software_libdir('libreadline')
ncurses_libdir = get_software_libdir('ncurses')
readline_static_lib = os.path.join(libreadline, readline_libdir, 'libreadline.a')
ncurses_static_lib = os.path.join(ncurses, ncurses_libdir, 'libncurses.a')
readline = "readline readline.c %s %s" % (readline_static_lib, ncurses_static_lib)
for line in fileinput.input(modules_setup_dist, inplace='1', backup='.readline'):
line = re.sub(r"^#readline readline.c.*", readline, line)
sys.stdout.write(line)
else:
raise EasyBuildError("Both libreadline and ncurses are required to ensure readline support")
openssl = get_software_root('OpenSSL')
if openssl:
for line in fileinput.input(modules_setup_dist, inplace='1', backup='.ssl'):
line = re.sub(r"^#SSL=.*", "SSL=%s" % openssl, line)
line = re.sub(r"^#(\s*-DUSE_SSL -I)", r"\1", line)
line = re.sub(r"^#(\s*-L\$\(SSL\)/lib )", r"\1 -L$(SSL)/lib64 ", line)
sys.stdout.write(line)
tcl = get_software_root('Tcl')
tk = get_software_root('Tk')
if tcl and tk:
tclver = get_software_version('Tcl')
tkver = get_software_version('Tk')
tcltk_maj_min_ver = '.'.join(tclver.split('.')[:2])
if tcltk_maj_min_ver != '.'.join(tkver.split('.')[:2]):
raise EasyBuildError("Tcl and Tk major/minor versions don't match: %s vs %s", tclver, tkver)
self.cfg.update('configopts', "--with-tcltk-includes='-I%s/include -I%s/include'" % (tcl, tk))
            tcl_libdir = os.path.join(tcl, get_software_libdir('Tcl'))
tk_libdir = os.path.join(tk, get_software_libdir('Tk'))
tcltk_libs = "-L%(tcl_libdir)s -L%(tk_libdir)s -ltcl%(maj_min_ver)s -ltk%(maj_min_ver)s" % {
'tcl_libdir': tcl_libdir,
'tk_libdir': tk_libdir,
'maj_min_ver': tcltk_maj_min_ver,
}
            self.cfg.update('configopts', "--with-tcltk-libs='%s'" % tcltk_libs)
super(EB_Python, self).configure_step()
def install_step(self):
"""Extend make install to make sure that the 'python' command is present."""
super(EB_Python, self).install_step()
python_binary_path = os.path.join(self.installdir, 'bin', 'python')
if not os.path.isfile(python_binary_path):
pythonver = '.'.join(self.version.split('.')[0:2])
srcbin = "%s%s" % (python_binary_path, pythonver)
try:
os.symlink(srcbin, python_binary_path)
except OSError, err:
raise EasyBuildError("Failed to symlink %s to %s: %s", srcbin, python_binary_path, err)
def sanity_check_step(self):
"""Custom sanity check for Python."""
pyver = "python%s" % '.'.join(self.version.split('.')[0:2])
try:
fake_mod_data = self.load_fake_module()
except EasyBuildError, err:
raise EasyBuildError("Loading fake module failed: %s", err)
abiflags = ''
if LooseVersion(self.version) >= LooseVersion("3"):
run_cmd("which python", log_all=True, simple=False)
cmd = 'python -c "import sysconfig; print(sysconfig.get_config_var(\'abiflags\'));"'
(abiflags, _) = run_cmd(cmd, log_all=True, simple=False)
if not abiflags:
raise EasyBuildError("Failed to determine abiflags: %s", abiflags)
else:
abiflags = abiflags.strip()
custom_paths = {
'files': ["bin/%s" % pyver, "lib/lib%s%s.%s" % (pyver, abiflags, get_shared_lib_ext())],
'dirs': ["include/%s%s" % (pyver, abiflags), "lib/%s" % pyver],
}
# cleanup
self.clean_up_fake_module(fake_mod_data)
custom_commands = [
('python', '--version'),
('python', '-c "import _ctypes"'), # make sure that foreign function interface (libffi) works
('python', '-c "import _ssl"'), # make sure SSL support is enabled one way or another
            ('python', '-c "import readline"'),  # make sure readline support works
import hashlib
import os
import re
import time
import uuid
import subprocess
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.staticfiles.finders import find as find_static_path
from olympia.lib.jingo_minify_helpers import ensure_path_exists
def run_command(command):
"""Run a command and correctly poll the output and write that to stdout"""
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip())
return process.poll()
class Command(BaseCommand):
help = ('Compresses css and js assets defined in settings.MINIFY_BUNDLES')
# This command must not do any system checks because Django runs db-field
# related checks since 1.10 which require a working MySQL connection.
# We don't have that during our docker builds and since `compress_assets`
# is being used while building our docker images we have to disable them.
requires_system_checks = False
checked_hash = {}
bundle_hashes = {}
missing_files = 0
minify_skipped = 0
def add_arguments(self, parser):
"""Handle command arguments."""
        parser.add_argument(
            '--force', action='store_true',
            help='Ignores modified/created dates and forces compression.')
def generate_build_id(self):
return uuid.uuid4().hex[:8]
def update_hashes(self):
# Adds a time based hash on to the build id.
self.build_id = '%s-%s' % (
self.generate_build_id(), hex(int(time.time()))[2:])
build_id_file = os.path.realpath(
os.path.join(settings.ROOT, 'build.py'))
with open(build_id_file, 'w') as f:
f.write('BUILD_ID_CSS = "%s"\n' % self.build_id)
f.write('BUILD_ID_JS = "%s"\n' % self.build_id)
f.write('BUILD_ID_IMG = "%s"\n' % self.build_id)
f.write('BUNDLE_HASHES = %s\n' % self.bundle_hashes)
def handle(self, **options):
self.force_compress = options.get('force', False)
# This will loop through every bundle, and do the following:
# - Concat all files into one
# - Cache bust all images in CSS files
# - Minify the concatted files
for ftype, bundle in settings.MINIFY_BUNDLES.iteritems():
for name, files in bundle.iteritems():
# Set the paths to the files.
concatted_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-all.%s' % (name, ftype,))
compressed_file = os.path.join(
settings.ROOT, 'static',
ftype, '%s-min.%s' % (name, ftype,))
ensure_path_exists(concatted_file)
ensure_path_exists(compressed_file)
files_all = []
for fn in files:
processed = self._preprocess_file(fn)
# If the file can't be processed, we skip it.
if processed is not None:
files_all.append(processed)
# Concat all the files.
tmp_concatted = '%s.tmp' % concatted_file
if len(files_all) == 0:
raise CommandError(
'No input files specified in '
'MINIFY_BUNDLES["%s"]["%s"] in settings.py!' %
(ftype, name)
)
run_command('cat {files} > {tmp}'.format(
files=' '.join(files_all),
tmp=tmp_concatted
))
# Cache bust individual images in the CSS.
if ftype == 'css':
bundle_hash = self._cachebust(tmp_concatted, name)
self.bundle_hashes['%s:%s' % (ftype, name)] = bundle_hash
# Compresses the concatenations.
is_changed = self._is_changed(concatted_file)
self._clean_tmp(concatted_file)
if is_changed or not os.path.isfile(compressed_file):
self._minify(ftype, concatted_file, compressed_file)
else:
print(
'File unchanged, skipping minification of %s' % (
concatted_file))
self.minify_skipped += 1
# Write out the hashes
self.update_hashes()
if self.minify_skipped:
print(
'Unchanged files skipped for minification: %s' % (
self.minify_skipped))
def _preprocess_file(self, filename):
"""Preprocess files and return new filenames."""
css_bin = filename.endswith('.less') and settings.LESS_BIN
source = find_static_path(filename)
target = source
if css_bin:
target = '%s.css' % source
run_command('{lessc} {source} {target}'.format(
lessc=css_bin,
source=str(source),
target=str(target)))
return target
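    # Illustrative: a bundle entry like 'css/foo.less' is compiled with the
    # configured LESS_BIN into 'css/foo.less.css' next to the source file;
    # any other file is returned unchanged.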
def _is_changed(self, concatted_file):
"""Check if the file has been changed."""
if self.force_compress:
return True
tmp_concatted = '%s.tmp' % concatted_file
file_exists = (
os.path.exists(concatted_file) and
os.path.getsize(concatted_file) == os.path.getsize(tmp_concatted))
if file_exists:
orig_hash = self._file_hash(concatted_file)
temp_hash = self._file_hash(tmp_concatted)
return orig_hash != temp_hash
return True # Different filesize, so it was definitely changed
def _clean_tmp(self, concatted_file):
"""Replace the old file with the temp file."""
tmp_concatted = '%s.tmp' % concatted_file
if os.path.exists(concatted_file):
os.remove(concatted_file)
os.rename(tmp_concatted, concatted_file)
def _cachebust(self, css_file, bundle_name):
"""Cache bust images. Return a new bundle hash."""
self.stdout.write(
'Cache busting images in %s\n' % re.sub('.tmp$', '', css_file))
if not os.path.exists(css_file):
return
css_content = ''
with open(css_file, 'r') as css_in:
css_content = css_in.read()
def _parse(url):
return self._cachebust_regex(url, css_file)
        css_parsed = re.sub(r'url\(([^)]*?)\)', _parse, css_content)
with open(css_file, 'w') as css_out:
css_out.write(css_parsed)
# Return bundle hash for cachebusting JS/CSS files.
file_hash = hashlib.md5(css_parsed).hexdigest()[0:7]
self.checked_hash[css_file] = file_hash
if self.missing_files:
self.stdout.write(
' - Error finding %s images\n' % (self.missing_files,))
self.missing_files = 0
return file_hash
    def _minify(self, ftype, file_in, file_out):
        """Run the proper minifier on the file."""
        opts = None
        if ftype == 'js' and hasattr(settings, 'UGLIFY_BIN'):
            opts = {'method': 'UglifyJS', 'bin': settings.UGLIFY_BIN}
            run_command('{uglify} -v -o {target} {source} -m'.format(
                uglify=opts['bin'],
                target=file_out,
                source=file_in))
        elif ftype == 'css' and hasattr(settings, 'CLEANCSS_BIN'):
            opts = {'method': 'clean-css', 'bin': settings.CLEANCSS_BIN}
            run_command('{cleancss} -o {target} {source}'.format(
                cleancss=opts['bin'],
                target=file_out,
                source=file_in))
        if opts:
            self.stdout.write(
                'Minifying %s (using %s)\n' % (file_in, opts['method']))
def _file_hash(self, url):
"""Open the file and get a hash of it."""
if url in self.checked_hash:
return self.checked_hash[url]
file_hash = ''
try:
with open(url) as f:
file_hash = hashlib.md5(f.read()).hexdigest()[0:7]
        except IOError:
            self.missing_files += 1
        self.checked_hash[url] = file_hash
        return file_hash
import re
class UnknowItem(Exception):
pass
KEYWORDS = ("and", "as", "assert", "break", "class", "continue", "def", "del", "elif", "else", "except", "exec", "finally", "for", "from", "global", "if", "import", "in", "is", "lambda", "not", "or", "pass", "print", "raise", "return", "try", "while", "with", "yield")
TOKENS = (
(r'[a-zA-Z_]\w*', 'NAME'),
(r'0', 'INT'),
(r'[-+]?\d+[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
    (r'[-+]?\d+\.\d?[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
    (r'[-+]?\d?\.\d+[eE][-+]?\d+[jJ]', 'FLOAT_EXPONANT_COMPLEX'),
(r'\d+[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\d+\.\d*[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\.\d+[eE][-+]?\d*', 'FLOAT_EXPONANT'),
(r'\d*\.\d+[jJ]', 'COMPLEX'),
(r'\d+\.[jJ]', 'COMPLEX'),
(r'\d+[jJ]', 'COMPLEX'),
(r'\d+\.', 'FLOAT'),
(r'\d*\.\d+[lL]?', 'FLOAT'),
(r'\d+\.\d*[lL]?', 'FLOAT'),
(r'\.', 'DOT'),
(r'[1-9]+\d*[lL]', 'LONG'),
(r'[1-9]+\d*', 'INT'),
(r'0[xX][\da-fA-F]+[lL]?', 'HEXA'),
(r'(0[oO][0-7]+)|(0[0-7]*)[lL]?', 'OCTA'),
(r'0[bB][01]+[lL]?', 'BINARY'),
(r'\(', 'LEFT_PARENTHESIS'),
(r'\)', 'RIGHT_PARENTHESIS'),
(r':', 'COLON'),
(r',', 'COMMA'),
(r';', 'SEMICOLON'),
(r'@', 'AT'),
(r'\+', 'PLUS'),
    (r'-', 'MINUS'),
(r'\*', 'STAR'),
(r'/', 'SLASH'),
(r'\|', 'VBAR'),
(r'&', 'AMPER'),
(r'<', 'LESS'),
(r'>', 'GREATER'),
(r'=', 'EQUAL'),
    (r'%', 'PERCENT'),
(r'\[', 'LEFT_SQUARE_BRACKET'),
(r'\]', 'RIGHT_SQUARE_BRACKET'),
(r'\{', 'LEFT_BRACKET'),
(r'\}', 'RIGHT_BRACKET'),
(r'`', 'BACKQUOTE'),
(r'==', 'EQUAL_EQUAL'),
(r'<>', 'NOT_EQUAL'),
(r'!=', 'NOT_EQUAL'),
(r'<=', 'LESS_EQUAL'),
(r'>=', 'GREATER_EQUAL'),
(r'~', 'TILDE'),
(r'\^', 'CIRCUMFLEX'),
(r'<<', 'LEFT_SHIFT'),
(r'>>', 'RIGHT_SHIFT'),
(r'\*\*', 'DOUBLE_STAR'),
(r'\+=', 'PLUS_EQUAL'),
(r'-=', 'MINUS_EQUAL'),
(r'\*=', 'STAR_EQUAL'),
(r'/=', 'SLASH_EQUAL'),
(r'%=', 'PERCENT_EQUAL'),
(r'&=', 'AMPER_EQUAL'),
(r'\|=', 'VBAR_EQUAL'),
(r'\^=', 'CIRCUMFLEX_EQUAL'),
(r'<<=', 'LEFT_SHIFT_EQUAL'),
(r'>>=', 'RIGHT_SHIFT_EQUAL'),
(r'\*\*=', 'DOUBLE_STAR_EQUAL'),
(r'//', 'DOUBLE_SLASH'),
(r'//=', 'DOUBLE_SLASH_EQUAL'),
(r'\n', 'ENDL'),
(r'\r\n', 'ENDL'),
(r'#.*', 'COMMENT'),
(r'(\s|\\\n|\\\r\n)+', 'SPACE'),
(r'["\'](.|\n|\r)*["\']', 'STRING'),
(r'[uU]["\'](.|\n|\r)*["\']', 'UNICODE_STRING'),
(r'[rR]["\'](.|\n|\r)*["\']', 'RAW_STRING'),
(r'[bB]["\'](.|\n|\r)*["\']', 'BINARY_STRING'),
(r'[uU][rR]["\'](.|\n|\r)*["\']', 'UNICODE_RAW_STRING'),
(r'[bB][rR]["\'](.|\n|\r)*["\']', 'BINARY_RAW_STRING'),
)
TOKENS = [(re.compile('^' + x[0] + '$'), x[1]) for x in TOKENS]
def tokenize(sequence, print_function=False):
return list(tokenize_generator(sequence, print_function))
def tokenize_current_keywords(print_function=False):
if print_function is True:
return [x for x in KEYWORDS if x != "print"]
else:
return KEYWORDS
def tokenize_generator(sequence, print_function=False):
    current_keywords = tokenize_current_keywords(print_function)
for item in sequence:
if item in current_keywords:
yield (item.upper(), item)
continue
for candidate, token_name in TOKENS:
if candidate.match(item):
yield (token_name, item)
break
else:
raise UnknowItem("Can't find a matching token for this item: '%s'" % item)
yield ('ENDMARKER', '')
yield
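# Illustrative run (note that tokenize() expects input already split into
# atoms by an upstream splitter; the trailing bare "yield" above appends a
# final None to the list):
#
#     tokenize(["if", " ", "x", "==", "1", ":", "\n"])
#     # -> [('IF', 'if'), ('SPACE', ' '), ('NAME', 'x'),
#     #     ('EQUAL_EQUAL', '=='), ('INT', '1'), ('COLON', ':'),
#     #     ('ENDL', '\n'), ('ENDMARKER', ''), None]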
|
import os
import sys
from github import Github
from github.GithubException import GithubException
def tag_to_tag():
SRC_TAG=os.environ.get('SRC_TAG')
ORG_NAME=os.environ.get('ORG_NAME')
REPO_NAME=os.environ.get('REPO_NAME')
USERNAME=os.environ.get('USERNAME')
PASSWORD=os.environ.get('PASSWORD')
TAG=os.environ.get('TAG')
print 'Attempting to create tag %s from tag %s' % (TAG, SRC_TAG)
g = Github(USERNAME,PASSWORD)
org = g.get_organization(ORG_NAME)
repo = org.get_repo(REPO_NAME)
# Get the source tag by name, error if none found
src_tag = None
for tag in repo.get_tags():
print tag.name
if tag.name == SRC_TAG:
src_tag = tag
break
if not src_tag:
print 'No tag named %s found' % SRC_TAG
exit(1)
tag = repo.create_git_tag(TAG, 'Created from tag %s' % SRC_TAG, src_tag.commit.sha, 'commit')
print 'Tag Created:'
print tag._rawData
    # Could not figure out how to look up the existing ref, but decided
    # against it anyhow, as Jenkins shouldn't be rewriting git tags
    # automatically. If a tag needs to be overwritten, it must first be
    # manually deleted.
# Delete the existing ref
#existing_ref = repo.get_git_ref('tag/%s' % TAG)
#if existing_ref:
    # print 'Existing ref found, deleting it to set new one'
# existing_ref.delete()
ref = repo.create_git_ref('refs/tags/%s' % TAG, tag.sha)
print 'Ref Created:'
print ref._rawData
print 'SUCCESS'
if __name__ == '__main__':
try:
        tag_to_tag()
    except Exception:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(1)
|
#!/usr/bin/python
################
# The MIT License (MIT)
#
# Copyright (c) <2013> <Martin de Bruyn>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
############################################################
#----------------------------------------------------------------------#
"""@ package Input
Keep all inputs here.
"""
# System imports
import logging as log
import sys
# Panda imports
from direct.showbase.InputStateGlobal import inputState
from direct.showbase.DirectObject import DirectObject
# MeoTech imports
#----------------------------------------------------------------------#
class InputHandler(DirectObject):
"""InputHandler.
Keyboard stuff
"""
def __init__(self, _game):
"""InputHandler INIT"""
# Game
        self.game = _game
# Keyboard
inputState.watchWithModifiers('forward', 'w')
inputState.watchWithModifiers('left', 'a')
inputState.watchWithModifiers('reverse', 's')
inputState.watchWithModifiers('right', 'd')
inputState.watchWithModifiers('turnLeft', 'q')
inputState.watchWithModifiers('turnRight', 'e')
inputState.watchWithModifiers('space', 'space')
#inputState.watchWithModifiers('ctrl', 'lcontrol_down')
self.accept("mouse1", self.shootLight)
# App exit temp
base.accept("escape", sys.exit)
# mouse
self.winXhalf = base.win.getXSize()/2
self.winYhalf = base.win.getYSize()/2
# Should move the camera stuff to the baseCamera.py
base.camera.reparentTo(self.game.meotech.engine.GameObjects["player"].bulletBody)
base.camLens.setFov(90)
base.camLens.setNear(0.5)
self.mouseSpeedX = 15
self.mouseSpeedY = 0.2
self.camP = 10
def shootLight(self):
print "shoot"
cone = self.game.player.flashlightConeBody
base.messenger.send("shootLight", [cone])
def getMouse(self, dt):
player = self.game.meotech.engine.GameObjects["player"]
flashlight = self.game.player.flashlightConeBody
flashlight_lamp = self.game.player.flashlight
flashlight_light = self.game.player.flashlightLight
# Handle mouse
md = base.win.getPointer(0)
x = md.getX()
y = md.getY()
if base.win.movePointer(0, self.winXhalf, self.winYhalf):
omega = (x - self.winXhalf)*-self.mouseSpeedX
player.bulletBody.node().setAngularMovement(omega)
#flashlight.setH(flashlight, base.camera.getH())
cam = base.cam.getP() - (y - self.winYhalf) * self.mouseSpeedY
flashlight.setHpr(base.cam.getHpr())
            if cam < -80:
                cam = -80
            elif cam > 90:
                cam = 90
base.cam.setP(cam)
flashlight.setP(cam + 90)
flashlight_lamp.setZ(flashlight.getZ() - 0.6)
flashlight_lamp.setY(flashlight.getY() - 0.55)
flashlight_light.setHpr(flashlight_lamp.find("LightPos").getHpr() + 90)
|
from rest_framework import generics
from rest_framework import permissions as drf_permissions
from rest_framework.exceptions import NotFound
from framework.auth.oauth_scopes import CoreScopes
from osf.models import (
Guid,
BaseFileNode,
FileVersion,
QuickFilesNode
)
from api.base.exceptions import Gone
from api.base.permissions import PermissionWithGetter
from api.base.throttling import CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle
from api.base import utils
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.nodes.permissions import ContributorOrPublic
from api.nodes.permissions import ReadOnlyIfRegistration
from api.files.permissions import IsPreprintFile
from api.files.permissions import CheckedOutOrAdmin
from api.files.serializers import FileSerializer
from api.files.serializers import FileDetailSerializer, QuickFilesDetailSerializer
from api.files.serializers import FileVersionSerializer
class FileMixin(object):
"""Mixin with convenience methods for retrieving the current file based on the
current URL. By default, fetches the file based on the file_id kwarg.
"""
serializer_class = FileSerializer
file_lookup_url_kwarg = 'file_id'
def get_file(self, check_permissions=True):
try:
            obj = utils.get_object_or_error(BaseFileNode, self.kwargs[self.file_lookup_url_kwarg], self.request, display_name='file')
except NotFound:
obj = utils.get_object_or_error(Guid, self.kwargs[self.file_lookup_url_kwarg], self.request).referent
if obj.is_deleted:
raise Gone(detail='The requested file is no longer available.')
if not isinstance(obj, BaseFileNode):
                raise NotFound
if check_permissions:
# May raise a permission denied
self.check_object_permissions(self.request, obj)
return obj
class FileDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_detail).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
IsPreprintFile,
CheckedOutOrAdmin,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'node'),
PermissionWithGetter(ReadOnlyIfRegistration, 'node'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileDetailSerializer
throttle_classes = (CreateGuidThrottle, NonCookieAuthThrottle, UserRateThrottle, )
view_category = 'files'
view_name = 'file-detail'
def get_serializer_class(self):
try:
node = self.get_node()
except (NotFound, Gone):
return FileDetailSerializer
else:
if isinstance(node, QuickFilesNode):
return QuickFilesDetailSerializer
return FileDetailSerializer
def get_node(self):
return self.get_file().node
# overrides RetrieveAPIView
def get_object(self):
user = utils.get_user_auth(self.request).user
file = self.get_file()
if self.request.GET.get('create_guid', False):
# allows quickfiles to be given guids when another user wants a permanent link to it
if (self.get_node().has_permission(user, 'admin') and utils.has_admin_scope(self.request)) or file.node.is_quickfiles:
file.get_guid(create=True)
return file
class FileVersionsList(JSONAPIBaseView, generics.ListAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_versions).
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, 'node'),
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'file-versions'
ordering = ('-modified',)
def get_queryset(self):
self.file = self.get_file()
return self.file.versions.all()
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
def node_from_version(request, view, obj):
return view.get_file(check_permissions=False).node
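# node_from_version serves as the getter for PermissionWithGetter below: it
# resolves the node that version permissions are checked against, skipping
# the file-level permission check (check_permissions=False) since access is
# gated on the node itself.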
class FileVersionDetail(JSONAPIBaseView, generics.RetrieveAPIView, FileMixin):
"""The documentation for this endpoint can be found [here](https://developer.osf.io/#operation/files_version_detail).
"""
version_lookup_url_kwarg = 'version_id'
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PermissionWithGetter(ContributorOrPublic, node_from_version)
)
required_read_scopes = [CoreScopes.NODE_FILE_READ]
required_write_scopes = [CoreScopes.NODE_FILE_WRITE]
serializer_class = FileVersionSerializer
view_category = 'files'
view_name = 'version-detail'
# overrides RetrieveAPIView
def get_object(self):
self.file = self.get_file()
maybe_version = self.file.get_version(self.kwargs[self.version_lookup_url_kwarg])
# May raise a permission denied
# Kinda hacky but versions have no reference to node or file
        self.check_object_permissions(self.request, self.file)
return utils.get_object_or_error(FileVersion, getattr(maybe_version, '_id', ''), self.request)
def get_serializer_context(self):
context = JSONAPIBaseView.get_serializer_context(self)
context['file'] = self.file
return context
|
#!/usr/bin/env python
import os
import cv2
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
disp_n = 200
s_time = 3
radius = 3
thickness = 3
cls_color = (23, 119, 188)
colors = [
(0, 0, 255),
(0, 255, 0),
(255, 0, 0),
(23, 119, 188),
(222, 12, 39),
(122, 212, 139),
(20, 198, 68),
(111, 12, 139),
(131, 112, 179),
(31, 211, 79),
(131, 121, 179),
(31, 121, 192),
(192, 21, 92),
(192, 21, 192),
(216, 121, 92),
(16, 11, 62),
(16, 111, 162),
(96, 46, 12),
]
n_colors = len(colors)
def _mkdirs(path):
if not os.path.isdir(path):
os.makedirs(path)
# only one ground-truths for per image
def _read_gt(filepath):
'''format: imgidx objidx bbox cls'''
    gt_dt = {}
    gt_c = 0
fh = open(filepath)
for line in fh.readlines():
        gt_c = gt_c + 1
line = line.strip()
info = line.split()
assert len(info) >= 1
imgidx, info = info[0], info[1:]
assert len(info) == 6
imgidx = imgidx.strip()
objidx = info[0].strip()
x1 = info[1].strip()
y1 = info[2].strip()
x2 = info[3].strip()
y2 = info[4].strip()
cls = info[5].strip()
objidx = int(objidx)
assert objidx == 0
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
        gt_dt[imgidx] = [x1, y1, x2, y2]
fh.close()
    assert gt_c == len(gt_dt.keys())
    return gt_dt
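# Illustrative ground-truth line matching the format above (made-up values):
#
#     img_000 0 12 30 88 140 person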
# multiple or one for prediction
def _read_pd(filepath, in_dire, is_in_dire=False):
    '''format: imgidx score bbox cls'''
    pd_dt = {}
    pd_c = 0
fh = open(filepath)
imgidxs = []
for line in fh.readlines():
        pd_c = pd_c + 1
line = line.strip()
info = line.split()
assert len(info) >= 1
im_path, info = info[0], info[1:]
assert len(info) == 6
im_path = im_path.strip()
score = info[0].strip()
x1 = info[1].strip()
y1 = info[2].strip()
x2 = info[3].strip()
y2 = info[4].strip()
cls = info[5].strip()
if is_in_dire:
im_name = im_path[len(in_dire):]
else:
im_name = os.path.basename(im_path)
imgidx = im_name.strip().rsplit(".", 1)[0]
imgidx = imgidx.strip()
if imgidx in imgidxs:
print imgidx, line
imgidxs.append(imgidx)
score = float(score)
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
if x1 > x2:
x1, x2 = x2, x1
if y1 > y2:
y1, y2 = y2, y1
        pd_dt[imgidx] = [x1, y1, x2, y2]
fh.close()
print len(imgidxs)
print len(set(imgidxs))
    assert pd_c == len(pd_dt.keys()), "pd_c: %s, n_keys: %s" \
        % (pd_c, len(pd_dt.keys()))
    return pd_dt
def _area(box):
assert len(box) == 4
w = box[2] - box[0] + 1
h = box[3] - box[1] + 1
a = w * h
assert a >= 0
return a
def _overlap(pd_box, gt_box):
pa = _area(pd_box)
ga = _area(gt_box)
x1 = max(pd_box[0], gt_box[0])
y1 = max(pd_box[1], gt_box[1])
x2 = min(pd_box[2], gt_box[2])
y2 = min(pd_box[3], gt_box[3])
if x1 > x2 or y1 > y2:
oa = 0
else:
oa = _area([x1, y1, x2, y2])
return oa / (pa + ga - oa + 0.0)
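# Quick sanity check (illustrative): with the +1 area convention above, two
# 10x10 boxes overlapping in a 5x5 corner give IoU = 25 / (100 + 100 - 25),
# i.e. _overlap([0, 0, 9, 9], [5, 5, 14, 14]) -> 0.14285714285714285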
def _iou(pd_file, gt_file, in_dire, is_in_dire=False):
''''''
pd_dt = _read_pd(pd_file, in_dire, is_in_dire=is_in_dire)
gt_dt = _read_gt(gt_file)
assert len(pd_dt.keys()) == len(gt_dt.keys())
imgidxs = pd_dt.keys()
imgidxs.sort()
disp_c = 0
ovs = []
for imgidx in imgidxs:
disp_c += 1
if disp_c % disp_n == 0:
print "disp_c:", disp_c
pd_box = pd_dt[imgidx]
gt_box = gt_dt[imgidx]
ov = _overlap(pd_box, gt_box)
ovs.append(ov)
if disp_c % disp_n != 0:
print "disp_c:", disp_c
print "\n\nDone.\n\n"
return ovs
def _recall(ovs, thresholds):
    n_ovs = len(ovs)  # n_examples
    n_thres = len(thresholds)
    recall = np.zeros(n_thres)
    print recall.shape
    for j in xrange(n_thres):
        acc_c = 0
        thres = thresholds[j]
        for j2 in xrange(n_ovs):
            ov = ovs[j2]
            if ov > thres:
                acc_c += 1
        recall[j] = acc_c / (n_ovs + 0.)
    return recall
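# Illustrative: for ovs = [0.9, 0.6, 0.4] and thresholds = [0.5, 0.75],
# _recall returns [2/3, 1/3] -- the fraction of detections whose IoU
# clears each threshold.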
def _all_recall_pics(ovs_list, type_names, title, out_path=None, legend_loc="upper right"):
'''Plot Precision-Recall curve'''
plt.clf()
plt.grid(True)
plt.xlabel('IoU')
plt.ylabel('Recall')
# plt.ylim([0.0, 1.0])
# plt.xlim([0.5, 1.0])
n_dataset = len(ovs_list)
assert n_dataset == len(type_names)
    thresholds = [j / 100.0 for j in xrange(50, 101, 1)]
for j in xrange(n_dataset):
ovs = ovs_list[j]
name = type_names[j]
        recall = _recall(ovs, thresholds)
        plt.plot(thresholds, recall, label=name)
plt.xticks(np.arange(0.50, 1.01, 0.05))
plt.yticks(np.arange(0.0, 1.01, 0.1))
plt.title(title)
plt.legend(loc=legend_loc)
if out_path is None:
plt.show()
else:
plt.savefig(out_path)
def torso_run():
''''''
ovs_list = []
type_names = []
out_path = "/pathTo/../res.pics/torso.recall.png"
## flic test
pd_file = "/pathTo/../dataset/FLIC/vision/flic_torso_test.txt"
gt_file = "/pathTo/../dataset/FLIC/labels/crop_test_torso_labels2.txt"
in_dire = "/pathTo/../dataset/FLIC/crop.images2/test/"
is_in_dire = False
type_names.append("FLIC Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## bbc pose -> test & val
pd_file = "/pathTo/../dataset/bbc_pose/torso_masks/test_torso_results.txt"
gt_file = "/pathTo/../dataset/bbc_pose/labels/crop_test_torso.label"
in_dire = "/pathTo/../dataset/bbc_pose/crop.data/"
is_in_dire = True
type_names.append("BBC Pose Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## kinect2
pd_file = "/pathTo/../dataset/Kinect2/torso_masks/test_torso_results.txt"
gt_file = "/pathTo/../dataset/Kinect2/labels/up.crop.color2_test_torso_l7.log"
in_dire = "/pathTo/../dataset/Kinect2/up.crop.color/"
is_in_dire = False
type_names.append("Kinect2 Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
# pic -> viz
title = 'Recall for Torso Detection'
_all_recall_pics(ovs_list, type_names, title, out_path=out_path)
def person_run():
''''''
ovs_list = []
type_names = []
out_path = "/pathTo/../res.pics/person.recall.png"
## bbc pose -> test & val
pd_file = "/pathTo/../dataset/bbc_pose/test_person_results.txt"
gt_file = "/pathTo/../dataset/bbc_pose/labels/pbbox_test_cls.txt"
in_dire = "/pathTo/../dataset/bbc_pose/data/"
is_in_dire = True
type_names.append("BBC Pose Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
## kinect2
pd_file = "/pathTo/../dataset/Kinect2/test_person_results.txt"
gt_file = "/pathTo/../dataset/Kinect2/labels/up.color2.pbbox.test.log"
in_dire = "/pathTo/../dataset/Kinect2/up.color/"
is_in_dire = False
type_names.append("Kinect2 Dataset")
ovs = _iou(pd_file, gt_file, in_dire, is_in_dire=is_in_dire)
ovs_list.append(ovs)
# pic -> viz
title = 'Recall for Person Detection'
_all_recall_pics(ovs_list, type_names, title, out_path=out_path, legend_loc="lower left")
if __name__ == '__main__':
''''''
# torso_run()
person_run() |
# based on https://github.com/pypa/sampleproject/blob/master/setup.py
# see http://packaging.python.org/en/latest/tutorial.html#creating-your-own-project
from setuptools import setup, find_packages
from setuptools.command.install import install as stdinstall
import codecs
import os
import re
import sys
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:
version_file = f.read()
# The version line must have the form
# __version__ = 'ver'
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
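# Illustrative: if typecheck/__init__.py contains a line such as
#     __version__ = '1.3'
# then find_version('typecheck', '__init__.py') returns '1.3'.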
def get_file_contents(filename):
with codecs.open(filename, encoding='utf-8') as f:
contents = f.read()
return contents
package_name = "typecheck-decorator"
class install_with_test(stdinstall):
def run(self):
stdinstall.run(self) # normal install
##pip/setuptools makes this unbuffering unhelpful:
#sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 1) # make line-buffered
#sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 1) # make line-buffered
#import typecheck.test_typecheck_decorator # execute post-install test (during beta only)
setup(
# setup customization:
cmdclass={'install': install_with_test},
# basic information:
name=package_name,
version=find_version('typecheck', '__init__.py'),
description="flexible explicit run-time type checking of function arguments (Python3-only)",
long_description=get_file_contents("README.rst"),
# The project URL:
url='http://github.com/prechelt/' + package_name,
# Author details:
author='Dmitry Dvoinikov, Lutz Prechelt',
author_email='prechelt@inf.fu-berlin.de',
# Classification:
license='BSD License',
classifiers=[
'License :: OSI Approved :: BSD License',
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Documentation',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='type-checking',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=find_packages(exclude=["contrib", "docs", "tests*"]),
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ['typing;python_version<"3.5"'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
# 'typecheck': ['package_data.dat'],
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
###data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
### entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
) |
            ACTION_DELAY: self._async_delay,
ACTION_WAIT_TEMPLATE: self._async_wait_template,
ACTION_CHECK_CONDITION: self._async_check_condition,
ACTION_FIRE_EVENT: self._async_fire_event,
ACTION_CALL_SERVICE: self._async_call_service,
}
@property
def is_running(self) -> bool:
"""Return true if script is on."""
return self._cur != -1
def run(self, variables=None, context=None):
"""Run script."""
run_coroutine_threadsafe(
self.async_run(variables, context), self.hass.loop).result()
async def async_run(self, variables: Optional[Sequence] = None,
context: Optional[Context] = None) -> None:
"""Run script.
This method is a coroutine.
"""
self.last_triggered = date_util.utcnow()
if self._cur == -1:
self._log('Running script')
self._cur = 0
# Unregister callback if we were in a delay or wait but turn on is
# called again. In that case we just continue execution.
self._async_remove_listener()
for cur, action in islice(enumerate(self.sequence), self._cur, None):
try:
await self._handle_action(action, variables, context)
except _SuspendScript:
# Store next step to take and notify change listeners
self._cur = cur + 1
if self._change_listener:
self.hass.async_add_job(self._change_listener)
return
except _StopScript:
break
except Exception:
# Store the step that had an exception
self._exception_step = cur
# Set script to not running
self._cur = -1
self.last_action = None
# Pass exception on.
raise
# Set script to not-running.
self._cur = -1
self.last_action = None
if self._change_listener:
self.hass.async_add_job(self._change_listener)
def stop(self) -> None:
"""Stop running script."""
run_callback_threadsafe(self.hass.loop, self.async_stop).result()
def async_stop(self) -> None:
"""Stop running script."""
if self._cur == -1:
return
self._cur = -1
self._async_remove_listener()
if self._change_listener:
self.hass.async_add_job(self._change_listener)
@callback
def async_log_exception(self, logger, message_base, exception):
"""Log an exception for this script.
Should only be called on exceptions raised by this scripts async_run.
"""
# pylint: disable=protected-access
step = self._exception_step
action = self.sequence[step]
action_type = _determine_action(action)
error = None
meth = logger.error
if isinstance(exception, vol.Invalid):
error_desc = "Invalid data"
elif isinstance(exception, exceptions.TemplateError):
error_desc = "Error rendering template"
elif isinstance(exception, exceptions.Unauthorized):
error_desc = "Unauthorized"
elif isinstance(exception, exceptions.ServiceNotFound):
error_desc = "Service not found"
else:
# Print the full stack trace, unknown error
error_desc = 'Unknown error'
meth = logger.exception
error = ""
if error is None:
error = str(exception)
meth("%s. %s for %s at pos %s: %s",
message_base, error_desc, action_type, step + 1, error)
async def _handle_action(self, action, variables, context):
"""Handle an action."""
await self._actions[_determine_action(action)](
action, variables, context)
async def _async_delay(self, action, variables, context):
"""Handle delay."""
# Call ourselves in the future to continue work
unsub = None
@callback
def async_script_delay(now):
"""Handle delay."""
# pylint: disable=cell-var-from-loop
with suppress(ValueError):
self._async_listener.remove(unsub)
self.hass.async_create_task(
self.async_run(variables, context))
delay = action[CONF_DELAY]
try:
if isinstance(delay, template.Template):
delay = vol.All(
cv.time_period,
cv.positive_timedelta)(
delay.async_render(variables))
elif isinstance(delay, dict):
delay_data = {}
delay_data.update(
template.render_complex(delay, variables))
delay = cv.time_period(delay_data)
except (exceptions.TemplateError, vol.Invalid) as ex:
_LOGGER.error("Error rendering '%s' delay template: %s",
self.name, ex)
raise _StopScript
self.last_action = action.get(
CONF_ALIAS, 'delay {}'.format(delay))
self._log("Executing step %s" % self.last_action)
unsub = async_track_point_in_utc_time(
self.hass, async_script_delay,
date_util.utcnow() + delay
)
self._async_listener.append(unsub)
raise _SuspendScript
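    # Note on the suspend/resume pattern above: raising _SuspendScript
    # unwinds async_run() after the current position is stored in self._cur,
    # and the scheduled callback later re-enters async_run(), which resumes
    # from self._cur via islice().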
async def _async_wait_template(self, action, variables, context):
"""Handle a wait template."""
# Call ourselves in the future to continue work
wait_template = action[CONF_WAIT_TEMPLATE]
wait_template.hass = self.hass
self.last_action = action.get(CONF_ALIAS, 'wait template')
self._log("Executing step %s" % self.last_action)
# check if condition already okay
if condition.async_template(
self.hass, wait_template, variables):
return
@callback
def async_script_wait(entity_id, from_s, to_s):
"""Handle script after template condition is true."""
self._async_remove_listener()
self.hass.async_create_task(
self.async_run(variables, context))
self._async_listener.append(async_track_template(
self.hass, wait_template, async_script_wait, variables))
if CONF_TIMEOUT in action:
self._async_set_timeout(
action, variables, context,
action.get(CONF_CONTINUE, True))
raise _SuspendScript
    async def _async_call_service(self, action, variables, context):
"""Call the service specified in the action.
This method is a coroutine.
"""
self.last_action = action.get(CONF_ALIAS, 'call service')
self._log("Executing step %s" % self.last_action)
await service.async_call_from_config(
self.hass, action,
            blocking=True,
variables=variables,
validate_config=False,
context=context
)
async def _async_fire_event(self, action, variables, context):
"""Fire an event."""
self.last_action = action.get(CONF_ALIAS, action[CONF_EVENT])
self._log("Executing step %s" % self.last_action)
event_data = dict(action.get(CONF_EVENT_DATA, {}))
if CONF_EVENT_DATA_TEMPLATE in action:
try:
event_data.update(template.render_complex(
action[CONF_EVENT_DATA_TEMPLATE], variables))
except exceptions.TemplateError as ex:
_LOGGER.error('Error rendering event data template: %s', ex)
self.hass.bus.async_fire(action[CONF_EVENT],
event_data, context=context)
async def _async_check_condition(self, action, variables, context):
"""Test if condition is matching."""
config_cache_key = frozenset((k, str(v)) for k, v in action.items())
config = self._config_cache.get(config_cache_key)
if not config:
config = condition.async_from_config(action, False)
self._config_cache[config_cache_key] = config
        self.last_action = action.get(CONF_ALIAS, 'check condition')
        check = config(self.hass, variables)
        self._log("Test condition %s: %s" % (self.last_action, check))
        if not check:
            raise _StopScript
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRvcheck(RPackage):
"""Check latest release version of R and R package (both in 'CRAN',
'Bioconductor' or 'Github')."""
homepage = "https://cloud.r-project.org/package=rvcheck"
url = "https://cloud.r-project.org/src/contrib/rvcheck_0.0.9.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/rvcheck"
version('0.1.3', sha256='0b59986c1ccc5b89f8aca8fa7cf62d0b875719addb40e08dbda1791cfd334fc4')
    version('0.0.9', sha256='6e7be7b029d28181a1b57ebd4d25978f3459722ffdb45a3698157a7f943bea92')
depends_on('r@3.3.0:', when='@:0.1.1', type=('build', 'run'))
    depends_on('r@3.4.0:', when='@0.1.3:', type=('build', 'run'))
depends_on('r-rlang', when='@0.1.1:', type=('build', 'run'))
|
import numpy as np
import tensorflow as tf
import dists
from misc import *
|
# Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.distance._roberts.
Roberts similarity
"""
from typing import Any, Optional
from ._token_distance import _TokenDistance
from ..tokenizer import _Tokenizer
__all__ = ['Roberts']
class Roberts(_TokenDistance):
r"""Roberts similarity.
For two multisets X and Y drawn from an alphabet S, Roberts similarity
:cite:`Roberts:1986` is
.. math::
sim_{Roberts}(X, Y) =
\frac{\Big[\sum_{i \in S} (X_i + Y_i) \cdot
\frac{min(X_i, Y_i)}{max(X_i, Y_i)}\Big]}
{\sum_{i \in S} (X_i + Y_i)}
.. versionadded:: 0.4.0
"""
def __init__(
self, tokenizer: Optional[_Tokenizer] = None, **kwargs: Any
) -> None:
"""Initialize Roberts instance.
Parameters
----------
tokenizer : _Tokenizer
A tokenizer instance from the :py:mod:`abydos.tokenizer` package
**kwargs
            Arbitrary keyword arguments

        Other Parameters
----------------
qval : int
The length of each q-gram. Using this parameter and tokenizer=None
will cause the instance to use the QGram tokenizer with this
q value.
.. versionadded:: 0.4.0
"""
super(Roberts, self).__init__(tokenizer=tokenizer, **kwargs)
    def sim(self, src: str, tar: str) -> float:
"""Return the Roberts similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Roberts similarity
Examples
--------
>>> cmp = Roberts()
>>> cmp.sim('cat', 'hat')
0.5
>>> cmp.sim('Niall', 'Neil')
0.36363636363636365
>>> cmp.sim('aluminum', 'Catalan')
0.11764705882352941
>>> cmp.sim('ATCG', 'TAGC')
0.0
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
self._tokenize(src, tar)
alphabet = self._total().keys()
return sum(
(self._src_tokens[i] + self._tar_tokens[i])
* min(self._src_tokens[i], self._tar_tokens[i])
/ max(self._src_tokens[i], self._tar_tokens[i])
for i in alphabet
) / sum((self._src_tokens[i] + self._tar_tokens[i]) for i in alphabet)
if __name__ == '__main__':
import doctest
doctest.testmod()
|
# subSystemBonusGallenteElectronic2TractorBeamVelocity
#
# Used by:
# Subsystem: Proteus Electronics - Emergent Locus Analyzer
type = "passive"
def handler(fit, module, context):
    fit.modules.filteredItemBoost(lambda mod: mod.item.group.name == "Tractor Beam",
                                  "maxTractorVelocity", module.getModifiedItemAttr("subsystemBonusGallenteElectronic2"),
skill="Gallente Electronic Systems")
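# Illustrative effect (assuming, as an interpretation of the skill= argument
# in these handlers, that the bonus scales per skill level): with a
# 20%-per-level attribute bonus and the subsystem skill at level 3, every
# fitted Tractor Beam's maxTractorVelocity is raised by 60%.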
|
from django.db import models
from django.db.models.fields.files import FieldFile
from django.core.files import File
def get_video_dimensions(path):
from ffvideo import VideoStream
vs = VideoStream(path)
return (vs.frame_width, vs.frame_height)
class VideoFile(File):
"""
    A mixin for use alongside django.core.files.base.File, which provides
    additional features for dealing with video files.
    """
def _get_width(self):
        return self._get_video_dimensions()[0]
width = property(_get_width)
def _get_height(self):
return self._get_video_dimensions()[1]
height = property(_get_height)
    def _get_video_dimensions(self):
        if not hasattr(self, '_dimensions_cache'):
            was_closed = self.closed
            self.open()
            self._dimensions_cache = get_video_dimensions(self.path)
            if was_closed:
                self.close()
        return self._dimensions_cache
# A video field is exactly a file field with a different signature
class VideoFieldFile(VideoFile, FieldFile):
pass
class VideoField(models.FileField):
attr_class = VideoFieldFile
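# Illustrative usage (the model and field names are made up; requires the
# ffvideo package at runtime):
#
#     class Clip(models.Model):
#         video = VideoField(upload_to='clips/')
#
#     clip.video.width and clip.video.height lazily probe the file once and
#     cache the dimensions.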
|
# 5%
elif num < 10: planet.type = 'G'; planet.diameter = dice(3, 6, 0) * 10000 # 5%
elif num < 60: planet.type = 'R'; planet.diameter = dice(1, 10, 0) * 1000 # 50% - rock
elif num < 70: planet.type = 'D'; planet.diameter = dice(2, 6, 2) * 1000 # 10% - desert
elif num < 100: planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # 30% - hostile
elif zone == 1: # Zone B
if num < 10: planet.type = 'A' # 10%
elif num < 15: planet.type = 'G'; planet.diameter = dice(3, 6, 0) * 10000 # 5%
elif num < 25: planet.type = 'R'; planet.diameter = dice(1, 10, 0) * 1000 # 10% - rock
elif num < 45: planet.type = 'D'; planet.diameter = dice(2, 6, 2) * 1000 # 20% - desert
elif num < 70: planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # 25% - hostile
elif num < 90:
if isFGK:
planet.type = 'M'; planet.diameter = dice(2, 6, 5) * 1000 # FGK / 20% - marginal
else:
                planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # Else / 20% - hostile
        elif num < 100:
if isFGK:
# planet.type = 'E'; planet.diameter = dice(2, 6, 5) * 1000
planet.type = 'E'; planet.diameter = dice(1, 4, 13) * 1000 # FGK / 10% - terran
else:
planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # Else / 10% - hostile
elif zone == 2: # Zone C
if num < 15: planet.type = 'A' # 15%
elif num < 75: planet.type = 'G'; planet.diameter = dice(3, 6, 0) * 10000 # 60%
elif num < 80: planet.type = 'R'; planet.diameter = dice(1, 10, 0) * 1000 # 5% - rock
elif num < 90: planet.type = 'C'; planet.diameter = dice(1, 10, 0) * 1000 # 10% - cold
elif num < 95: planet.type = 'D'; planet.diameter = dice(2, 6, 2) * 1000 # 5% - desert
elif num < 100:
if isDNB:
planet.type = 'C'; planet.diameter = dice(1, 10, 0) * 1000 # DNB / 5% - cold
else:
planet.type = 'H'; planet.diameter = dice(3, 6, 1) * 1000 # Else / 5% - hostile
# energy
planet.energy = random.randrange(100 - zone * 50, 150 - zone * 50)
# minerals
if planet.type[0] in ('R', 'D', 'H', 'M'):
density = dice(1, 6, 0) / 2.0 + 3
planet.minerals = int(((planet.diameter / 500.0) + density * 10.0 + random.randrange(1, 101) / 2.0 - 45) * 2)
elif planet.type[0] == 'A':
diameter = dice(1, 10, 0) * 1000 # rock planet
density = dice(1, 6, 0) / 2.0 + 3
planet.minerals = int(((diameter / 500.0) + density * 10.0 + random.randrange(1, 101) / 2.0 - 45) * 2)
elif planet.type[0] == 'G':
diameter = dice(3, 6, 1) * 1000 # earth like planet
density = dice(1, 6, 0) / 2.0 + 3
planet.minerals = int(((diameter / 500.0) + density * 10.0 + random.randrange(1, 101) / 2.0 - 45) * 2)
elif planet.type == 'E':
planet.minerals = 100
else:
planet.minerals = 0
if planet.minerals < 0:
planet.minerals = 0
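    # Worked example: diameter 8000 and density 5.0 give minerals of about
    # ((8000 / 500.0) + 50.0 + r / 2.0 - 45) * 2 = (21 + r / 2.0) * 2 for
    # r in [1, 100], i.e. roughly 43 to 142 before the clamp above.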
# environment
if planet.type == 'E': planet.environ = 100
elif planet.type == 'M': planet.environ = random.randrange(25, 51)
elif planet.type == 'H': planet.environ = random.randrange(12, 26)
elif planet.type == 'D': planet.environ = random.randrange(6, 13)
elif planet.type == 'C': planet.environ = random.randrange(0, 7)
elif planet.type == 'R': planet.environ = random.randrange(0, 7)
else: planet.environ = 0
# slots
slotsMod = 0.67
planet.maxSlots = int((planet.diameter / 1000) * 1.5 * slotsMod)
if planet.type == 'E': planet.slots = 9 # planet.slots = int(planet.maxSlots * 0.50)
elif planet.type == 'M': planet.slots = int(planet.maxSlots * 0.50)
elif planet.type == 'H': planet.slots = int(planet.maxSlots * 0.50)
elif planet.type == 'D': planet.slots = int(planet.maxSlots * 0.75)
elif planet.type == 'C': planet.slots = int(planet.maxSlots * 0.75)
elif planet.type == 'R': planet.slots = int(planet.maxSlots * 0.75)
else: planet.slots = 0
# make sure that all planets except A and G has at least one slot
if planet.type in "EMHDCR" and planet.slots == 0:
#@print "Fixing slots", planet.type, planet.slots, planet.maxSlots
planet.maxSlots = max(1, planet.maxSlots)
planet.slots = max(1, planet.slots)
#print planet.type, planet.environ, planet.minerals
def dice(num, range, offset):
result = offset
for i in xrange(0, num):
result += random.randrange(1, range + 1)
return result
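# Worked example: dice(3, 6, 1) rolls 3d6 plus an offset of 1, i.e. a value
# in [4, 19]; multiplied by 1000 above it yields diameters of 4000 to 19000.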
def shiftSystems(galaxy, min, max, delta):
print 'Shifting...'
min = min * min
max = max * max
minMinDist = 1000000
maxMinDist = 0
for system1 in galaxy.systems:
if not system1._moveable:
continue
        minDist = [1000000, 1000000, 1000000]
closestSystems = [None, None, None]
for system2 in galaxy.systems:
if system1 == system2 or not system2._moveable:
continue
dist = (system1.x - system2.x) ** 2 + (system1.y - system2.y) ** 2
if dist < minDist[0]:
minDist.pop()
minDist.insert(0, dist)
closestSystems.pop()
closestSystems.insert(0, system2)
elif dist < minDist[1]:
minDist.pop()
minDist.insert(1, dist)
closestSystems.pop()
closestSystems.insert(1, system2)
elif dist < minDist[2]:
minDist.pop()
minDist.insert(2, dist)
closestSystems.pop()
closestSystems.insert(2, system2)
system1._closest = closestSystems
for closestSystem in closestSystems:
if not closestSystem:
continue
dist = (system1.x - closestSystem.x) ** 2 + (system1.y - closestSystem.y) ** 2
if dist < min and closestSystem:
# move system away
if system1.x > closestSystem.x:
system1.x += random.uniform(0, delta)
closestSystem.x -= random.uniform(0, delta)
else:
system1.x -= random.uniform(0, delta)
closestSystem.x += random.uniform(0, delta)
if system1.y > closestSystem.y:
system1.y += random.uniform(0, delta)
closestSystem.y -= random.uniform(0, delta)
else:
system1.y -= random.uniform(0, delta)
closestSystem.y += random.uniform(0, delta)
elif dist > max and closestSystem:
# move systems closer
if system1.x < closestSystem.x:
system1.x += random.uniform(0, delta)
closestSystem.x -= random.uniform(0, delta)
else:
system1.x -= random.uniform(0, delta)
closestSystem.x += random.uniform(0, delta)
if system1.y < closestSystem.y:
system1.y += random.uniform(0, delta)
closestSystem.y -= random.uniform(0, delta)
else:
system1.y -= random.uniform(0, delta)
closestSystem.y += random.uniform(0, delta)
if dist < minMinDist: minMinDist = dist
if dist > maxMinDist: maxMinDist = dist
print 'Finished [min. dist = <%.2f; %.2f>]' % (math.sqrt(minMinDist), math.sqrt(maxMinDist))
return math.sqrt(minMinDist), math.sqrt(maxMinDist)
## info
def getInfo(galaxy):
starTypes = {}
planetTypes = {}
planets = 0
maxPlanets = 0
minPlanets = 999
planetDist = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for system in galaxy.systems:
starTypes[system.starClass] = starTypes.get(system.starClass, 0) + 1
for planet in system.planets:
planetTypes[planet.type] = planetTypes.get(planet.type, 0) + 1
planets += 1
sysPlanets = len(system.planets)
maxPlanets = max(maxPlanets, sysPlanets)
minPlanets = min(minPlanets, sysPlanets)
planetDist[sysPlanets] += 1
stars = len(galaxy.systems)
print 'Systems:', stars
print starTypes
print 'Planets per system:', planetDist
print 'Planets:', planets
print 'min %d, max %d, avg %.2f' % (minPlanets, maxPlanets, float(planets) / stars)
print 'Types:', planetTypes
return stars, starTypes, planets, planetTypes
## saving
def saveGalaxy(id, galaxy):
print 'Saving...'
# names
loadSystemNames()
# save
fh = open('galaxy-%s.xml' % id, 'w')
print >>fh, '<?xml version="1.0" encoding="UTF-8"?>'
print >>fh, '<universe>'
print >>fh, '\t<galaxy id="%s" x="%.2f" y="%.2f">' % (
id, galaxy.centerX, galaxy.centerY
)
print >>fh, '\t\t<properties radius="%.2f"/>' % galaxy.radius
for system in galaxy.systems:
saveSystem(fh, system)
print >>fh, '\t</galaxy>'
print >>fh, '</universe>'
fh.close()
print 'Saved.'
def saveSystem(fh, system):
print >>fh, '\t\t<system x="%.2f" y="%.2f">' % (system.x, system.y)
# name = 'SCN-%04d%04d' % (system.x * 10, system.y * 10)
global systemNames
name = |
from data.database.sourceGroupAssignmentTable import getSourceIdToAssignedGroups
from data.database.sourceGroupTable import getAllSourceGroupNames
from data.database.sourceTable import getAllSources
(categoryToSourceObjects, sourceCategoryNames, sourceIdToAssignments, sourceIdToSourceObject,
unCategorizedSource) = ({}, None, None, None, None)
def getSourceById(sourceId):
    return sourceIdToSourceObject[sourceId]
def getSourceCategoryNames():
return sourceCategoryNames
def getSources(categoryName):
return categoryToSourceObjects[categoryName]
def getUncategorizedSource():
return unCategorizedSource
def _addToCategoryLookup(source):
global categoryToSourceObjects
for c in source.categories:
if c in categoryToSourceObjects:
categoryToSourceObjects[c].append(source)
else:
categoryToSourceObjects[c] = [source]
def __sourceToCategories(source):
    source_id = source.lookupId
    if source_id in sourceIdToAssignments:
        return sourceIdToAssignments[source_id]
    else:
        return []
def initSourceManager():
global sourceCategoryNames, sourceIdToAssignments, sourceIdToSourceObject, unCategorizedSource
unCategorizedSource = []
sourceCategoryNames = getAllSourceGroupNames()
sourceIdToAssignments = getSourceIdToAssignedGroups()
sourceIdToSourceObject = {}
    for s in getAllSources():
        s.categories = __sourceToCategories(s)
        sourceIdToSourceObject[s.lookupId] = s
        if len(s.categories) != 0:
            _addToCategoryLookup(s)
        else:
            unCategorizedSource.append(s)
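# Illustrative call order (an assumption about the intended contract, not
# code from this module): initSourceManager() must run once before any of
# the getters, since the module-level lookup tables start out empty.
#     initSourceManager()
#     for name in getSourceCategoryNames():
#         print(name, len(getSources(name)))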
|
import logging
import math
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.views.generic import View
from experiments_manager.models import Experiment
from marketplace.models import (ExternalPackage, InternalPackage, Package,
PackageResource, PackageVersion)
from .forms import RegisterForm, WorkbenchUserForm
from .models import WorkbenchUser, get_workbench_user
logger = logging.getLogger(__name__)
@login_required
def index(request):
workbench_user = WorkbenchUser.objects.get(user=request.user)
experiments = Experiment.objects.filter(owner=workbench_user).order_by('-created')[:5]
packages = InternalPackage.objects.filter(owner=workbench_user).order_by('-created')[:5]
logger.info('%s accessed index', workbench_user)
recent_versions = list(PackageVersion.objects.all().order_by('-created')[:5])
recent_resources = list(PackageResource.objects.all().order_by('-created')[:5])
recent_internal = list(InternalPackage.objects.all().order_by('-created')[:5])
recent_external = list(ExternalPackage.objects.all().order_by('-created')[:5])
    recent_experiments = list(Experiment.objects.filter(public=True).order_by('-created')[:5])
total_list = recent_versions + recent_resources + recent_internal + recent_external + recent_experiments
total_list = reversed(sorted(total_list, key=lambda x: x.created))
return render(request, 'index.html', {'experiments': experiments,
'packages': packages,
'activities': total_list})
class DetailProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
return render(request, "user_manager/workbenchuser_detail.html", {'workbench_user': workbench_user})
class EditProfileView(View):
def get(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(instance=workbench_user)
logger.info('%s edit get profile view', workbench_user)
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def post(self, request):
workbench_user = get_workbench_user(request.user)
form = WorkbenchUserForm(request.POST, instance=workbench_user)
if form.is_valid():
current_password = form.cleaned_data['current_password']
user = workbench_user.user
if current_password:
if user.check_password(current_password) and change_password_of_user(workbench_user, form):
messages.add_message(request, messages.SUCCESS, 'Your password has been changed.')
else:
messages.add_message(request, messages.ERROR, 'Passwords did not match '
'or incorrect current password.')
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
form.save()
logger.info('%s edited profile successfully', workbench_user)
return redirect(to='/')
else:
return render(request, "user_manager/workbenchuser_edit.html", {'form': form})
def change_password_of_user(w_user, form):
new_password = form.cleaned_data['new_password']
new_password_again = form.cleaned_data['new_password_again']
if new_password == new_password_again:
user = w_user.user
user.set_password(new_password)
user.save()
return True
return False
class RegisterView(View):
def get(self, request):
form = RegisterForm()
return render(request, 'user_manager/register.html', {'form': form})
def post(self, request):
form = RegisterForm(self.request.POST)
if form.is_valid():
new_email = form.cleaned_data['email']
if not existing_user_check(new_email):
user = User.objects.create_user(form.cleaned_data['username'],
new_email,
form.cleaned_data['password'])
workbench_user = WorkbenchUser.objects.get(user=user)
workbench_user.netid = form.cleaned_data['netid']
workbench_user.save()
logger.info('new user created: %s', workbench_user)
return redirect(to='/')
else:
                return render(request, 'user_manager/register.html', {'form': form})
else:
return render(request, 'user_manager/register.html', {'form': form})
def existing_user_check(email_address):
    return User.objects.filter(email=email_address).exists()
class WorkbenchUserDetailView(View):
def get(self, request, username):
        workbench_user = get_object_or_404(WorkbenchUser, user__username=username)
recent_experiments = Experiment.objects.filter(owner=workbench_user, completed=True).order_by('-created')[:5]
recent_packages = Package.objects.filter(owner=workbench_user).order_by('-created')[:5]
return render(request, "user_manager/user_profile.html", {'w_user': workbench_user,
'experiments': recent_experiments,
'packages': recent_packages})
@login_required
def search(request):
if 'q' in request.GET:
q = request.GET.get('q')
page = request.GET.get('page')
page = int(page) if page is not None else 1
results, nr_of_pages = get_search_results(request.user, q, page)
return render(request, 'search.html', {'results': results, 'query': q, 'page': page,
'next_page': page + 1,
'previous_page': page - 1,
'nr_of_pages': nr_of_pages,
'nr_of_pages_range': range(1, nr_of_pages+1)})
return render(request, 'search.html', {})
def get_search_results(user, q, page_nr=1, page_size=25):
start_value = (page_nr - 1) * page_size
end_value = start_value + page_size
search_query_list = build_search_queries(q, user)
total_count = sum([x.count() for x in search_query_list])
nr_of_pages = int(math.ceil(total_count / page_size))
total_list = [list(x.order_by('-created')[start_value:end_value]) for x in search_query_list]
total_flat_list = [item for sublist in total_list for item in sublist]
    total_flat_list = sorted(total_flat_list, key=lambda x: x.created, reverse=True)
return total_flat_list, nr_of_pages
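# Worked example: page_nr=2 with page_size=25 slices each query to [25:50];
# 60 total matches across the queries give nr_of_pages = ceil(60 / 25) = 3.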
def build_search_queries(q, user):
package_version_query = PackageVersion.objects.filter(version_nr__contains=q)
package_resource_query = PackageResource.objects.filter(title__contains=q)
internal_package_query = InternalPackage.objects.filter(name__contains=q)
external_package_query = ExternalPackage.objects.filter(name__contains=q)
users_query = WorkbenchUser.objects.filter(user__username=q)
experiment_query = Experiment.objects.filter(Q(owner__user=user, title__contains=q) |
Q(completed=True, title__contains=q))
return package_version_query, package_resource_query, internal_package_query, external_package_query, \
experiment_query, users_query
|
#!/usr/bin/env python
#Demo code
#
# simple demonstration script showing real-time thermal Imaging
# using the MLX90620 16x4 thermopile array and the mlxd daemon
#
# Copyright (C) 2015 Chuck Werbick
#
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import time
import picamera
import numpy as np
import subprocess
import os, sys
import datetime
import skimage
from skimage import io, exposure, transform, img_as_float, img_as_ubyte
from time import sleep
import matplotlib
import matplotlib.pyplot as plt
# IR registration parameters
ROT = np.deg2rad(90)
SCALE = (36.2, 36.4)
OFFSET = (530, 170)
def getImage():
    fn = r'/home/pi/tmp.jpg'
proc = subprocess.Popen('raspistill -o %s -w 640 -h 480 -n -t 3' %(fn),
shell=True, stderr=subprocess.STDOUT)
proc.wait()
im = io.imread(fn, as_grey=True)
im = exposure.equalize_hist(im)
return skimage.img_as_ubyte(im)
im = getImage()
with picamera.PiCamera() as camera:
camera.resolution = (640, 480)
camera.framerate = 20
camera.start_preview()
# get the temperature array, and align with the image
fifo = open('/var/run/mlx90620.sock', 'r')
# get the whole FIFO
ir_raw = fifo.read()
# trim to 128 bytes
ir_trimmed = ir_raw[0:128]
# go all numpy on it
ir = np.frombuffer(ir_trimmed, np.uint16)
# set the array shape to the sensor shape (16x4)
ir = ir.reshape((16, 4))[::-1, ::-1]
ir = img_as_float(ir)
# stretch contrast on our heat map
p2, p98 = np.percentile(ir, (2, 98))
ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
# increase even further? (optional)
# ir = exposure.equalize_hist(ir)
# turn our array into pretty colors
cmap = plt.get_cmap('spectral')
    rgba_img = cmap(ir)
rgb_img = np.delete(rgba_img, 3, 2)
    # align the IR array with the camera
tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
# turn it back into a ubyte so it'll display on the preview overlay
ir_byte = img_as_ubyte(ir_aligned)
#add the overlay
o = camera.add_overlay(np.getbuffer(ir_byte), layer=3, alpha=90)
    #update loop; wrap in try/except so the cleanup below actually runs on
    #an interrupt or error instead of being unreachable after "while True"
    try:
        while True:
            sleep(0.25)
            ir_raw = fifo.read()
            ir_trimmed = ir_raw[0:128]
            ir = np.frombuffer(ir_trimmed, np.uint16)
            ir = ir.reshape((16, 4))[::-1, ::-1]
            ir = img_as_float(ir)
            p2, p98 = np.percentile(ir, (2, 98))
            ir = exposure.rescale_intensity(ir, in_range=(p2, p98))
            ir = exposure.equalize_hist(ir)
            cmap = plt.get_cmap('spectral')
            rgba_img = cmap(ir)
            rgb_img = np.delete(rgba_img, 3, 2)
            # align the IR array with the image
            tform = transform.AffineTransform(scale=SCALE, rotation=ROT, translation=OFFSET)
            ir_aligned = transform.warp(rgb_img, tform.inverse, mode='constant', output_shape=im.shape)
            ir_byte = img_as_ubyte(ir_aligned)
            o.update(np.getbuffer(ir_byte))
    except (KeyboardInterrupt, Exception):
        print('Error! Closing...')
        camera.remove_overlay(o)
        fifo.close()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0002_recipecollection_title'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
('title', models.CharField(max_length=255)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='recipe',
name='tags',
            field=models.ManyToManyField(to='recipes.Tag', related_name='recipes'),
preserve_default=True,
),
]
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test-appropriate entry points into the gRPC Python Beta API."""
import grpc
from grpc.beta import implementations
def not_really_secure_channel(
host, port, channel_credentials, server_host_override):
"""Creates an insecure Channel to a remote host.
Args:
host: The name of the remote host to which to connect.
    port: The port of the remote host to which to connect.
channel_credentials: The implementations.ChannelCredentials with which to
connect.
server_host_override: The target name used for SSL host name checking.
Returns:
An implementations.Channel to the remote host through which RPCs may be
conducted.
"""
  target = '%s:%d' % (host, port)
channel = grpc.secure_channel(
target, channel_credentials._credentials,
((b'grpc.ssl_target_name_override', server_host_override,),))
return implementations.Channel(channel)
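# Illustrative usage sketch; the host, port, certificate bytes and override
# name below are assumptions for demonstration:
#   creds = implementations.ssl_channel_credentials(root_certificates=cert_pem)
#   channel = not_really_secure_channel(
#       'localhost', 8443, creds, 'foo.test.google.fr')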
|
from django.conf.urls.defaults import patterns, include, url
from rest_framework.urlpatterns import format_suffix_patterns
from startups import views
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('startups.views',
# Examples:
# url(r'^$', 'angellist_demo.views.home', name='home'),
# url(r'^angellist_demo/', include('angellist_demo.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
url(r'^startups$', 'startup_list'),
url(r'^startups/(?P<pk>[0-9]+)$', 'startup_detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
d['source'] = 'gffutils_derived'
d['frame'] = '.'
d['extra'] = []
d['attributes'] = helpers._unjsonify(d['attributes'])
f = feature.Feature(**d)
f.id = self._id_handler(f)
yield f
# Drop the indexes so the inserts are faster
c.execute('DROP INDEX IF EXISTS relationsparent')
c.execute('DROP INDEX IF EXISTS relationschild')
# Insert the just-inferred transcripts and genes. TODO: should we
# *always* use "merge" here for the merge_strategy?
logger.info("Importing inferred features into db")
last_perc = None
for i, f in enumerate(derived_feature_generator()):
perc = int(i / float(n_features) * 100)
if perc != last_perc:
sys.stderr.write('%s of %s (%s%%)\r' % (i, n_features, perc))
sys.stderr.flush()
last_perc = perc
try:
self._insert(f, c)
except sqlite3.IntegrityError:
fixed, final_strategy = self._do_merge(f, 'merge')
c.execute(
'''
UPDATE features SET attributes = ?
WHERE id = ?
''', (helpers._jsonify(fixed.attributes),
fixed.id))
logger.info("Committing changes")
self.conn.commit()
if not self._keep_tempfiles:
os.unlink(fout.name)
# TODO: recreate indexes?
def create_db(data, dbfn, id_spec=None, force=False, verbose=False,
checklines=10, merge_strategy='error', transform=None,
gtf_transcript_key='transcript_id', gtf_gene_key='gene_id',
gtf_subfeature='exon', force_gff=False,
force_dialect_check=False, from_string=False, keep_order=False,
text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None,
pragmas=constants.default_pragmas, sort_attribute_values=False,
dialect=None, _keep_tempfiles=False, infer_gene_extent=True,
disable_infer_genes=False, disable_infer_transcripts=False,
**kwargs):
"""
Create a database from a GFF or GTF file.
For more details on when and how to use the kwargs below, see the examples
in the online documentation (:ref:`examples`).
Parameters
----------
data : string or iterable
If a string (and `from_string` is False), then `data` is the path to
the original GFF or GTF file.
If a string and `from_string` is True, then assume `data` is the actual
data to use.
Otherwise, it's an iterable of Feature objects.
dbfn : string
Path to the database that will be created. Can be the special string
":memory:" to create an in-memory database.
id_spec : string, list, dict, callable, or None
This parameter guides what will be used as the primary key for the
database, which in turn determines how you will access individual
features by name from the database.
If `id_spec=None`, then auto-increment primary keys based on the
feature type (e.g., "gene_1", "gene_2"). This is also the fallback
behavior for the other values below.
If `id_spec` is a string, then look for this key in the attributes. If
it exists, then use its value as the primary key, otherwise
autoincrement based on the feature type. For many GFF3 files, "ID"
usually works well.
If `id_spec` is a list or tuple of keys, then check for each one in
order, using the first one found. For GFF3, this might be ["ID",
"Name"], which would use the ID if it exists, otherwise the Name,
otherwise autoincrement based on the feature type.
If `id_spec` is a dictionary, then it is a mapping of feature types to
what should be used as the ID. For example, for GTF files, `{'gene':
'gene_id', 'transcript': 'transcript_id'}` may be useful. The values
of this dictionary can also be a list, e.g., `{'gene': ['gene_id',
'geneID']}`
If `id_spec` is a callable object, then it accepts a dictionary from
the iterator and returns one of the following:
* None (in which case the feature type will be auto-incremented)
* string (which will be used as the primary key)
* special string starting with "autoincrement:X", where "X" is
a string that will be used for auto-incrementing. For example,
if "autoincrement:chr10", then the first feature will be
"chr10_1", the second "chr10_2", and so on.
force : bool
If `False` (default), then raise an exception if `dbfn` already exists.
Use `force=True` to overwrite any existing databases.
verbose : bool
Report percent complete and other feedback on how the db creation is
progressing.
In order to report percent complete, the entire file needs to be read
        once to see how many items there are; for large files you may want to
use `verbose=False` to avoid this.
checklines : int
        Number of lines to check when inferring the dialect.
merge_strategy : str
One of {merge, create_unique, error, warning, replace}.
This parameter specifies the behavior when two items have an identical
primary key.
Using `merge_strategy="merge"`, then there will be a single entry in
the database, but the attributes of all features with the same primary
key will be merged.
Using `merge_strategy="create_unique"`, then the first entry will use
the original primary key, but the second entry will have a unique,
        autoincremented primary key assigned to it.
Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID`
exception will be raised. This means you will have to edit the file
yourself to fix the duplicated IDs.
Using `merge_strategy="warning"`, a warning will be printed to the
logger, and the duplicate feature will be skipped.
Using `merge_strategy="replace" will replace the entire existing
feature with the new feature.
transform : callable
Function (or other callable object) that accepts a `Feature` object and
returns a (possibly modified) `Feature` object.
gtf_transcript_key, gtf_gene_key : string
Which attribute to use as the transcript ID and gene ID respectively
for GTF files. Default is `transcript_id` and `gene_id` according to
the GTF spec.
gtf_subfeature : string
Feature type to use as a "gene component" when inferring gene and
transcript extents for GTF files. Default is `exon` according to the
GTF spec.
force_gff : bool
If True, do not do automatic format detection -- only use GFF.
force_dialect_check : bool
        If True, the dialect will be checked for every feature (instead of just
`checklines` features). This can be slow, but may be necessary for
inconsistently-formatted input files.
from_string : bool
If True, then treat `data` as actual data (rather than the path to
a file).
keep_order : bool
If True, all features returned from this instance will have the
order of their attributes maintained. This can be turned on or off
database-wide by setting the `keep_order` attribute or with this
kwarg, or on a feature-by-feature basis by setting the `keep_order`
attribute of an individual feature.
Note that a single order of attributes will be used for all features.
Specifically, the order will be determined by the order of attribute
keys in the first `checklines` of the input data. See
helpers._choose_dialect for more information on this.
Default is False, since this includes a sorting step that can get
time-consuming for many features.
infer_gene_extent : bool
        DEPRECATED in version 0.8.
|
"""
WSGI config for cv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cv.settings")
application = get_wsgi_application()
|
"""Support for Spider thermostats."""
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from . import DOMAIN as SPIDER_DOMAIN
SUPPORT_FAN = ["Auto", "Low", "Medium", "High", "Boost 10", "Boost 20", "Boost 30"]
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_COOL]
HA_STATE_TO_SPIDER = {
HVAC_MODE_COOL: "Cool",
HVAC_MODE_HEAT: "Heat",
HVAC_MODE_OFF: "Idle",
}
SPIDER_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_SPIDER.items()}
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Spider thermostat."""
if discovery_info is None:
return
devices = [
SpiderThermostat(hass.data[SPIDER_DOMAIN]["controller"], device)
for device in hass.data[SPIDER_DOMAIN]["thermostats"]
]
add_entities(devices, True)
class SpiderThermostat(ClimateEntity):
"""Representation of a thermostat."""
def __init__(self, api, thermostat):
"""Initialize the thermostat."""
self.api = api
self.thermostat = thermostat
@property
def supported_features(self):
"""Return the list of supported features."""
supports = SUPPORT_TARGET_TEMPERATURE
if self.thermostat.has_fan_mode:
supports |= SUPPORT_FAN_MODE
return supports
@property
def unique_id(self):
"""Return the id of the thermostat, if any."""
return self.thermostat.id
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self.thermostat.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.thermostat.target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.thermostat.temperature_steps
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.thermostat.minimum_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.thermostat.maximum_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."" | "
return SPIDER_STATE_TO_HA[self.thermostat.operation_mode]
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return SUPPORT_HVAC
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
return
self.thermostat.set_temperature(temperature)
def set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
self.thermostat.set_operation_mode(HA_STATE_TO_SPIDER.get(hvac_mode))
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat.current_fan_speed
def set_fan_mode(self, fan_mode):
"""Set fan mode."""
self.thermostat.set_fan_speed(fan_mode)
@property
def fan_modes(self):
"""List of available fan modes."""
return SUPPORT_FAN
def update(self):
"""Get the latest data."""
self.thermostat = self.api.get_thermostat(self.unique_id)
|
})
self.assertTrue(daemon.isAlive())
daemon.kill()
@test_base.flaky
def test_2_daemon_with_option(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
# Assign the variable after we have assurances it exists
self.assertTrue(info_exists())
# Lastly, verify that we have permission to read the file
data = ''
with open(glob.glob(info_path)[0], 'r') as fh:
try:
data = fh.read()
except:
pass
self.assertTrue(len(data) > 0)
daemon.kill()
@test_base.flaky
def test_3_daemon_with_watchdog(self):
# This test does not join the service threads properly (waits for int).
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
daemon.kill()
# This will take a few moments to make sure the client process
# dies when the watcher goes away
self.assertTrue(daemon.isDead(children[0]))
@test_base.flaky
def test_3_daemon_lost_worker(self):
# Test that killed workers are respawned by the watcher
if os.environ.get('SANITIZE') is not None:
return
daemon = self._run_daemon({
"allow_unsafe": True,
"disable_watchdog": False,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Check that the daemon spawned a child process
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
# Kill only the child worker
os.kill(children[0], signal.SIGINT)
self.assertTrue(daemon.isDead(children[0]))
self.assertTrue(daemon.isAlive())
# Expect the children of the daemon to be respawned
def waitDaemonChildren():
children = daemon.getChildren()
return len(children) > 0
test_base.expectTrue(waitDaemonChildren)
children = daemon.getChildren()
self.assertTrue(len(children) > 0)
@test_base.flaky
def test_4_daemon_sighup(self):
# A hangup signal should not do anything to the daemon.
daemon = self._run_daemon({
"disable_watchdog": True,
})
self.assertTrue(daemon.isAlive())
# Send SIGHUP on posix. Windows does not have SIGHUP so we use SIGTERM
sig = signal.SIGHUP if os.name != "nt" else signal.SIGTERM
os.kill(daemon.proc.pid, sig)
self.assertTrue(daemon.isAlive())
@test_base.flaky
def test_5_daemon_sigint(self):
# An interrupt signal will cause the daemon to stop.
daemon = self._run_daemon({
"disable_watchdog": True,
"ephemeral": True,
"disable_database": True,
"disable_logging": True,
})
self.assertTrue(daemon.isAlive())
# Send a SIGINT
os.kill(daemon.pid, signal.SIGINT)
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
@test_base.flaky
def test_6_logger_mode(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
        test_mode = 0o754  # Strange mode that should never exist
        daemon = self._run_daemon(
{
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
},
options_only={
"logger_path": logger_path,
"logger_mode": test_mode,
"verbose": True,
})
self.assertTrue(daemon.isAlive())
# Wait for the daemon to write the info log to disk before continuing
info_path = os.path.join(logger_path, "osqueryd.INFO*")
def info_exists():
return len(glob.glob(info_path)) > 0
results_path = os.path.join(logger_path, "osqueryd.results.log")
def results_exists():
return os.path.exists(results_path)
# Wait for the daemon to flush to GLOG.
test_base.expectTrue(info_exists)
test_base.expectTrue(results_exists)
info_path = glob.glob(info_path)[0]
# Both log files should exist, the results should have the given mode.
for pth in [info_path, results_path]:
self.assertTrue(os.path.exists(pth))
# Only apply the mode checks to .log files.
# TODO: Add ACL checks for Windows logs
if pth.find('.log') > 0 and os.name != "nt":
rpath = os.path.realpath(pth)
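                # st_mode includes the file-type bits, so mask with 0o777
                # to keep only the permission bits for the comparison below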
mode = os.stat(rpath).st_mode & 0o777
self.assertEqual(mode, test_mode)
daemon.kill()
def test_7_logger_stdout(self):
logger_path = test_base.getTestDirectory(test_base.TEMP_DIR)
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"logger_path": logger_path,
"verbose": True,
})
info_path = os.path.join(logger_path, "osqueryd.INFO")
        def pathDoesntExist():
            return not os.path.exists(info_path)
self.assertTrue(daemon.isAlive())
self.assertTrue(pathDoesntExist())
daemon.kill()
def test_8_hostid_uuid(self):
# Test added to test using UUID as hostname ident for issue #3195
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "uuid",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_9_hostid_instance(self):
daemon = self._run_daemon({
"disable_watchdog": True,
"disable_extensions": True,
"disable_logging": False,
"logger_plugin": "stdout",
"host_identifier": "instance",
"verbose": True,
})
self.assertTrue(daemon.isAlive())
daemon.kill()
def test_config_check_exits(self):
daemon = self._run_daemon({
"config_check": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
def test_config_dump_exits(self):
daemon = self._run_daemon({
"config_dump": True,
"disable_extensions": True,
"disable_logging": False,
"disable_database": True,
"logger_plugin": "stdout",
"verbose": True,
})
self.assertTrue(daemon.isDead(daemon.pid, 10))
if os.name != "nt":
self.assertEqual(daemon.retcode, 0)
def test_database_dump_exits(self):
|
# -*- coding: utf-8 -*-
actions = {
"up": _(u"Go up in the current buffer"),
"down": _(u"Go down in the current buffer"),
"left": _(u"Go to the previous buffer"),
"right": _(u"Go to the next buffer"),
"next_account": _(u"Focus the next session"),
"previous_account": _(u"Focus the previous session"),
"show_hide": _(u"Show or hide the GUI"),
"post_tweet": _(u"New tweet"),
"post_reply": _(u"Reply"),
"post_retweet": _(u"Retweet"),
"send_dm": _(u"Send direct message"),
"add_to_favourites": _(u"Mark as favourite"),
"remove_from_favourites": _(u"Remove from favourites"),
"follow": _(u"Open the user actions dialogue"),
"user_details": _(u"See user details"),
"view_item": _(u"Show tweet"),
"exit": _(u"Quit"),
"open_timeline": _(u"Open user timeline"),
"remove_buffer": _(u"Destroy buffer"),
"interact": _(u"Interact with the currently focused tweet."),
"url": _(u"Open URL"),
"volume_up": _(u"Increase volume by 5%"),
"volume_down": _(u"Decrease volume by 5%"),
"go_h | ome": _(u"Jump to the first element of a buffer"),
"go_end": _(u"Jump to the last element of the current buffer"),
"go_page_up": _(u"Jump 20 elements up in the current buffer"),
"go_page_down": _(u"Jump 20 elements down in the current buffer"),
"update_profile": _(u"Edit profile"),
"delete": _(u"Delete a tweet or direct message"),
"clear_buffer": _(u"Empty the current buffer"),
"repeat_item": _(u"Repeat last item"),
"cop | y_to_clipboard": _(u"Copy to clipboard"),
"add_to_list": _(u"Add to list"),
"remove_from_list": _(u"Remove from list"),
"toggle_buffer_mute": _(u"Mute/unmute the active buffer"),
"toggle_session_mute": _(u"Mute/unmute the current session"),
"toggle_autoread": _(u"toggle the automatic reading of incoming tweets in the active buffer"),
"search": _(u"Search on twitter"),
"find": _(u"Find a string in the currently focused buffer"),
"edit_keystrokes": _(u"Show the keystroke editor"),
"view_user_lists": _(u"Show lists for a specified user"),
"get_more_items": _(u"load previous items"),
"reverse_geocode": _(u"Get geolocation"),
"view_reverse_geocode": _(u"Display the tweet's geolocation in a dialog"),
"get_trending_topics": _(u"Create a trending topics buffer"),
"open_conversation": _(u"View conversation"),
}
|
# -*- coding: utf-8 -*-
#
#
# Copyright (C) 2013 Agile Business Group sagl (<http://www.agilebg.com>)
# Author: Nicola Malcontenti <nicola.malcontenti@agilebg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from . import sale
|
class Robot:
def __init__(self):
self.__name = ""
@property
def name(self):
return self.__name
@name.setter
def name(self, x):
self.__name = x
class Car:
def __init__(self, model=None):
self.__set_model(model)
def __set_model(self, model):
self.__model = model
def __get_model(self):
return self.__model
model = property(__get_model, __set_model)
|
x = Robot()
x.name = "apo"
print(x.name)
c = Car()
c.model = "Mercedes"
print(c.model)
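# Note: x.name = "apo" goes through the @name.setter defined on Robot, and
# c.model = "Mercedes" goes through __set_model via the property object;
# the double-underscore attributes are name-mangled to _Robot__name and
# _Car__model, which is what keeps them pseudo-private.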
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.test import TestCase
from models import Student, StudyGroup, Task, Lab, Subject, GroupSubject
class PortalTest(TestCase):
def setUp(self):
self.study_group1 = StudyGroup.objects.create(name="10А")
self.study_group2 = StudyGroup.objects.create(name="11Б")
self.subject1 = Subject.objects.create(name="Оптика")
self.subject2 = Subject.objects.create(name="Механика")
self.group_subject11 = GroupSubject.objects.create(
study_group=self.study_group1, subject=self.subject1
)
self.group_subject22 = GroupSubject.objects.create(
study_group=self.study_group2, subject=self.subject2
)
self.student1 = Student.objects.create_user(
username="ivan", email=None, password="123456", study_group=self.study_group1
)
self.student2 = Student.objects.create_user(
username="pavel", email=None, password="123456", study_group=self.study_group2
)
        self.lab1 = Lab.objects.create(name="Кольца ньютона", subject=self.subject1)
self.lab2 = Lab.objects.create(name="Атвуд", subject=self.subject2)
def test_task_create(self):
has_error = False
try:
            task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertFalse(has_error)
def test_task_create_double(self):
"""
        A validation error should be raised: we attempt to create two identical tasks
:return:
"""
has_error = False
try:
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
task = Task(student=self.student1, lab=self.lab1)
task.clean()
task.save()
except ValidationError:
has_error = True
self.assertTrue(has_error)
        # Check that there is only one task for this study group
subject = self.group_subject11.subject
study_group = self.group_subject11.study_group
task_count = Task.objects.filter(
lab__subject__pk=subject.id, student__study_group__pk=study_group.id
).count()
        self.assertEqual(task_count, 1)
|
            return routes[index.row()].routeNum
elif index.column() == 1:
return routes[index.row()].beginSignal.name
elif index.column() == 2:
return routes[index.row()].endSignal.name
elif index.column() == 3:
return routes[index.row()].initialState
return None
def setData(self, index, value, role):
"""Updates data when modified in the view"""
if role == Qt.EditRole:
if index.column() == 3:
routeNum = int(index.sibling(index.row(), 0).data())
self._editor.routes[routeNum].initialState = value
self.dataChanged.emit(index, index)
return True
return False
def headerData(self, section, orientation, role = Qt.DisplayRole):
"""Returns the header labels"""
if role == Qt.DisplayRole and orientation == Qt.Horizontal:
if section == 0:
return self.tr("Route no.")
elif section == 1:
return self.tr("Begin Signal")
elif section == 2:
return self.tr("End Signal")
elif section == 3:
return self.tr("Initial State")
return None
def flags(self, index):
"""Returns the flags of the model"""
retFlag = Qt.ItemIsEnabled | Qt.ItemIsSelectable
if index.column() == 3:
retFlag |= Qt.ItemIsEditable
return retFlag
class Route(QtCore.QObject):
"""@brief Path between two signals
A route is a path between two signals. If a route is activated, the path
is selected, and the signals at the beginning and the end of the route are
changed and the conflicting possible other routes are inhibited. Routes
are static and defined in the game file. The player can only activate or
deactivate them.
"""
def __init__(self, simulation, routeNum, beginSignal, endSignal,
initialState = 0):
"""Constructor of the Route class. After construction, the directions
dictionary must be filled and then the _positions list must be
populated by calling createPositionsList().
@param routeNum The route number (id)
@param beginSignal Pointer to the SignalItem at which the route starts
@param endSignal Pointer to the SignalItem at which the route ends"""
super().__init__(simulation)
self.simulation = simulation
self._routeNum = routeNum
bsp = ts2.routing.Position(beginSignal, beginSignal.previousItem, 0)
esp = ts2.routing.Position(endSignal, endSignal.previousItem, 0)
self._positions = [bsp, esp]
self._directions = {}
self._initialState = initialState
self._persistent = False
routeSelected = QtCore.pyqtSignal()
routeUnselected = QtCore.pyqtSignal()
@property
def positions(self):
"""Returns the positions list of this route."""
return self._positions
@property
def routeNum(self):
"""Returns this route number"""
return self._routeNum
@property
def beginSignal(self):
""" Returns the SignalItem where this route starts."""
return self._positions[0].trackItem
@property
def endSignal(self):
"""Returns the SignalItem where this route ends."""
return self._positions[-1].trackItem
@property
def initialState(self):
"""Returns the state of the route at the beginning of the simulation.
0 => Not activated
1 => Activated, non persistent
2 => Activated, persistent"""
return self._initialState
@initialState.setter
def initialState(self, value):
"""Setter function for the initialState property"""
value = int(value)
if value < 0 or value > 2:
value = 0
self._initialState = value
def getRouteState(self):
"""Returns the current route state:
        0 => Not activated
1 => Activated, non persistent
2 => Activated, persistent."""
if self.beginSignal.nextActiveRoute is not None and \
self.beginSignal.nextActiveRoute == self:
if self._persistent:
return 2
else:
return 1
else:
            return 0
@property
def directions(self):
"""Returns the directions dictionary"""
return self._directions
def direction(self, tiId):
"""Returns the direction of this route at the trackItem with id tiId
"""
return self._directions[tiId]
def appendDirection(self, tiId, direction):
""" Appends a direction to a TrackItem on the Route.
@param tiId The trackItem number to which we add direction
@param direction The direction to append.
For points, 0 means normal and other values means reverse"""
self._directions[tiId] = direction
def createPositionsList(self):
""" Populates the _positions list.
If the route is invalid, it leaves the _positions list empty.
Also completes the _directions map, with obvious directions."""
cur = self._positions[0].next()
it = 1
while not cur.isOut():
if cur == self._positions[-1]:
return True
self._positions.insert(it, cur)
it += 1
if cur.trackItem.tiType.startswith("P"):
if cur.previousTI == cur.trackItem.normalItem:
self._directions[cur.trackItem.tiId] = 0
elif cur.previousTI == cur.trackItem.reverseItem:
self._directions[cur.trackItem.tiId] = 1
elif cur.previousTI == cur.trackItem.commonItem \
and cur.trackItem.tiId not in self._directions:
self._directions[cur.trackItem.tiId] = 0
cur = cur.next(0, self._directions.get(cur.trackItem.tiId, -1))
QtCore.qCritical(self.tr("Invalid route %i. "
"Impossible to link beginSignal with endSignal"
% self.routeNum))
return False
    def links(self, si1, si2):
        """ Returns true if the route links SignalItem si1 to SignalItem si2.
        @param si1 First SignalItem
        @param si2 Last SignalItem"""
        return self.beginSignal == si1 and self.endSignal == si2
def activate(self, persistent = False):
""" This function is called by the simulation when the route is
activated."""
for pos in self._positions:
pos.trackItem.setActiveRoute(self, pos.previousTI)
self.endSignal.previousActiveRoute = self
self.beginSignal.nextActiveRoute = self
self.persistent = persistent
self.routeSelected.emit()
    def desactivate(self):
        """This function is called by the simulation when the route is
        deactivated."""
self.beginSignal.resetNextActiveRoute(self)
self.endSignal.resetPreviousActiveRoute()
for pos in self._positions:
if pos.trackItem.activeRoute is None or \
pos.trackItem.activeRoute == self:
pos.trackItem.resetActiveRoute()
self.routeUnselected.emit()
def isActivable(self):
"""Returns true if this route can be activated, i.e. that no other
active route is conflicting with this route."""
flag = False
for pos in self._positions:
if pos.trackItem != self.beginSignal and \
pos.trackItem != self.endSignal:
if pos.trackItem.conflictTI is not None \
and pos.trackItem.conflictTI.activeRoute is not None:
# The trackItem has a conflict item and this conflict item
# has an active route
return False
if pos.trackItem.activeRoute is not None:
# The trackItem already has an active route
                if pos.trackItem.tiType.startswith("P") and flag == False:
|
# -*- coding: utf-8 -*-
#
# # Copyright: (c) 2012, Red Hat, Inc
# Written by Seth Vidal <skvidal at fedoraproject.org>
# Contributing Authors:
# - Ansible Core Team
# - Eduard Snesarev (@verm666)
# - Berend De Schouwer (@berenddeschouwer)
# - Abhijeet Kasurde (@Akasurde)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import os
import time
import glob
import tempfile
from abc import ABCMeta, abstractmethod
from ansible.module_utils._text import to_native
from ansible.module_utils.six import with_metaclass
yumdnf_argument_spec = dict(
argument_spec=dict(
allow_downgrade=dict(type='bool', default=False),
autoremove=dict(type='bool', default=False),
bugfix=dict(required=False, type='bool', default=False),
conf_file=dict(type='str'),
disable_excludes=dict(type='str', default=None),
disable_gpg_check=dict(type='bool', default=False),
disable_plugin=dict(type='list', default=[]),
disablerepo=dict(type='list', default=[]),
download_only=dict(type='bool', default=False),
enable_plugin=dict(type='list', default=[]),
enablerepo=dict(type='list', default=[]),
exclude=dict(type='list', default=[]),
installroot=dict(type='str', default="/"),
install_repoquery=dict(type='bool', default=True),
list=dict(type='str'),
name=dict(type='list', aliases=['pkg'], default=[]),
releasever=dict(default=None),
security=dict(type='bool', default=False),
skip_broken=dict(type='bool', default=False),
# removed==absent, installed==present, these are accepted as aliases
state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
update_cache=dict(type='bool', default=False, aliases=['expire-cache']),
update_only=dict(required=False, default="no", type='bool'),
validate_certs=dict(type='bool', default=True),
lock_timeout=dict(type='int', default=0),
),
required_one_of=[['name', 'list', 'update_cache']],
mutually_exclusive=[['name', 'list']],
supports_check_mode=True,
)
class YumDnf(with_metaclass(ABCMeta, object)):
"""
Abstract class that handles the population of instance variables that should
be identical between both YUM and DNF modules because of the feature parity
and shared argument spec
"""
def __init__(self, module):
self.module = module
self.allow_downgrade = self.module.params['allow_downgrade']
self.autoremove = self.module.params['autoremove']
self.bugfix = self.module.params['bugfix']
self.conf_file = self.module.params['conf_file']
self.disable_excludes = self.module.params['disable_excludes']
self.disable_gpg_check = self.module.params['disable_gpg_check']
self.disable_plugin = self.module.params['disable_plugin']
self.disablerepo = self.module.params.get('disablerepo', [])
self.download_only = self.module.params['download_only']
self.enable_plugin = self.module.params['enable_plugin']
self.enablerepo = self.module.params.get('enablerepo', [])
self.exclude = self.module.params['exclude']
self.installroot = self.module.params['installroot']
self.install_repoquery = self.module.params['install_repoquery']
self.list = self.module.params['list']
self.names = [p.strip() for p in self.module.params['name']]
self.releasever = self.module.params['releasever']
self.security = self.module.params['security']
self.skip_broken = self.module.params['skip_broken']
self.state = self.module.params['state']
self.update_only = self.module.params['update_only']
self.update_cache = self.module.params['update_cache']
self.validate_certs = self.module.params['validate_certs']
self.lock_timeout = self.module.params['lock_timeout']
# It's possible someone passed a comma separated string since it used
# to be a string type, so we should handle that
self.names = self.listify_comma_sep_strings_in_list(self.names)
self.disablerepo = self.listify_comma_sep_strings_in_list(self.disablerepo)
self.enablerepo = self.listify_comma_sep_strings_in_list(self.enablerepo)
self.exclude = self.listify_comma_sep_strings_in_list(self.exclude)
# Fail if someone passed a space separated string
# https://github.com/ansible/ansible/issues/46301
if any((' ' in name and '@' not in name and '==' not in name for name in self.names)):
            module.fail_json(
msg='It appears that a space separated string of packages was passed in '
'as an argument. To operate on several packages, pass a comma separated '
'string of packages or a list of packages.'
)
# This should really be redefined by both the yum and dnf module but a
# default isn't a bad idea
self.lockfile = '/var/run/yum.pid'
    def wait_for_lock(self):
        '''Poll until the lock is removed if timeout is a positive number'''
        if os.path.isfile(self.lockfile) or glob.glob(self.lockfile):
if self.lock_timeout > 0:
for iteration in range(0, self.lock_timeout):
time.sleep(1)
if not os.path.isfile(self.lockfile) and not glob.glob(self.lockfile):
return
self.module.fail_json(msg='{0} lockfile is held by another process'.format(self.pkg_mgr_name))
def listify_comma_sep_strings_in_list(self, some_list):
"""
method to accept a list of strings as the parameter, find any strings
in that list that are comma separated, remove them from the list and add
their comma separated elements to the original list
"""
new_list = []
remove_from_original_list = []
for element in some_list:
if ',' in element:
remove_from_original_list.append(element)
new_list.extend([e.strip() for e in element.split(',')])
for element in remove_from_original_list:
some_list.remove(element)
some_list.extend(new_list)
if some_list == [""]:
return []
return some_list
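    # Worked example: listify_comma_sep_strings_in_list(['a,b', 'c']) returns
    # ['c', 'a', 'b']: the comma-joined entry is split and its parts appended
    # after the untouched elements, and a lone [""] collapses to [].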
@abstractmethod
def run(self):
raise NotImplementedError
|
anField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._query import query
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Dimitrios Tydeas Mengidis <tydeas.dr@gmail.com>
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: composer
author: '"Dimitrios Tydeas Mengidis (@dmtrs)" <tydeas.dr@gmail.com>'
short_description: Dependency Manager for PHP
version_added: "1.6"
description:
    - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs, and it installs them in your project for you.
options:
command:
version_added: "1.8"
description:
- Composer command like "install", "update" and so on
required: false
default: install
working_dir:
description:
- Directory of your project ( see --working-dir )
required: true
default: null
aliases: [ "working-dir" ]
prefer_source:
description:
- Forces installation from package sources when possible ( see --prefer-source )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-source" ]
prefer_dist:
description:
- Forces installation from package dist even for dev versions ( see --prefer-dist )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "prefer-dist" ]
no_dev:
description:
- Disables installation of require-dev packages ( see --no-dev )
        required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "no-dev" ]
no_scripts:
description:
- Skips the execution of all scripts defined in composer.json ( see --no-scripts )
required: false
default: "no"
        choices: [ "yes", "no" ]
aliases: [ "no-scripts" ]
no_plugins:
description:
- Disables all plugins ( see --no-plugins )
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: [ "no-plugins" ]
optimize_autoloader:
description:
- Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default.
required: false
default: "yes"
choices: [ "yes", "no" ]
aliases: [ "optimize-autoloader" ]
requirements:
- php
- composer installed in bin path (recommended /usr/local/bin)
notes:
- Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction
'''
EXAMPLES = '''
# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock
- composer: command=install working_dir=/path/to/project
'''
import os
import re
def parse_out(string):
    return re.sub(r"\s+", " ", string).strip()
def has_changed(string):
if "Nothing to install or update" in string:
return False
else:
return True
def composer_install(module, command, options):
php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options))
return module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec = dict(
command = dict(default="install", type="str", required=False),
working_dir = dict(aliases=["working-dir"], required=True),
prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
no_dev = dict(default="yes", type="bool", aliases=["no-dev"]),
no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]),
no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]),
optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
),
supports_check_mode=True
)
options = []
# Default options
options.append('--no-ansi')
options.append('--no-progress')
options.append('--no-interaction')
options.extend(['--working-dir', os.path.abspath(module.params['working_dir'])])
# Get composer command with fallback to default
command = module.params['command']
# Prepare options
if module.params['prefer_source']:
options.append('--prefer-source')
if module.params['prefer_dist']:
options.append('--prefer-dist')
if module.params['no_dev']:
options.append('--no-dev')
if module.params['no_scripts']:
options.append('--no-scripts')
if module.params['no_plugins']:
options.append('--no-plugins')
if module.params['optimize_autoloader']:
options.append('--optimize-autoloader')
if module.check_mode:
options.append('--dry-run')
rc, out, err = composer_install(module, command, options)
if rc != 0:
output = parse_out(err)
module.fail_json(msg=output)
else:
        # Composer version > 1.0.0-alpha9 now uses stderr for standard notification messages
output = parse_out(out + err)
module.exit_json(changed=has_changed(output), msg=output)
# import module snippets
from ansible.module_utils.basic import *
main()
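# Hedged illustration (not executed by the module): the shell command that
# composer_install() assembles for the default parameters above; the binary
# paths are hypothetical stand-ins for what module.get_bin_path() returns.
def _example_composer_command():
    options = ['--no-ansi', '--no-progress', '--no-interaction',
               '--working-dir', '/path/to/project',
               '--no-dev', '--optimize-autoloader']
    return "%s %s %s %s" % ('/usr/local/bin/php', '/usr/local/bin/composer',
                            'install', " ".join(options))
# -> '/usr/local/bin/php /usr/local/bin/composer install --no-ansi
#     --no-progress --no-interaction --working-dir /path/to/project
#     --no-dev --optimize-autoloader'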
# jsb/plugs/core/rc.py
#
#
""" jsonbot resource files .. files with the .jsb extension which consists of commands to be executed. """
## jsb imports
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
from jsb.utils.url import geturl2
from jsb.utils.exception import handle_exception
from jsb.utils.generic import waitforqueue, waitevents
## basic imports
import copy
## defines
cpy = copy.deepcopy
## rc command
def handle_rc(bot, event):
""" import aliases by url. assumes a .RC file. 1 alias per line """
if not event.rest: event.missing("<file>|<url>") ; return
teller = 0
t = event.rest
waiting = []
try:
try:
if t.startswith("http"): data = geturl2(t)
else: data = open(t, 'r').read()
except IOError, ex: event.reply("I/O error: %s" % str(ex)) ; return
if not data: event.reply("can't get data from %s" % event.rest) ; return
for d in data.split("\n"):
i = d.strip()
if not i: continue
if i.startswith("#"): continue
e = cpy(event)
e.txt = "%s" % i.strip()
e.direct = True
bot.put(e)
waiting.append(e)
#result = bot.docmnd(event.userhost, event.channel, i, wait=1, event=event)
            #if result: result.waitall()
teller += 1
#waitevents(waiting)
event.reply("%s commands executed" % teller)
    except Exception, ex: event.reply("an error occurred: %s" % str(ex)) ; handle_exception()
cmnds.add("rc", ha | ndle_rc, ["OPER"], threaded=True)
examples.add("rc", "execute a file of jsonbot commands .. from file or url", "1) rc resource.jsb 2) rc http://jsonbot.org/resource.jsb")
from __future__ import print_function
import unittest
import numpy as np
import pydrake
import os.path
class TestRBTCoM(unittest.TestCase):
def testCoM0(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
"examples/Pendulum/Pendulum.urdf"))
kinsol = r.doKinematics(np.zeros((7, 1)), np.zeros((7, 1)))
c = r.centerOfMass(kinsol)
self.assertTrue(np.allclose(c.flat, [0.0, 0.0, -0.2425], atol=1e-4))
def testCoMJacobian(self):
r = pydrake.rbtree.RigidBodyTree(os.path.join(pydrake.getDrakePath(),
| "examples/Pendulum/Pendulum.urdf"))
q = r.getRandomConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.shape(J) == (3, 7))
        q = r.getZeroConfiguration()
kinsol = r.doKinematics(q, np.zeros((7, 1)))
J = r.centerOfMassJacobian(kinsol)
self.assertTrue(np.allclose(J.flat, [1., 0., 0., 0., -0.2425, 0., -0.25,
0., 1., 0., 0.2425, 0., 0., 0.,
0., 0., 1., 0., 0., 0., 0.], atol=1e-4))
if __name__ == '__main__':
unittest.main()
# @Author: Brian Cherinka, José Sánchez-Gallego, and Brett Andrews
# @Date: 2018-07-24
# @Filename: test_rss.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
#
# @Last modified by: José Sánchez-Gallego (gallegoj@uw.edu)
# @Last modified time: 2018-08-04 13:35:39
import astropy.io.fits
import astropy.table
import numpy
import pytest
import marvin
from ..conftest import Galaxy, set_the_config
@pytest.fixture(scope='session')
def galaxy(get_params, plateifu):
"""Yield an instance of a Galaxy object for use in tests."""
release, bintype, template = get_params
set_the_config(release)
gal = Galaxy(plateifu=plateifu)
gal.set_params(bintype=bintype, template=template, release=release)
gal.set_filepaths()
gal.set_galaxy_data()
yield gal
@pytest.fixture(scope='session')
def rss_session(galaxy, mode):
# These get created only once per session.
if mode == 'auto' or str(galaxy.bintype) != 'SPX':
pytest.skip()
if mode == 'local':
rss = marvin.tools.RSS(filename=galaxy.rsspath, release=galaxy.release, mode='local')
else:
rss = marvin.tools.RSS(plateifu=galaxy.plateifu, release=galaxy.release, mode='remote')
rss.expdata = galaxy.rss
yield rss
@pytest.fixture(scope='function')
def rss(rss_session):
# In some of the tests we modify the RSS objects. Here we implement
# a setup procedure that "unloads" the RSSFiber objects and resets the
# autoload attribute.
for rssfiber in rss_session:
rssfiber.loaded = False
rss_session.autoload = True
yield rss_session
@pytest.fixture(scope='session')
def rssfiber(rss_session):
fiberid = 0
if rss_session[fiberid].loaded is False:
rss_session[fiberid].load()
yield rss_session[fiberid]
@pytest.mark.usefixtures('monkeyauth')
class TestRSS(object):
def test_rss_init(self, rss):
assert isinstance(rss, marvin.tools.RSS)
assert isinstance(rss, marvin.tools.mixins.NSAMixIn)
assert isinstance(rss, list)
assert isinstance(rss.obsinfo, astropy.table.Table)
if rss.mode == 'file':
assert isinstance(rss.data, astropy.io.fits.HDUList)
assert rss._wavelength is not None
assert len(rss) == rss._nfibers
rss.autoload = False # To make things faster for this test
assert all([isinstance(rss_fiber, marvin.tools.rss.RSSFiber) for rss_fiber in rss])
@pytest.mark.parametrize('autoload', [True, False])
def test_rss_autoload(self, rss, autoload):
rss.autoload = autoload
assert rss[0].loaded is autoload
def test_load(self, rss):
rss.autoload = False
assert rss[0].loaded is False
rss[0].load()
assert rss[0].loaded is True
def test_load_all(self, rss):
if rss.mode == 'remote':
pytest.skip()
rss.load_all()
assert all([rss_fiber.loaded is True for rss_fiber in rss])
def test_obsinfo_to_rssfiber(self, rss):
        # We get it in this complicated way so that it is a different way of
        # obtaining it than in the _populate_fibres method.
ifusize = int(str(rss.ifu)[0:-2])
exp_idx = 0
n_fiber = 1
for rssfiber in rss:
assert numpy.all(rss.obsinfo[exp_idx] == rssfiber.obsinfo)
n_fiber += 1
if n_fiber > ifusize:
n_fiber = 1
exp_idx += 1
def test_getcube(self, rss):
cube = rss.getCube()
assert isinstance(cube, marvin.tools.Cube)
assert cube.mode == rss.mode
assert cube.plateifu == rss.plateifu
assert cube.mangaid == rss.mangaid
assert cube.release == rss.release
def test_select_fibers(self, rss):
# Skipping for API or it will take forever. Should not matter since
# we have already tested slicing for API.
if rss.data_origin == 'api':
pytest.skip()
fibers_expnum = rss.select_fibers(exposure_no=rss.expdata['expnum'])
assert len(fibers_expnum) == rss.expdata['nfiber']
assert fibers_expnum[0].obsinfo['EXPNUM'][0] == rss.expdata['expnum']
fibers_mjd = rss.select_fibers(mjd=1234)
assert len(fibers_mjd) == 0
fibers_mjd = rss.select_fibers(mjd=rss.expdata['mjd'])
assert len(fibers_mjd) == (rss.expdata['nexp'] * rss.expdata['nfiber'])
assert fibers_mjd[0].obsinfo['MJD'][0] == rss.expdata['mjd']
@pytest.mark.usefixtures('monkeyauth')
class TestRSSFiber(object):
def test_rssfiber_spectra(self, rssfiber):
assert isinstance(rssfiber, marvin.tools.RSSFiber)
assert isinstance(rssfiber.rss, marvin.tools.RSS)
assert isinstance(rssfiber.obsinfo, astropy.table.Table)
assert hasattr(rssfiber, 'ivar')
assert isinstance(rssfiber.ivar, numpy.ndarray)
assert len(rssfiber.ivar) == len(rssfiber.wavelength)
assert hasattr(rssfiber, 'mask')
assert isinstance(rssfiber.mask, numpy.ndarray)
assert len(rssfiber.mask) == len(rssfiber.wavelength)
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum = getattr(rssfiber, dm_element.name, None)
assert spectrum is not None
assert isinstance(spectrum, numpy.ndarray)
assert len(spectrum) == len(rssfiber.wavelength)
def test_rssfiber_data(self, rssfiber):
rss_filename = rssfiber.rss._getFullPath()
rss_hdu = astropy.io.fits.open(rss_filename)
numpy.testing.assert_allclose(rss_hdu['FLUX'].data[rssfiber.fiberid, :], rssfiber.value)
numpy.testing.assert_allclose(rss_hdu['IVAR'].data[rssfiber.fiberid, :], rssfiber.ivar)
numpy.testing.assert_array_equal(rss_hdu['MASK'].data[rssfiber.fiberid, :], rssfiber.mask)
for dm_element in rssfiber.rss.datamodel.rss:
if dm_element.name == 'flux':
continue
fits_data = rss_hdu[dm_element.fits_extension()].data[rssfiber.fiberid, :]
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
for dm_element in rssfiber.rss.datamodel.spectra:
fits_data = rss_hdu[dm_element.fits_extension()].data
numpy.testing.assert_allclose(fits_data, getattr(rssfiber, dm_element.name).value)
def test_rssfiber_slice(self, rssfiber):
n_elements = 10
sliced = rssfiber[0:n_elements]
assert len(sliced.value) == n_elements
numpy.testing.assert_allclose(sliced.value, rssfiber.value[0:n_elements])
assert len(sliced.ivar) == n_elements
        assert len(sliced.mask) == n_elements
for dm_element in rssfiber.rss.datamodel.rss + rssfiber.rss.datamodel.spectra:
if dm_element.name == 'flux':
continue
spectrum_sliced = getattr(sliced, dm_element.name, None)
assert len(spectrum_sliced) == n_elements
        assert sliced.obsinfo is not None
def test_rssfiber_masked(self, rssfiber):
assert numpy.sum(rssfiber.masked.mask) > 0
def test_rssfiber_descale(self, rssfiber):
descaled = rssfiber.descale()
numpy.testing.assert_allclose(descaled.value, rssfiber.value * rssfiber.unit.scale)
assert descaled.obsinfo is not None
class TestPickling(object):
def test_pickling_file(self, temp_scratch, rss):
if rss.data_origin == 'file':
assert rss.data is not None
rss_file = temp_scratch.join('test_rss.mpf')
rss.save(str(rss_file))
assert rss_file.check() is True
rss_restored = marvin.tools.RSS.restore(str(rss_file))
assert rss_restored.data_origin == rss.data_origin
assert isinstance(rss_restored, marvin.tools.RSS)
assert len(rss_restored) > 0
assert isinstance(rss_restored[0], marvin.tools.RSSFiber)
assert numpy.sum(rss_restored[0].value) > 0
if rss.data_origin == 'file':
assert rss_restored.data is not None
else:
            assert rss_restored.data is None
'''This module contains utilities for following up search triggers'''
# JavaScript for searching the aLOG
redirect_javascript = """<script type="text/javascript">
function redirect(form,way)
{
// Set location to form and submit.
if(form != '')
{
document.forms[form].action=way;
document.forms[form].submit();
}
else
{
window.top.location = way;
}
}
</script>"""
search_form_string="""<form name="%s_alog_search" id="%s_alog_search" method="post">
<input type="hidden" name="srcDateFrom" id="srcDateFrom" value="%s" size="20"/>
<input type="hidden" name="srcDateTo" id="srcDateTo" value="%s" size="20"/>
</form>"""
data_h1_string = """H1
<a href=https://ldas-jobs.ligo-wa.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('h1_alog_search',
'https://alog.ligo-wa.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
data_l1_string="""L1
<a href=https://ldas-jobs.ligo-la.caltech.edu/~detchar/summary/day/%s>
Summary</a>
<a onclick="redirect('l1_alog_search',
'https://alog.ligo-la.caltech.edu/aLOG/includes/search.php?adminType=search');
return true;">aLOG</a>"""
def get_summary_page_link(ifo, utc_time):
"""Return a string that links to the summary page and aLOG for this ifo
Parameters
----------
ifo : string
The detector name
utc_time : sequence
        First three elements must be integers giving year, month, day respectively.
Returns
-------
return_string : string
String containing HTML for links to summary page and aLOG search
"""
search_form = search_form_string
data = {'H1': data_h1_string, 'L1': data_l1_string}
if ifo not in data:
return ifo
else:
# alog format is day-month-year
alog_utc = '%02d-%02d-%4d' % (utc_time[2], utc_time[1], utc_time[0])
        # summary page is exactly the reverse
ext = '%4d%02d%02d' % (utc_time[0], utc_time[1], utc_time[2])
return_string = search_form % (ifo.lower(), ifo.lower(), alog_utc, alog_utc)
return return_string + data[ifo] % ext
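# Hedged usage sketch: per the docstring, utc_time starts with integer
# year, month, day.
if __name__ == '__main__':
    print(get_summary_page_link('H1', (2018, 8, 4)))  # HTML form plus links
    print(get_summary_page_link('V1', (2018, 8, 4)))  # unknown ifo -> 'V1'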
from copy import deepcopy
from sqlalchemy import inspect
from sqlalchemy.orm.base import DEFAULT_STATE_ATTR
from sqlalchemy.orm.state import InstanceState
from mongosql.bag import ModelPropertyBags
class ModelHistoryProxy:
""" Proxy object to gain access to historical model attributes.
This leverages SqlAlchemy attribute history to provide access to the previous value of an
attribute. The only reason why this object exists is because keeping two instances in memory may
be expensive. But because normally you'll only need a field or two, the decision was to use
this magic proxy object that will load model history on demand.
Why would you need to access model history at all?
Because CrudHelper's update method (i.e., changing model fields) gives you two objects: the
current instance, and the old instance, so that your custom code in the update handler can
compare those fields.
For instance, when a certain object is being moved from one User to another, you might want
to notify both of them. In that case, you'll need access to the historical user.
The initial solution was to *copy* the instance, apply the modifications from JSON to a copy,
and then feed both of them to the save handler... but copying was expensive.
That's why we have this proxy: it does not load all the fields of the historical model,
but acts as a proxy object (__getattr__()) that will get those properties on demand.
"""
def __init__(self, instance):
# Save the information that we'll definitely need
self.__instance = instance
self.__model = self.__instance.__class__
self.__bags = ModelPropertyBags.for_model(self.__model) # type: ModelPropertyBags
self.__inspect = inspect(instance) # type: InstanceState
# Copy every field onto ourselves
self.__copy_from_instance(self.__instance)
# Enable accessing relationships through our proxy
self.__install_instance_state(instance)
def __copy_from_instance(self, instance):
""" Copy all attributes of `instance` to `self`
Alright, this code renders the whole point of having ModelHistoryProxy void.
There is an issue with model history:
"Each time the Session is flushed, the history of each attribute is reset to empty.
The Session by default autoflushes each time a Query is invoked"
        https://docs.sqlalchemy.org/en/latest/orm/internals.html#sqlalchemy.orm.state.AttributeState.history
This means that as soon as you load a relationship, model history is reset.
To solve this, we have to make a copy of this model.
        All attributes are set on `self`, so accessing `self.attr` will not trigger `__getattr__()`
"""
""" Copy the given list of columns from the instance onto self """
insp = self.__inspect # type: InstanceState
# Copy all values onto `self`
for column_name in self.__bags.columns.names:
# Skip unloaded columns (because that would emit sql queries)
# Also skip the columns that were already copied (perhaps, mutable columns?)
if column_name not in insp.unloaded:
# The state
attr_state = insp.attrs[column_name] # type: AttributeState
# Get the historical value
# deepcopy() ensures JSON and ARRAY values are copied in full
hist_val = deepcopy(_get_historical_value(attr_state))
                # Set the value on `self`: this proxy holds it from now on
setattr(self, column_name, hist_val)
def __install_instance_state(self, instance):
""" Install an InstanceState, so that relationship descriptors can work properly """
# These lines install the internal SqlAlchemy's property on our proxy
# This property mimics the original object.
# This ensures that we can access relationship attributes through a ModelHistoryProxy object
# Example:
# hist = ModelHistoryProxy(comment)
# hist.user.id # wow!
instance_state = getattr(instance, DEFAULT_STATE_ATTR)
my_state = InstanceState(self, instance_state.manager)
my_state.key = instance_state.key
my_state.session_id = instance_state.session_id
setattr(self, DEFAULT_STATE_ATTR, my_state)
def __getattr__(self, key):
# Get a relationship:
if key in self.__bags.relations:
relationship = getattr(self.__model, key)
return relationship.__get__(self, self.__model)
# Get a property (@property)
if key in self.__bags.properties:
# Because properties may use other columns,
            # we have to run it against our `self`, because only then will it be able to get the original values.
return getattr(self.__model, key).fget(self)
# Every column attribute is accessed through history
attr = self.__inspect.attrs[key]
return _get_historical_value(attr)
def _get_historical_value(attr):
""" Get the previous value of an attribute
This is where the magic happens: this method goes into the SqlAlchemy instance and
obtains the historical value of an attribute called `key`
"""
# Examine attribute history
# If a value was deleted (e.g. replaced) -- we return it as the previous version.
history = attr.history
if not history.deleted:
# No previous value, return the current value instead
return attr.value
else:
# Return the previous value
# It's a tuple, since History supports collections, but we do not support these,
# so just get the first element
return history.deleted[0]
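# Hedged usage sketch (the Comment model with a user_id column and its `user`
# relationship are hypothetical):
def notify_on_reassignment(comment, new_user_id):
    """ Compare the historical and the current owner of a comment """
    old = ModelHistoryProxy(comment)  # capture the attributes first
    comment.user_id = new_user_id     # then modify the instance
    if old.user_id != comment.user_id:
        pass  # e.g. notify both old.user and comment.user here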
#!/usr/bin/env python
#Base Server -Chapter three -basicserver.py
import socket, traceback
host=''
port=8080
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print "Waiting for connections..."
s.listen(1)
while True:
try:
clientsock, clientaddr=s.accept()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
continue
try:
print "Got connection from", clientsock.getpeername()
    except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
try:
clientsock.close()
except KeyboardInterrupt:
        raise
except:
traceback.print_exc()
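# Hedged client sketch for the server above (same host/port assumptions);
# kept as a comment so the server file itself is unchanged:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('localhost', 8080))
#   c.close()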
, 2]]
assert list(postfixes([1, 2, 3, 4, 5])) == \
[[5], [4, 5], [3, 4, 5], [2, 3, 4, 5], [1, 2, 3, 4, 5]]
def test_topological_sort():
V = [2, 3, 5, 7, 8, 9, 10, 11]
E = [(7, 11), (7, 8), (5, 11),
(3, 8), (3, 10), (11, 2),
(11, 9), (11, 10), (8, 9)]
assert topological_sort((V, E)) == [3, 5, 7, 8, 11, 2, 9, 10]
assert topological_sort((V, E), key=lambda v: -v) == \
[7, 5, 11, 3, 10, 8, 9, 2]
raises(ValueError, lambda: topological_sort((V, E + [(10, 7)])))
def test_rotate():
A = [0, 1, 2, 3, 4]
assert rotate_left(A, 2) == [2, 3, 4, 0, 1]
assert rotate_right(A, 1) == [4, 0, 1, 2, 3]
A = []
B = rotate_right(A, 1)
assert B == []
B.append(1)
assert A == []
B = rotate_left(A, 1)
assert B == []
B.append(1)
assert A == []
def test_multiset_partitions():
A = [0, 1, 2, 3, 4]
assert list(multiset_partitions(A, 5)) == [[[0], [1], [2], [3], [4]]]
assert len(list(multiset_partitions(A, 4))) == 10
assert len(list(multiset_partitions(A, 3))) == 25
assert list(multiset_partitions([1, 1, 1, 2, 2], 2)) == [
[[1, 1, 1, 2], [2]], [[1, 1, 1], [2, 2]], [[1, 1, 2, 2], [1]],
[[1, 1, 2], [1, 2]], [[1, 1], [1, 2, 2]]]
assert list(multiset_partitions([1, 1, 2, 2], 2)) == [
[[1, 1, 2], [2]], [[1, 1], [2, 2]], [[1, 2, 2], [1]],
[[1, 2], [1, 2]]]
assert list(multiset_partitions([1, 2, 3, 4], 2)) == [
        [[1, 2, 3], [4]], [[1, 2, 4], [3]], [[1, 2], [3, 4]],
[[1, 3, 4], [2]], [[1, 3], [2, 4]], [[1, 4], [2, 3]],
[[1], [2, 3, 4]]]
assert list(multiset_partitions([1, 2, 2], 2)) == [
[[1, 2], [2]], [[1], [2, 2]]]
assert list(multiset_partitions(3)) == [
[[0, 1, 2]], [[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]],
[[0], [1], [2]]]
assert list(multiset_partitions(3, 2)) == [
[[0, 1], [2]], [[0, 2], [1]], [[0], [1, 2]]]
assert list(multiset_partitions([1] * 3, 2)) == [[[1], [1, 1]]]
assert list(multiset_partitions([1] * 3)) == [
[[1, 1, 1]], [[1], [1, 1]], [[1], [1], [1]]]
a = [3, 2, 1]
assert list(multiset_partitions(a)) == \
list(multiset_partitions(sorted(a)))
assert list(multiset_partitions(a, 5)) == []
assert list(multiset_partitions(a, 1)) == [[[1, 2, 3]]]
assert list(multiset_partitions(a + [4], 5)) == []
assert list(multiset_partitions(a + [4], 1)) == [[[1, 2, 3, 4]]]
assert list(multiset_partitions(2, 5)) == []
assert list(multiset_partitions(2, 1)) == [[[0, 1]]]
assert list(multiset_partitions('a')) == [[['a']]]
assert list(multiset_partitions('a', 2)) == []
assert list(multiset_partitions('ab')) == [[['a', 'b']], [['a'], ['b']]]
assert list(multiset_partitions('ab', 1)) == [[['a', 'b']]]
assert list(multiset_partitions('aaa', 1)) == [['aaa']]
assert list(multiset_partitions([1, 1], 1)) == [[[1, 1]]]
def test_multiset_combinations():
ans = ['iii', 'iim', 'iip', 'iis', 'imp', 'ims', 'ipp', 'ips',
'iss', 'mpp', 'mps', 'mss', 'pps', 'pss', 'sss']
assert [''.join(i) for i in
list(multiset_combinations('mississippi', 3))] == ans
M = multiset('mississippi')
assert [''.join(i) for i in
list(multiset_combinations(M, 3))] == ans
assert [''.join(i) for i in multiset_combinations(M, 30)] == []
assert list(multiset_combinations([[1], [2, 3]], 2)) == [[[1], [2, 3]]]
assert len(list(multiset_combinations('a', 3))) == 0
assert len(list(multiset_combinations('a', 0))) == 1
assert list(multiset_combinations('abc', 1)) == [['a'], ['b'], ['c']]
def test_multiset_permutations():
ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
'byba', 'yabb', 'ybab', 'ybba']
assert [''.join(i) for i in multiset_permutations('baby')] == ans
assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
assert list(multiset_permutations([0, 2, 1], 2)) == [
[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
assert len(list(multiset_permutations('a', 0))) == 1
assert len(list(multiset_permutations('a', 3))) == 0
def test():
for i in range(1, 7):
print(i)
for p in multiset_permutations([0, 0, 1, 0, 1], i):
print(p)
assert capture(lambda: test()) == dedent('''\
1
[0]
[1]
2
[0, 0]
[0, 1]
[1, 0]
[1, 1]
3
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
4
[0, 0, 0, 1]
[0, 0, 1, 0]
[0, 0, 1, 1]
[0, 1, 0, 0]
[0, 1, 0, 1]
[0, 1, 1, 0]
[1, 0, 0, 0]
[1, 0, 0, 1]
[1, 0, 1, 0]
[1, 1, 0, 0]
5
[0, 0, 0, 1, 1]
[0, 0, 1, 0, 1]
[0, 0, 1, 1, 0]
[0, 1, 0, 0, 1]
[0, 1, 0, 1, 0]
[0, 1, 1, 0, 0]
[1, 0, 0, 0, 1]
[1, 0, 0, 1, 0]
[1, 0, 1, 0, 0]
[1, 1, 0, 0, 0]
6\n''')
def test_partitions():
assert [p.copy() for p in partitions(6, k=2)] == [
{2: 3}, {1: 2, 2: 2}, {1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=3)] == [
{3: 2}, {1: 1, 2: 1, 3: 1}, {1: 3, 3: 1}, {2: 3}, {1: 2, 2: 2},
{1: 4, 2: 1}, {1: 6}]
assert [p.copy() for p in partitions(6, k=2, m=2)] == []
assert [p.copy() for p in partitions(8, k=4, m=3)] == [
{4: 2}, {1: 1, 3: 1, 4: 1}, {2: 2, 4: 1}, {2: 1, 3: 2}] == [
i.copy() for i in partitions(8, k=4, m=3) if all(k <= 4 for k in i)
and sum(i.values()) <=3]
assert [p.copy() for p in partitions(S(3), m=2)] == [
{3: 1}, {1: 1, 2: 1}]
assert [i.copy() for i in partitions(4, k=3)] == [
{1: 1, 3: 1}, {2: 2}, {1: 2, 2: 1}, {1: 4}] == [
i.copy() for i in partitions(4) if all(k <= 3 for k in i)]
raises(ValueError, lambda: list(partitions(3, 0)))
# Consistency check on output of _partitions and RGS_unrank.
# This provides a sanity test on both routines. Also verifies that
# the total number of partitions is the same in each case.
# (from pkrathmann2)
for n in range(2, 6):
i = 0
for m, q in _set_partitions(n):
assert q == RGS_unrank(i, n)
i = i+1
assert i == RGS_enum(n)
def test_binary_partitions():
assert [i[:] for i in binary_partitions(10)] == [[8, 2], [8, 1, 1],
[4, 4, 2], [4, 4, 1, 1], [4, 2, 2, 2], [4, 2, 2, 1, 1],
[4, 2, 1, 1, 1, 1], [4, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2],
[2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 1, 1, 1], [2, 2, 1, 1, 1, 1, 1, 1],
[2, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
assert len([j[:] for j in binary_partitions(16)]) == 36
def test_bell_perm():
assert [len(list(generate_bell(i))) for i in range(1, 7)] == [
factorial(i) for i in range(1, 7)]
assert list(generate_bell(3)) == [
(0, 1, 2), (1, 0, 2), (1, 2, 0), (2, 1, 0), (2, 0, 1), (0, 2, 1)]
def test_involutions():
lengths = [1, 2, 4, 10, 26, 76]
for n, N in enumerate(lengths):
i = list(generate_involutions(n + 1))
assert len(i) == N
assert len(set([Permutation(j)**2 for j in i])) == 1
def test_derangements():
assert len(list(generate_derangements(list(range(6))))) == 265
assert ''.join(''.join(i) for i in generate_derangements('abcde')) == (
'badecbaecdbcaedbcdeabceadbdaecbdeacbdecabeacdbedacbedcacabedcadebcaebd'
'cdaebcdbeacdeabcdebaceabdcebadcedabcedbadabecdaebcdaecbdcaebdcbeadceab'
'dcebadeabcdeacbdebacdebcaeabcdeadbceadcbecabdecbadecdabecdbaedabcedacb'
'edbacedbca')
assert list(generate_derangements([0, 1, 2, 3])) == [
[1, 0, 3, 2], [1, 2, 3, 0], [1, 3, 0, 2], [2, 0, 3, 1],
[2, 3, 0, 1], [2, 3, 1, 0], [3, 0, 1, 2], [3, 2, 0, 1], [3, 2, 1, 0]]
assert list(generate_derangements([0, 1, 2, 2])) == [
[2, 2, 0, 1], [2, 2, 1, 0]]
def test_necklaces():
    def count(n,
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos (danielcampos@avanzosc.es) Date: 29/09/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
    def new_fixed_mass_distribution(
number_of_particles, *list_arguments, **keyword_arguments
):
return new_flat_mass_distribution(
number_of_particles,
mass_min=stellar_mass/number_of_stars,
mass_max=stellar_mass/number_of_stars,
)
initial_mass_function = new_fixed_mass_distribution
if stellar_mass:
# best underestimate mean_mass a bit for faster results
mean_mass = 0.25 | units.MSun
mass = initial_mass_function(
int(stellar_mass / mean_mass),
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
previous_number_of_stars = len(mass)
if exceed_mass:
# Allow one final star to exceed stellar_mass
final_star = 1+numpy.argmax(mass.cumsum() > stellar_mass)
if (final_star > 1 and final_star < len(mass)):
mass = mass[:final_star]
else:
# Limit to stars not exceeding stellar_mass
mass = mass[mass.cumsum() < stellar_mass]
additional_mass = [] | units.MSun
while True:
if previous_number_of_stars + len(additional_mass) > len(mass):
break
# We don't have enough stars yet, or at least not tested this
additional_mass = initial_mass_function(
int(stellar_mass / mean_mass),
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
if exceed_mass:
# Allow one final star to exceed stellar_mass
final_star = 1+numpy.argmax(
mass.sum() + additional_mass.cumsum() > stellar_mass
)
if (final_star > 1 and final_star < len(mass)):
additional_mass = additional_mass[:final_star]
else:
# Limit to stars not exceeding stellar_mass
additional_mass = additional_mass[
mass.sum() + additional_mass.cumsum() < stellar_mass
]
mass.append(additional_mass)
number_of_stars = len(mass)
else:
# Give stars their mass
mass = initial_mass_function(
number_of_stars,
mass_min=lower_mass_limit,
mass_max=upper_mass_limit,
)
return mass
def new_star_cluster(
stellar_mass=False,
initial_mass_function="salpeter",
upper_mass_limit=125. | units.MSun,
lower_mass_limit=0.1 | units.MSun,
number_of_stars=1024,
effective_radius=3.0 | units.parsec,
star_distribution="plummer",
star_distribution_w0=7.0,
star_distribution_fd=2.0,
star_metallicity=0.01,
# initial_binary_fraction=0,
**kwargs
):
"""
Create stars.
When using an IMF, either the stellar mass is fixed (within
stochastic error) or the number of stars is fixed. When using
equal-mass stars, both are fixed.
"""
mass = new_masses(
stellar_mass=stellar_mass,
initial_mass_function=initial_mass_function,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
number_of_stars=number_of_stars,
)
total_mass = mass.sum()
number_of_stars = len(mass)
print(number_of_stars, total_mass, effective_radius)
converter = generic_unit_converter.ConvertBetweenGenericAndSiUnits(
total_mass,
1. | units.kms,
effective_radius,
)
    # Give stars a position and velocity, based on the distribution model.
if star_distribution == "plummer":
stars = new_plummer_sphere(
number_of_stars,
convert_nbody=converter,
)
elif star_distribution == "king":
stars = new_king_model(
number_of_stars,
            star_distribution_w0,
convert_nbody=converter,
)
elif star_distribution == "fractal":
stars = new_fractal_cluster_model(
number_of_stars,
fractal_dimension=star_distribution_fd,
convert_nbody=converter,
)
else:
return -1, "No stellar distribution"
# set the stellar mass.
stars.mass = mass
# set other stellar parameters.
stars.metallicity = star_metallicity
# Virialize the star cluster if > 1 star
if len(stars) > 1:
stars.move_to_center()
stars.scale_to_standard(
convert_nbody=converter,
# virial_ratio=virial_ratio,
# smoothing_length_squared= ...,
)
# Record the cluster's initial parameters to the particle distribution
stars.collection_attributes.initial_mass_function = \
initial_mass_function.lower()
stars.collection_attributes.upper_mass_limit = upper_mass_limit
stars.collection_attributes.lower_mass_limit = lower_mass_limit
stars.collection_attributes.number_of_stars = number_of_stars
stars.collection_attributes.effective_radius = effective_radius
stars.collection_attributes.star_distribution = star_distribution
stars.collection_attributes.star_distribution_w0 = star_distribution_w0
stars.collection_attributes.star_distribution_fd = star_distribution_fd
stars.collection_attributes.star_metallicity = star_metallicity
# Derived/legacy values
stars.collection_attributes.converter_mass = \
converter.to_si(1 | nbody_system.mass)
stars.collection_attributes.converter_length =\
converter.to_si(1 | nbody_system.length)
stars.collection_attributes.converter_speed =\
converter.to_si(1 | nbody_system.speed)
return stars
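# Hedged usage sketch (values illustrative): fix either the number of stars
# or, via an IMF, the total cluster mass.
#
#   cluster = new_star_cluster(number_of_stars=128,
#                              effective_radius=1.0 | units.parsec)
#   cluster = new_star_cluster(stellar_mass=1000. | units.MSun,
#                              star_distribution="king")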
def new_stars_from_sink(
origin,
upper_mass_limit=125 | units.MSun,
lower_mass_limit=0.1 | units.MSun,
default_radius=0.25 | units.pc,
velocity_dispersion=1 | units.kms,
logger=None,
initial_mass_function="kroupa",
distribution="random",
randomseed=None,
**keyword_arguments
):
"""
Form stars from an origin particle that keeps track of the properties of
this region.
"""
logger = logger or logging.getLogger(__name__)
if randomseed is not None:
logger.info("setting random seed to %i", randomseed)
numpy.random.seed(randomseed)
try:
initialised = origin.initialised
except AttributeError:
initialised = False
if not initialised:
logger.debug(
"Initialising origin particle %i for star formation",
origin.key
)
next_mass = new_star_cluster(
initial_mass_function=initial_mass_function,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
number_of_stars=1,
**keyword_arguments
)
origin.next_primary_mass = next_mass[0].mass
origin.initialised = True
if origin.mass < origin.next_primary_mass:
logger.debug(
"Not enough in star forming region %i to form the next star",
origin.key
)
return Particles()
mass_reservoir = origin.mass - origin.next_primary_mass
stellar_masses = new_star_cluster(
stellar_mass=mass_reservoir,
upper_mass_limit=upper_mass_limit,
lower_mass_limit=lower_mass_limit,
        initial_mass_function=initial_mass_function,
).mass
number_of_stars = len(stellar_masses)
new_stars = Particles(number_of_stars)
new_stars.age = 0 | units.yr
new_stars[0].mass = origin.next_primary_mass
new_stars[1:].mass = stellar_masses[:-1]
origin.next_primary_mass = stellar_masses[-1]
new_stars.position = origin.position
new_stars.velocity = origin.velocity
try:
radius = origin.radius
except AttributeError:
radius = default_radius
rho = numpy.random.random(number_of_stars) * radius
theta = (
numpy.random.random(number_of_stars)
* (2 * numpy.pi | units.rad)
)
phi = (
numpy.random.random(number_of_stars) * numpy.pi | units.rad
)
x = rho * sin(phi) * cos(theta)
y = rho * sin(phi) * sin(theta)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import sys
from gl_rep.data_loaders import airq_data_loader, simulation_loader, physionet_data_loader, har_data_loader
from gl_rep.glr import GLR
from gl_rep.models import EncoderGlobal, EncoderLocal, WindowDecoder
from gl_rep.utils import plot_reps, train_glr
import tensorflow as tf
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def main(args):
"""
    Train and validate our local and global representation learning framework for different datasets.
"""
is_continue = False
# Load the data and experiment configurations
with open('configs.json') as config_file:
configs = json.load(config_file)[args.data]
if args.data=='air_quality':
n_epochs = 250
lr = 1e-3
trainset, validset, testset, _ = airq_data_loader(normalize="mean_zero")
elif args.data=='simulation':
n_epochs = 100
lr = 1e-2
trainset, validset, testset, _, _ = simulation_loader(normalize="none", mask_threshold=0.0)
elif args.data == 'physionet':
n_epochs = 200
lr = 1e-3
trainset, validset, testset, _ = physionet_data_loader(normalize="mean_zero")
elif args.data=='har':
n_epochs = 150
lr = 1e-3
trainset, validset, testset, normalization_specs = har_data_loader(normalize='none')
# Create the representation learning models
zt_encoder = EncoderLocal(zl_size=configs["zl_size"], hidden_sizes=configs["glr_local_encoder_size"])
zg_encoder = EncoderGlobal(zg_size=configs["zg_size"], hidden_sizes=configs["glr_global_encoder_size"])
dec = WindowDecoder(output_size=configs["feature_size"], output_length=configs["window_size"],
hidden_sizes=configs["glr_decoder_size"])
rep_model = GLR(global_encoder=zg_encoder, local_encoder=zt_encoder, decoder=dec,
window_size=configs["window_size"], time_length=configs["t_len"],
data_dim=configs["feature_size"], kernel_scales=configs["kernel_scales"],
kernel=configs["kernels"], beta=configs["beta"], M=configs["mc_samples"], sigma=.5,
lamda=args.lamda, length_scale=configs["length_scale"], p=15)
# Train the decoupled local and global representation learning modules
if args.train:
if is_continue:
rep_model.load_weights('./ckpt/glr_%s_lambda%.1f' %(args.data, args.lamda))
train_glr(rep_model, trainset, validset, lr=lr, n_epochs=n_epochs, data=args.data)
    # Plot summary performance graphs for the learning framework,
# including the representation distribution and signal reconstruction plots
rep_model.load_weights('./ckpt/glr_%s_lambda%.1f' %(args.data, args.lamda))
plot_reps(testset, rep_model, args.data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--data', type=str, default='air_quality', help="dataset to use")
parser.add_argument('--lamda', type=float, default=1., help="regularization weight")
parser.add_argument('--train', action='store_true')
args = parser.parse_args()
main(args)
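# Hedged invocation examples (the script name is hypothetical; the flags are
# the ones defined in the argparse setup above):
#   python train_glr.py --data air_quality --lamda 1.0 --train
#   python train_glr.py --data physionet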
filenames = ['firstNames', 'secondNames', 'famousWrestlers', 'categories', 'jobs']
for filename in filenames:
    with open('%s.csv' % filename, 'r') as f:
namelist = []
for name in f.read().split('\n'):
if len(name)>1: namelist.append(name)
    with open('../js/%s.js' % filename, 'w') as dest_f:
        dest_f.write('%s = %s;' % (filename, namelist))
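# Hedged illustration: if firstNames.csv contained "Alice\nBob\n", the script
# would write ../js/firstNames.js containing:
#   firstNames = ['Alice', 'Bob'];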
        replace('&lt;', ' < ').\
        replace('&gt;', ' > ').\
replace('>=', ' ≥ ')
sel = Selector(text=html)
item = ProblemItem()
item['oj'] = 'poj'
item['problem_id'] = self.problem_id
item['problem_url'] = response.url
        item['title'] = sel.css('.ptt').xpath('./text()').extract()[0]
item['description'] = sel.css('.ptx').extract()[0]
item['input'] = sel.css('.ptx').extract()[1]
item['output'] = sel.css('.ptx').extract()[2]
try:
            item['time_limit'] = sel.css('.plm').re('Case\sT[\S*\s]*MS')[0][21:]
except:
item['time_limit'] = sel.css('.plm').re('T[\S*\s]*MS')[0][16:]
item['memory_limit'] = sel.css('.plm').re('Me[\S*\s]*K')[0][18:]
item['sample_input'] = sel.css('.sio').extract()[0]
item['sample_output'] = sel.css('.sio').extract()[1]
item['update_time'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
return item
class PojSubmitSpider(CrawlSpider):
name = 'poj_submit'
allowed_domains = ['poj.org']
login_url = 'http://poj.org/login'
submit_url = 'http://poj.org/submit'
login_verify_url = 'http://poj.org/loginlog'
source = \
'I2luY2x1ZGUgPHN0ZGlvLmg+CgppbnQgbWFpbigpCnsKICAgIGludCBhLGI7CiAgICBzY2FuZigiJWQgJWQiLCZhLCAmYik7CiAgICBwcmludGYoIiVkXG4iLGErYik7CiAgICByZXR1cm4gMDsKfQ=='
start_urls = [
"http://poj.org/status"
]
download_delay = 0.5
rules = [
Rule(link(allow=('/status\?top=[0-9]+'), deny=('status\?bottom=[0-9]+')), follow=True, callback='parse_start_url')
]
is_login = False
def __init__(self,
solution_id='None',
problem_id='1000',
language='g++',
source=None,
username='sdutacm1',
password='sdutacm', *args, **kwargs):
super(PojSubmitSpider, self).__init__(*args, **kwargs)
self.solution_id = solution_id
self.username = username
self.password = password
self.problem_id = problem_id
self.language = language
if source is not None:
self.source = source
def start_requests(self):
return [FormRequest(self.login_url,
formdata = {
'user_id1': self.username,
'password1': self.password,
'B1': 'login',
},
callback = self.after_login,
)]
def after_login(self, response):
return [Request(self.login_verify_url,
callback = self.login_verify
)]
def login_verify(self, response):
if response.url == self.login_verify_url:
self.is_login = True
self.login_time = time.mktime(time.strptime(\
response.headers['Date'], \
'%a, %d %b %Y %H:%M:%S %Z')) + (8 * 60 * 60)
time.sleep(1)
return [FormRequest(self.submit_url,
formdata = {
'problem_id': self.problem_id,
'language': LANGUAGE.get(self.language, '0'),
'source': self.source,
'submit': 'Submit',
'encoded': '1'
},
callback = self.after_submit,
dont_filter = True
)]
else:
return Request(self.start_urls[0], callback=self.parse_start_url)
def after_submit(self, response):
time.sleep(3)
for url in self.start_urls:
yield self.make_requests_from_url(url)
def parse_start_url(self, response):
sel = Selector(response)
item = SolutionItem()
item['oj'] = 'poj'
item['problem_id'] = self.problem_id
item['language'] = self.language
item['solution_id'] = self.solution_id
if self.is_login:
for tr in sel.xpath('//table')[-1].xpath('.//tr')[1:]:
user = tr.xpath('.//td/a/text()').extract()[0]
_submit_time = tr.xpath('.//td/text()').extract()[-1]
if user == self.username:
item['submit_time'] = _submit_time
item['run_id'] = tr.xpath('.//td/text()').extract()[0]
try:
item['memory'] = \
tr.xpath('.//td')[4].xpath('./text()').extract()[0]
item['time'] = \
tr.xpath('.//td')[5].xpath('./text()').extract()[0]
except:
pass
item['code_length'] = tr.xpath('.//td/text()').extract()[-2]
item['result'] = tr.xpath('.//td').xpath('.//font/text()').extract()[0]
self._rules = []
return item
else:
item['result'] = 'Submit Error'
self._rules = []
return item
class PojStatusSpider(Spider):
name = 'poj_status'
allowed_domains = ['poj.org']
def __init__(self, run_id=13881167, *args, **kwargs):
super(PojStatusSpider, self).__init__(*args, **kwargs)
self.run_id = str(run_id)
self.start_urls = [
'http://poj.org/status?top=%s' % (int(run_id) + 1)
]
def parse(self, response):
sel = Selector(response)
item = SolutionItem()
item['oj'] = 'poj'
item['run_id'] = self.run_id
for tr in sel.xpath('//table')[-1].xpath('.//tr')[1:]:
runid = tr.xpath('.//td/text()').extract()[0]
_submit_time = tr.xpath('.//td/text()').extract()[-1]
if runid == self.run_id:
item['submit_time'] = _submit_time
item['problem_id'] = tr.xpath('.//td/a/text()').extract()[1]
item['language'] = tr.xpath('.//td')[6].xpath('.//text()').extract()[0]
try:
item['memory'] = \
tr.xpath('.//td')[4].xpath('./text()').extract()[0]
item['time'] = \
tr.xpath('.//td')[5].xpath('./text()').extract()[0]
except:
pass
item['code_length'] = tr.xpath('.//td/text()').extract()[-2]
item['result'] = tr.xpath('.//td').xpath('.//font/text()').extract()[0]
self._rules = []
return item
else:
item['result'] = 'wait'
self._rules = []
class PojAccountSpider(Spider):
name = 'poj_user'
allowed_domains = ['poj.org']
login_url = 'http://poj.org/login'
login_verify_url = 'http://poj.org/loginlog'
accepted_url = \
'http://poj.org/status?problem_id=&user_id=%s&result=0&language='
download_delay = 1
is_login = False
solved = {}
def __init__(self,
username='sdutacm1',
password='sdutacm', *args, **kwargs):
super(PojAccountSpider, self).__init__(*args, **kwargs)
self.username = username
self.password = password
self.start_urls = [
"http://poj.org/userstatus?user_id=%s" % username
]
def start_requests(self):
return [FormRequest(self.login_url,
formdata = {
'user_id1': self.username,
'password1': self.password,
'B1': 'login',
},
callback = self.after_login,
)]
def after_login(self, response):
return [Request(self.login_verify_url,
                        callback = self.login_verify
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from _IECoreArnold import *
from UniverseBlock import UniverseBlock