{
'name': "Blockchain Waves Synchro",
'version': '1.0',
'depends': ['base',
'sale',
'sales_team',
'delivery',
'barcodes',
'mail',
'report',
'portal_sale',
'website_portal',
'website_payment',],
'author': "Sergey Stepanets",
'category': 'Application',
'description': """
Module for blockchain synchro
""",
'data': [
'views/setting.xml',
'data/cron.xml',
'views/clients.xml',
'views/sale_order.xml',
'views/journal_signature.xml',
# 'views/report.xml',
],
}
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-13 20:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('kriegspiel', '0003_auto_20170113_2035'),
]
operations = [
migrations.AddField(
model_name='move',
name='created_at',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
# Copyright (c) 2017-present, Facebook, Inc.
# All Rights Reserved.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
from __future__ import absolute_import, division, print_function, unicode_literals
import pkgutil
# Indicate that hgext3rd is a namespace package, and other python path
# directories may still be searched for hgext3rd extensions.
__path__ = pkgutil.extend_path(__path__, __name__) # type: ignore # noqa: F821
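# Illustrative note (not part of the original file): pkgutil.extend_path
# scans every sys.path entry for an "hgext3rd" directory and appends each
# match to __path__, so extensions installed into separate site-packages
# locations can all be imported as hgext3rd.<name>.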
cat_probs_value = _swap_first_last_axes(cat_probs_value)
# Add a new innermost dimension for broadcasting to mvn vector shape
cat_probs_value = [np.expand_dims(c_p, -1) for c_p in cat_probs_value]
true_mean = sum(
[c_p * m for (c_p, m) in zip(cat_probs_value, dist_means_value)])
self.assertAllClose(true_mean, mean_value)
def testProbScalarUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[], num_components=2)
for x in [
np.array(
[1.0, 2.0], dtype=np.float32), np.array(
1.0, dtype=np.float32),
np.random.randn(3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbScalarMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[], num_components=2, event_shape=[3])
for x in [
np.array(
[[-1.0, 0.0, 1.0], [0.5, 1.0, -0.3]], dtype=np.float32), np.array(
[-1.0, 0.0, 1.0], dtype=np.float32),
np.random.randn(2, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax([dist.cat.logits])[0]
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchUnivariate(self):
with self.test_session() as sess:
dist = make_univariate_mixture(batch_shape=[2, 3], num_components=2)
for x in [
np.random.randn(2, 3).astype(np.float32),
np.random.randn(4, 2, 3).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape, p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape, p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testProbBatchMultivariate(self):
with self.test_session() as sess:
dist = make_multivariate_mixture(
batch_shape=[2, 3], num_components=2, event_shape=[4])
for x in [
np.random.randn(2, 3, 4).astype(np.float32),
np.random.randn(4, 2, 3, 4).astype(np.float32)
]:
p_x = dist.prob(x)
self.assertEqual(x.shape[:-1], p_x.get_shape())
cat_probs = nn_ops.softmax(dist.cat.logits)
dist_probs = [d.prob(x) for d in dist.components]
p_x_value, cat_probs_value, dist_probs_value = sess.run(
[p_x, cat_probs, dist_probs])
self.assertEqual(x.shape[:-1], p_x_value.shape)
cat_probs_value = _swap_first_last_axes(cat_probs_value)
total_prob = sum(c_p_value * d_p_value
for (c_p_value, d_p_value
) in zip(cat_probs_value, dist_probs_value))
self.assertAllClose(total_prob, p_x_value)
def testSampleScalarBatchUnivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[], num_components=num_components)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4,), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4,), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch univariate case: batch_size == 1, rank 1
which_dist_samples = dist_sample_values[c][:size_c]
self.assertAllClose(which_dist_samples, sample_values[which_c])
# Test that sampling with the same seed twice gives the same results.
def testSampleMultipleTimes(self):
# 5 component mixture.
logits = [-10.0, -5.0, 0.0, 5.0, 10.0]
mus = [-5.0, 0.0, 5.0, 4.0, 20.0]
sigmas = [0.1, 5.0, 3.0, 0.2, 4.0]
with self.test_session():
n = 100
random_seed.set_random_seed(654321)
components = [
distributions_py.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat = distributions_py.Categorical(
logits, dtype=dtypes.int32, name="cat1")
dist1 = distributions_py.Mixture(cat, components, name="mixture1")
samples1 = dist1.sample(n, seed=123456).eval()
random_seed.set_random_seed(654321)
components2 = [
distributions_py.Normal(
loc=mu, scale=sigma) for mu, sigma in zip(mus, sigmas)
]
cat2 = distributions_py.Categorical(
logits, dtype=dtypes.int32, name="cat2")
dist2 = distributions_py.Mixture(cat2, components2, name="mixture2")
samples2 = dist2.sample(n, seed=123456).eval()
self.assertAllClose(samples1, samples2)
def testSampleScalarBatchMultivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_multivariate_mixture(
batch_shape=[], num_components=num_components, event_shape=[2])
n = 4
with _test_capture_mvndiag_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2), sample_values.shape)
for c in range(num_components):
which_c = np.where(cat_sample_values == c)[0]
size_c = which_c.size
# Scalar Batch multivariate case: batch_size == 1, rank 2
which_dist_samples = dist_sample_values[c][:size_c, :]
self.assertAllClose(which_dist_samples, sample_values[which_c, :])
def testSampleBatchUnivariate(self):
with self.test_session() as sess:
num_components = 3
dist = make_univariate_mixture(
batch_shape=[2, 3], num_components=num_components)
n = 4
with _test_capture_normal_sample_outputs() as component_samples:
samples = dist.sample(n, seed=123)
self.assertEqual(samples.dtype, dtypes.float32)
self.assertEqual((4, 2, 3), samples.get_shape())
cat_samples = dist.cat.sample(n, seed=123)
sample_values, cat_sample_values, dist_sample_values = sess.run(
[samples, cat_samples, component_samples])
self.assertEqual((4, 2, 3), sample_values.shape)
for c in range(num_components):
which_c_s, which_c_b0, which_c_b1 = np.where(cat_sample_values == c)
size_c = which_c_s.size
# Batch univariate case: batch_size
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
import unittest
import bitcodin
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
from bitcodin.rest import RestClient
class GetStatisticsCurrentMonthTestCase(BitcodinTestCase):
def setUp(self):
super(GetStatisticsCurrentMonthTestCase, self).setUp()
def runTest(self):
response = RestClient.get(url=bitcodin.get_api_base()+'/statistics', headers=bitcodin.create_headers())
def tearDown(self):
super(GetStatisticsCurrentMonthTestCase, self).tearDown()
if __name__ == '__main__':
unittest.main()
# pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(request.backend, social_views._do_login, user=user))
self.assert_account_settings_context_looks_correct(account_settings_context(request))
def test_signin_fails_if_account_not_active(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
user = self.create_user_models_for_existing_account(strategy, 'user@example.com', 'password', self.get_username())
user.is_active = False
user.save()
with self._patch_edxmako_current_request(strategy.request):
self.assert_json_failure_response_is_inactive_account(student_views.login_user(strategy.request))
def test_signin_fails_if_no_account_associated(self):
_, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_LOGIN, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
self.create_user_models_for_existing_account(
strategy, 'user@example.com', 'password', self.get_username(), skip_social_auth=True)
self.assert_json_failure_response_is_missing_social_auth(student_views.login_user(strategy.request))
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_email_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(email='user@example.com')
def test_first_party_auth_trumps_third_party_auth_but_is_invalid_when_only_password_in_request(self):
self.assert_first_party_auth_trumps_third_party_auth(password='password')
def test_first_party_auth_trumps_third_party_auth_and_fails_when_credentials_bad(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='user@example.com', password='password', success=False)
def test_first_party_auth_trumps_third_party_auth_and_succeeds_when_credentials_good(self):
self.assert_first_party_auth_trumps_third_party_auth(
email='user@example.com', password='password', success=True)
def test_full_pipeline_succeeds_registering_new_account(self):
# First, create, the request and strategy that store pipeline state.
# Mock out wire traffic.
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
strategy.request.backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# Begin! Grab the registration page and check the login control on it.
self.assert_register_response_before_pipeline_looks_correct(self.client.get('/register'))
# The pipeline starts by a user GETting /auth/login/<provider>.
# Synthesize that request and check that it redirects to the correct
# provider page.
self.assert_redirect_to_provider_looks_correct(self.client.get(
pipeline.get_login_url(self.provider.provider_id, pipeline.AUTH_ENTRY_LOGIN)))
# Next, the provider makes a request against /auth/complete/<provider>.
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(request.backend, social_views._do_login))
# At this point we know the pipeline has resumed correctly. Next we
# fire off the view that displays the registration form.
with self._patch_edxmako_current_request(request):
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request),
pipeline.get(request)['kwargs'],
['name', 'username', 'email']
)
# Next, we invoke the view that handles the POST. Not all providers
# supply email. Manually add it as the user would have to; this
# also serves as a test of overriding provider values. Always provide a
# password for us to check that we override it properly.
overridden_password = strategy.request.POST.get('password')
email = 'new@example.com'
if not strategy.request.POST.get('email'):
strategy.request.POST = self.get_registration_post_vars({'email': email})
# The user must not exist yet...
with self.assertRaises(auth_models.User.DoesNotExist):
self.get_user_by_email(strategy, email)
# ...but when we invoke create_account the existing edX view will make
# it, but not social auths. The pipeline creates those later.
with self._patch_edxmako_current_request(strategy.request):
self.assert_json_success_response_looks_correct(student_views.create_account(strategy.request))
# We've overridden the user's password, so authenticate() with the old
# value won't work:
created_user = self.get_user_by_email(strategy, email)
self.assert_password_overridden_by_pipeline(overridden_password, created_user.username)
# At this point the user object exists, but there is no associated
# social auth.
self.assert_social_auth_does_not_exist_for_user(created_user, strategy)
# We should be redirected back to the complete page, setting
# the "logged in" cookie for the marketing site.
self.assert_logged_in_cookie_redirect(actions.do_complete(
request.backend, social_views._do_login, request.user, None, # pylint: disable=protected-access
redirect_field_name=auth.REDIRECT_FIELD_NAME
))
# Set the cookie and try again
self.set_logged_in_cookies(request)
self.assert_redirect_to_dashboard_looks_correct(
actions.do_complete(strategy.request.backend, social_views._do_login, user=created_user))
# Now the user has been redirected to the dashboard. Their third party account should now be linked.
self.assert_social_auth_exists_for_user(created_user, strategy)
self.assert_account_settings_context_looks_correct(account_settings_context(request), linked=True)
def test_new_account_registration_assigns_distinct_username_on_collision(self):
original_username = self.get_username()
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
# Create a colliding username in the backend, then proceed with
# assignment via pipeline to make sure a distinct username is created.
strategy.storage.user.create_user(username=self.get_username(), email='user@email.com', password='password')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
distinct_username = pipeline.get(request)['kwargs']['username']
self.assertNotEqual(original_username, distinct_username)
def test_new_account_registration_fails_if_email_exists(self):
request, strategy = self.get_request_and_strategy(
auth_entry=pipeline.AUTH_ENTRY_REGISTER, redirect_uri='social:complete')
backend = strategy.request.backend
backend.auth_complete = mock.MagicMock(return_value=self.fake_auth_complete(strategy))
# pylint: disable=protected-access
self.assert_redirect_to_register_looks_correct(actions.do_complete(backend, social_views._do_login))
with self._patch_edxmako_current_request(request):
self.assert_register_response_in_pipeline_looks_correct(
student_views.register_user(strategy.request),
pipeline.get(request)['kwargs'],
['name', 'username', 'email']
)
#!/usr/bin/env python
#(C) Mugfoundation 2014
#Available under MIT license
import click
import hashlib
c = hashlib.sha512()
@click.command()
@click.option('--setup', 'setup', help='Setup new project', type=str)
@click.option('-x', '--major', 'major', help='major version setter', type=int)
@click.option('-y', '--minor', 'minor', help='minor version setter', type=int)
@click.option('-z', '--patch', 'patch', help='patch version setter', type=int)
@click.option('-e', '--extras', 'extras', help='extras version setter', type=int)
@click.option('-h', '--hash', 'hash', help='file to extract the sha512 hash', type=str)
@click.option('-sr', '--sign', 'sign', help='sign the release using open pgp (available only on linux)', type=str)
def main(setup, major, minor, patch, extras, hash, sign):
if _event == self.peerQueueEvent["ping_peers"]:
query(_p["address"], _p["port"], "*", self.logger)
# Need to add all self.peerQueueEvent items here
'''
Data Propagation Request
'''
if _event == self.peerQueueEvent["data_propagate"]:
if _data is None:
self.logger.warning("Propagation of 'None' data triggered")
else:
self.logger.info("Propagating data")
if _p["address"] != "127.0.0.1":
query(_p["address"], _p["port"], _data, self.logger)
self.propagated = _data
# Indicate that the task from the queue is complete
self.pushToPeers.task_done()
'''
Trigger shutdown
'''
def triggerShutdown(self):
self.signal = False
# We are at the end of the run, write out active nodes
try:
open("dpeer/known_nodes.json", "w+").write(dumps(self.peers, indent=4))
except IOError:
cout("fail", "Unable to save TX to file.")
'''
Request handler
- Handles all incoming requests from other peers
- Need to send back string-type even on failures
- The events handled here are listed in :
Documentation -> RequestProtocol.md
'''
def handle(self, data):
'''
Ping received - Send back a pong
'''
if data == b'*':
return "___PONG___"
'''
Split the data into chunks, each request should follow the
request protocol chunking defined in 'RequestProtocol.md'
'''
original_data = data
data = data.decode()
data = data.split('\n')
if len(data) < 3:
return("_INVALID_DATA_CHUNK_")
# Ensure that we don't repeat ourselves
if original_data in self.propagated:
self.logger.info("Handler got request to propagate previously propageted data")
return("_PREVIOUSLY_PROPAGETED_")
'''
Information Pushed (IP) - Broadcast received
'''
if data[0] == "broadcast":
'''
IP.0
'''
if data[1] == "new_client":
# Ensure there isn't anything silly with \n happening
ensured = ''.join(data[2:])
try:
ensured = loads(ensured)
except:
return ("_INVALID_UNABLE_TO_LOADS_DATA_")
try:
addr = ensured["address"]
prt = ensured["port"]
except:
return("_INVALID_JSON_DATA_")
# Attempt to ping the peer they asked us to add. If we can't reach them, we won't try
res = query(addr, prt, "*", self.logger)
if res is None or res == -1:
return ("_CANT_REACH_GIVEN_PEER_")
try:
self.addNewPeer(ensured["address"], ensured["port"], ensured["epochTime"], ensured["personalTime"])
except:
return("_INVALID_JSON_DATA_")
# Propagate peer to network
self.addItemToPeerQueue(self.peerQueueEvent["data_propagate"], "_ALL_", original_data)
return("_REGISTERED_")
'''
IP.1
'''
if data[1] == "new_transaction":
# Add to mempool, and propagate
_memaddresult = self.memPool.insert_transaction(''.join(data[2:]))
if _memaddresult == "_NEW_TX_CREATED_":
self.addItemToPeerQueue(self.peerQueueEvent["data_propagate"], "_ALL_", original_data)
return _memaddresult
'''
Synchronization Request (SR)
'''
if data[0] == "synchronize":
print("\nA synchronization request was picked up!")
'''
Information Request (IR)
'''
if data[0] == "information":
''' IR.0 '''
if data[1] == "timestamp":
return str(datetime.now())
''' IR.1 '''
if data[1] == "numBlocks":
return str(self.chain.head)
''' IR.2 '''
if data[1] == "numPeers":
return str(len(self.peers))
''' IR.3 '''
if data[1] == "numPool":
temp = self.memPool.request_pool_size()
return str(temp)
''' IR.4 '''
if data[1] == "uptime":
return str(self.uptime)
cout("fail", "\tNEXUS->Handle->data:")
cout("lightgreen", data)
'''
If we reach the bottom, that means there was an issue with the
data, return something to indicate the issue
'''
return "_NULL_"
'''
Sync the EPOCH time and network time with peers
'''
def synchronizeEpoch(self):
cout("fail", "NEED TO SYNC WITH PEERS ON NEXUS LAUNCH - NYP")
if not self.supressText and self.prev_epoch is None and self.next_epoch is None:
cout("yellow", "...Attempting to sync from previously known nodes...")
# Nodes to attempt to connect with
attempt = []
# Attempt ping
for peer in self.peers:
res = None or query(peer["address"], peer["port"], "*", self.logger, timeout=2)
if res is None or res == -1:
attempt_sync = False
if not self.supressText:
print(
ctxt("fail", "Failed to ping ["),
ctxt("yellow", (peer["address"] + "@" + str(peer["port"]))),
ctxt("fail", "] - Wont attempt to sync")
)
else:
attempt_sync = True
if attempt_sync and peer["address"] != "127.0.0.1":
if not self.supressText:
cout("yellow", "Adding external peer to attempt sync with")
attempt.append(peer)
if not self.supressText:
cout("yellow", ("Ping-scan for sync complete, " + str(len(attempt)) + " nodes to attempt sync with"))
# We are either starting alone, or we were cut off from the network
if len(attempt) == 0:
self.next_epoch = None
return
# Grab random peer that is available to sync with
sysRand = SystemRandom()
peer_to_sync = sysRand.choice(attempt)
print("NOT YET DONE ------ ")
print("Sync with: ", peer_to_sync)
'''
Epoch triggered, perform mining process
'''
def performCurrentEpoch(self):
self.epochs_passed += 1
return
'''
Main running loop
'''
def run(self):
cout("lightgreen", "> Peer Nexus Online <")
# If we don't sync on launch, this will be true
if self.next_epoch is None and self.prev_epoch is None:
print(
ctxt("fail", "(No peers discovered)") +
ctxt("yellow", " . Assuming pillar status at [") +
ctxt("cyan", str(datetime.now())) +
ctxt("yellow", "]")
)
self.prev_epoch = datetime.utcnow()
self.next_epoch = self.prev_epoch + timedelta(seconds=__NEXUS__BASE_EPOCH_SECONDS__)
while self.signal:
'''
Ensure list of propagated data isn't getting out of hand
'''
if len(self.propagated) > __NEXUS__PROP_HISTORY_LENGTH__:
self.propagated = self.propagated[__NEXUS__PROP_HISTORY_CUT__:]
'''
Ping all online peers to ensure connectivity
'''
for peer in self.peers:
res = None or query(peer["address"], peer["port"], "*", self.logger, timeout=__NEXUS__PING_TIM |
self._accepted = True
# validate using schema
if not self._validate_with_schema(message):
# Some errors return invalid xml.
self.logger.error("Message doesn't follow schema.")
self._accepted = False
# raise ValueError('Failed to validate against schema')
# Check signature
if not validate(message):
raise ValueError('Failed to verify signature')
descriptors = None
self._content = None
tree = etree.fromstring(message)
# Parse elements from tree to variables.
for element in tree.iter():
if element.tag == "{http://bxd.fi/xmldata/}CustomerId":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._customerid = element.text
if element.tag == "{http://bxd.fi/xmldata/}Timestamp":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._timestamp = element.text
if element.tag == "{http://bxd.fi/xmldata/}ResponseCode":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._responsecode = element.text
if element.tag == "{http://bxd.fi/xmldata/}ResponseText":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._responsetext = element.text
if element.tag == "{http://bxd.fi/xmldata/}ExecutionSerial":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._executionserial = element.text
if element.tag == "{http://bxd.fi/xmldata/}Encrypted":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
value = element.text.lower()
self._encrypted = True if value == 'true' else False
if element.tag == "{http://bxd.fi/xmldata/}EncryptionMethod":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._encryptionmethod = element.text
if element.tag == "{http://bxd.fi/xmldata/}Compressed":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
value = element.text.lower()
if value == '1':
value = 'true'
self._compressed = True if value == 'true' else False
if element.tag == "{http://bxd.fi/xmldata/}CompressionMethod":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._compressionmethod = element.text
if element.tag == "{http://bxd.fi/xmldata/}AmountTotal":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._amounttotal = element.text
if element.tag == "{http://bxd.fi/xmldata/}TransactionCount":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._transactioncount = element.text
if element.tag == "{http://bxd.fi/xmldata/}CustomerExtension":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._customerextension = element
if element.tag == "{http://bxd.fi/xmldata/}FileDescriptors":
descriptors = element
if element.tag == "{http://bxd.fi/xmldata/}FileType":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
self._filetype = element.text
if element.tag == "{http://bxd.fi/xmldata/}Content":
self.logger.debug("{0}: {1}".format(element.tag, element.text))
bytestring = bytes(element.text, 'utf-8')
self._content = base64.b64decode(bytestring)
# Parse filedescriptors
if descriptors is not None:
self._file_descriptors = []
for descriptor in descriptors:
fd = FileDescriptor()
for element in descriptor.iter():
if element.tag == "{http://bxd.fi/xmldata/}FileReference":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.reference = element.text
if element.tag == "{http://bxd.fi/xmldata/}TargetId":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.target = element.text
if element.tag == "{http://bxd.fi/xmldata/}ServiceId":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.serviceid = element.text
if element.tag == ("{http://bxd.fi/xmldata/}"
"ServiceIdOwnerName"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.serviceidownername = element.text
if element.tag == "{http://bxd.fi/xmldata/}UserFilename":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.userfilename = element.text
if element.tag == ("{http://bxd.fi/xmldata/}"
"ParentFileReference"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.parentfile = element.text
if element.tag == ("{http://bxd.fi/xmldata/}FileType"):
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.filetype = element.text
if element.tag == "{http://bxd.fi/xmldata/}FileTimestamp":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.timestamp = element.text
if element.tag == "{http://bxd.fi/xmldata/}Status":
self.logger.debug("{0}: {1}".format(element.tag,
element.text))
fd.status = element.text
self._file_descriptors.append(fd)
def is_accepted(self):
""" Was applicationrequest accepted or not.
@rtype: boolean
@return: True if response code was 00 (OK)
"""
try:
if self._responsecode != "00" or self._accepted == False:
self.logger.error(
"ApplicationResponse:{0}:{1}".format(self._responsecode,
self._responsetext)
)
return False
return True
except AttributeError as e:
self.logger.exception(e)
self.logger.error("Unable to find responsecode and response text.")
return False
def _get_content(self):
""" Returns content of xml string in clear text
@rtype: string or None
@return: Data saved to content field.
"""
data = ""
try:
if self._compressed is True:
if self._get_compressionmethod() is not None:
if self._get_compressionmethod() == "RFC1952":
data = gzip.decompress(bytes(self._content))
else:
raise TypeError("Unsupported compression method")
else:
data = gzip.decompress(bytes(self._content))
else:
data = self._content
return str(data, 'utf-8')
except AttributeError:
return self._content
content
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, traceback
import Ice, AllTests
def test(b):
if not b:
raise RuntimeError('test assertion failed')
def usage(n):
sys.stderr.write("Usage: " + n + " port...\n")
def run(args, communicator):
ports = []
for arg in args[1:]:
if arg[0] == '-':
sys.stderr.write(args[0] + ": unknown option `" + arg | + "'\n")
usage(args[0])
return False
ports.append(int(arg))
if len(ports) == 0:
sys.stderr.write(args[0] + ": no ports specified\n")
usage(args[0])
return False
try:
AllTests.allTests(communicator, ports)
except:
traceback.print_exc()
test(False)
return True
communicator = None
try:
initData = Ice.InitializationData()
initData.properties = Ice.createProperties(sys.argv)
#
# This test aborts servers, so we don't want warnings.
#
initData.properties.setProperty('Ice.Warn.Connections', '0')
communicator = Ice.initialize(sys.argv, initData)
status = run(sys.argv, communicator)
except:
traceback.print_exc()
status = False
if communicator:
try:
communicator.destroy()
except:
traceback.print_exc()
status = False
sys.exit(not status)
input_name = '../examples/linear_elasticity/linear_viscoelastic.py'
output_name_trunk = 'test_linear_viscoelastic'
from tests_basic import TestInputEvolutionary
class Test(TestInputEvolutionary):
pass
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Item Related Objects.
"""
import re
from datetime import datetime
from db_related import DBConnect
class Item_Lookup(object):
"""
Returned Item Lookup Dictionary Structure:
item = {
upc: text
description: text
cost: decimal
price: decimal
taxable: True or False
on_hand_qty: decimal
stx: decimal
}
"""
def __init__(self, upc):
self.upc = upc
def GetBasics(self):
query = '''SELECT upc, description, cost, retail, taxable, onhandqty
FROM item_detailed
WHERE upc=(?)'''
data = [self.upc,]
returned = DBConnect(query, data).ALL()
# coding: utf-8
import sys
from collections import defaultdict
sys.path.append('/project/nakamura-lab01/Work/yusuke-o/python')
from data.reader import dlmread
def addfeature(fs, fid, name, mode):
if mode == 'dev' or name in fid:
fs.append(fid[name])
def main():
if len(sys.argv) != 6:
print('USAGE: python3 makeliblin_greedy.py \\')
print(' <str: mode ("dev" or "test")>')
print(' <in-file: input sentence with POS> \\')
print(' <in-file: splitter table> \\')
print(' <(dev)out-file, (test)in-file: feature ID table> \\')
print(' <out-file: LIBLINEAR input data>')
return
mode = sys.argv[1]
fname_pos = sys.argv[2]
fname_splitter = sys.argv[3]
fname_fid = sys.argv[4]
fname_liblin = sys.argv[5]
if mode not in ['dev', 'test']:
sys.stderr.write('ERROR: unknown mode.\n')
return
# load word and pos
corpus_in_pos = [x for x in dlmread(fname_pos, ' ')]
for i in range(len(corpus_in_pos)):
corpus_in_pos[i] = [w.split('_') for w in corpus_in_pos[i]]
# load splitter
tab_sp = defaultdict(lambda: [])
with open(fname_splitter, 'r', encoding='utf-8') as fp:
for l in fp:
lineno, wordno = tuple(int(x) for x in l.strip().split(' '))
tab_sp[lineno].append(wordno)
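# Splitter-table line format (as parsed above): "<lineno> <wordno>" per line;
# e.g. "12 5" records word 5 of sentence 12 as a split point.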
# load or new feature id table
fid = defaultdict(lambda: len(fid)+1)
if mode == 'test':
with open(fname_fid, 'r', encoding='utf-8') as fp:
for l in fp:
ls = l.split()
k = ls[0]
v = int(ls[1])
fid[k] = v
# make/save training data
n = 0
with open(fname_liblin, 'w', encoding='utf-8') as fp:
for i in range(len(corpus_in_pos)):
data = [['<s>', '<s>']] * 2 + corpus_in_pos[i] + [['</s>', '</s>']] * 2
for j in range(len(data)-5): # ignore end of sentence
jj = j+2
features = []
# unigram words
# addfeature(features, fid, 'WORD[-2]=%s' % data[jj-2][0], mode)
addfeature(features, fid, 'WORD[-1]=%s' % data[jj-1][0], mode)
addfeature(features, fid, 'WORD[0]=%s' % data[jj+0][0], mode)
addfeature(features, fid, 'WORD[+1]=%s' % data[jj+1][0], mode)
addfeature(features, fid, 'WORD[+2]=%s' % data[jj+2][0], mode)
# unigram POSes
# addfeature(features, fid, 'POS[-2]=%s' % data[jj-2][1], mode)
addfeature(features, fid, 'POS[-1]=%s' % data[jj-1][1], mode)
addfeature(features, fid, 'POS[0]=%s' % data[jj+0][1], mode)
addfeature(features, fid, 'POS[+1]=%s' % data[jj+1][1], mode)
addfeature(features, fid, 'POS[+2]=%s' % data[jj+2][1], mode)
# bigram words
# addfeature(features, fid, 'WORD[-2:-1]=%s_%s' % (data[jj-2][0], data[jj-1][0]), mode)
addfeature(features, fid, 'WORD[-1:0]=%s_%s' % (data[jj-1][0], data[jj+0][0]), mode)
addfeature(features, fid, 'WORD[0:+1]=%s_%s' % (data[jj+0][0], data[jj+1][0]), mode)
addfeature(features, fid, 'WORD[+1:+2]=%s_%s' % (data[jj+1][0], data[jj+2][0]), mode)
# bigram POSes
# addfeature(features, fid, 'POS[-2:-1]=%s_%s' % (data[jj-2][1], data[jj-1][1]), mode)
addfeature(features, fid, 'POS[-1:0]=%s_%s' % (data[jj-1][1], data[jj+0][1]), mode)
addfeature(features, fid, 'POS[0:+1]=%s_%s' % (data[jj+0][1], data[jj+1][1]), mode)
addfeature(features, fid, 'POS[+1:+2]=%s_%s' % (data[jj+1][1], data[jj+2][1]), mode)
# trigram words
# addfeature(features, fid, 'WORD[-2:0]=%s_%s_%s' % (data[jj-2][0], data[jj-1][0], data[jj+0][0]), mode)
addfeature(features, fid, 'WORD[-1:+1]=%s_%s_%s' % (data[jj-1][0], data[jj+0][0], data[jj+1][0]), mode)
addfeature(features, fid, 'WORD[0:+2]=%s_%s_%s' % (data[jj+0][0], data[jj+1][0], data[jj+2][0]), mode)
# trigram POSes
# addfeature(features, fid, 'POS[-2:0]=%s_%s_%s' % (data[jj-2][1], data[jj-1][1], data[jj+0][1]), mode)
addfeature(features, fid, 'POS[-1:+1]=%s_%s_%s' % (data[jj-1][1], data[jj+0][1], data[jj+1][1]), mode)
addfeature(features, fid, 'POS[0:+2]=%s_%s_%s' % (data[jj+0][1], data[jj+1][1], data[jj+2][1]), mode)
line = '1 ' if j in tab_sp[i] else '2 '
line += ' '.join('%d:1'%f for f in sorted(features))
fp.write(line+'\n')
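# Example emitted line (illustrative): label "1" marks a split point and "2"
# a non-split, followed by sorted binary features, e.g. "1 3:1 17:1 42:1".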
n += 1
# save feature id table
if mode == 'dev':
with open(fname_fid, 'w', encoding='utf-8') as fp:
for k, v in fid.items():
fp.write('%s\t%d\n' % (k, v))
if __name__ == '__main__':
main()
code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
possibles = {
"verbose_name": None,
"primary_key": False,
"max_length": None,
"unique": False,
"blank": False,
"null": False,
"db_index": False,
"default": NOT_PROVIDED,
"editable": True,
"serialize": True,
"unique_for_date": None,
"unique_for_month": None,
"unique_for_year": None,
"choices": [],
"help_text": '',
"db_column": None,
"db_tablespace": settings.DEFAULT_INDEX_TABLESPACE,
"auto_created": False,
"validators": [],
"error_messages": None,
}
attr_overrides = {
"unique": "_unique",
"choices": "_choices",
"error_messages": "_error_messages",
"validators": "_validators",
"verbose_name": "_verbose_name",
}
equals_comparison = {"choices", "validators", "db_tablespace"}
for name, default in possibles.items():
value = getattr(self, attr_overrides.get(name, name))
# Unroll anything iterable for choices into a concrete list
if name == "choices" and isinstance(value, collections.Iterable):
value = list(value)
# Do correct kind of comparison
if name in equals_comparison:
if value != default:
keywords[name] = value
else:
if value is not default:
keywords[name] = value
# Work out path - we shorten it for known Django core fields
path = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if path.startswith("django.db.models.fields.related"):
path = path.replace("django.db.models.fields.related", "django.db.models")
if path.startswith("django.db.models.fields.files"):
path = path.replace("django.db.models.fields.files", "django.db.models")
if path.startswith("django.db.models.fields.proxy"):
path = path.replace("django.db.models.fields.proxy", "django.db.models")
if path.startswith("django.db.models.fields"):
path = path.replace("django.db.models.fields", "django.db.models")
# Return basic info - other fields should override this.
return (
force_text(self.name, strings_only=True),
path,
[],
keywords,
)
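# Illustrative round-trip (not in the original source): for a field such as
#   f = models.CharField(max_length=100)
# deconstruct() yields (name, 'django.db.models.CharField', [], {'max_length': 100}),
# and f.__class__(*args, **kwargs) rebuilds an equivalent field, which is
# exactly what clone() below does.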
def clone(self):
"""
Uses deconstruct() to clone a new copy of this Field.
Will not preserve any class attachments/attribute names.
"""
name, path, args, kwargs = self.deconstruct()
return self.__class__(*args, **kwargs)
def __eq__(self, other):
# Needed for @total_ordering
if isinstance(other, Field):
return self.creation_counter == other.creation_counter
return NotImplemented
def __lt__(self, other):
# This is needed because bisect does not take a comparison function.
if isinstance(other, Field):
return self.creation_counter < other.creation_counter
return NotImplemented
def __hash__(self):
return hash(self.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
# Fields are sometimes used without attaching them to models (for
# example in aggregation). In this case give back a plain field
# instance. The code below will create a new empty instance of
# class self.__class__, then update its dict with self.__dict__
# values - so, this is very close to normal pickle.
return _empty, (self.__class__,), self.__dict__
if self.model._deferred:
# Deferred model will not be found from the app registry. This
# could be fixed by reconstructing the deferred model on unpickle.
raise RuntimeError("Fields of deferred models can't be reduced")
return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
self.name)
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
@cached_property
def validators(self):
# Some validators can't be created at field initialization time.
# This method provides a way to delay their creation until required.
return self.default_validators + self._validators
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value not in self.empty_values:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'], code='null')
if not self.blank and value in self.empty_values:
raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
evaluate_convergence(bundle, new_bundle2)
assert_raises(ValueError, StreamlineLinearRegistration, method='Whatever')
def test_rigid_partial_real_bundles():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[20:40]
static_center, shift = center_streamlines(static)
moving_center, shift2 = center_streamlines(moving)
print(shift2)
mat = compose_matrix(translate=np.array([0, 0, 0.]),
angles=np.deg2rad([40, 0, 0.]))
moved = transform_streamlines(moving_center, mat)
srr = StreamlineLinearRegistration()
srm = srr.optimize(static_center, moved)
print(srm.fopt)
print(srm.iterations)
print(srm.funcs)
moving_back = srm.transform(moved)
print(srm.matrix)
static_center = set_number_of_points(static_center, 100)
moving_center = set_number_of_points(moving_back, 100)
vol = np.zeros((100, 100, 100))
spts = np.concatenate(static_center, axis=0)
spts = np.round(spts).astype(int) + np.array([50, 50, 50])
mpts = np.concatenate(moving_center, axis=0)
mpts = np.round(mpts).astype(int) + np.array([50, 50, 50])
for index in spts:
i, j, k = index
vol[i, j, k] = 1
vol2 = np.zeros((100, 100, 100))
for index in mpts:
i, j, k = index
vol2[i, j, k] = 1
overlap = np.sum(np.logical_and(vol, vol2)) / float(np.sum(vol2))
assert_equal(overlap * 100 > 40, True)
def test_stream_rigid():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[20:40]
static_center, shift = center_streamlines(static)
mat = compose_matrix44([0, 0, 0, 0, 40, 0])
moving = transform_streamlines(moving, mat)
srr = StreamlineLinearRegistration()
sr_params = srr.optimize(static, moving)
moved = transform_streamlines(moving, sr_params.matrix)
srr = StreamlineLinearRegistration(verbose=True)
srm = srr.optimize(static, moving)
moved2 = transform_streamlines(moving, srm.matrix)
moved3 = srm.transform(moving)
assert_array_almost_equal(moved[0], moved2[0], decimal=3)
assert_array_almost_equal(moved2[0], moved3[0], decimal=3)
def test_min_vs_min_fast_precision():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[:20]
static = [s.astype('f8') for s in static]
moving = [m.astype('f8') for m in moving]
bmd = BundleMinDistanceMatrixMetric()
bmd.setup(static, moving)
bmdf = BundleMinDistanceMetric()
bmdf.setup(static, moving)
x_test = [0.01, 0, 0, 0, 0, 0]
print(bmd.distance(x_test))
print(bmdf.distance(x_test))
assert_equal(bmd.distance(x_test), bmdf.distance(x_test))
def test_same_number_of_points():
A = [np.random.rand(10, 3), np.random.rand(20, 3)]
B = [np.random.rand(21, 3), np.random.rand(30, 3)]
C = [np.random.rand(10, 3), np.random.rand(10, 3)]
D = [np.random.rand(20, 3), np.random.rand(20, 3)]
slr = StreamlineLinearRegistration()
assert_raises(ValueError, slr.optimize, A, B)
assert_raises(ValueError, slr.optimize, C, D)
assert_raises(ValueError, slr.optimize, C, B)
def test_efficient_bmd():
a = np.array([[1, 1, 1],
[2, 2, 2],
[3, 3, 3]])
streamlines = [a, a + 2, a + 4]
points, offsets = unlist_streamlines(streamlines)
points = points.astype(np.double)
points2 = points.copy()
D = np.zeros((len(offsets), len(offsets)), dtype='f8')
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets),
a.shape[0], D)
assert_equal(np.sum(np.diag(D)), 0)
points2 += 2
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets),
a.shape[0], D)
streamlines2 = relist_streamlines(points2, offsets)
D2 = distance_matrix_mdf(streamlines, streamlines2)
assert_array_almost_equal(D, D2)
cols = D2.shape[1]
rows = D2.shape[0]
dist = 0.25 * (np.sum(np.min(D2, axis=0)) / float(cols) +
np.sum(np.min(D2, axis=1)) / float(rows)) ** 2
dist2 = _bundle_minimum_distance(points, points2,
len(offsets), len(offsets),
a.shape[0])
assert_almost_equal(dist, dist2)
def test_openmp_locks():
static = []
moving = []
pts = 20
for i in range(1000):
s = np.random.rand(pts, 3)
static.append(s)
moving.append(s + 2)
moving = moving[2:]
points, offsets = unlist_streamlines(static)
points2, offsets2 = unlist_streamlines(moving)
D = np.zeros((len(offsets), len(offsets2)), dtype='f8')
_bundle_minimum_distance_matrix(points, points2,
len(offsets), len(offsets2),
pts, D)
dist1 = 0.25 * (np.sum(np.min(D, axis=0)) / float(D.shape[1]) +
np.sum(np.min(D, axis=1)) / float(D.shape[0])) ** 2
dist2 = _bundle_minimum_distance(points, points2,
len(offsets), len(offsets2),
pts)
assert_almost_equal(dist1, dist2, 6)
def test_from_to_rigid():
t = np.array([10, 2, 3, 0.1, 20., 30.])
mat = compose_matrix44(t)
vec = decompose_matrix44(mat, 6)
assert_array_almost_equal(t, vec)
t = np.array([0, 0, 0, 180, 0., 0.])
mat = np.eye(4)
mat[0, 0] = -1
vec = decompose_matrix44(mat, 6)
assert_array_almost_equal(-t, vec)
def test_matrix44():
assert_raises(ValueError, compose_matrix44, np.ones(5))
assert_raises(ValueError, compose_matrix44, np.ones(9))
assert_raises(ValueError, compose_matrix44, np.ones(16))
def test_abstract_metric_class():
class DummyStreamlineMetric(StreamlineDistanceMetric):
def test():
pass
assert_raises(TypeError, DummyStreamlineMetric)
def test_evolution_of_previous_iterations():
static = fornix_streamlines()[:20]
moving = fornix_streamlines()[:20]
moving = [m + np.array([10., 0., 0.]) for m in moving]
slr = StreamlineLinearRegistration(evolution=True)
from dipy.core.optimize import SCIPY_LESS_0_12
if not SCIPY_LESS_0_12:
slm = slr.optimize(static, moving)
assert_equal(len(slm.matrix_history), slm.iterations)
def test_similarity_real_bundles():
bundle_initial = fornix_streamlines()
bundle_initial, shift = center_streamlines(bundle_initial)
bundle = bundle_initial[:20]
xgold = [0, 0, 10, 0, 0, 0, 1.5]
mat = compose_matrix44(xgold)
bundle2 = transform_streamlines(bundle_initial[:20], mat)
metric = BundleMinDistanceMatrixMetric()
x0 = np.array([0, 0, 0, 0, 0, 0, 1], 'f8')
slr = StreamlineLinearRegistration(metric=metric,
x0=x0,
method='Powell',
bounds=None,
verbose=False)
slm = slr.optimize(bundle, bundle2)
new_bundle2 = slm.transform(bundle2)
evaluate_convergence(bundle, new_bundle2)
def test_affine_real_bundles():
bundle_initial = fornix_streamlines()
bundle_initial, shift = center_streamlines(bundle_initial)
bundle = bundle_initial[:20]
xgold = [0, 4, 2, 0, 10, 10, 1.2, 1.1, 1., 0., 0.2, 0.]
mat = compose_matrix44(xgold)
bundle2 = transform_streamlines(bundle_initial[:20], mat)
x0 = np.array([0, 0, 0, 0, 0, 0, 1., 1., 1., 0, 0, 0])
x = 25
bounds = [(-x, x), (-x, x), (-x, x),
(-x, x), (-x, x), (-x, x),
(0.1, 1.5), (0.1, 1.5), (0.1, 1.5),
(-1, 1), (-1, 1), (-1, 1)]
options = {'maxcor': 10, 'ftol': 1e-7, 'gtol': 1e-5, 'eps': 1e-8}
metric = BundleMinDistanceMatrixMetric()
slr = StreamlineLinearRegistration(metric=metric,
x0=x0,
method='L-BFGS-B',
bounds=bounds,
verbose=True,
import torch
def rmsprop(opfunc, x, config, state=None):
""" An implementation of RMSprop
ARGS:
- 'opfunc' : a function that takes a single input (X), the point
of a evaluation, and returns f(X) and df/dX
- 'x' : the initial point
- 'config' : a table with configuration parameters for the optimizer
- 'config['learningRate']' : learning rate
- 'config['alpha']' : smoothing constant
- 'config['epsilon']' : value with which to initialise m
- 'config['weightDecay']' : weight decay
- 'state' : a table describing the state of the optimizer;
after each call the state is modified
- 'state['m']' : leaky sum of squares of parameter gradients,
- 'state['tmp']' : and the square root (with epsilon smoothing)
RETURN:
- `x` : the new x vector
- `f(x)` : the function, evaluated before the update
"""
# (0) get/update state
if config is None and state is None:
raise ValueError("rmsprop requires a dictionary to retain state between iterations")
state = state if state is not None else config
lr = config.get('learningRate', 1e-2)
alpha = config.get('alpha', 0.99)
epsilon = config.get('epsilon', 1e-8)
wd = config.get('weightDecay', 0)
# (1) evaluate f(x) and df/dx
fx, dfdx = opfunc(x)
# (2) weight decay
if wd != 0:
dfdx.add_(wd, x)
# (3) initialize mean square values and square gradient storage
if 'm' not in state:
state['m'] = x.new().resize_as_(dfdx).zero_()
state['tmp'] = x.new().resize_as_(dfdx)
# (4) calculate new (leaky) mean squared values
state['m'].mul_(alpha)
state['m'].addcmul_(1.0 - alpha, dfdx, dfdx)
# (5) perform update
torch.sqrt(state['m'], out=state['tmp']).add_(epsilon)
x.addcdiv_(-lr, dfdx, state['tmp'])
# return x*, f(x) before optimization
return x, fx
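# --- Usage sketch (illustrative, not part of the original module). It
# minimizes f(x) = 0.5 * ||x||^2, whose gradient is x itself; any opfunc
# returning (f(x), df/dx) works. Assumes the legacy in-place tensor API
# used by rmsprop above.
if __name__ == '__main__':
    def quadratic(x):
        # objective value and gradient at the current point
        return 0.5 * float(x.dot(x)), x.clone()

    x = torch.ones(5)
    config = {'learningRate': 0.1}
    for _ in range(200):
        x, fx = rmsprop(quadratic, x, config)
    print(x)  # approaches the zero vector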
|
from .nuimo import ControllerManager, ControllerManagerListener, Controller, ControllerListener, GestureEvent, Gesture, LedMatrix
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.http import HttpResponsePermanentRedirect, Http404, HttpResponseRedirect
from django.views.decorators.http import require_GET
from django.contrib.auth import login, authenticate
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.contrib import messages
from django.views.decorators.cache import cache_page
from .forms import UrlCreateForm
from .models import Url
@cache_page(60 * 60)
@require_GET
def redirect(request, short_code):
"""
Redirects Url
"""
if short_code:
try:
url = Url.objects.get(short_code=short_code)
except Url.DoesNotExist:
raise Http404()
return HttpResponsePermanentRedirect(url.original_url)
def register_user(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
user = form.save()
username = request.POST['username']
password = request.POST['password1']
user = authenticate(username=username, password=password)
login(request, user)
messages.success(request, 'User registered and logged in with success.')
return HttpResponseRedirect(reverse_lazy('index'))
else:
context = {'user_register_form': form}
else:
context = {'user_register_form': UserCreationForm()}
return render(request, 'register.html', context)
def user_url_list(user, page, limit=20):
"""
Returns a paginator page of a queryset with the user's Urls.
"""
url_list = Url.objects.filter(user=user)
paginator = Paginator(url_list, limit)
try:
url_list = paginator.page(page)
except PageNotAnInteger:
url_list = paginator.page(1)
except EmptyPage:
url_list = paginator.page(paginator.num_pages)
return url_list
def index(request):
"""
Main View: shows the form and lists the Urls of the authenticated user.
"""
if request.user.is_authenticated():
context = {
# Returns the users ``Url.objects`` QuerySet or None if Anonymous.
'url_list': user_url_list(request.user, request.GET.get('page')),
'absolute_uri': request.build_absolute_uri(),
'user': request.user
}
else:
context = {
'user_login_form': AuthenticationForm(),
'user_register_form': UserCreationForm()
}
if request.method == "POST":
form = UrlCreateForm(request.POST)
if form.is_valid():
form.instance.user = (
request.user if request.user.is_authenticated() else None
)
instance = form.save()
context['short_url'] = request.build_absolute_uri() + instance.short_code
else:
form = UrlCreateForm()
context['change_form'] = form
return render(request, 'index.html', context)
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Universidade de Aveiro, DETI/IEETA, Bioinformatics Group - http://bioinformatics.ua.pt/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, HttpResponseRedirect
from questionnaire.models import *
from django.shortcuts import render_to_response, get_object_or_404
import sys
from searchengine.models import *
rem = 1
qu = get_object_or_404(Questionnaire, id=rem)
qsets = qu.questionsets()
for qs in qsets:
expected = qs.questions()
for q in expected:
slugs = Slugs.objects.filter(description__exact=q.text)
if len(slugs)!=1:
print "Error (multiple slugs to the description): " + q.number
for s in slugs:
try:
print s.slug1 + "| " + s.description + "| " + str(s.question.pk)
except:
print s.slug1 + "| " + str(s.question.pk)
continue
s = slugs[0]
if (s.slug1 != q.slug):
print "Error (slug1!=slug): " + q.number
print s.slug1 + "| " + s.description + "| " + str(s.question.pk)
continue
if (s.question.pk!=q.pk):
print "Error (q.pk!=pk): " + q.number
continue
# Copyright 2012 NetApp
# Copyright 2015 Chuck Fouts
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import warnings
from manilaclient.v2 import shares
warnings.warn("Module manilaclient.v1.shares is deprecated (taken as "
"a basis for manilaclient.v2.shares). "
"The preferable way to get a client class or object is to use "
"the manilaclient.client module.")
class MovedModule(object):
def __init__(self, new_module):
self.new_module = new_module
def __getattr__(self, attr):
return getattr(self.new_module, attr)
sys.modules["manilaclient.v1.shares"] = MovedModule(shares)
# Generated by Django 1.11.13 on 2018-08-14 17:35
from django.conf import settings
from django.db import migrations, models
import django.contrib.auth.models
import django.contrib.auth.validators
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
replaces = [
("user", "0001_initial"),
("user", "0002_rename_account_tables"),
("user", "0003_auto_20151226_1110"),
]
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
]
operations = [
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"password",
models.CharField(max_length=128, verbose_name="password"),
),
(
"last_login",
models.DateTimeField(
blank=True, null=True, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text="Designates that this user has all permissions without explicitly assigning them.",
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
error_messages={
"unique": "A user with that username already exists."
},
help_text="Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.",
max_length=150,
unique=True,
validators=[
django.contrib.auth.validators.UnicodeUsernameValidator()
],
verbose_name="username",
),
),
(
"first_name",
models.CharField(
blank=True, max_length=150, verbose_name="first name"
),
),
(
"last_name",
models.CharField(
blank=True, max_length=150, verbose_name="last name"
),
),
(
"email",
models.EmailField(
blank=True,
max_length=254,
verbose_name="email address",
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text="Designates whether the user can log into this admin site.",
verbose_name="staff | status",
),
),
(
"is_active",
models.BooleanField(
default=True,
help_text="Designates whether this user should be treated as active. Unselect this instead of deleting accounts.",
verbose_name="active",
),
),
(
"date_joined",
models.DateTimeField(
default=django.utils.timezone.now,
verbose_name="date joined",
),
),
(
"groups",
models.ManyToManyField(
blank=True,
help_text="The groups this user belongs to. A user will get all permissions granted to each of their groups.",
related_name="user_set",
related_query_name="user",
to="auth.Group",
verbose_name="groups",
),
),
(
"user_permissions",
models.ManyToManyField(
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
to="auth.Permission",
verbose_name="user permissions",
),
),
],
options={
"db_table": "auth_user",
},
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name="TeamMember",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("roles", models.CharField(blank=True, max_length=100)),
(
"leader",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="leader",
to=settings.AUTH_USER_MODEL,
),
),
(
"member",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="member",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="UserProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("about", models.TextField(blank=True, max_length=500)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.AlterUniqueTogether(
name="teammember",
unique_together=set([("leader", "member")]),
),
]
|
self.motherTongue = motherTongue
# spell check rules are disabled by default
self.disabled = {'HUNSPELL_RULE', 'HUNSPELL_NO_SUGGEST_RULE', 'YOUR_NN', 'TRY_AND', 'PRP_PAST_PART',
'MORFOLOGIK_RULE_' + self.language.replace('-', '_').upper()}
self.enabled = set()
self._instances[id(self)] = self
def __del__(self):
if not self._instances and self._server_is_alive():
#self._terminate_server()
pass
def __repr__(self):
return '{}(language={!r}, motherTongue={!r})'.format(
self.__class__.__name__, self.language, self.motherTongue)
@property
def language(self):
"""The language to be used."""
return self._language
@language.setter
def language(self, language):
self._language = LanguageTag(language)
self.disabled.clear()
self.enabled.clear()
@property
def motherTongue(self):
"""The user's mother tongue or None.
The mother tongue may also be used as a source language for
checking bilingual texts.
"""
return self._motherTongue
@motherTongue.setter
def motherTongue(self, motherTongue):
self._motherTongue = (None if motherTongue is None
else LanguageTag(motherTongue))
@property
def _spell_checking_rules(self):
return {'HUNSPELL_RULE', 'HUNSPELL_NO_SUGGEST_RULE',
'MORFOLOGIK_RULE_' + self.language.replace('-', '_').upper()}
def check(self, text: str, srctext=None) -> [Match]:
"""Match text against enabled rules."""
root = self._get_root(self._url, self._encode(text, srctext))
return [Match(e.attrib) for e in root if e.tag == 'error']
def _check_api(self, text: str, srctext=None) -> bytes:
"""Match text against enabled rules (result in XML format)."""
root = self._get_root(self._url, self._encode(text, srctext))
return (b'<?xml version="1.0" encoding="UTF-8"?>\n' +
ElementTree.tostring(root) + b"\n")
def _encode(self, text, srctext=None):
params = {'language': self.language, 'text': text.encode('utf-8')}
if srctext is not None:
params['srctext'] = srctext.encode('utf-8')
if self.motherTongue is not None:
params['motherTongue'] = self.motherTongue
if self.disabled:
params['disabled'] = ','.join(self.disabled)
if self.enabled:
params['enabled'] = ','.join(self.enabled)
return urllib.parse.urlencode(params).encode()
def correct(self, text: str, srctext=None) -> str:
"""Automatically apply suggestions to the text."""
return correct(text, self.check(text, srctext))
def enable_spellchecking(self):
"""Enable spell-checking rules."""
self.disabled.difference_update(self._spell_checking_rules)
def disable_spellchecking(self):
"""Disable spell-checking rules."""
self.disabled.update(self._spell_checking_rules)
@classmethod
def _get_languages(cls) -> set:
"""Get supported languages (by querying the server)."""
if not cls._server_is_alive():
cls._start_server_on_free_port()
url = urllib.parse.urljoin(cls._url, 'Languages')
languages = set()
for e in cls._get_root(url, num_tries=1):
languages.add(e.get('abbr'))
languages.add(e.get('abbrWithVariant'))
return languages
@classmethod
def _get_attrib(cls):
"""Get matches element attributes."""
if not cls._server_is_alive():
cls._start_server_on_free_port()
params = {'language': FAILSAFE_LANGUAGE, 'text': ''}
data = urllib.parse.urlencode(params).encode()
root = cls._get_root(cls._url, data, num_tries=1)
return root.attrib
@classmethod
def _get_root(cls, url, data=None, num_tries=2):
for n in range(num_tries):
try:
with urlopen(url, data, cls._TIMEOUT) as f:
return ElementTree.parse(f).getroot()
except (IOError, http.client.HTTPException) as e:
cls._terminate_server()
cls._start_server()
if n + 1 >= num_tries:
raise Error('{}: {}'.format(cls._url, e))
@classmethod
def _start_server_on_free_port(cls):
while True:
cls._url = 'http://{}:{}'.format(cls._HOST, cls._port)
try:
cls._start_server()
break
            except ServerError:
if cls._MIN_PORT <= cls._port < cls._MAX_PORT:
cls._port += 1
else:
raise
@classmethod
def _start_server(cls):
err = None
try:
            server_cmd = get_server_cmd(cls._port)
except PathError as e:
# Can't find path to LanguageTool.
err = e
else:
# Need to PIPE all handles: http://bugs.python.org/issue3905
cls._server = subprocess.Popen(
server_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
startupinfo=startupinfo
)
# Python 2.7 compatibility
# for line in cls._server.stdout:
match = None
while True:
line = cls._server.stdout.readline()
if not line:
break
match = cls._PORT_RE.search(line)
if match:
port = int(match.group(1))
if port != cls._port:
raise Error('requested port {}, but got {}'
.format(cls._port, port))
break
if not match:
cls._terminate_server()
err_msg = cls._server.communicate()[1].strip()
cls._server = None
match = cls._PORT_RE.search(err_msg)
if not match:
raise Error(err_msg)
port = int(match.group(1))
if port != cls._port:
raise Error(err_msg)
if not cls._server:
# Couldn't start the server, so maybe there is already one running.
params = {'language': FAILSAFE_LANGUAGE, 'text': ''}
data = urllib.parse.urlencode(params).encode()
try:
with urlopen(cls._url, data, cls._TIMEOUT) as f:
tree = ElementTree.parse(f)
except (IOError, http.client.HTTPException) as e:
if err:
raise err
raise ServerError('{}: {}'.format(cls._url, e))
root = tree.getroot()
# LanguageTool 1.9+
if root.get('software') != 'LanguageTool':
raise ServerError('unexpected software from {}: {!r}'
.format(cls._url, root.get('software')))
@classmethod
def _server_is_alive(cls):
return cls._server and cls._server.poll() is None
@classmethod
def _terminate_server(cls):
try:
cls._server.terminate()
except OSError:
pass
@total_ordering
class LanguageTag(str):
"""Language tag supported by LanguageTool."""
_LANGUAGE_RE = re.compile(r"^([a-z]{2,3})(?:[_-]([a-z]{2}))?$", re.I)
def __new__(cls, tag):
# Can't use super() here because of 3to2.
return str.__new__(cls, cls._normalize(tag))
def __eq__(self, other):
try:
other = self._normalize(other)
except ValueError:
pass
return str(self) == other
def __lt__(self, other):
try:
other = self._normalize(other)
except ValueError:
pass
return str(self) < other
@classmethod
def _normalize(cls, tag):
if not tag:
raise ValueError('empty language tag')
languages = {languag |
def extractDanielyangNinja(item):
'''
Parser for 'danielyang.ninja'
'''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
return None
if "WATTT" in item['tags']:
return buildReleaseMessageWithType(item, "WATTT", vol, chp, frag=frag, postfix=postfix)
return False
|
#!/usr/bin/env python
# coding=utf-8
"""30. Digit fifth powers
https://projecteuler.net/problem=30
Surprisingly there are only three numbers that can be written as the sum of
fourth powers of their digits:
> 1634 = 1^4 + 6^4 + 3^4 + 4^4
> 8208 = 8^4 + 2^4 + 0^4 + 8^4
> 9474 = 9^4 + 4^4 + 7^4 + 4^4
As 1 = 1^4 is not a sum it is not included.
The sum of these numbers is 1634 + 8208 + 9474 = 19316.
Find the sum of all the numbers that can be written as the sum of fifth powers
of their digits.
"""
|
import unittest
"""
Given an unordered array of integers, find the length of longest increasing subsequence.
Input: 0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
Output: 6 (0, 2, 6, 9, 11, 15)
"""
"""
A great explanation of the approach appears here:
http://www.geeksforgeeks.org/longest-monotonically-increasing-subsequence-size-n-log-n/
"""
def find_ceil_index(list_of_numbers, ele):
"""
Returns the smallest element in list_of_numbers greater than or equal to ele | .
"""
low = 0
high = len(list_of_numbers)-1
ans = -1
while low <= high:
        mid = (low + high) // 2  # floor division so mid stays an int on Python 3
if list_of_numbers[mid] >= ele:
ans = mid
high = mid - 1
else:
low = mid + 1
return ans
def find_longest_increasing_subsequence_length(list_of_numbers):
    if not list_of_numbers:
        return 0
    # LCS[i] holds the smallest possible tail value of an increasing
    # subsequence of length i+1 seen so far.
    LCS = [list_of_numbers[0]]
for i in range(1, len(list_of_numbers)):
cur_ele = list_of_numbers[i]
k = find_ceil_index(LCS, cur_ele)
if k == -1:
LCS.append(cur_ele)
else:
LCS[k] = cur_ele
return len(LCS)
class TestLIS(unittest.TestCase):
def test_longest_increasing_subsequence(self):
list_of_numbers = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [2, 5, 3, 1, 2, 3, 4, 5, 6]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 6)
list_of_numbers = [5, 4, 3, 2, 1]
self.assertEqual(find_longest_increasing_subsequence_length(list_of_numbers), 1)
|
stance(dq1, _trep._Config)
assert isinstance(q2, _trep._Config)
return self._L_ddqdq(dq1, q2)
def L_ddqdqdq(self, dq1, q2, q3):
"""
Calculate the third derivative of the Lagrangian with respect
to the velocity of dq1, the value of q2, and the value of q3.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(q2, _trep._Config)
assert isinstance(q3, _trep._Config)
return self._L_ddqdqdq(dq1, q2, q3)
def L_ddqdqdqdq(self, dq1, q2, q3, q4):
"""
Calculate the fourth derivative of the Lagrangian with respect
to the velocity of dq1, the value of q2, the value of q3, and
the value of q4.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(q2, _trep._Config)
assert isinstance(q3, _trep._Config)
assert isinstance(q4, _trep._Config)
return self._L_ddqdqdqdq(dq1, q2, q3, q4)
def L_ddqddq(self, dq1, dq2):
"""
Calculate the second derivative of the Lagrangian with respect
to the velocity of dq1 and the velocity of dq2.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(dq2, _trep._Config)
return self._L_ddqddq(dq1, dq2)
def L_ddqddqdq(self, dq1, dq2, q3):
"""
Calculate the third derivative of the Lagrangian with respect
to the velocity of dq1, the velocity of dq2, and the value of
q3.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(dq2, _trep._Config)
assert isinstance( q3, _trep._Config)
return self._L_ddqddqdq(dq1, dq2, q3)
def L_ddqddqdqdq(self, dq1, dq2, q3, q4):
"""
Calculate the fourth derivative of the Lagrangian with respect
to the velocity of dq1, the velocity of dq2, the value of q3,
and the value of q4.
"""
assert isinstance(dq1, _trep._Config)
assert isinstance(dq2, _trep._Config)
assert isinstance( q3, _trep._Config)
assert isinstance( q4, _trep._Config)
return self._L_ddqddqdqdq(dq1, dq2, q3, q4)
@dynamics_indexing_decorator('d')
def f(self, q=None):
"""
Calculate the dynamics at the current state.
See documentation for details.
"""
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS)
return self._f[q].copy()
@dynamics_indexing_decorator('dq')
def f_dq(self, q=None, q1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_dq[q1, q].T.copy()
@dynamics_indexing_decorator('dq')
def f_ddq(self, q=None, dq1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_ddq[dq1, q].T.copy()
@dynamics_indexing_decorator('dk')
    def f_dk(self, q=None, k1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_dk[k1, q].T.copy()
@dynamics_indexing_decorator('du')
def f_du(self, q=None, u1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._f_du[u1, q].T.copy()
@dynamics_indexing_decorator('dqq')
def f_dqdq(self, q=None, q1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dqdq[q1, q2, q].copy()
@dynamics_indexing_decorator('dqq')
def f_ddqdq(self, q=None, dq1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_ddqdq[dq1, q2, q].copy()
@dynamics_indexing_decorator('dqq')
def f_ddqddq(self, q=None, dq1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_ddqddq[dq1, dq2, q].copy()
@dynamics_indexing_decorator('dkq')
    def f_dkdq(self, q=None, k1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dkdq[k1, q2, q].copy()
@dynamics_indexing_decorator('duq')
def f_dudq(self, q=None, u1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dudq[u1, q2, q].copy()
@dynamics_indexing_decorator('duq')
def f_duddq(self, q=None, u1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_duddq[u1, dq2, q].copy()
    @dynamics_indexing_decorator('duu')
def f_dudu(self, q=None, u1=None, u2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._f_dudu[u1, u2, q].copy()
@dynamics_indexing_decorator('c')
def lambda_(self, constraint=None):
"""
Calculate the constraint forces at the current state.
"""
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS)
return self._lambda[constraint].copy()
@dynamics_indexing_decorator('cq')
def lambda_dq(self, constraint=None, q1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_dq[q1, constraint].T.copy()
@dynamics_indexing_decorator('cq')
def lambda_ddq(self, constraint=None, dq1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_ddq[dq1, constraint].T.copy()
@dynamics_indexing_decorator('ck')
    def lambda_dk(self, constraint=None, k1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_dk[k1, constraint].T.copy()
@dynamics_indexing_decorator('cu')
def lambda_du(self, constraint=None, u1=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV1)
return self._lambda_du[u1, constraint].T.copy()
@dynamics_indexing_decorator('cqq')
def lambda_dqdq(self, constraint=None, q1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dqdq[q1, q2, constraint].copy()
@dynamics_indexing_decorator('cqq')
def lambda_ddqdq(self, constraint=None, dq1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_ddqdq[dq1, q2, constraint].copy()
@dynamics_indexing_decorator('cqq')
def lambda_ddqddq(self, constraint=None, dq1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_ddqddq[dq1, dq2, constraint].copy()
@dynamics_indexing_decorator('ckq')
    def lambda_dkdq(self, constraint=None, k1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dkdq[k1, q2, constraint].copy()
@dynamics_indexing_decorator('cuq')
def lambda_dudq(self, constraint=None, u1=None, q2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dudq[u1, q2, constraint].copy()
@dynamics_indexing_decorator('cuq')
def lambda_duddq(self, constraint=None, u1=None, dq2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_duddq[u1, dq2, constraint].copy()
@dynamics_indexing_decorator('cuu')
def lambda_dudu(self, constraint=None, u1=None, u2=None):
self._update_cache(_trep.SYSTEM_CACHE_DYNAMICS_DERIV2)
return self._lambda_dudu[u1, u2, constraint].copy()
def test_derivative_dq(self, func, func_dq, delta=1e-6, tolerance=1e-7,
verbose=False, test_name='<unnamed>'):
"""
Test the derivative of a function with respect to a
configuration variable value against its numerical
approximation.
func -> Callable taking no arguments and returning float or np.array
func_dq -> Callable taking one configuration variable argument
and returning a float or np.array.
delta -> perturbation to the current configuration to
calculate the numeric approximation.
        Returns a summary of the tests: the total number performed and the
        number that failed.
"""
q0 = self.q
tests_total = 0
tests_failed = 0
for q in self.configs:
self.q = q0
dy_exact = func_dq(q)
delta_q = q0.copy()
delta_q[q.index] -= delta
se |
__all__ = (
'TokenList',
)
import collections
import collections.abc
from .errors import TokenTypeError
class TokenList(collections.abc.Sized, collections.abc.Iterable, collections.abc.Container):
def __init__(self, init=None, *, token_type=None):
if token_type is None:
token_type = object
self._token_type = token_type
self._tokens = collections.deque()
if init:
if not hasattr(init, '__iter__'):
raise TypeError("invalid value {!r}: not an iterable".format(init))
for token in init:
self.add(token)
@property
def token_type(self):
return self._token_type
def add(self, token, *, count=1):
if not isinstance(token, self._token_type):
raise TokenTypeError("invalid token {!r}: type is not {}".format(token, self._token_type.__name__))
for i in range(count):
self._tokens.append(token)
def pop(self):
        return self._tokens.popleft()
def remove(self, token):
for c, t in enumerate(self._tokens):
if t is token:
break
else:
return
del self._tokens[c]
#self._tokens.remove(token)
def copy(self):
return self.__class__(init=self, token_type=self.token_type)
def __iter__(self):
yield from self._tokens
def __len__(self):
return len(self._tokens)
def clear(self):
self._tokens.clear()
def extend(self, values):
if self._token_type is object:
self._tokens.extend(values)
else:
for value in values:
self.add(value)
def __contains__(self, value):
return value in self._tokens
def __repr__(self):
args = []
if self:
args.append(repr(list(self._tokens)))
if self._token_type is not object:
args.append("token_type={}".format(self._token_type.__name__))
return "{}({})".format(self.__class__.__name__, ', '.join(args))
def __eq__(self, other):
if isinstance(other, TokenList):
if self._token_type != other.token_type:
return False
return self._tokens == other._tokens
else:
if len(self._tokens) != len(other):
return False
for a, b in zip(self._tokens, other):
if a != b:
return False
return True
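# Hypothetical usage sketch (added for illustration, not part of the original
# module), showing the type-checked add/pop behaviour:
#
#     tl = TokenList(['a', 'b'], token_type=str)
#     tl.add('c', count=2)       # appends 'c' twice
#     assert len(tl) == 4
#     assert tl.pop() == 'a'     # pop() removes from the left (FIFO)
#     tl.add(1)                  # raises TokenTypeError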
|
addresses_args={"uselist": False})
assert_raises_message(
exc.InvalidRequestError,
"On relationship User.addresses, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_no_m2o(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User, lazy='dynamic')
})
mapper(User, users)
assert_raises_message(
exc.InvalidRequestError,
"On relationship Address.user, 'dynamic' loaders cannot be "
"used with many-to-one/one-to-one relationships and/or "
"uselist=False.",
configure_mappers
)
def test_order_by(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses.order_by(desc(Address.email_address))),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
def test_configured_order_by(self):
addresses = self.tables.addresses
User, Address = self._user_address_fixture(
addresses_args={
"order_by":
addresses.c.email_address.desc()})
sess = create_session()
u = sess.query(User).get(8)
eq_(
list(u.addresses),
[
Address(email_address='ed@wood.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@bettyboop.com')
]
)
# test cancellation of None, replacement with something else
eq_(
list(u.addresses.order_by(None).order_by(Address.email_address)),
[
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
]
)
# test cancellation of None, replacement with nothing
eq_(
set(u.addresses.order_by(None)),
set([
Address(email_address='ed@bettyboop.com'),
Address(email_address='ed@lala.com'),
Address(email_address='ed@wood.com')
])
)
def test_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
u = sess.query(User).first()
eq_(u.addresses.count(), 1)
def test_dynamic_on_backref(self):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(Address, addresses, properties={
'user': relationship(User,
backref=backref('addresses', lazy='dynamic'))
})
mapper(User, users)
sess = create_session()
ad = sess.query(Address).get(1)
def go():
ad.user = None
self.assert_sql_count(testing.db, go, 0)
sess.flush()
u = sess.query(User).get(7)
assert ad not in u.addresses
def test_no_count(self):
User, Address = self._user_address_fixture()
sess = create_session()
q = sess.query(User)
# dynamic collection cannot implement __len__() (at least one that
# returns a live database result), else additional count() queries are
# issued when evaluating in a list context
def go():
eq_(
q.filter(User.id == 7).all(),
[
User(id=7,
addresses=[
Address(id=1, email_address='jack@bean.com')
])
]
)
self.assert_sql_count(testing.db, go, 2)
def test_no_populate(self):
User, Address = self._user_address_fixture()
u1 = User()
assert_raises_message(
NotImplementedError,
"Dynamic attributes don't support collection population.",
attributes.set_committed_value, u1, 'addresses', []
)
def test_m2m(self):
Order, Item = self._order_item_fixture(items_args={
"backref": backref("orders", lazy="dynamic")
})
sess = create_session()
o1 = Order(id=15, description="order 10")
i1 = Item(id=10, description="item 8")
o1.items.append(i1)
sess.add(o1)
sess.flush()
assert o1 in i1.orders.all()
assert i1 in o1.items.all()
@testing.exclude('mysql', 'between',
((5, 1, 49), (5, 1, 52)),
'https://bugs.launchpad.net/ubuntu/+source/mysql-5.1/+bug/706988')
def test_association_nonaliased(self):
items, Order, orders, order_items, Item = (self.tables.items,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.Item)
mapper(Order, orders, properties={
'items': relationship(Item,
secondary=order_items,
lazy="dynamic",
order_by=order_items.c.item_id)
})
mapper(Item, items)
sess = create_session()
o = sess.query(Order).first()
self.assert_compile(
o.items,
"SELECT items.id AS items_id, items.description AS "
"items_description FROM items,"
" order_items WHERE :param_1 = order_items.order_id AND "
"items.id = order_items.item_id"
" ORDER BY order_items.item_id",
use_default_dialect=True
)
# filter criterion against the secondary table
# works
eq_(
o.items.filter(order_items.c.item_id == 2).all(),
[Item(id=2)]
)
def test_transient_count(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
        eq_(u1.addresses.count(), 1)
def test_transient_access(self):
User, Address = self._user_address_fixture()
u1 = User()
u1.addresses.append(Address())
        eq_(u1.addresses[0], Address())
def test_custom_query(self):
class MyQuery(Query):
pass
User, Address = self._user_address_fixture(
addresses_args={"query_class": MyQuery})
sess = create_session()
u = User()
sess.add(u)
col = u.addresses
assert isinstance(col, Query)
assert isinstance(col, MyQuery)
assert hasattr(col, 'append')
eq_(type(col).__name__, 'AppenderMyQuery')
q = col.limit(1)
assert isinstance(q, Query)
assert isinstance(q, MyQuery)
assert not hasattr(q, 'append')
eq_(type(q).__name__, 'MyQuery')
def test_custom_query_with_custom_mixin(self):
class MyAppenderMixin(AppenderMixin):
def add(self, items):
if isinstance(items, list):
for item in items:
self.append(item)
else:
self.append(items)
class MyQuery(Query):
pass
class MyAppenderQuery(MyAppenderMixin, MyQuery):
query_class = MyQuery
User, Address = self._user_address_fixture(
addresses_args={
"query_class": |
settings
import json
import os
import re
import nltk
from nltk.corpus import *
from nltk.collocations import *
import string
import logging
logger = logging.getLogger('nltksite.nltkapp')
# Create your views here.
# this is horrible
def clearencoding(str):
try:
json.dumps(str)
if len(str) == 1 and ord(str) > 128:
            logger.warning("Unicode Error on str='%s' code=%s Skipping" % (repr(str), ord(str)))
str = ""
except UnicodeDecodeError:
        logger.warning("Unicode Error on str='%s' code=%s Skipping" % (str, repr(str)))
str = str.decode('utf8', 'ignore')
return str
def index(request):
logger.debug("index requested.")
corpora = Corpus.objects.all()
context = {'corpora': corpora}
    return render(request, 'nltkapp/index.html', context)
def sayhello(request):
logger.debug("say hello.")
return JsonResponse({'message': 'Hello World'})
def getdocuments(request):
corpus_id = request.GET.get('corpus_id', None)
c = Corpus.objects.get(pk=corpus_id)
logger.debug("Getting list of documents for corpus %s (id=%s)" % (c.name,corpus_i | d))
documents = c.document_set.all()
documents_list = []
for d in documents:
documents_list.append({'id': d.id, 'name': d.file.name})
return JsonResponse({'documents': documents_list})
def get_sentences(request):
corpus_id = request.GET.get('corpus_id', None)
document_ids = json.loads(request.GET.get('document_ids', None))
word = request.GET.get('word', None)
logger.debug("corpus_id=%s, document_ids=%s, word=%s" % (corpus_id, str(document_ids), word))
finalResult = {}
corpus, internal_filter = open_corpus(corpus_id, document_ids)
# \b is a word boundary match in regex, so we get government but not governmentally
pattern = "\\b" + word + "\\b"
# Chosen corpus is an nltk internal corpus (gutenberg, bible, inaugural addresses, etc...).
# We treat those slightly differently than user-mode corpora
fileids = []
if internal_filter:
fileids = [internal_filter]
else:
# Get array of fileids used by the NLTK corpus object from our own document ids
fileids = corpus.fileids()
logger.debug("fileids=%s", fileids)
for fileid in fileids:
if fileid in corpus.fileids():
sents = corpus.sents(fileid)
results = []
for sentence in sents:
combined = clearencoding(' '.join(sentence))
if re.search(pattern, combined):
results.append(combined)
if len(results) > 0:
finalResult[fileid] = results
# wdmatrix is a word-document matrix. finalResult['facebook.txt'] = [sentences]
return JsonResponse({'word': word, 'wdmatrix':finalResult})
def wordfreq(request):
corpus_id = request.GET.get('corpus_id', None)
document_ids = json.loads(request.GET.get('document_ids', None))
ngram = request.GET.get('ngram', None)
scoring_method = request.GET.get('scoring_method', None)
logger.debug("corpus_id=%s, document_ids=%s, ngram=%s, scoring_method=%s" % (corpus_id, str(document_ids), ngram, scoring_method))
corpus, internal_filter = open_corpus(corpus_id, document_ids)
if not internal_filter:
words = corpus.words()
else:
words = corpus.words(internal_filter)
logger.debug("PlaintextCorpusReader on files: %s" % corpus.fileids())
if ngram == "1":
return onegram_collocation(words)
elif ngram == "2":
first_word_list, fdist = bigram_collocation(words, scoring_method)
elif ngram == "3":
first_word_list, fdist = trigram_collocation(words, scoring_method)
    else:
        logger.debug("Invalid ngram value specified: %s" % ngram)
        # Return early: falling through would reference an undefined
        # first_word_list below.
        return JsonResponse({'error': 'invalid ngram value'})
    word_list = []
    for b in first_word_list:
        # fdist is keyed by the same n-gram tuples, so a direct membership
        # test replaces the linear scan and avoids appending a stale
        # worddict when an n-gram is missing from the distribution.
        if b in fdist:
            word_list.append({'word': clearencoding(' '.join(b)),
                              'weight': fdist[b],
                              'exclude': 0,
                              'exclude_reason': ''})
    return JsonResponse({'list': word_list})
def onegram_collocation(words):
fdist = nltk.FreqDist(words)
unusual_list = unusual_words(words)
word_list = []
for sample in fdist:
contains_punctuation = False
all_punctuation = True
for c in sample:
if c in string.punctuation:
contains_punctuation = True
else:
all_punctuation = False
# If word contains punctuation OR occurs less than 3 times OR is a stop word, SKIP IT
if (contains_punctuation or fdist[sample] < 3 or sample in stopwords.words('english')):
continue
if (clearencoding(sample.lower()) in unusual_list):
unusual = True
else:
unusual = False
if (len(clearencoding(sample)) > 0):
word_list.append({'word': clearencoding(sample), 'weight': fdist[sample], 'exclude': 0, 'exclude_reason': '', 'unusual': unusual})
return JsonResponse({'list':word_list})
def bigram_collocation(words, score):
ignored_words = stopwords.words('english')
bigrams = nltk.bigrams(words)
fdist = nltk.FreqDist(bigrams)
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(words)
# Only select bigrams that appear at least 3 times
finder.apply_freq_filter(3)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
# return the 10 bigrams with the highest PMI
method = bigram_measures.pmi
if "student_t" in score:
method = bigram_measures.student_t
elif "chi_sq" in score:
method = bigram_measures.chi_sq
elif "pmi" in score:
method = bigram_measures.pmi
elif "likelihood_ratio" in score:
method = bigram_measures.likelihood_ratio
elif "poisson_stirling" in score:
method = bigram_measures.poisson_stirling
elif "jaccard" in score:
method = bigram_measures.jaccard
word_list = finder.nbest(method, 100)
return [word_list, fdist]
def trigram_collocation(words, score):
ignored_words = stopwords.words('english')
trigrams = nltk.trigrams(words)
fdist = nltk.FreqDist(trigrams)
trigram_measures = nltk.collocations.TrigramAssocMeasures()
finder = TrigramCollocationFinder.from_words(words)
#finder.apply_freq_filter(3)
finder.apply_word_filter(lambda w: len(w) < 3 or w.lower() in ignored_words)
method = trigram_measures.pmi
if "student_t" in score:
method = trigram_measures.student_t
elif "chi_sq" in score:
method = trigram_measures.chi_sq
elif "pmi" in score:
method = trigram_measures.pmi
elif "likelihood_ratio" in score:
method = trigram_measures.likelihood_ratio
elif "poisson_stirling" in score:
method = trigram_measures.poisson_stirling
elif "jaccard" in score:
method = trigram_measures.jaccard
word_list = finder.nbest(method, 100)
return [word_list, fdist]
# Given an array of words, connect to wordnet and return the part of speech, definition, etc...
def wordnet_data(request):
words = json.loads(request.GET.get('words', None))
logger.debug("wordnet_data words=%s" % str(words))
results = []
for w in words:
syns = wordnet.synsets(w)
if len(syns) > 0:
root_word = syns[0].lemmas()[0].name()
pos = syns[0].pos()
definition = syns[0].definition()
synonyms = ''
for syn in syns:
if (syn.lemmas()[0].name() != root_word):
synonyms += syn.lemmas()[0].name() + ', '
examples = syns[0].examples()
results.append({'word': w,
'root': root_word,
'pos': pos,
'definition': definition,
'synonyms': synonyms[:-2],
'examples': examples
})
else:
results.append({'word': w,
'root': 'undefined',
'pos': 'undefined',
'definition': 'undefined',
'synonyms': 'undefined',
'examples': 'undefined'
})
return JsonResponse({'results': results})
def unusual_words(text):
text_vocab = set(w.lower() for w in text if w.isalpha())
english_vocab = set(w.lower() for w in nltk.corpus.words.words())
unusual = text_vocab.difference(english_vocab)
return sorted(unusual)
def open_corpus(corpus_id, document_ids):
c = Corpus.objects.get(pk=corpus_id)
if c.internal_nltk_name:
return eval(c.internal_nltk_name), c.internal_nltk_filter
fileids = []
for d in document_ids:
d = int(d)
# we want entire corpus
if (d == -1):
fileids = '.*\.txt'
break
document = Document.objects.get(pk=d)
fileids.append(os.path.basename(document.file.name))
# Kareem March 5, 2015: Added encoding=None. This prevents NLTK from assuming any specific encoding like utf8
# Without encoding=None, we got UnicodeDecodeErrors. This avoids it, but we have to ha |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class ReferenceContribution:
    def __init__(self, src, dest, me, cont):
        self.theSource = src
        self.theDestination = dest
        self.theMeansEnd = me
        self.theContribution = cont
def source(self): return self.theSource
def destination(self): return self.theDestination
def meansEnd(self): return self.theMeansEnd
def contribution(self): return self.theContribution
|
{"lv":
{"containingPool": fake_pool_name,
"lunIndex": fake_aoetarget,
"name": fake_volume_name,
"lvStatus":
{"exportedLun":
{"lun": fake_lun,
"shelf": fake_shelf}}
},
"repoName": fake_repository_name}]}]]
fake_esm_fetch_no_volume = [[
{"command": "super_fake_command"},
{"reply": []}]]
fake_esm_success = {"category": "provider",
| "tracking": False,
"configState": "completedSuccessfully",
"heldPending": False,
"metaCROp": "noAction",
"message": None}
fake_group_fullpath = "admin group:%s" % (fake_esm_group)
fake_group_id = 4
fake_login_reply = {"values": [
{"fullPath": fake_group_fullpath,
| "groupId": fake_group_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
fake_group_fail_fullpath = "fail group:%s" % (fake_esm_group)
fake_group_fail_id = 5
fake_login_reply_group_fail = {"values": [
{"fullPath": fake_group_fail_fullpath,
"groupId": fake_group_fail_id}],
"message": "",
"state": "adminSucceed",
"metaCROp": "noAction"}
def compare(a, b):
if type(a) != type(b):
return False
if type(a) == list or type(a) == tuple:
if len(a) != len(b):
return False
return all(map(lambda t: compare(t[0], t[1]), zip(a, b)))
elif type(a) == dict:
if len(a) != len(b):
return False
for k, v in a.items():
if not compare(v, b[k]):
return False
return True
else:
return a == b
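# Illustrative cases for compare() (hypothetical, added for clarity; not part
# of the original tests):
#
#     compare([1, {'a': 2}], [1, {'a': 2}])  # True: recurses into containers
#     compare((1, 2), [1, 2])                # False: types must match exactly
#     compare({'a': 1}, {'a': 1, 'b': 2})    # False: lengths differ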
def pack_data(request):
request['data'] = jsonutils.dumps(request['data'])
class FakeRpcBadRequest(Exception):
pass
class FakeRpcIsNotCalled(Exception):
def __init__(self, handle, url_params, data):
self.handle = handle
self.url_params = url_params
self.data = data
def __str__(self):
return 'Fake Rpc handle for {0}/{1}/{2} not found'.format(
self.handle, self.url_params, self.data)
class FakeRpcHandle(object):
def __init__(self, handle, url_params, data, result):
self.handle = handle
self.url_params = url_params
self.data = data
self.result = result
self._is_called = False
def set_called(self):
self._is_called = True
def __call__(self, handle, url_params, data,
allow_empty_response=False):
if handle != self.handle:
raise FakeRpcBadRequest(
'Unexpected handle name {0}. Expected {1}.'
.format(handle, self.handle))
if not compare(url_params, self.url_params):
raise FakeRpcBadRequest('Unexpected url params: {0} / {1}'
.format(url_params, self.url_params))
if not compare(data, self.data):
raise FakeRpcBadRequest('Unexpected data: {0}/{1}'
.format(data, self.data))
if callable(self.result):
return self.result()
else:
return self.result
class FakeRpc(object):
def __init__(self):
self._handles = []
def handle(self, handle, url_params, data, result):
self._handles.append(FakeRpcHandle(handle, url_params, data, result))
def __call__(self, handle_name, url_params, data,
allow_empty_response=False):
for handle in self._handles:
            if (handle.handle == handle_name and
                    compare(handle.url_params, url_params) and
                    compare(handle.data, data)):
handle.set_called()
return handle(handle_name, url_params, data,
allow_empty_response)
raise FakeRpcIsNotCalled(handle_name, url_params, data)
class CoraidDriverTestCase(test.TestCase):
def setUp(self):
super(CoraidDriverTestCase, self).setUp()
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.coraid_default_repository = 'default_repository'
configuration.coraid_esm_address = fake_esm_ipaddress
configuration.coraid_user = fake_esm_username
configuration.coraid_group = fake_esm_group
configuration.coraid_password = fake_esm_password
configuration.volume_name_template = "volume-%s"
configuration.snapshot_name_template = "snapshot-%s"
configuration.coraid_repository_key = fake_coraid_repository_key
configuration.use_multipath_for_image_xfer = False
configuration.enforce_multipath_for_image_xfer = False
configuration.num_volume_device_scan_tries = 3
configuration.volume_dd_blocksize = '1M'
self.fake_rpc = FakeRpc()
self.stubs.Set(coraid.CoraidRESTClient, 'rpc', self.fake_rpc)
self.driver = coraid.CoraidDriver(configuration=configuration)
self.driver.do_setup({})
def mock_volume_types(self, repositories=None):
if not repositories:
repositories = [fake_repository_name]
self.mox.StubOutWithMock(volume_types, 'get_volume_type_extra_specs')
for repository in repositories:
(volume_types
.get_volume_type_extra_specs(fake_volume_type['id'],
fake_coraid_repository_key)
.AndReturn('<in> {0}'.format(repository)))
class CoraidDriverLoginSuccessTestCase(CoraidDriverTestCase):
def setUp(self):
super(CoraidDriverLoginSuccessTestCase, self).setUp()
login_results = {'state': 'adminSucceed',
'values': [
{'fullPath':
'admin group:{0}'.format(fake_esm_group),
'groupId': fake_esm_group_id
}]}
self.fake_rpc.handle('admin', {'op': 'login',
'username': fake_esm_username,
'password': fake_esm_password},
'Login', login_results)
self.fake_rpc.handle('admin', {'op': 'setRbacGroup',
'groupId': fake_esm_group_id},
'Group', {'state': 'adminSucceed'})
class CoraidDriverApplianceTestCase(CoraidDriverLoginSuccessTestCase):
def test_resize_volume(self):
new_volume_size = int(fake_volume_size) + 1
fetch_request = {'shelf': 'cms',
'orchStrRepo': '',
'lv': fake_volume_name}
self.fake_rpc.handle('fetch', fetch_request, None,
fake_esm_fetch)
reply = {'configState': 'completedSuccessfully'}
resize_volume_request = {'addr': 'cms',
'data': {
'lvName': fake_volume_name,
'newLvName': fake_volume_name + '-resize',
'size':
coraid_volume_size(new_volume_size),
'repoName': fake_repository_name},
'op': 'orchStrLunMods',
'args': 'resize'}
pack_data(resize_volume_request)
self.fake_rpc.handle('configure', {}, [resize_volume_request],
reply)
real_reply = self.driver.appliance.resize_volume(fake_volume_name,
new_volume_size)
self.assertEqual(reply['configState'], real_reply['configState'])
class CoraidDriverIntegrationalTestCase(CoraidDriverLoginSuccessTestCase):
def setUp(self):
        super(CoraidDriverIntegrationalTestCase, self).setUp()
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for main window related functionality
"""
import PyQt4.QtGui
from herculeum.ui.controllers import EndScreenController, StartGameController
from herculeum.ui.gui.endscreen import EndScreen
from herculeum.ui.gui.eventdisplay import EventMessageDockWidget
from herculeum.ui.gui.map import PlayMapWindow
from herculeum.ui.gui.menu import MenuDialog
from herculeum.ui.gui.startgame import StartGameWidget
from PyQt4.QtCore import QFile, Qt
from PyQt4.QtGui import (QAction, QApplication, QCursor, QDialog, QIcon,
QMainWindow, QPixmap, QSplashScreen)
class QtUserInterface():
"""
Class for Qt User Interface
.. versionadded:: 0.9
"""
def __init__(self, application):
"""
Default constructor
"""
super().__init__()
self.application = application
self.splash_screen = None
self.qt_app = QApplication([])
# self.qt_app.setOverrideCursor(QCursor(Qt.BlankCursor))
def show_splash_screen(self):
"""
Show splash screen
"""
file = QFile(':herculeum.qss')
file.open(QFile.ReadOnly)
styleSheet = str(file.readAll().data(), 'ascii')
self.qt_app.setStyleSheet(styleSheet)
pixmap = QPixmap(':splash.png')
self.splash_screen = QSplashScreen(pixmap)
self.splash_screen.show()
def show_main_window(self):
"""
Show main window
"""
main_window = MainWindow(self.application,
self.application.surface_manager,
self.qt_app,
None,
Qt.FramelessWindowHint,
StartGameController(self.application.level_generator_factory,
self.application.creature_generator,
self.application.item_generator,
self.application.config.start_level))
self.splash_screen.finish(main_window)
main_window.show_new_game()
self.qt_app.exec_()
class MainWindow(QMainWindow):
"""
Class for displaying main window
.. versionadded:: 0.5
"""
def __init__(self, application, surface_manager, qt_app, parent, flags,
controller):
"""
Default constructor
"""
super().__init__(parent, flags)
self.application = application
self.surface_manager = surface_manager
self.qt_app = qt_app
self.controller = controller
self.__set_layout()
def __set_layout(self):
exit_action = QAction(QIcon(':exit-game.png'),
'&Quit',
self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Quit game')
exit_action.triggered.connect(PyQt4.QtGui.qApp.quit)
inventory_action = QAction(QIcon(':inventory.png'),
'Inventory',
self)
inventory_action.setShortcut('Ctrl+I')
inventory_action.setStatusTip('Show inventory')
inventory_action.triggered.connect(self.__show_menu)
character_action = QAction(QIcon(':character.png'),
'Character',
self)
character_action.setShortcut('Ctrl+C')
character_action.setStatusTip('Show character')
self.map_window = PlayMapWindow(parent=None,
model=self.application.world,
surface_manager=self.surface_manager,
action_factory=self.application.action_factory,
rng=self.application.rng,
rules_engine=self.application.rules_engine,
configuration=self.application.config)
self.setCentralWidget(self.map_window)
self.map_window.MenuRequested.connect(self.__show_menu)
self.map_window.EndScreenRequested.connect(self.__show_end_screen)
self.setGeometry(50, 50, 800, 600)
self.setWindowTitle('Herculeum')
self.setWindowIcon(QIcon(':rune-stone.png'))
self.showMaximized()
def show_new_game(self):
"""
Show new game dialog
"""
app = self.application
start_dialog = StartGameWidget(generator=app.player_generator,
config=self.application.config.controls,
parent=self,
application=self.application,
surface_manager=self.surface_manager,
flags=Qt.Dialog | Qt.CustomizeWindowHint)
result = start_dialog.exec_()
if result == QDialog.Accepted:
player = start_dialog.player_character
intro_text = self.controller.setup_world(self.application.world,
player)
player.register_for_updates(self.map_window.hit_points_widget)
self.map_window.hit_points_widget.show_hit_points(player)
self.map_window.hit_points_widget.show_spirit_points(player)
self.map_window.message_widget.text_edit.setText(intro_text)
self.__show_map_window()
def __show_map_window(self):
"""
Show map window
"""
self.map_window.construct_scene()
def __show_message_window(self, character):
"""
Show message display
:param character: character which events to display
:type character: Character
"""
messages_display = EventMessageDockWidget(self, character)
self.addDockWidget(Qt.BottomDockWidgetArea,
messages_display)
    def __show_menu(self):
"""
Show menu
"""
menu_dialog = MenuDialog(self.surface_manager,
self.application.world.player,
                                 self.application.action_factory,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint)
menu_dialog.exec_()
def __show_end_screen(self):
"""
Show end screen
.. versionadded:: 0.8
"""
end_screen = EndScreen(self.application.world,
self.application.config.controls,
self,
Qt.Dialog | Qt.CustomizeWindowHint,
controller=EndScreenController())
end_screen.exec_()
self.qt_app.quit()
|
          'nl_NL': 'Oland\xc5\xb3',
'lv_LV': 'Latvi\xc5\xb3',
'el_GR': 'Graik\xc5\xb3',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Vengr\xc5\xb3',
'lt_LT': 'Lietuvi\xc5\xb3',
'sl_SI': 'Slov\xc4\x97n\xc5\xb3',
'hr_HR': 'Kroat\xc5\xb3',
'en_EN': 'Angl\xc5\xb3',
'es_ES': 'Ispan\xc5\xb3',
'ca_AD': 'Katalon\xc5\xb3',
'ru_RU': 'Rus\xc5\xb3',
'is_IS': 'Island\xc5\xb3',
'da_DK': 'Dan\xc5\xb3',
'ar_AE': 'Arab\xc5\xb3',
'sk_SK': 'Slovak\xc5\xb3',
'de_DE': 'Vokie\xc4\x8di\xc5\xb3',
'sr_YU': 'Serb\xc5\xb3',
'cs_CZ': '\xc4\x8cek\xc5\xb3',
'pl_PL': 'Lenk\xc5\xb3',
'uk_UA': 'Ukrainie\xc4\x8di\xc5\xb3',
'fa_IR': 'Pers\xc5\xb3',
'sv_SE': '\xc5\xa0ved\xc5\xb3',
'he_IL': 'Hebraj\xc5\xb3',
'T1': 'Pra\xc5\xa1ome naudoti AUK\xc5\xa0TYN IR \xc5\xbdEMYN mygtukus, kad i\xc5\xa1sirinktum\xc4\x97te savo kalb\xc4\x85. Po to spauskite OK mygtuk\xc4\x85.',
'T2': 'Kalbos pasirinkimas'},
'lv_LV': {'tr_TR': 'Turku',
'fr_FR': 'Fran\xc4\x8du',
'fi_FI': 'Somu',
'pt_PT': 'Portug\xc4\x81\xc4\xbcu',
'fy_x-FY': 'Fr\xc4\xabzu',
'it_IT': 'It\xc4\x81\xc4\xbcu',
'et_EE': 'Estonian',
'no_NO': 'Norv\xc4\x93\xc4\xa3u',
'nl_NL': 'Holandie\xc5\xa1u',
'lv_LV': 'Latvie\xc5\xa1u',
'el_GR': 'Grie\xc4\xb7u',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ung\xc4\x81ru',
'lt_LT': 'Lietuvie\xc5\xa1u',
'sl_SI': 'Slovenian',
'hr_HR': 'Horv\xc4\x81tu',
'en_EN': 'Ang\xc4\xbcu',
'es_ES': 'Sp\xc4\x81\xc5\x86u',
'ca_AD': 'Kat\xc4\x81lie\xc5\xa1u',
'ru_RU': 'Krievu',
'is_IS': 'Islandie\xc5\xa1u',
'da_DK': 'D\xc4\x81\xc5\x86u',
'ar_AE': 'Ar\xc4\x81bu',
'sk_SK': 'Slovakian',
'de_DE': 'V\xc4\x81cu',
'sr_YU': 'Serbian',
'cs_CZ': '\xc4\x8cehu',
'pl_PL': 'Po\xc4\xbcu',
'uk_UA': 'Ukrai\xc5\x86u',
'fa_IR': 'Persian',
'sv_SE': 'Zviedru',
'he_IL': 'Hebrew',
'T1': 'L\xc5\xabdzu lietojiet UP un DOWN tausti\xc5\x86us, lai izv\xc4\x93l\xc4\x93tos valodu. P\xc4\x93c tam spiediet OK.',
'T2': 'Valodas izv\xc4\x93le'},
'is_IS': {'tr_TR': 'Tyrkneska',
'fr_FR': 'Franska',
'fi_FI': 'Finnska',
'pt_PT': 'Port\xc3\xbagalska',
'fy_x-FY': 'Fr\xc3\xadsneska',
'it_IT': '\xc3\x8dtalska',
'et_EE': 'Eistneska',
'no_NO': 'Norska',
'nl_NL': 'Hollenska',
'lv_LV': 'Lettneska',
'el_GR': 'Gr\xc3\xadska',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungverska',
'lt_LT': 'Lith\xc3\xa1\xc3\xadska',
'sl_SI': 'Slovenian',
'hr_HR': 'Kr\xc3\xb3at\xc3\xadska',
'en_EN': 'Enska',
'es_ES': 'Sp\xc3\xa6nska',
'ca_AD': 'Katal\xc3\xb3nska',
'ru_RU': 'R\xc3\xbassneska',
'is_IS': '\xc3\x8dslenska',
'da_DK': 'Danska',
'ar_AE': 'Arab\xc3\xadska',
'sk_SK': 'Slovakian',
'de_DE': '\xc3\x9e\xc3\xbdska',
'sr_YU': 'Serneska',
'cs_CZ': 'T\xc3\xa9kkneska',
'pl_PL': 'P\xc3\xb3lska',
'uk_UA': 'Ukrainian',
'fa_IR': 'Persneska',
'sv_SE': 'S\xc3\xa6nskt',
'he_IL': 'Hebrew',
'T1': 'Vinsamlega noti\xc3\xb0 UP og NI\xc3\x90UR takka til a\xc3\xb0 velja tungum\xc3\xa1l. \xc3\x9dttu svo \xc3\xa1 OK til a\xc3\xb0 nota.',
'T2': 'Val tungum\xc3\xa1ls'},
'it_IT': {'tr_TR': 'Turco',
'fr_FR': 'Francese',
'fi_FI': 'Finlandese',
'pt_PT': 'Portoghese',
'fy_x-FY': 'Frisone',
'it_IT': 'Italiano',
'et_EE': 'Estone',
'no_NO': 'Norvegese',
'nl_NL': 'Olandese',
'lv_LV': 'Lettone',
'el_GR': 'Greco',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungherese',
'lt_LT': 'Lituano',
'sl_SI': 'Sloveno',
'hr_HR': 'Croato',
'en_EN': 'Inglese',
'es_ES': 'Spagnolo',
'ca_AD': 'Catalano',
'ru_RU': 'Russo',
'is_IS': 'Islandese',
          'da_DK': 'Danese',
'ar_AE': 'Arabo',
'sk_SK': 'Slovacco',
'de_DE': 'Tedesco',
'sr_YU': 'Serbo',
'cs_CZ': 'Ceco',
'pl_PL': 'Polacco',
'uk_UA': 'Ucraino',
'fa_IR': 'Persiano',
          'sv_SE': 'Svedese',
'he_IL': 'Ebraico',
'T1': 'Selezionare la propria lingua utilizzando i tasti S\xc3\xb9/Gi\xc3\xb9. OK >> confermare.',
'T2': 'Selezione lingua'},
'no_NO': {'tr_TR': 'Tyrkisk',
'fr_FR': 'Fransk',
'fi_FI': 'Finsk',
'pt_PT': 'Portugisisk',
'fy_x-FY': 'Frisisk',
'it_IT': 'Italiensk',
'et_EE': 'Estlandsk',
'no_NO': 'Norsk',
'nl_NL': 'Nederlandsk',
'lv_LV': 'Latvisk',
'el_GR': 'Gresk',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': 'Ungarsk',
'lt_LT': 'Litauisk',
'sl_SI': 'Slovenisk',
'hr_HR': 'Kroatisk',
'en_EN': 'Engelsk',
'es_ES': 'Spansk',
'ca_AD': 'Katalansk',
'ru_RU': 'Russisk',
'is_IS': 'Islandsk',
'da_DK': 'Dansk',
'ar_AE': 'Arabisk',
'sk_SK': 'Slovakisk',
'de_DE': 'Tysk',
'sr_YU': 'Serbisk',
'cs_CZ': 'Tjekkisk',
'pl_PL': 'Polsk',
'uk_UA': 'Ukrainsk',
'fa_IR': 'Persisk',
'sv_SE': 'Svensk',
'he_IL': 'Hebraisk',
'T1': 'Vennligst bruk OPP og NED taster for \xc3\xa5 velge spr\xc3\xa5k. Etterp\xc3\xa5 trykker du OK tast for \xc3\xa5 fortsette.',
'T2': 'Spr\xc3\xa5kvalg'},
'fa_IR': {'tr_TR': '\xd8\xaa\xd8\xb1\xda\xa9\xdb\x8c',
'fr_FR': '\xd9\x81\xd8\xb1\xd8\xa7\xd9\x86\xd8\xb3\xd9\x88\xdb\x8c',
'fi_FI': '\xd9\xbe\xd8\xa7\xdb\x8c\xd8\xa7\xd9\x86',
'pt_PT': '\xd9\xbe\xd8\xb1\xd8\xaa\xd8\xba\xd8\xa7\xd9\x84\xdb\x8c',
'fy_x-FY': '\xd9\x81\xd8\xb1\xdb\x8c\xd8\xb2\xdb\x8c',
'it_IT': '\xd8\xa7\xdb\x8c\xd8\xaa\xd8\xa7\xd9\x84\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'et_EE': '\xd8\xa7\xd8\xb3\xd8\xaa\xd9\x88\xd9\x86\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'no_NO': '\xd9\x86\xd8\xb1\xd9\x88\xda\x98\xdb\x8c',
'nl_NL': '\xd9\x81\xd9\x84\xd9\x85\xd9\x86\xda\xa9\xdb\x8c',
'lv_LV': '\xd9\x84\xd8\xaa\xd9\x88\xd9\x86\xdb\x8c',
'el_GR': '\xdb\x8c\xd9\x88\xd9\x86\xd8\xa7\xd9\x86\xdb\x8c',
'pt_BR_BR': 'Brazilian Portuguese',
'hu_HU': '\xd9\x85\xd8\xac\xd8\xa7\xd8\xb1\xd8\xb3\xd8\xaa\xd8\xa7\xd9\x86\xdb\x8c',
'lt_LT': '\xd9\x84\xdb\x8c\xd8\xaa\xd9\x88\xd8\xa7\xd9\x86\xdb\x8c',
'sl_SI': '\xd8\xa7\xd8\xb3\xd9\x84\xd9\x88\xd9\x88\xd9\x86\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'hr_HR': '\xda\xa9\xd8\xb1\xd9\x88\xd8\xa7\xd8\xb3\xdb\x8c',
'en_EN': '\xd8\xa7\xd9\x86\xda\xaf\xd9\x84\xdb\x8c\xd8\xb3\xdb\x8c',
'es_ES': '\xd8\xa7\xd8\xb3\xd9\xbe\xd8\xa7\xd9\x86\xdb\x8c\xd8\xa7\xdb\x8c\xdb\x8c',
'ca_AD': '\xda\xa9\xd8\xa7\xd8\xaa\xd8\xa7\xd9\x84\xd8\xa7\xd9\x86',
'ru_RU': '\xd8\xb1\xd9\x88\xd8\xb3\xdb\x8c',
'is_IS': '\xd8\xa7\xdb\x8c\xd8\xb3\xd9\x84\xd9\x86\xd8\xaf\xdb\x8c',
'da_DK': '\xd8\xaf\xd8\xa7\xd9\x86\xd9\x85\xd8\xa7\xd8\xb1\xda\xa9\xdb\x8c',
'ar_AE': '\xd8\xb9\xd8\xb1\xd8\xa8\xdb\x8c',
'sk_SK': '\xd8\xa7\xd8\xb3\xd9\x84\xd9\x88\xd8\xa7\xda\xa9\xdb\x8c',
'de_DE': '\xd8\xa2\xd9\x84\xd9\x85\xd8\xa7\xd9\x86\xdb\x8c',
'sr_YU': '\xd8\xb5\xd8\xb1\xd8\xa8\xd8\xb3\xd8\xaa\xd8\ |
# -*- coding: utf-8 -*-
"""WebUI."""
from .websocket import WebsocketProxyHandler
def create_webapp(naumanni, **kwargs):
"""App factory.
:param CircleCore core: CircleCore Core
:param str base_url: ベースURL
:param int ws_port: Websocket Port Number
:return: WebUI App
:rtype: CCWebApp
"""
    from .app import NaumanniWebApp
app = NaumanniWebApp(naumanni, **kwargs)
return app
|
#
# Copyright (c) 2014, Jim Bosch
# All rights reserved.
#
# mcpib is distributed under a simple BSD-like license;
# see the LICENSE file that should be present in the root
# of the source distribution.
#
import unittest
import os
import sys
buildPythonPath = os.path.join(os.path.split(__file__)[0], "..", "python")
if os.path.exists(buildPythonPath): sys.path.insert(0, buildPythonPath)
import mcpib
import builtin_strings_mod as mod
class BuiltinStringsTestCase(unittest.TestCase):
    def testString(self):
"""Test that to-Python and from-Python converters for std::string work as expected."""
self.assertEqual(mod.passthru_string("foo"), "foo")
self.assertRaises(mcpib.FromPythonError, mod.passthru_string, 5)
self.assertRaises(mcpib.FromPythonError, mod.passthru_string, ["bar"])
def testCString(self):
"""Test that to-Python and from-Python converters for char const * work as expected."""
self.assertEqual(mod.passthru_cstring("foo"), "foo")
self.assertRaises(mcpib.FromPythonError, mod.passthru_cstring, 5)
self.assertRaises(mcpib.FromPythonError, mod.passthru_cstring, ["bar"])
def testCharArgs(self):
"""Test that c-string converters are not used for char values, references, or non-const pointers."""
self.assertRaises(mcpib.FromPythonError, mod.accept_char, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_const, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_ptr, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_ref, "foo")
self.assertRaises(mcpib.FromPythonError, mod.accept_char_const_ref, "foo")
self.assertRaises(mcpib.ToPythonError, mod.return_char)
self.assertRaises(mcpib.ToPythonError, mod.return_char_const)
self.assertRaises(mcpib.ToPythonError, mod.return_char_ptr)
self.assertRaises(mcpib.ToPythonError, mod.return_char_ref)
self.assertRaises(mcpib.ToPythonError, mod.return_char_const_ref)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python
"""FEM library
Demonstrates some simple finite element definitions, and computes a mass
matrix
$ python fem.py
[ 1/60, 0, -1/360, 0, -1/90, -1/360]
[ 0, 4/45, 0, 2/45, 2/45, -1/90]
[-1/360, 0, 1/60, -1/90, 0, -1/360]
[ 0, 2/45, -1/90, 4/45, 2/45, 0]
[ -1/90, 2/45, 0, 2/45, 4/45, 0]
[-1/360, -1/90, -1/360, 0, 0, 1/60]
"""
from sympy import symbols, Symbol, factorial, Rational, zeros, div, eye, \
    integrate, diff, pprint, reduced, binomial
x, y, z = symbols('x,y,z')
class ReferenceSimplex:
def __init__(self, nsd):
self.nsd = nsd
coords = []
if nsd <= 3:
coords = symbols('x,y,z')[:nsd]
else:
coords = []
for d in range(0,nsd):
coords.append(Symbol("x_%d" % d))
self.coords = coords
def integrate(self,f):
coords = self.coords
nsd = self.nsd
limit = 1
for p in coords:
limit -= p
intf = f
for d in range(0,nsd):
p = coords[d]
limit += p
intf = integrate(intf, (p, 0, limit))
return intf
def bernstein_space(order, nsd):
if nsd > 3:
raise RuntimeError("Bernstein only implemented in 1D, 2D, and 3D")
sum = 0
basis = []
coeff = []
if nsd == 1:
b1, b2 = x, 1-x
for o1 in range(0,order+1):
for o2 in range(0,order+1):
if o1 + o2 == order:
aij = Symbol("a_%d_%d" % (o1,o2))
sum += aij*binomial(order,o1)*pow(b1, o1)*pow(b2, o2)
basis.append(binomial(order,o1)*pow(b1, o1)*pow(b2, o2))
coeff.append(aij)
if nsd == 2:
b1, b2, b3 = x, y, 1-x-y
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
if o1 + o2 + o3 == order:
aij = Symbol("a_%d_%d_%d" % (o1,o2,o3))
fac = factorial(order) / (factorial(o1)*factorial(o2)*factorial(o3))
                        sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3))
coeff.append(aij)
if nsd == 3:
b1, b2, b3, b4 = x, y, z, 1-x-y-z
for o1 in range(0,order+1):
for o2 in range(0,order+1):
for o3 in range(0,order+1):
                    for o4 in range(0,order+1):
if o1 + o2 + o3 + o4 == order:
aij = Symbol("a_%d_%d_%d_%d" % (o1,o2,o3,o4))
fac = factorial(order)/ (factorial(o1)*factorial(o2)*factorial(o3)*factorial(o4))
sum += aij*fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4)
basis.append(fac*pow(b1, o1)*pow(b2, o2)*pow(b3, o3)*pow(b4, o4))
coeff.append(aij)
return sum, coeff, basis
def create_point_set(order, nsd):
h = Rational(1,order)
set = []
if nsd == 1:
for i in range(0, order+1):
x = i*h
if x <= 1:
                set.append((x,))
if nsd == 2:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
if x + y <= 1:
set.append((x,y))
if nsd == 3:
for i in range(0, order+1):
x = i*h
for j in range(0, order+1):
y = j*h
for k in range(0, order+1):
                    z = k*h
if x + y + z <= 1:
set.append((x,y,z))
return set
def create_matrix(equations, coeffs):
A = zeros(len(equations))
i = 0; j = 0
for j in range(0, len(coeffs)):
c = coeffs[j]
for i in range(0, len(equations)):
e = equations[i]
d, _ = reduced(e, [c])
A[i,j] = d[0]
return A
class Lagrange:
def __init__(self,nsd, order):
self.nsd = nsd
self.order = order
self.compute_basis()
def nbf(self):
return len(self.N)
def compute_basis(self):
order = self.order
nsd = self.nsd
N = []
pol, coeffs, basis = bernstein_space(order, nsd)
points = create_point_set(order, nsd)
equations = []
for p in points:
ex = pol.subs(x, p[0])
if nsd > 1:
ex = ex.subs(y, p[1])
if nsd > 2:
ex = ex.subs(z, p[2])
equations.append(ex )
A = create_matrix(equations, coeffs)
Ainv = A.inv()
b = eye(len(equations))
xx = Ainv*b
for i in range(0,len(equations)):
Ni = pol
for j in range(0,len(coeffs)):
Ni = Ni.subs(coeffs[j], xx[j,i])
N.append(Ni)
self.N = N
def main():
t = ReferenceSimplex(2)
fe = Lagrange(2,2)
u = 0
#compute u = sum_i u_i N_i
us = []
for i in range(0, fe.nbf()):
ui = Symbol("u_%d" % i)
us.append(ui)
u += ui*fe.N[i]
J = zeros(fe.nbf())
for i in range(0, fe.nbf()):
Fi = u*fe.N[i]
        print(Fi)
for j in range(0, fe.nbf()):
uj = us[j]
integrands = diff(Fi, uj)
            print(integrands)
J[j,i] = t.integrate(integrands)
pprint(J)
if __name__ == "__main__":
main()
|
:
file(filename, "w").write(data)
else:
raise FMBTX11Error('Unsupported image format "%s"...' % (data[:4],))
else:
return False
return True
class FMBTX11Error(Exception): pass
X11ConnectionError = fmbtx11_conn.X11ConnectionError
_keyNames = [ "VoidSymbol", "BackSpace", "Tab", "Linefeed", "Clear",
"Return", "Pause", "Scroll_Lock", "Sys_Req", "Escape",
"Delete", "Multi_key", "Codeinput", "SingleCandidate",
"MultipleCandidate", "PreviousCandidate", "Kanji",
"Muhenkan", "Henkan_Mode", "Henkan", "Romaji",
| "Hiragana", "Katakana", "Hiragana_Katakana", "Zenkaku",
"Hankaku", "Zenkaku_Hankaku", "Touroku", "Massyo",
"Kana_Lock", "Kana_Shift", "Eisu_Shift", "Eisu_toggle",
"Kanji_Bangou", "Zen_Koho", "Mae_Koho", "Home", "Left",
"Up", "Right", "Down", "Prior", "Page_Up", "Next",
"Page_Down", "End", "Begin", "Select", "P | rint",
"Execute", "Insert", "Undo", "Redo", "Menu", "Find",
"Cancel", "Help", "Break", "Mode_switch",
"script_switch", "Num_Lock", "KP_Space", "KP_Tab",
"KP_Enter", "KP_F1", "KP_F2", "KP_F3", "KP_F4",
"KP_Home", "KP_Left", "KP_Up", "KP_Right", "KP_Down",
"KP_Prior", "KP_Page_Up", "KP_Next", "KP_Page_Down",
"KP_End", "KP_Begin", "KP_Insert", "KP_Delete",
"KP_Equal", "KP_Multiply", "KP_Add", "KP_Separator",
"KP_Subtract", "KP_Decimal", "KP_Divide", "KP_0",
"KP_1", "KP_2", "KP_3", "KP_4", "KP_5", "KP_6", "KP_7",
"KP_8", "KP_9", "F1", "F2", "F3", "F4", "F5", "F6",
"F7", "F8", "F9", "F10", "F11", "L1", "F12", "L2",
"F13", "L3", "F14", "L4", "F15", "L5", "F16", "L6",
"F17", "L7", "F18", "L8", "F19", "L9", "F20", "L10",
"F21", "R1", "F22", "R2", "F23", "R3", "F24", "R4",
"F25", "R5", "F26", "R6", "F27", "R7", "F28", "R8",
"F29", "R9", "F30", "R10", "F31", "R11", "F32", "R12",
"F33", "R13", "F34", "R14", "F35", "R15", "Shift_L",
"Shift_R", "Control_L", "Control_R", "Caps_Lock",
"Shift_Lock", "Meta_L", "Meta_R", "Alt_L", "Alt_R",
"Super_L", "Super_R", "Hyper_L", "Hyper_R", "ISO_Lock",
"ISO_Level2_Latch", "ISO_Level3_Shift",
"ISO_Level3_Latch", "ISO_Level3_Lock",
"ISO_Level5_Shift", "ISO_Level5_Latch",
"ISO_Level5_Lock", "ISO_Group_Shift", "ISO_Group_Latch",
"ISO_Group_Lock", "ISO_Next_Group",
"ISO_Next_Group_Lock", "ISO_Prev_Group",
"ISO_Prev_Group_Lock", "ISO_First_Group",
"ISO_First_Group_Lock", "ISO_Last_Group",
"ISO_Last_Group_Lock", "ISO_Left_Tab",
"ISO_Move_Line_Up", "ISO_Move_Line_Down",
"ISO_Partial_Line_Up", "ISO_Partial_Line_Down",
"ISO_Partial_Space_Left", "ISO_Partial_Space_Right",
"ISO_Set_Margin_Left", "ISO_Set_Margin_Right",
"ISO_Release_Margin_Left", "ISO_Release_Margin_Right",
"ISO_Release_Both_Margins", "ISO_Fast_Cursor_Left",
"ISO_Fast_Cursor_Right", "ISO_Fast_Cursor_Up",
"ISO_Fast_Cursor_Down", "ISO_Continuous_Underline",
"ISO_Discontinuous_Underline", "ISO_Emphasize",
"ISO_Center_Object", "ISO_Enter", "dead_grave",
"dead_acute", "dead_circumflex", "dead_tilde",
"dead_perispomeni", "dead_macron", "dead_breve",
"dead_abovedot", "dead_diaeresis", "dead_abovering",
"dead_doubleacute", "dead_caron", "dead_cedilla",
"dead_ogonek", "dead_iota", "dead_voiced_sound",
"dead_semivoiced_sound", "dead_belowdot", "dead_hook",
"dead_horn", "dead_stroke", "dead_abovecomma",
"dead_psili", "dead_abovereversedcomma", "dead_dasia",
"dead_doublegrave", "dead_belowring",
"dead_belowmacron", "dead_belowcircumflex",
"dead_belowtilde", "dead_belowbreve",
"dead_belowdiaeresis", "dead_invertedbreve",
"dead_belowcomma", "dead_currency", "dead_a", "dead_A",
"dead_e", "dead_E", "dead_i", "dead_I", "dead_o",
"dead_O", "dead_u", "dead_U", "dead_small_schwa",
"dead_capital_schwa", "dead_greek",
"First_Virtual_Screen", "Prev_Virtual_Screen",
"Next_Virtual_Screen", "Last_Virtual_Screen",
"Terminate_Server", "AccessX_Enable",
"AccessX_Feedback_Enable", "RepeatKeys_Enable",
"SlowKeys_Enable", "BounceKeys_Enable",
"StickyKeys_Enable", "MouseKeys_Enable",
"MouseKeys_Accel_Enable", "Overlay1_Enable",
"Overlay2_Enable", "AudibleBell_Enable", "Pointer_Left",
"Pointer_Right", "Pointer_Up", "Pointer_Down",
"Pointer_UpLeft", "Pointer_UpRight", "Pointer_DownLeft",
"Pointer_DownRight", "Pointer_Button_Dflt",
"Pointer_Button1", "Pointer_Button2", "Pointer_Button3",
"Pointer_Button4", "Pointer_Button5",
"Pointer_DblClick_Dflt", "Pointer_DblClick1",
"Pointer_DblClick2", "Pointer_DblClick3",
"Pointer_DblClick4", "Pointer_DblClick5",
"Pointer_Drag_Dflt", "Pointer_Drag1", "Pointer_Drag2",
"Pointer_Drag3", "Pointer_Drag4", "Pointer_Drag5",
"Pointer_EnableKeys", "Pointer_Accelerate",
"Pointer_DfltBtnNext", "Pointer_DfltBtnPrev", "ch",
"Ch", "CH", "c_h", "C_h", "C_H", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent",
"ampersand", "apostrophe", "quoteright", "parenleft",
"parenright", "asterisk", "plus", "comma", "minus",
"period", "slash", "0", "1", "2", "3", "4", "5", "6",
"7", "8", "9", "colon", "semicolon", "less", "equal",
"greater", "question", "at", "A", "B", "C", "D", "E",
"F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P",
"Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z",
"bracketleft", "backslash", "bracketright",
"asciicircum", "underscore", "grave", "quoteleft", "a",
"b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
"m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w",
"x", "y", "z", "braceleft", "bar", "braceright",
"asciitilde", "nobreakspace", "exclamdown", "cent",
"sterling", "currency", "yen", "brokenbar", "section",
"diaeresis", "copyright", "ordfeminine",
"guillemotleft", "notsign", "hyphen", "registered",
"macron", "degree", "plusminus", "twosuperior",
"threesuperior", "acute", "mu", "paragraph",
"periodcentered", "cedilla", "onesuperior", "masculine",
"guillemotright", "onequarter", "onehalf",
"threequarters", "questiondown", "Agrave", "Aacute",
"Acircumflex", "Atilde", "Adiaeresis", "Aring", "AE",
"Ccedilla", "Egrave", "Eacute", "Ecircumflex",
"Ediaeresis", "Igrave", "Iacute", "Icircumflex",
"Idiaeresis", "ETH", "Eth", "Ntilde", "Ograve",
"Oacute", "Ocircumflex", "Otilde", "Odiaeresis",
"multiply", "Oslash", "Ooblique", "Ugrave", "Uacute",
"Ucircumflex", "Udiaeresis", "Yacute", "THORN", "Thorn",
"ssharp", "agrave", "aacute", "acircumflex", "atilde",
"adiaeresis", "aring", "ae", "ccedilla", "egrave",
"eacute", "ecircumflex", "ediaeresis", "igrave",
"iacute", "icircumflex", "idiaeresis", "eth", "ntilde",
"ograve", "oacute", "ocircumflex", "otilde",
"odiaeresis", "division", "oslash", "ooblique",
"ugrave", "uacute", "ucircumflex", "udiaeresis",
"yacu |
# -*- coding: utf-8 -*-
"""
Partial implementation of the standard atmosphere model described in
GOST 4401-81, useful for processing data from meteorological balloon
sensors.

Supports modelling of temperature and pressure over the altitude span
from 0 up to 51 km.

Algorithm by Oleg Kochetov <ok@noiselab.ru>
"""
from math import log10
class GOST4401(object):
G = 9.80665
R = 287.05287
E = 6356766
MIN_PRESSURE = 6.69384
MAX_PRESSURE = 101325.00
MIN_GP_ALT = 0.00
MAX_GP_ALT = 51000.00
# Lookup table with averaged empirical parameters for
# lower layers of atmosphere in accordance with GOST 4401-81
LUT_RECORDS = 6
tab = {
'altitude' : 0, # Geopotential altitude, meters
'temperature' : 1, # degrees K
'temp gradient' : 2, # degrees K per meter
'pressure' : 3, # pascals
}
ag_table = [
[0, 288.15, -0.0065, 101325.00],
[11000, 216.65, 0.0, 22632.04],
[20000, 216.65, 0.0010, 5474.87],
[32000, 228.65, 0.0028, 868.0146],
[47000, 270.65, 0.0, 110.9056],
[51000, 270.65, -0.0028, 6.69384]
]
def geopotential_to_geometric(self, altitude):
    return altitude * self.E / (self.E - altitude)
def geometric_to_geopotential(self, altitude):
    return altitude * self.E / (self.E + altitude)
def get_altitude(self, pressure):
"""
Returns geometric altitude value for the given pressure.
:param pressure: float pressure - pressure in pascals
:return: float geometric altitude in meters
"""
# Pressure in Pascals
if (pressure <= self.MIN_PRESSURE) or (pressure > self.MAX_PRESSURE):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((pressure <= self.ag_table[idx][self.tab['pressure']]) and
(pressure > self.ag_table[idx + 1][self.tab['pressure']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
geopot_H = ((Tm * pow(Ps / pressure, Bm * self.R / self.G) - Tm) / Bm)
else:
geopot_H = log10(Ps / pressure) * (self.R * Tm) / self.G * 0.434292
return self.geopotential_to_geometric(Hb + geopot_H)
def get_pressure(self, altitude):
"""
Returns pressure in pascals for the given geometric altitude
:param altitude: float altitude - geometric altitude in meters
:return: float - pressure in pascals
"""
geopot_H = self.geometric_to_geopotential(altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Ps = float(self.ag_table[idx][self.tab['pressure']])
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
if Bm != 0:
lP = log10(Ps) - (self.G / (Bm * self.R)) * log10((Tm + Bm * (geopot_H - Hb)) / Tm)
else:
lP = log10(Ps) - 0.434294 * (self.G * (geopot_H - Hb)) / (self.R * Tm)
return pow(10, lP)
def get_temperature(self, altitude):
"""
Returns temperature value in K for the given geometric altitude.
:param altitude: float altitude - geometric altitude in meters
:return: float - temperature in degrees K
"""
geopot_H = self.geometric_to_geopotential(altitude)
if (geopot_H < self.MIN_GP_ALT) or (geopot_H >= self.MAX_GP_ALT):
return None
for idx in range(0, self.LUT_RECORDS - 1):
if ((geopot_H >= self.ag_table[idx][self.tab['altitude']]) and
(geopot_H < self.ag_table[idx + 1][self.tab['altitude']])):
break
Bm = float(self.ag_table[idx][self.tab['temp gradient']])
Tm = float(self.ag_table[idx][self.tab['temperature']])
Hb = float(self.ag_table[idx][self.tab['altitude']])
temp = Tm
if Bm != 0:
temp += Bm * (geopot_H - Hb)
return temp
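# A minimal usage sketch (not part of the original module): evaluate the model
# at a few altitudes and round-trip the pressure back through get_altitude().
if __name__ == "__main__":
    atm = GOST4401()
    for alt in (0.0, 5000.0, 11000.0, 30000.0):
        p = atm.get_pressure(alt)
        t = atm.get_temperature(alt)
        print("%6.0f m: %10.2f Pa, %6.2f K, round-trip %8.1f m"
              % (alt, p, t, atm.get_altitude(p)))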
|
"""SCons.Tool.sunf95
Tool-specific initialization for sunf95, the Sun Studio F95 compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/sunf95.py 4720 2010/03/24 03:14:11 jars"
import SCons.Util
from FortranCommon import add_all_to_env
compilers = ['sunf95', 'f95']
def generate(env):
"""Add Builders and construction variables for sunf95 to an
Environment."""
add_all_to_env(env)
fcomp = env.Detect(compilers) or 'f95'
env['FORTRAN'] = fcomp
env['F95'] = fcomp
env['SHFORTRAN'] = '$FORTRAN'
env['SHF95'] = '$F95'
env['SHFORTRANFLAGS'] = SCons.Util.CLVar('$FORTRANFLAGS -KPIC')
env['SHF95FLAGS'] = SCons.Util.CLVar('$F95FLAGS -KPIC')
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
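# A minimal SConstruct sketch (illustrative, not part of this module): naming
# the tool explicitly makes SCons call generate()/exists() defined above.
#
#     env = Environment(tools=['default', 'sunf95'])
#     env.Program('hello', 'hello.f95')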
|
"""
Django settings for untitled project.
Generated by 'django-admin startproject' using Django 1.10.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^8wdj$q^6mp6g7z1s7nwip_ffhof4r6g)nl88dy0-u(r)(o=_n'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG_SFW = False
# Use to blank most frontend NSFW-stuff for developing in public spaces
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_q',
'kinksorter_app',
]
USE_ASYNC = False
Q_CLUSTER = {
'name': 'kinksorter-cluster',
'recycle': 10, # big tasks -> often recycle workers
'save_limit': 10, # try to minimize database_size
'catch_up': False, # try to minimize database_size
'orm': 'default',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kinksorter.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kinksorter.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'kinksorter.db'),
'OPTIONS': {'timeout': 20000},
},
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '[%(asctime)s] %(message)s',
'datefmt': '%H:%M:%S',
}
},
'filters': {
'ignore_get_current_task': {
'()': 'django.utils.log.CallbackFilter',
'callback': lambda r: not (len(r.args) > 2 and r.args[1] == '200' and r.args[0] == 'GET /get_current_task HTTP/1.1'),
}
},
'handlers': {
'console': {
'filters': ['ignore_get_current_task'],
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django.server': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
},
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'kinksorter', 'static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'kinksorter', 'static_collected')
# User content (to get the videos via static/, as it needs to be under the root)
STATIC_LINKED_DIRECTORIES = os.path.join(STATIC_URL, 'directory_links')
DIRECTORY_LINKS = os.path.join(STATIC_ROOT, 'directory_links')
os.makedirs(DIRECTORY_LINKS, exist_ok=True)
|
[2].eventId, False)
pipe: rd.PipeState = self.controller.GetPipelineState()
ssbo: rd.BoundResource = pipe.GetReadWriteResources(rd.ShaderStage.Compute)[0].resources[0]
data: bytes = self.controller.GetBufferData(ssbo.resourceId, 0, 0)
rdtest.log.print("Got {} bytes of uints".format(len(data)))
uints = [struct.unpack_from('=4L', data, offs) for offs in range(0, len(data), 16)]
for x in range(0, 6): # 3 groups of 2 threads each
for y in range(0, 8): # 4 groups of 2 threads each
for z in range(0, 5): # 5 groups of 1 thread each
idx = 100 + z*8*6 + y*6 + x
if not rdtest.value_compare(uints[idx], [x, y, z, 12345]):
raise rdtest.TestFailureException(
'expected thread index data @ {},{},{}: {} is not as expected: {}'
.format(x, y, z, uints[idx], [x, y, z, 12345]))
rdtest.log.success("Dispatched buffer contents are as expected for {}".format(level))
empties = self.find_draw("{}: Empty draws".format(level))
self.check(empties and len(empties.children) == 2)
draw: rd.DrawcallDescription
for draw in empties.children:
self.check(draw.numIndices == 0)
self.check(draw.numInstances == 0)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have empty PostVS
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut, 0, 1)
self.check(len(postvs_data) == 0)
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} empty draws are empty".format(level))
indirects = self.find_draw("{}: Indirect draws".format(level))
self.check('vkCmdDrawIndirect' in indirects.children[0].name)
self.check('vkCmdDrawIndexedIndirect' in indirects.children[1].name)
self.check(len(indirects.children[1].children) == 2)
rdtest.log.success("Correct number of {} indirect draws".form | at(level))
# vkCmdDrawIndirect(...)
draw = indirects.children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.8, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.7, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.6, -0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirect[0](...)
draw = indirects.children[1].children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 3)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.6, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.5, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.4, -0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirect[1](...)
draw = indirects.children[1].children[1]
self.check(draw.numIndices == 6)
self.check(draw.numInstances == 2)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.4, -0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.3, -0.8, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.2, -0.8, 0.0, 1.0]},
3: {'vtx': 3, 'idx': 3, 'gl_PerVertex.gl_Position': [-0.1, -0.5, 0.0, 1.0]},
4: {'vtx': 4, 'idx': 4, 'gl_PerVertex.gl_Position': [ 0.0, -0.8, 0.0, 1.0]},
5: {'vtx': 5, 'idx': 5, 'gl_PerVertex.gl_Position': [ 0.1, -0.8, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
indirect_count_root = self.find_draw("{}: KHR_draw_indirect_count".format(level))
if indirect_count_root is not None:
self.check(indirect_count_root.children[0].name == '{}: Empty count draws'.format(level))
self.check(indirect_count_root.children[1].name == '{}: Indirect count draws'.format(level))
empties = indirect_count_root.children[0]
self.check(empties and len(empties.children) == 2)
draw: rd.DrawcallDescription
for draw in empties.children:
self.check(draw.numIndices == 0)
self.check(draw.numInstances == 0)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have empty PostVS
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut, 0, 1)
self.check(len(postvs_data) == 0)
self.check_overlay(draw.eventId, out, tex, save_data)
# vkCmdDrawIndirectCountKHR
draw_indirect = indirect_count_root.children[1].children[0]
self.check(draw_indirect and len(draw_indirect.children) == 1)
# vkCmdDrawIndirectCountKHR[0]
draw = draw_indirect.children[0]
self.check(draw.numIndices == 3)
self.check(draw.numInstances == 4)
self.controller.SetFrameEvent(draw.eventId, False)
# Check that we have PostVS as expected
postvs_data = self.get_postvs(rd.MeshDataStage.VSOut)
# These indices are the *output* indices, which have been rebased/remapped, so are not the same as the input
# indices
postvs_ref = {
0: {'vtx': 0, 'idx': 0, 'gl_PerVertex.gl_Position': [-0.8, 0.5, 0.0, 1.0]},
1: {'vtx': 1, 'idx': 1, 'gl_PerVertex.gl_Position': [-0.7, 0.2, 0.0, 1.0]},
2: {'vtx': 2, 'idx': 2, 'gl_PerVertex.gl_Position': [-0.6, 0.5, 0.0, 1.0]},
}
self.check_mesh_data(postvs_ref, postvs_data)
self.check(len(postvs_data) == len(postvs_ref)) # We shouldn't have any extra vertices
self.check_overlay(draw.eventId, out, tex, save_data)
rdtest.log.success("{} {} is as expected".format(level, draw.name))
# vkCmdDrawIndexedIndirectCountKHR
draw_indirect = indirect_count_root.children[1].child |
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Provides version information of important supporting modules.
"""
from __future__ import unicode_literals
import functools
import appinfo
def _catch_unknown(f):
"""Decorate a function, returning "unknown" on import/attribute error."""
@functools.wraps(f)
def wrapper():
try:
return f()
except (ImportError, AttributeError):
return "unknown"
return wrapper
@_catch_unknown
def app_version():
import appinfo
return appinfo.version
@_catch_unknown
def sip_version():
import sip
return sip.SIP_VERSION_STR
@_catch_unknown
def pyqt_version():
import PyQt4.QtCore
return PyQt4.QtCore.PYQT_VERSION_STR
@_catch_unknown
def qt_version():
import PyQt4.QtCore
return PyQt4.QtCore.QT_VERSION_STR
@_catch_unknown
def python_version():
import platform
return platform.python_version()
@_catch_unknown
def operating_system():
import platform
return platform.platform()
@_catch_unknown
def ly_version():
import ly.pkginfo
return ly.pkginfo.version
@_catch_unknown
def poppler_version():
import popplerqt4
return '.'.join(format(n) for n in popplerqt4.poppler_version())
@_catch_unknown
def python_poppler_version():
import popplerqt4
return '.'.join(format(n) for n in popplerqt4.version())
def version_info_named():
"""Yield all the relevant names and their version string."""
yield appinfo.appname, appinfo.version
yield "Python", python_version()
yield "python-ly", ly_version()
yield "Qt", qt_version()
yield "PyQt", pyqt_version()
yield "sip", sip_version()
yield "poppler", poppler_version()
yield "python-poppler-qt", python_poppler_version()
yield "OS", operating_system()
def version_info_string(separator='\n'):
"""Return all version names as a string, joint with separator."""
return separator.join(map("{0[0]}: {0[1]}".format, version_info_named()))
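# Illustrative use (not part of the original module): a bug-report dialog
# could embed the collected versions with, e.g.,
#
#     text = version_info_string(separator=', ')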
|
from .bhtsne import bh_tsne
__all__ = ["_bhtsne", "b | h_tsne"]
|
#!/usr/bin/env python
from array import *
import os
import struct
stats = os.stat('freqtest.dat')
file_size = stats.st_size
#print('file size ', +file_size, ' bytes')
entries = file_size/4
#print('file has ', +entries, +' entries')
freq_array = array('f', []) #create an array to hold the entries
with open('freqtest.dat', 'rb') as f: #open the file once and read the entries sequentially
    for a in range(0, file_size, 4):
        f.seek(a)
        bytes = f.read(4)
        freq = struct.unpack('<f', bytes)
        b = (a/4) +1
        # frq(b) = str(freq[0])
        print('Frequency: ' + str((a/4)+1) + ' ' + str(freq[0])) #print the entries as they are read
        freq_array.append(freq[0]) #and add them to the array
x = raw_input('continue? (y to modify freqs in the list, n to go to adding freqs)')
while x != "n":
# print(x)
fm = int(input('freq to modify: ')) #we want to modify a particular frequency
current_freq = freq_array[fm-1]
print('current freq is: ', + current_freq) #we want to replace it with a new value
new_freq = input('new frequency: ')
freq_array[fm-1] = new_freq
for indx in range(len(freq_array)): #print the modified list
print(indx+1, +freq_array[indx])
x = raw_input("do you want to change another frequency? ")
x = raw_input('continue? (y to add freqs to the list, n to save the list and exit)') #second part... we may want to add new frequencies to the list
while x != "n": #similar to the modify loop
new_freq = input('new frequency: ')
freq_array.append(new_freq) #except we append the frequency at the end
for indx in range(len(freq_array)): #and as before print the modified list
print(indx+1, +freq_array[indx])
x = raw_input("do you want to add another frequency? ")
print freq_array #this is here as a troubleshooting tool
f = open('freqtest.dat', 'wb') #everything done? dump the array to the file (overwrites
f.write(freq_array) #the old one)
f.close()
|
"""
====================================
Probabilistic Tracking on ODF fields
====================================
In this example we perform probabilistic fiber tracking on fields of ODF peaks.
This example builds on the example `reconst_csa.py`, which it imports.
"""
import numpy as np
from reconst_csa import *
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.tracking.markov import (BoundaryStepper,
FixedSizeStepper,
ProbabilisticOdfWeightedTracker)
from dipy.tracking.utils import seeds_from_mask
stepper = FixedSizeStepper(1)
"""
Read the voxel size from the image header:
"""
zooms = img.get_header().get_zooms()[:3]
"""
Randomly select some seed points from the mask:
"""
seeds = seeds_from_mask(mask, [1, 1, 1], zooms)
seeds = seeds[:2000]
interpolator = NearestNeighborInterpolator(data, zooms)
pwt = ProbabilisticOdfWeightedTracker(csamodel, interpolator, mask,
stepper, 20, seeds, sphere)
csa_streamlines = list(pwt)
"""
Now that we have our streamlines in memory we can save the results to disk.
For this purpose we can use the TrackVis format (``*.trk``). First, we need to
create a header.
"""
import nibabel as nib
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = (2., 2., 2.)
hdr['voxel_order'] = 'LAS'
hdr['dim'] = csapeaks.gfa.shape[:3]
"""
Save the streamlines.
"""
csa_streamlines_trk = ((sl, None, None) for sl in csa_streamlines)
csa_sl_fname = 'csa_prob_streamline.trk'
nib.trackvis.write(csa_sl_fname, csa_streamlines_trk, hdr)
"""
Visualize the streamlines with fvtk (python vtk is required).
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
r = fvtk.ren()
fvtk.add(r, fvtk.line(csa_streamlines, line_colors(csa_streamlines)))
print('Saving illustration as csa_prob_tracks.png')
fvtk.record(r, n_frames=1, out_path='csa_prob_tracks.png', size=(600, 600))
"""
.. figure:: csa_prob_tracks.png
:align: center
**Probabilistic streamlines applied on an ODF field modulated by GFA**.
"""
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from . import gradle
from . import ci_command
@click.argument('task', required=True, nargs=-1)
@click.option(
'--gradle-opts',
default='',
help='GRADLE_OPTS passed to the gradle invocation.')
@ci_command('gradle')
def gradle_command(task, gradle_opts):
"""Runs the specified gradle commands."""
gradle.run(*task, gradle_opts=gradle_opts)
|
# Creates graph of restaurant reviews for yelp or trip advisor.
# writes graph to gml file for use in gephi
#
# Rob Churchill
#
# NOTE: I learned to do this in my data science class last semester. If you are looking for plagiarism things, you will almost certainly find similar clustering code.
# I did not copy it; I learned this specific way of doing it and referred to my previous assignments when doing it for this project. If you would like to see my previous
# assignments, I will provide them to you on request. Otherwise, I don't think it's worth adding a lot of extra files for the sole sake of showing that I haven't plagiarized.
import networkx as nx
import numpy as np
import scipy as sp
import csv
folder = 'data/'
file_names = ['yelp_data.csv', 'trip_advisor_data.csv']
# EDIT this line to change which website you make the graph for. True=yelp, False=TripAdvisor
yelp = False
yelp_dataset = list()
file_name = file_names[1]
if yelp == True:
file_name = file_names[0]
# reads in appropriate file given yelp boolean variable
with open(folder+file_name, 'r') as f:
reader = csv.reader(f)
for line in reader:
yelp_dataset.append(line)
# removes headers
yelp_dataset.remove(yelp_dataset[0])
print len(yelp_dataset)
# create the graph
G = nx.Graph()
for y in yelp_dataset:
# add the nodes if they don't already exist
G.add_node(y[4], type='restaurant')
G.add_node(y[13], type='reviewer')
# add the edge between the reviewer and restaurant, weight is in different position in each file.
if yelp == True:
G.add_edge(y[13], y[4], weight=float(y[2]))
else:
G.add_edge(y[13], y[4], weight=float(y[1]))
print nx.number_of_nodes(G)
print nx.number_of_edges(G)
# write graph to gml file.
nx.write_gml(G, 'ta_graph.gml')
|
# -*- coding: utf-8 -*-
"""Django URLconf file for ulm"""
from __future__ import unicode_literals
from django.conf import settings
try:
# pylint: disable=E0611
from django.conf.urls import patterns, include, url
except ImportError: # Django 1.3 compatibility
from django.conf.urls.defaults import patterns, include, url
from django.conf.urls.static import static
from ulm.views import laptop, batteries, wifi
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = \
patterns('',
url(r'^$', laptop),
url(r'^batter(?:y|ies)/$', batteries),
url(r'^(?:wifi|wlan)/$', wifi),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/env python
import os
from os.path import join as pjoin
import sys
import subprocess
def get_output(cmd):
s = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out = s.communicate()[0]
s.wait()
return out.strip()
# you could use os.path.walk to calculate this... or you could use du(1).
def duhack(path):
cmd = ['du', '-k', path]
out = get_output(cmd).split()
return int(out[0]) * 1024
BASEPATH=sys.argv[1]
ROOT="/x1/mail-archives/mod_mbox"
HOSTNAME="http://mail-archives.apache.org/mod_mbox/"
PARTITION_SIZE=100 * 1024 * 1024
tlps={}
for files in os.listdir(ROOT):
path = files
tlp = path[0:path.find('-')]
list = path[path.find('-')+1:]
# print "%s - %s %s" % (tlp, list, path)
if not os.access("%s/%s/listinfo.db" % (ROOT, path), os.F_OK):
continue
if tlp == "www":
tlp = "asf"
if not tlps.has_key(tlp):
tlps[tlp] = {}
tlps[tlp][list] = [path, duhack(pjoin(ROOT, path))]
keys = tlps.keys()
keys.sort()
count = 0
fcount = 0
def write_sitemap_header(fp):
fp.write("""<?xml version="1.0" encoding="UTF-8"?>\n<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\n""")
def write_sitemap_footer(fp):
fp.write("</sitemapindex>\n")
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)
for tlp in keys:
klist = tlps[tlp].keys()
klist.sort()
for list in klist:
name = tlps[tlp][list][0]
size = tlps[tlp][list][1]
if size < PARTITION_SIZE:
count += 1
fp.write("<sitemap><loc>%s%s/?for | mat=sitemap</loc></sitemap>\n" % (HOSTNAME, name))
else:
part = (size / PARTITION_SIZE) + 1
for i in range(0, part):
count += 1
fp.write("<sitemap><loc>%s%s/?format=sitemap&pmax=%d&part=%d</loc></sitemap>\n" % (HOSTNAME, name, part, i))
if count > 500:
write_sitemap_footer(fp)
fp.close()
count = 0
fcount += 1
fp = open(BASEPATH % (fcount), 'w')
write_sitemap_header(fp)
write_sitemap_footer(fp)
|
import os
import logging
import MySQLdb
import time
import sys
import Queue
import threading
import json
createUserSQL = "INSERT IGNORE INTO users (name) VALUES (%s);"
getUserByUsernameSQL = "SELECT * FROM users WHERE name=%s;"
getAuthorByNameSQL = "SELECT * FROM authors WHERE name=%s;"
createAuthorSQL = "INSERT IGNORE INTO authors (userid, name) VALUES (%s, %s);"
createBookSQL = "INSERT IGNORE INTO books (name, author, price) VALUES (%s, %s, %s);"
firstNames = sorted(["Kenia ", "Randal", "Shawnna ", "Rey ", "Cordia", "Kendal",
"Alina", "Dianna", "Misti", "Chelsie", "Gracia", "Teena", "Ronny", "Willy",
"Betsy", "Kenisha", "Elsy", "Cheryle", "Lurline ", "Karina", "Luba", "Vita",
"Lu", "Frances", "Lavenia", "Nereida", "Zetta", "Melony", "Eloise",
"Nickolas", "Ericka", "Cecilia", "Jenni", "Sofia", "Nobuko", "Trudy",
"Petronila", "Donnette", "Santos", "Viola", "Jessika", "Chere", "Azalee",
"Meggan", "Floyd", "Liberty", "Tabitha", "Juliana", "Pamila", "Blondell"])
lastNames = sorted(["Watterson", "Lawler", "Walt", "Birch", "Bryd", "Speight",
"Monroy", "Milledge", "Davilla", "Behrendt", "Mustain", "Blythe", "Gandhi",
"Brady", "Gooden", "Jellison", "Hager", "Selders", "Seaton", "Wind",
"Jelinek", "Reiser", "Lacour", "Maginnis", "Baggs", "Crossno", "Shadley",
"Bramer", "Mento", "Manigault", "Jacobi", "Deckman", "Spikes", "Duncan",
"Ackman", "Hornick", "Bourbeau", "Riehl", "Sena", "Rolon", "Pereira",
"Mikula", "Luk", "Albaugh", "Akin", "Bradburn", "Houlihan", "Frisina",
"Funnell", "Keister"])
def connect():
return MySQLdb.connect(host="mysql", # your host, usually localhost
user="root", # your username
passwd="password", # your password
db="bench") # name of the data base
createUserThreads = []
def createUsers(name):
logging.debug("Creating... "+name)
sys.stdout.flush()
db = connect();
cur = db.cursor()
for j in lastNames:
for k in range(0, 10):
myname = name + " " + j + "(" + str(k) + ")"
sys.stdout.flush()
cur.execute(createUserSQL, (myname,))
cur.execute(getUserByUsernameSQL, (myname, ))
row = cur.fetchone()
if not row == None:
cur.execute(createAuthorSQL, [str(row[0]), ("Author "+myname)])
else:
print "Could not create ", myname
db.commit()
db.close()
logging.debug("Created! "+name)
sys.stdout.flush()
createBookThreads = []
def createBook(username):
logging.debug("Creating books... "+username)
sys.stdout.flush()
db = connect()
cur = db.cursor()
for j in lastNames:
for k in range(0, 3):
myname = "Author " + username + " " + j + "(" + str(k) + ")"
cur.execute(getAuthorByNameSQL, (myname, ))
row = cur.fetchone()
if not row == None:
for i in range(0,2):
bookname = myname+"'s book "+str(i)
cur.execute(createBookSQL, [bookname, str(row[0]), i * 5])
else:
print "Could not find ", myname
db.commit()
db.close()
logging.debug("Created books! "+username)
sys.stdout.flush()
def initializeUsers():
    logging.debug("Initializing users...")
start = time.time();
for i in firstNames:
name = i + " " + hostname
t = threading.Thread(target=createUsers, args = (name, ))
t.daemon = True
createUserThreads.append(t)
# Start all the threads
for x in createUserThreads:
x.start()
# Wait for them to complete
for x in createUserThreads:
x.join()
# Return the time it took to run
logging.debug("Creating users took: "+str(time.time() - start))
return time.time() - start;
def initializeBooks():
    logging.debug("Initializing books...")
start = time.time();
for i in firstNames:
name = i + " " + hostname
t = threading.Thread(target=createBook, args = (name, ))
t.daemon = True
createBookThreads.append(t)
# Start all the threads
for x in createBookThreads:
x.start()
# Wait for them to complete
for x in createBookThreads:
x.join()
# Return the time it took to run
logging.debug("Creating books took: "+str(time.time() - start))
return time.time() - start;
def main():
logging.debug("Starting...")
db = connect();
intUserTime = initializeUsers();
intBookTime = initializeBooks();
# cur.execute("SELECT * FROM users")
# # print all the first cell of all the rows
# for row in cur.fetchall():
# logging.debug(row[1])
#
# cur.execute("SELECT * FROM authors")
# # print all the first cell of all the rows
# for row in cur.fetchall():
# logging.debug(row[2])
# db.close()
logging.info("Starting result save.")
with open('/tmp/bench_results/result.json', 'w') as fp:
results = {
"hostname": hostname,
"results": {
"Create": {
"Users": intUserTime,
"Books": intBookTime
}
}
}
logging.info(json.dumps(results))
json.dump(results, fp)
if __name__ == '__main__':
hostname = os.uname()[1]
logging.basicConfig(format=hostname + ' %(asctime)s %(levelname)s: %(message)s', level=logging.DEBUG)
main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from colour import Color
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.db.models import Sum
from django.http import HttpResponse, HttpResponseRedirect, StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.template import loader
from .models import Party, City, Senator, ContactList
from .forms import ChooseForm
from .getPopulations import getCityStatePopulations
import viewsenators.initialization as initialization
# This seems to be the most that Facebook will allow, though it varies over time
NUM_CITIES_PER_QUERY = 50
def index(request):
def colorToD3(color):
return "rgb(%d,%d,%d)" % (color.red*255, color.green*255, color.blue*255)
def substituteDesc(moc, desc):
if "{{number}}" not in desc:
desc += "\n\n%s's phone number is {{number}}" % moc.lastName
if moc.phoneNumber:
text = moc.phoneNumber
else:
text ="(unknown number)"
desc = desc.replace("{{name}}", moc.firstName + " " + moc.lastName)
return desc.replace("{{number}}", text)
template = loader.get_template('halcyonic/index.html')
if 'list' in request.GET:
clId = str(request.GET['list'])
contactList = get_object_or_404(ContactList, slug=clId)
else:
try:
contactList = ContactList.objects.get(slug='keep-children-with-their-families')
except ContactList.DoesNotExist:
contactList = ContactList.objects.get(title="Republican")
stateColor = colorToD3(Color(rgb=(125/255.0, 0/255.0, 16/255.0)))
senatorToURLsPopsAndDesc = {}
for senator in contactList.senators.all():
senatorToURLsPopsAndDesc[senator] = _stateToFbCode(senator.state)
senatorToURLsPopsAndDesc[senator]['callScript'] = substituteDesc(senator, contactList.description)
sortedDict = sorted(senatorToURLsPopsAndDesc.items(),
key = lambda x: x[0].state.name)
context = {
"stateColor": stateColor, # TODO eventually have meaningful colors?
"title": contactList.title,
"senatorToURLsPopsAndDesc": sortedDict
}
return HttpResponse(template.render(context, request))
def combineContactList(request):
template = loader.get_template('viewsenators/combine.html')
context = {'contactLists': ContactList.objects.all()}
return HttpResponse(template.render(context, request))
def createContactList(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ChooseForm(request.POST)
# check whether it's valid:
if form.is_valid():
data = form.cleaned_data
title = data['title']
description = data['description']
senators = data['senators']
public = data['public']
cl = _makeContactList(title, description, senators, public)
return HttpResponseRedirect(reverse('index')+'?list=' + cl.slug)
# if a GET (or any other method) we'll create a blank form
else:
form = ChooseForm()
so = Senator.objects
ids = {}
for party in Party.objects.all():
idList = ["input[value=\""+str(s.id)+"\"]"
for s in so.filter(party=party)]
idsSet = set(idList)
idsStr = ', '.join(idsSet)
ids[party.name] = idsStr
template = loader.get_template('viewsenators/choose.html')
context = {'form': form,
'ids': ids}
return HttpResponse(template.render(context, request))
def debugWriteAnything(text):
response = HttpResponse()
response.write(text)
return response
def _stateToFbCode(state):
""" :return: the URL and the percentage of the population of the
desired states which will be found via that URL """
# While there are many better URL constructions that ideally start with
# your friends, rather than start with all FB users in each city then
# intersect that with your friends list, this is the only way I could get it
# to work.
# In particular, facebook seems to limit the number of unions to six,
# whereas the number of intersections can be ten times that.
setOfCities = City.objects.filter(state=state).order_by('-population')[:NUM_CITIES_PER_QUERY]
url = "https://www.facebook.com/search/"
for city in setOfCities:
url += city.facebookId + "/residents/present/"
url += "union/me/friends/intersect/"
# % of population in this search
cityPop = setOfCities.aggregate(Sum('population'))['population__sum']
if cityPop is None: cityPop = 0 # TODO hack if a state has no cities
statePop = state.population
percentPopIncludedInURL = float(cityPop) / float(statePop)
percentPopIncludedInURL = int(100*percentPopIncludedInURL+0.5)
return {'url': url,
'percentPopIncludedInURL': percentPopIncludedInURL}
def _makeContactList(title, description, senatorList, public):
cl = ContactList.objects.create(
title = title,
description = description,
public = public)
cl.senators.set(senatorList)
cl.save()
return cl
@user_passes_test(lambda u: u.is_superuser)
def populateSenators(request):
def _createInitialLists():
if ContactList.objects.count() != 0:
return
assert Senator.objects.count() == 100
for party in Party.objects.all():
title = party.name
description = "Call {{name}} at {{number}}"
senators = Senator.objects.filter(party=party)
_makeContactList(title, description, senators, public=True)
initialization.populateAllData()
_createInitialLists()
senators = Senator.objects.all()
def s2t(s): return "%s: %s, %s" % (s.state.abbrev, s.firstName, s.lastName)
senText = '<br>'.join(sorted([s2t(s) for s in senators]))
return debugWriteAnything("The list of senators: <br>" + senText)
@user_passes_test(lambda u: u.is_superuser)
def clearDataForNewCongress(request):
initialization.clearDataForNewCongress()
return populateSenators(request)
@user_passes_test(lambda u: u.is_superuser)
def updateCitiesAndStatesWithLatestData(request):
# This can take more than 30 seconds, so we need a streaming response
# for Heroku to not shut it down
# This is only run once by the admin, so the decreased performance
# shouldn't matter.
def runner():
cityPopulations, statePopulations = getCityStatePopulations()
for x in initialization.updateCitiesWithCurrentData(cityPopulations):
yield x
yield initialization.addPopulationToStates(statePopulations)
return StreamingHttpResponse(runner())
|
#!/usr/bin/env python
import sys
import logging
import argparse
from gff3 import feature_lambda, feature_test_qual_value
from CPT_GFFParser import gffParse, gffWrite
from Bio.SeqFeature import FeatureLocation
log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def __get_features(child, interpro=False):
child_features = {}
for rec in gffParse(child):
log.info("Parsing %s", rec.id)
# Only top level
for feature in rec.features:
# Get the record id as parent_feature_id (since this is how it will be during remapping)
parent_feature_id = rec.id
# If it's an interpro specific gff3 file
if interpro:
# Then we ignore polypeptide features as they're useless
if feature.type == "polypeptide":
continue
try:
child_features[parent_feature_id].append(feature)
except KeyError:
child_features[parent_feature_id] = [feature]
# Keep a list of feature objects keyed by parent record id
return child_features
def __update_feature_location(feature, parent, protein2dna):
start = feature.location.start
end = feature.location.end
if protein2dna:
start *= 3
end *= 3
if parent.location.strand >= 0:
ns = parent.location.start + start
ne = parent.location.start + end
st = +1
else:
ns = parent.location.end - end
ne = parent.location.end - start
st = -1
# Don't let start/stops be less than zero.
#
# Instead, we'll replace with %3 to try and keep it in the same reading
# frame that it should be in.
if ns < 0:
ns %= 3
if ne < 0:
ne %= 3
feature.location = FeatureLocation(ns, ne, strand=st)
if hasattr(feature, "sub_features"):
for subfeature in feature.sub_features:
__update_feature_location(subfeature, parent, protein2dna)
def rebase(parent, child, interpro=False, protein2dna=False, map_by="ID"):
# get all of the features we will be re-mapping in a dictionary, keyed by parent feature ID
child_features = __get_features(child, interpro=interpro)
for rec in gffParse(parent):
replacement_features = []
# Horrifically slow I believe
for feature in feature_lambda(
rec.features,
# Filter features in the parent genome by those that are
# "interesting", i.e. have results in child_features array.
# Probably an unnecessary optimisation.
feature_test_qual_value,
{"qualifier": map_by, "attribute_list": child_features.keys()},
subfeatures=False,
):
# Features which will be re-mapped
to_remap = child_features[feature.id]
fixed_features = []
for x in to_remap:
# Then update the location of the actual feature
__update_feature_location(x, feature, protein2dna)
if interpro:
for y in ("status", "Target"):
try:
del x.qualifiers[y]
except KeyError:
    pass
fixed_features.append(x)
replacement_features.extend(fixed_features)
# We do this so we don't include the original set of features that we
# were rebasing against in our result.
rec.features = replacement_features
rec.annotations = {}
gffWrite([rec], sys.stdout)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
| description="rebase gff3 features against parent locations", epilog=""
)
parser.add_argument(
"parent", type=argparse.FileType("r"), help="Parent GFF3 annotations"
)
parser.add_argument(
"child",
type=argparse.FileType("r"),
help="Child GFF3 annotations to rebase against parent",
)
parser.add_argument(
"--interpro", action="store_true", help="Interpro specific modifications"
)
parser.add_argument(
"--protein2dna",
action="store_true",
help="Map protein translated results to original DNA data",
)
parser.add_argument("--map_by", help="Map by key", default="ID")
args = parser.parse_args()
rebase(**vars(args))
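# A minimal programmatic sketch (file names are hypothetical, not from this
# script): rebase InterProScan protein hits onto the source DNA coordinates.
#
#     with open("genome.gff3") as parent, open("interproscan.gff3") as child:
#         rebase(parent, child, interpro=True, protein2dna=True)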
|
from __future__ import unicode_literals
import binascii
from pymacaroons import Caveat
from pymacaroons.utils import (
convert_to_bytes,
sign_first_party_caveat
)
from .base_first_party import (
BaseFirstPartyCaveatDelegate,
BaseFirstPartyCaveatVerifierDelegate
)
class FirstPartyCaveatDelegate(BaseFirstPartyCaveatDelegate):
def __init__(self, *args, **kwargs):
super(FirstPartyCaveatDelegate, self).__init__(*args, **kwargs)
def add_first_party_caveat(self, macaroon, predicate, **kwargs):
predicate = convert_to_bytes(predicate)
caveat = Caveat(caveat_id=convert_to_bytes(predicate))
macaroon.caveats.append(caveat)
encode_key = binascii.unhexlify(macaroon.signature_bytes)
macaroon.signature = sign_first_party_caveat(encode_key, predicate)
return macaroon
class FirstPartyCaveatVerifierDelegate(BaseFirstPartyCaveatVerifierDelegate):
def __init__(self, *args, **kwargs):
super(FirstPartyCaveatVerifierDelegate, self).__init__(*args, **kwargs)
def verify_first_party_caveat(self, verifier, caveat, signature):
predicate = caveat.caveat_id
caveat_met = sum(callback(predicate)
for callback in verifier.callbacks)
return caveat_met
def update_signature(self, signature, caveat):
return binascii.unhexlify(
sign_first_party_caveat(
signature,
caveat._caveat_id
)
)
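# An end-to-end sketch of how these delegates are reached through the public
# pymacaroons API (illustrative only):
#
#     from pymacaroons import Macaroon, Verifier
#     m = Macaroon(location='example.com', identifier='key-id', key='secret')
#     m.add_first_party_caveat('account = 1234')  # FirstPartyCaveatDelegate
#     v = Verifier()
#     v.satisfy_exact('account = 1234')           # registers a callback
#     v.verify(m, 'secret')                       # FirstPartyCaveatVerifierDelegate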
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#HW3 for EECS 598 Motion Planning
import time
import openravepy
import userdefined as us
import kdtree
import transformationFunction as tf
from random import randrange
#### YOUR IMPORTS GO HERE ####
handles = [];
#### END OF YOUR IMPORTS ####
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
def waitrobot(robot):
"""busy wait for robot completion"""
while not robot.GetController().IsDone():
time.sleep(0.01)
def tuckarms(env,robot):
with env:
jointnames = ['l_shoulder_lift_joint','l_elbow_flex_joint','l_wrist_flex_joint','r_shoulder_lift_joint','r_elbow_flex_joint','r_wrist_flex_joint']
robot.SetActiveDOFs([robot.GetJoint(name).GetDOFIndex() for name in jointnames])
robot.SetActiveDOFValues([1.29023451,-2.32099996,-0.69800004,1.27843491,-2.32100002,-0.69799996]);
robot.GetController().SetDesired(robot.GetDOFValues());
waitrobot(robot)
def stringToFloatList(path):
path = path.split('\n')
for line in xrange(len(path)):
path[line] = path[line].split(',')
for i in xrange(len(path[line])):
path[line][i]=float(path[line][i])
return path
def drawPath(path,robot,color,size):
if type(path) is str: path = stringToFloatList(path)
for i in path:
robot.SetActiveDOFValues(i)
handles.append(env.plot3(points=robot.GetTransform()[0:3,3],pointsize=size,colors=color,drawstyle=1))
if __name__ == "__main__":
env = Environment()
env.SetViewer('qtcoin')
collisionChecker = RaveCreateCollisionChecker(env,'ode')
env.SetCollisionChecker(collisionChecker)
env.Reset()
# load a scene from ProjectRoom environment XML file
env.Load('env/bitreequad.env.xml')
time.sleep(0.1)
# 1) get the 1st robot that is inside the loaded scene
# 2) assign it to the variable named 'robot'
robot = env.GetRobots()[0]
robot.SetActiveDOFs([],DOFAffine.X|DOFAffine.Y|DOFAffine.Z|DOFAffine.RotationQuat)
# print robot.GetActiveDOFValues()
# raw_input("Press enter to move robot...")
# qt = tf.quaternion_from_euler(0.5,0.5,0.75,'rzxz')
# startconfig = [4.0,-1.5 ,0.2] + list(qt)
# print startconfig
startconfig = [ 4.0,-1.5 ,0.2 ,0.0, 0.0, 0.0 ];
robot.SetActiveDOFValues(us.E2Q(startconfig));
# robot.GetController().SetDesired(robot.GetDOFValues());
# waitrobot(robot);
waitrobot(robot)
print "test update state"
# s1 = [1,1,1,1,0,0,0,0.2,0.2,0.2,0.1,0.1,-0.1]
avf = 1.85*9.8/4
u = [-0.5*avf,2*avf,-0.5*avf,3*avf]
ts = 0.02
t = range(0,100)
while 1:
s2 = [0,0,0,0,0,0,1,0,0,0,0,0,0]
for tt in t:
s2 = us.updateState(s2,u,ts)
x1 = array(s2[0:3])
v1 = array(s2[3:6])
Q1 = array(s2[6:10])
W1 = array(s2[10:13])
E1 = tf.euler_from_quaternion(Q1)
C = list(x1)+list(Q1)
robot.SetActiveDOFValues(C);
time.sleep(0.02)
# traj = RaveCreateTrajectory(env,'');
# config = robot.GetActiveConfigurationSpecification('linear');
# config.AddDeltaTimeGroup();
# traj.Init(config);
# # myPath = [ [point.x, point.y,point.theta,i*0.01] for i,point in enumerate(path) ];
# num = 0
# for pathNode in path:
# num += 1
# traj.Insert(num,pathNode,config,True)
# # for i ,wayPoint in enumerate(myPath):
# # traj.Insert(i,wayPoint,config,True);
# robot.GetController().SetPath(traj);
# # robot.GetController().SetPath(traj)
### END OF YOUR CODE ###
raw_input("Press enter to exit...")
|
import sys
import os
import struct
import binascii
import errno  # needed for errno.errorcode in the scan-enable error path
from time import sleep
from ctypes import (CDLL, get_errno)
from ctypes.util import find_library
from socket import (socket, AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI, SOL_HCI, HCI_FILTER,)
os.system("hciconfig hci0 down")
os.system("hciconfig hci0 up")
if not os.geteuid() == 0:
sys.exit("script only works as root")
btlib = find_library("bluetooth")
if not btlib:
raise Exception(
"Can't find required bluetooth libraries"
" (need to install bluez)"
)
bluez = CDLL(btlib, use_errno=True)
dev_id = bluez.hci_get_route(None)
sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
sock.bind((dev_id,))
err = bluez.hci_le_set_scan_parameters(sock.fileno(), 0, 0x10, 0x10, 0, 0, 1000);
if err < 0:
raise Exception("Set scan parameters failed")
# occurs when scanning is still enabled from previous call
# allows LE advertising events
hci_filter = struct.pack(
"<IQH",
0x00000010,
0x4000000000000000,
0
)
sock.setsockopt(SOL_HCI, HCI_FILTER, hci_filter)
err = bluez.hci_le_set_scan_enable(
sock.fileno(),
1, # 1 - turn on; 0 - turn off
0, # 0-filtering disabled, 1-filter out duplicates
1000 # timeout
)
if err < 0:
errnum = get_errno()
raise Exception("{} {}".format(
errno.errorcode[errnum],
os.strerror(errnum)
))
distanceAway = 1 # distance away from the estimote beacon in meter
with open("RSSI_data" + str(distanceAway) + ".csv","w") as out_file:
for x in range (1,100):
data = sock.recv(1024)
RSSI = int(binascii.b2a_hex(data[-1]),16)-255
out_string = ""
out_string += str(RSSI)
out_string += "\n"
out_file.write(out_string)
sock.close()
sys.exit()
|
"""
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nessusobject import NessusObject
class Scanner(NessusObject):
"""
A Nessus scanner instance.

.. _Google Python Style Guide:
   http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
"""
def __init__(self, server):
"""Constructor"""
super(Scanner, self).__init__(server)
self._id = None
self._uuid = None
self._name = None
self._type = None
self._status = None
self._scan_count = 0
self._engine_version = None
self._platform = None
self._loaded_plugin_set = None
self._registration_code = None
self._owner = None
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = int(value)
@property
def uuid(self):
return self._uuid
@uuid.setter
def uuid(self, value):
self._uuid = str(value)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = str(value)
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = str(value)
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = str(value)
@property
def scan_count(self):
return self._scan_count
@scan_count.setter
def scan_count(self, value):
self._scan_count = int(value)
@property
def engine_version(self):
return self._engine_version
@engine_version.setter
def engine_version(self, value):
self._engine_version = str(value)
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, value):
self._platform = str(value)
@property
def loaded_plugin_set(self):
return self._loaded_plugin_set
@loaded_plugin_set.setter
def loaded_plugin_set(self, value):
self._loaded_plugin_set = str(value)
@property
def registration_code(self):
return self._registration_code
@registration_code.setter
def registration_code(self, value):
self._registration_code = str(value)
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
self._owner = str(value)
|
, arr)
def FetchBlobs(names):
"""Fetches a list of blobs from the workspace.
Inputs:
names: list of names of blobs - strings or BlobReferences
Returns:
list of fetched blobs
"""
return [FetchBlob(name) for name in names]
def FetchBlob(name):
"""Fetches a blob from the workspace.
Inputs:
name: the name of the blob - a string or a BlobReference
Returns:
Fetched blob (numpy array or string) if successful
"""
return C.fetch_blob(StringifyBlobName(name))
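# A minimal usage sketch (assumes a net has already written a blob named
# "data" into the current workspace; the blob name is illustrative):
#
#     arr = FetchBlob("data")            # -> numpy array (or string)
#     arr_list = FetchBlobs(["data"])    # -> list with one fetched blob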
def ApplyTransform(transform_key, net):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
Returns:
Transformed NetDef protobuf object.
"""
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
def ApplyTransformIfFaster(transform_key, net, init_net, **kwargs):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef, only if it runs faster than the original.
The runs are performed on the current active workspace (gWorkspace).
You should initialize that workspace before making a call to this function.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
init_net: The net to initialize the workspace.
warmup_runs (optional):
Determines how many times the net is run before testing.
Will be 5 by default.
main_runs (optional):
Determines how many times the net is run during testing.
Will be 10 by default.
improvement_threshold (optional):
Determines the factor which the new net needs to be faster
in order to replace the old. Will be 1.01 by default.
Returns:
Either a Transformed NetDef protobuf object, or the original netdef.
"""
warmup_runs = kwargs['warmup_runs'] if 'warmup_runs' in kwargs else 5
main_runs = kwargs['main_runs'] if 'main_runs' in kwargs else 10
improvement_threshold = kwargs['improvement_threshold'] \
if 'improvement_threshold' in kwargs else 1.01
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform_if_faster(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
init_net.SerializeToString(),
warmup_runs,
main_runs,
float(improvement_threshold),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
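# Illustrative call (the transform key and nets are hypothetical; the kwargs
# shown are the optional tuning knobs documented in the docstring above):
#
#     new_net = ApplyTransformIfFaster("my_transform", predict_net, init_net,
#                                      warmup_runs=3, main_runs=20,
#                                      improvement_threshold=1.05)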
def GetNameScope():
"""Return the current namescope string. To be used to fetch blobs"""
return scope.CurrentNameScope()
class _BlobDict(object):
"""Provides python dict compatible way to do fetching and feeding"""
def __getitem__(self, key):
return FetchBlob(key)
def __setitem__(self, key, value):
return FeedBlob(key, value)
def __len__(self):
return len(C.blobs())
def __iter__(self):
return C.blobs().__iter__()
def __contains__(self, item):
return C.has_blob(item)
blobs = _BlobDict()
################################################################################
# Utilities for immediate mode
#
# Caffe2's immediate mode implements the following behavior: between the two
# function calls StartImmediate() and StopImmediate(), for any operator that is
# called through CreateOperator(), we will also run that operator in a workspace
# that is specific to the immediate mode. The user is explicitly expected to
# make sure that these ops have proper inputs and outputs, i.e. one should not
# run an op where an external input is not created or fed.
#
# Users can use FeedImmediate() and FetchImmediate() to interact with blobs
# in the immediate workspace.
#
# Once StopImmediate() is called, all contents in the immediate workspace is
# freed up so one can continue using normal runs.
#
# The immediate mode is solely for debugging purposes and support will be very
# sparse.
################################################################################
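# A minimal sketch of an immediate-mode session, assuming core.CreateOperator
# is available in this scope (the blob and operator names are illustrative):
#
#     StartImmediate(i_know=True)
#     op = core.CreateOperator("ConstantFill", [], ["x"], shape=[1], value=1.0)
#     RunOperatorImmediate(op)
#     print(FetchImmediate("x"))
#     StopImmediate()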
_immediate_mode = False
_immediate_workspace_name = "_CAFFE2_IMMEDIATE"
_immediate_root_folder = ''
def IsImmediate():
return _immediate_mode
@contextlib.contextmanager
def WorkspaceGuard(workspace_name):
current = CurrentWorkspace()
SwitchWorkspace(workspace_name, True)
    try:
        yield
    finally:
        # restore the previous workspace even if the body raises
        SwitchWorkspace(current)
def StartImmediate(i_know=False):
global _immediate_mode
global _immediate_root_folder
if IsImmediate():
# already in immediate mode. We will kill the previous one
# and start from fresh.
StopImmediate()
_immediate_mode = True
with WorkspaceGuard(_immediate_workspace_name):
_immediate_root_folder = tempfile.mkdtemp()
ResetWorkspace(_immediate_root_folder)
if i_know:
# if the user doesn't want to see the warning message, sure...
return
print("""
Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL
feature and may very easily go wrong. This is because Caffe2 uses a
declarative way of defining operators and models, which is essentially
not meant to run things in an interactive way. Read the following carefully
to make sure that you understand the caveats.
(1) You need to make sure that the sequences of operators you create are
actually runnable sequentially. For example, if you create an op that takes
an input X, somewhere earlier you should have already created X.
(2) Caffe2 immediate uses one single workspace, so if the set of operators
you run are intended to be under different workspaces, they will not run.
To create boundaries between such use cases, you can call FinishImmediate()
and StartImmediate() manually to flush out everything no longer needed.
(3) Underlying objects held by the immediate mode may interfere with your
normal run. For example, if there is a leveldb that you opened in immediate
mode and did not close, your main run will fail because leveldb does not
support double opening. Immediate mode may also occupy a lot of memory esp.
on GPUs. Call FinishImmediate() as soon as possible when you no longer
need it.
(4) Immediate is designed to be slow. Every immediate call implicitly
creates a temp operator object, runs it, and destroys the operator. This
slow-speed run is by design to discourage abuse. For most use cases other
than debugging, do NOT turn on immediate mode.
(5) If there is anything FATAL happening in the underlying C++ code, the
immediate mode will immediately (pun intended) cause the runtime to crash.
Thus you should use immediate mode with extra care. If you still would
like to, have fun [https://xkcd.com/149/].
""")
def StopImmediate():
"""Stops an immediate mode run."""
# Phew, that was a dangerous ride.
global _immediate_mode
global _immediate_root_folder
if not IsImmediate():
return
with WorkspaceGuard(_immediate_workspace_name):
ResetWorkspace()
shutil.rmtree(_immediate_root_folder)
_immediate_root_folder = ''
_immediate_mode = False
def ImmediateBlobs():
with WorkspaceGuard(_immediate_workspace_name):
return Blobs()
def RunOperatorImmediate(op):
with WorkspaceGuard(_immediate_workspace_name):
RunOperatorOnce(op)
def FetchImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FetchBlob(*args, **kwargs)
def FeedImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FeedBlob(*args, **kwargs)
# CWorkspace utilities
def _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):
return CallWithExceptionIntercept(
ws._create_net,
ws._last_failed_op_net_position,
GetNetName(net),
|
match: case = 42
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <lmr@redhat.com>
import os
import glob
# pylint: disable=E0611
from setuptools import setup
VERSION = open('VERSION', 'r').read().strip()
VIRTUAL_ENV = 'VIRTUAL_ENV' in os.environ
def get_dir(system_path=None, virtual_path=None):
"""
Retrieve VIRTUAL_ENV friendly path
:param system_path: Relative system path
:param virtual_path: Overrides system_path for virtual_env only
:return: VIRTUAL_ENV friendly path
"""
if virtual_path is None:
virtual_path = system_path
if VIRTUAL_ENV:
if virtual_path is None:
virtual_path = []
return os.path.join(*virtual_path)
else:
if system_path is None:
system_path = []
return os.path.join(*(['/'] + system_path))
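# Illustrative behaviour (POSIX paths):
#
#     get_dir(['etc', 'avocado'])           # -> '/etc/avocado' system-wide,
#                                           #    'etc/avocado' under VIRTUAL_ENV
#     get_dir(['etc', 'avocado'], ['etc'])  # -> '/etc/avocado' system-wide,
#                                           #    'etc' under VIRTUAL_ENV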
def get_data_files():
def add_files(level=[]):
installed_location = ['usr', 'share', 'avocado-plugins-vt']
installed_location += level
level_str = '/'.join(level)
if level_str:
level_str += '/'
file_glob = '%s*' % level_str
files_found = [path for path in glob.glob(file_glob) if
os.path.isfile(path)]
return [((get_dir(installed_location, level)), files_found)]
data_files = [(get_dir(['etc', 'avocado', 'conf.d']),
['etc/avocado/conf.d/vt.conf'])]
data_files += [(get_dir(['usr', 'share', 'avocado-plugins-vt',
'test-providers.d']),
glob.glob('test-providers.d/*'))]
data_files_dirs = ['backends', 'shared']
for data_file_dir in data_files_dirs:
for root, dirs, files in os.walk(data_file_dir):
for subdir in dirs:
rt = root.split('/')
rt.append(subdir)
data_files += add_files(rt)
return data_files
setup(name='avocado-plugins-vt',
version=VERSION,
description='Avocado Virt Test Compatibility Layer plugin',
author='Avocado Developers',
author_email='avocado-devel@redhat.com',
url='http://github.com/avocado-framework/avocado-vt',
packages=['avocado_vt',
'avocado_vt.plugins',
'virttest',
'virttest.libvirt_xml',
'virttest.libvirt_xml.devices',
'virttest.libvirt_xml.nwfilter_protocols',
'virttest.qemu_devices',
'virttest.remote_commander',
'virttest.staging',
'virttest.staging.backports',
'virttest.tests',
'virttest.unittest_utils',
'virttest.utils_test',
          'virttest.utils_test.qemu'],
package_data={"virttest": ["*.*"]},
data_files=get_data_files(),
entry_points={
'avocado.plugins.cli': [
'vt-list = avocado_vt.plugins.vt_list:VTLister',
'vt = avocado_vt.plugins.vt:VTRun',
],
'avocado.plugins.cli.cmd': [
'vt-bootstrap = avocado_vt.plugins.vt_bootstrap:VTBootstrap',
],
'avocado.plugins.job.prepost': [
              'vt-joblock = avocado_vt.plugins.vt_joblock:VTJobLock'
],
},
)
|
# -*- coding:utf-8 -*-
from django.urls import path
from article.views.post import PostListApiView, PostCreateApiView, PostDetailApiView
urlpatterns = [
# 前缀:/api/v1/article/post/
path('create', PostCreateApiView.as_view(), name="create"),
    path('list', PostListApiView.as_view(), name="list"),
path('<int:pk>', PostDetailApiView.as_view(), name="detail"),
]
|
timeout = self._NOTIFICATION_DELAY
else:
timeout = None
try:
events = poll_obj.poll(timeout)
except OSError as e:
if e.errno == errno.EINTR:
continue
else:
raise
except select.error:
continue
else:
if not self._running:
break
elif not events:
self.notify()
else:
for fd, event in events:
if fd == self._inotify_fd:
self._handle_events()
finally:
with self._lock:
if self._inotify_fd is not None:
os.close(self._inotify_fd)
self._inotify_fd = None
if self._pipe_r is not None:
os.close(self._pipe_r)
self._pipe_r = None
os.close(self._pipe_w)
self._pipe_w = None
def refresh(self):
with self._lock:
if self._inotify_fd is None:
return
try:
if self._worktree is not None:
tracked_dirs = set(
os.path.dirname(os.path.join(self._worktree,
path))
for path in gitcmds.tracked_files())
self._refresh_watches(tracked_dirs,
self._worktree_wd_to_path_map,
self._worktree_path_to_wd_map)
git_dirs = set()
git_dirs.add(self._git_dir)
for dirpath, dirnames, filenames in core.walk(
os.path.join(self._git_dir, 'refs')):
git_dirs.add(dirpath)
self._refresh_watches(git_dirs,
self._git_dir_wd_to_path_map,
self._git_dir_path_to_wd_map)
self._git_dir_wd = \
self._git_dir_path_to_wd_map[self._git_dir]
except OSError as e:
if e.errno == errno.ENOSPC:
self._log_out_of_wds_message()
self._running = False
else:
raise
def _refresh_watches(self, paths_to_watch, wd_to_path_map,
path_to_wd_map):
watched_paths = set(path_to_wd_map)
for path in watched_paths - paths_to_watch:
wd = path_to_wd_map.pop(path)
            wd_to_path_map.pop(wd)
try:
inotify.rm_watch(self._inotify_fd, wd)
except OSError as e:
if e.errno == errno.EINVAL:
# This error can occur if the target of the wd was
# removed on the filesystem before we call
# inotify.rm_watch() so ignore it.
pass
else:
raise
for path in paths_to_watch - watched_paths:
try:
wd = inotify.add_watch(self._inotify_fd, core.encode(path),
self._ADD_MASK)
except OSError as e:
if e.errno in (errno.ENOENT, errno.ENOTDIR):
# These two errors should only occur as a result of
# race conditions: the first if the directory
# referenced by path was removed or renamed before the
# call to inotify.add_watch(); the second if the
# directory referenced by path was replaced with a file
# before the call to inotify.add_watch(). Therefore we
# simply ignore them.
pass
else:
raise
else:
wd_to_path_map[wd] = path
path_to_wd_map[path] = wd
def _check_event(self, wd, mask, name):
if mask & inotify.IN_Q_OVERFLOW:
self._force_notify = True
elif not mask & self._TRIGGER_MASK:
pass
elif mask & inotify.IN_ISDIR:
pass
elif wd in self._worktree_wd_to_path_map:
if self._use_check_ignore:
self._file_paths.add(
os.path.join(self._worktree_wd_to_path_map[wd],
                                 core.decode(name)))
else:
self._force_notify = True
elif wd == self._git_dir_wd:
name = core.decode(name)
if name == 'HEAD' or name == 'index':
self._force_notify = True
elif (wd in self._git_dir_wd_to_path_map
and not core.decode(name).endswith('.lock')):
self._force_notify = True
def _handle_events(self):
        for wd, mask, cookie, name in \
inotify.read_events(self._inotify_fd):
if not self._force_notify:
self._check_event(wd, mask, name)
def stop(self):
self._running = False
with self._lock:
if self._pipe_w is not None:
os.write(self._pipe_w, bchr(0))
self.wait()
if AVAILABLE == 'pywin32':
class _Win32Watch(object):
def __init__(self, path, flags):
self.flags = flags
self.handle = None
self.event = None
try:
self.handle = win32file.CreateFileW(
path,
0x0001, # FILE_LIST_DIRECTORY
win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE,
None,
win32con.OPEN_EXISTING,
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED,
None)
self.buffer = win32file.AllocateReadBuffer(8192)
self.event = win32event.CreateEvent(None, True, False, None)
self.overlapped = pywintypes.OVERLAPPED()
self.overlapped.hEvent = self.event
self._start()
except:
self.close()
raise
def _start(self):
win32file.ReadDirectoryChangesW(self.handle, self.buffer, True,
self.flags, self.overlapped)
def read(self):
if win32event.WaitForSingleObject(self.event, 0) \
== win32event.WAIT_TIMEOUT:
result = []
else:
nbytes = win32file.GetOverlappedResult(self.handle,
self.overlapped, False)
result = win32file.FILE_NOTIFY_INFORMATION(self.buffer, nbytes)
self._start()
return result
def close(self):
if self.handle is not None:
win32file.CancelIo(self.handle)
win32file.CloseHandle(self.handle)
if self.event is not None:
win32file.CloseHandle(self.event)
class _Win32Thread(_BaseThread):
_FLAGS = (win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
|
te_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
with variable_scope.variable_scope("block", initializer=initializer):
m0 = array_ops.zeros([1, 2])
m1 = array_ops.zeros([1, 2])
m2 = array_ops.zeros([1, 2])
m3 = array_ops.zeros([1, 2])
g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
[lstm_ops.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
sess.run([variables.global_variables_initializer()])
block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
x.name: x_values,
m0.name: m0_val,
m1.name: m1_val,
m2.name: m2_val,
m3.name: m3_val
})
self.assertEqual(len(basic_res), len(block_res))
for basic, block in zip(basic_res, block_res):
self.assertAllClose(basic, block)
def testLSTMBasicToBlock(self):
with self.test_session(use_gpu=True) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=False)
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-6, atol=1e-6)
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_state, fused_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(block_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
def testLSTMBasicToBlockPeeping(self):
with self.test_session(use_gpu=True) as sess:
(basic_state, fused_state, basic_outputs, block_outputs, fused_outputs,
basic_grads, block_grads, fused_grads, basic_wgrads, block_wgrads,
fused_wgrads) = blocks_match(
sess, use_peephole=True)
self.assertAllClose(basic_outputs, block_outputs)
self.assertAllClose(basic_grads, block_grads)
for basic, block in zip(basic_wgrads, block_wgrads):
self.assertAllClose(basic, block, rtol=1e-6, atol=1e-6)
self.assertAllClose(basic_outputs, fused_outputs)
self.assertAllClose(basic_state, fused_state)
self.assertAllClose(basic_grads, fused_grads)
for basic, fused in zip(block_wgrads, fused_wgrads):
self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
def testLSTMFusedSequenceLengths(self):
"""Verify proper support for sequence lengths in LSTMBlockFusedCell."""
with self.test_session(use_gpu=True) as sess:
batch_size = 3
input_size = 4
cell_size = 5
max_sequence_length = 6
inputs = []
for _ in range(max_sequence_length):
inp = ops.convert_to_tensor(
np.random.randn(batch_size, input_size), dtype=dtypes.float32)
inputs.append(inp)
seq_lengths = constant_op.constant([3, 4, 5])
cell_inputs = array_ops.stack(inputs)
initializer = init_ops.random_uniform_initializer(
-0.01, 0.01, seed=19890213)
with variable_scope.variable_scope("lstm_cell", initializer=initializer):
# magic naming so that the cells pick up these variables and reuse them
variable_scope.get_variable(
"kernel",
shape=[input_size + cell_size, cell_size * 4],
dtype=dtypes.float32)
variable_scope.get_variable(
"bias",
shape=[cell_size * 4],
dtype=dtypes.float32,
initializer=init_ops.zeros_initializer())
cell = lstm_ops.LSTMBlockFusedCell(
cell_size, cell_clip=0, use_peephole=False, reuse=True,
name="lstm_cell")
fused_outputs_op, fused_state_op = cell(
cell_inputs, dtype=dtypes.float32, sequence_length=seq_lengths)
cell_vars = [
v for v in variables.trainable_variables()
if v.name.endswith("kernel") or v.name.endswith("bias")
]
# Verify that state propagation works if we turn our sequence into
# tiny (single-time) subsequences, i.e. unfuse the cell
unfused_outputs_op = []
state = None
with variable_scope.variable_scope(
variable_scope.get_variable_scope(), reuse=True):
for i, inp in enumerate(inputs):
lengths = [int(i < l) for l in seq_lengths.eval()]
output, state = cell(
array_ops.expand_dims(inp, 0),
initial_state=state,
dtype=dtypes.float32,
sequence_length=lengths)
unfused_outputs_op.append(output[0])
unfused_outputs_op = array_ops.stack(unfused_outputs_op)
sess.run([variables.global_variables_initializer()])
unfused_outputs, unfused_state = sess.run([unfused_outputs_op, state[0]])
unfused_grads = sess.run(
gradients_impl.gradients(unfused_outputs_op, inputs))
unfused_wgrads = sess.run(
gradients_impl.gradients(unfused_outputs_op, cell_vars))
fused_outputs, fused_state = sess.run(
[fused_outputs_op, fused_state_op[0]])
fused_grads = sess.run(gradients_impl.gradients(fused_outputs_op, inputs))
fused_wgrads = sess.run(
gradients_impl.gradients(fused_outputs_op, cell_vars))
self.assertAllClose(fused_outputs, unfused_outputs)
      self.assertAllClose(fused_state, unfused_state)
      self.assertAllClose(fused_grads, unfused_grads)
for fused, unfused in zip(fused_wgrads, unfused_wgrads):
self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)
#### Benchmarking.
class BenchmarkLSTMBlock(test.Benchmark):
def benchmarkLSTMBlockCellFpropWithDynamicRNN(self):
print("BlockLSTMCell forward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell Seconds per inference.")
print("batch_size,cell_size,input_size,time_steps,use_gpu,wall_time")
iters = 10
for config in benchmarking.dict_product({
"batch_size": [1, 8, 13, 32, 67, 128],
"cell_size": [128, 250, 512, 650, 1024, 1350],
"time_steps": [40],
"use_gpu": [True, False]
}):
with ops.Graph().as_default():
with benchmarking.device(use_gpu=config["use_gpu"]):
inputs = variable_scope.get_variable(
"x",
[config["time_steps"], config["batch_size"], config["cell_size"]])
cell = lstm_ops.LSTMBlockCell(config["cell_size"])
outputs = rnn.dynamic_rnn(
cell, inputs, time_major=True, dtype=dtypes.float32)
init_op = variables.global_variables_initializer()
with session.Session() as sess:
sess.run(init_op)
wall_time = benchmarking.seconds_per_run(outputs, sess, iters)
# Print to stdout. If the TEST_REPORT_FILE_PREFIX environment variable
# is set, this will produce a copy-paste-able CSV file.
print(",".join(
map(str, [
config["batch_size"], config["cell_size"], config["cell_size"],
config["time_steps"], config["use_gpu"], wall_time
])))
benchmark_name_template = "_".join([
"LSTMBlockCell_fprop", "BS%(batch_size)i", "CS%(cell_size)i",
"IS%(cell_size)i", "TS%(time_steps)i", "gpu_%(use_gpu)s"
])
self.report_benchmark(
name=benchmark_name_template % config,
iters=iters,
wall_time=wall_time,
extras=config)
def benchmarkLSTMBlockCellBpropWithDynamicRNN(self):
print("BlockLSTMCell backward propagation via dynamic_rnn().")
print("--------------------------------------------------------------")
print("LSTMBlockCell |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from core import FeatureExtractorRegistry
from twinkle.connectors.core import ConnectorRegistry
class FeatureExtractorPipelineFactory(object):
"""
Factory object for creating a pipeline from a file
"""
def __init__(self):
"""
"""
pass
def buildInput(self, config_data):
"""
builds an input from the ConnectorRegistry
"""
input_name = config_data["name"]
input_config = config_data["config"]
return ConnectorRegistry.buildConnector(input_name, input_config)
def buildOutput(self, config_data):
"""
        builds an output from the ConnectorRegistry
"""
output_name = config_data["name"]
output_config = config_data["config"]
return ConnectorRegistry.buildConnector(output_name, output_config)
def buildExtractor(self, config_data):
"""
"""
extractor_name = config_data["name"]
extractor_config = config_data["config"]
return FeatureExtractorRegistry.buildExtractor(extractor_name, extractor_config)
def buildFromDictionary(self,config_data):
"""
"""
if "input" not in config_data:
raise Exception("No input source was specified in the configuration data")
if "output" not in config_data:
raise Exception("No output source was specified in the configuration data")
#build input
input_data = config_data["input"]
input = self.buildInput(input_data)
# build output
output_data = config_data["output"]
output = self.buildOutput(output_data)
# create the pipeline
pipeline = FeatureExtractorPipeline(input, output)
# get feature extractors
extractors = config_data["extractors"]
# add each extractor
for extractor_config in extractors:
extractor = self.buildExtractor(extractor_config)
pipeline.addExtractor(extractor)
return pipeline
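# A minimal illustrative configuration (the connector/extractor names are
# hypothetical and would have to exist in ConnectorRegistry and
# FeatureExtractorRegistry):
#
#     config = {
#         "input":  {"name": "csv_input",   "config": {"path": "tweets.csv"}},
#         "output": {"name": "json_output", "config": {"path": "features.json"}},
#         "extractors": [{"name": "word_count", "config": {}}],
#     }
#     pipeline = FeatureExtractorPipelineFactory().buildFromDictionary(config)
#     pipeline.run()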
class FeatureExtractorPipeline(object):
"""
Simple feature extractor pipeline.
Needs a lot of features in the future such as dependency graphs to resolve some of the intermediates
and the ability to do second passes for items which need to be normalized.
"""
def __init__(self, input, output):
self.feature_extractors = []
self.input = input
        self.output = output
def addExtractor(self, extractor):
"""
add Extractor to the pipeline
"""
self.feature_extractors.append(extractor)
def run(self):
"""
runs the pipeline
"""
processed_items = []
# iterate through each item
for item in self.input:
item_cookie = { "tweet": item, "text": item.text}
output = {}
            # first do preprocessing
for extractor in self.feature_extractors:
                extractor.extract(item, item_cookie, output)
print output
# write output
self.output.write(output)
|
from setuptools import setup
setup(
# general meta
name='elasticity',
version='0.7',
author='Brian C. Dilley - Flipagram',
author_email='brian@flipagram.com',
description='Python based command line tool for managing ElasticSearch clusters.',
platforms='any',
url='https://github.com/Cheers-Dev/elasticity',
download_url='https://github.com/Cheers-Dev/elasticity',
# packages
packages=[
'elasticity'
    ],
# dependencies
install_requires=[
'elasticsearch>=1.4.0',
'pyyaml>=3.10'
],
# additional files to include
    include_package_data=True,
# the scripts
scripts=['scripts/elasticity'],
# wut?
classifiers=['Intended Audience :: Developers']
)
|
matching_answer])
for key, value in temp_dict.items():
# Each value now has hint, votes, matching_answer.
temp_dict[key] = value + [matching_answer]
matching_hints.update(temp_dict)
# matching_hints now maps pk's to lists of [hint, votes, matching_answer]
# Finally, randomly choose a subset of matching_hints to actually show.
if not matching_hints:
# No hints to give. Return.
return
# Get the top hint, plus two random hints.
n_hints = len(matching_hints)
hints = []
# max(dict) returns the maximum key in dict.
# The key function takes each pk, and returns the number of votes for the
# hint with that pk.
best_hint_index = max(matching_hints, key=lambda pk: matching_hints[pk][1])
hints.append(matching_hints[best_hint_index][0])
best_hint_answer = matching_hints[best_hint_index][2]
        # The brackets surrounding the index are for backwards compatibility purposes.
# (It used to be that each answer was paired with multiple hints in a list.)
self.previous_answers += [[best_hint_answer, [best_hint_index]]]
for _ in xrange(min(2, n_hints - 1)):
# Keep making random hints until we hit a target, or run out.
while True:
# random.choice randomly chooses an element from its input list.
# (We then unpack the item, in this case data for a hint.)
(hint_index, (rand_hint, _, hint_answer)) =\
random.choice(matching_hints.items())
if rand_hint not in hints:
break
hints.append(rand_hint)
self.previous_answers += [[hint_answer, [hint_index]]]
return {'hints': hints,
'answer': answer}
def get_feedback(self, data):
"""
The student got it correct. Ask him to vote on hints, or submit a hint.
Args:
`data` -- not actually used. (It is assumed that the answer is correct.)
Output keys:
- 'answer_to_hints': a nested dictionary.
answer_to_hints[answer][hint_pk] returns the text of the hint.
- 'user_submissions': the same thing as self.user_submissions. A list of
the answers that the user previously submitted.
"""
# The student got it right.
# Did he submit at least one wrong answer?
if len(self.user_submissions) == 0:
# No. Nothing to do here.
return
# Make a hint-voting interface for each wrong answer. The student will only
# be allowed to make one vote / submission, but he can choose which wrong answer
# he wants to look at.
answer_to_hints = {} # answer_to_hints[answer text][hint pk] -> hint text
# Go through each previous answer, and populate index_to_hints and index_to_answer.
for i in xrange(len(self.previous_answers)):
answer, hints_offered = self.previous_answers[i]
if answer not in answer_to_hints:
answer_to_hints[answer] = {}
if answer in self.hints:
# Go through each hint, and add to index_to_hints
for hint_id in hints_offered:
if (hint_id is not None) and (hint_id not in answer_to_hints[answer]):
try:
answer_to_hints[answer][hint_id] = self.hints[answer][str(hint_id)][0]
except KeyError:
# Sometimes, the hint that a user saw will have been deleted by the instructor.
continue
return {'answer_to_hints': answer_to_hints,
'user_submissions': self.user_submissions}
def tally_vote(self, data):
"""
Tally a user's vote on his favorite hint.
Args:
`data` -- expected to have the following keys:
'answer': text of answer we're voting on
'hint': hint_pk
'pk_list': A list of [answer, pk] pairs, each of which representing a hint.
We will return a list of how many votes each hint in the list has so far.
It's up to the browser to specify which hints to return vote counts for.
Returns key 'hint_and_votes', a list of (hint_text, #votes) pairs.
"""
if self.user_voted:
return {'error': 'Sorry, but you have already voted!'}
ans = data['answer']
if not self.validate_answer(ans):
# Uh oh. Invalid answer.
log.exception('Failure in hinter tally_vote: Unable to parse answer: {ans}'.format(ans=ans))
return {'error': 'Failure in voting!'}
hint_pk = str(data['hint'])
# We use temp_dict because we need to do a direct write for the database to update.
temp_dict = self.hints
try:
temp_dict[ans][hint_pk][1] += 1
except KeyError:
            log.exception('''Failure in hinter tally_vote: User voted for non-existent hint:
Answer={ans} pk={hint_pk}'''.format(ans=ans, hint_pk=hint_pk))
return {'error': 'Failure in voting!'}
self.hints = temp_dict
# Don't let the user vote again!
self.user_voted = True
# Return a list of how many votes each hint got.
pk_list = json.loads(data['pk_list'])
hint_and_votes = []
for answer, vote_pk in pk_list:
if not self.validate_answer(answer):
log.exception('In hinter tally_vote, couldn\'t parse {ans}'.format(ans=answer))
continue
try:
hint_and_votes.append(temp_dict[answer][str(vote_pk)])
except KeyError:
log.exception('In hinter tally_vote, couldn\'t find: {ans}, {vote_pk}'.format(
ans=answer, vote_pk=str(vote_pk)))
hint_and_votes.sort(key=lambda pair: pair[1], reverse=True)
# Reset self.previous_answers and user_submissions.
self.previous_answers = []
self.user_submissions = []
return {'hint_and_votes': hint_and_votes}
def submit_hint(self, data):
"""
Take a hint submission and add it to the database.
Args:
`data` -- expected to have the following keys:
            'answer': text of answer
'hint': text of the new hint that the user is adding
Returns a thank-you message.
"""
# Do html escaping. Perhaps in the future do profanity filtering, etc. as well.
hint = escape(data['hint'])
answer = data['answer']
        if not self.validate_answer(answer):
log.exception('Failure in hinter submit_hint: Unable to parse answer: {ans}'.format(
ans=answer))
return {'error': 'Could not submit answer'}
# Only allow a student to vote or submit a hint once.
if self.user_voted:
return {'message': 'Sorry, but you have already voted!'}
# Add the new hint to self.hints or self.mod_queue. (Awkward because a direct write
# is necessary.)
if self.moderate == 'True':
temp_dict = self.mod_queue
else:
temp_dict = self.hints
if answer in temp_dict:
temp_dict[answer][str(self.hint_pk)] = [hint, 1] # With one vote (the user himself).
else:
temp_dict[answer] = {str(self.hint_pk): [hint, 1]}
self.hint_pk += 1
if self.moderate == 'True':
self.mod_queue = temp_dict
else:
self.hints = temp_dict
# Mark the user has having voted; reset previous_answers
self.user_voted = True
self.previous_answers = []
self.user_submissions = []
return {'message': 'Thank you for your hint!'}
class CrowdsourceHinterDescriptor(CrowdsourceHinterFields, RawDescriptor):
module_class = CrowdsourceHinterModule
stores_state = True
@classmethod
    def definition_from_xml(cls, xml_object,
|
from __future__ import print_function
import argparse
import yaml
from .bocca import make_project, ProjectExistsError
def main():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=argparse.FileType('r'),
help='Project description file')
parser.add_argument('--clobber', action='store_true',
help='Clobber an existing project')
args = parser.parse_args()
    try:
make_project(yaml.load(args.file), clobber=args.clobber)
except ProjectExistsError as error:
print('The specified project (%s) already exists. Exiting.' % error)
if __name__ == '__main__':
    main()
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from flask import jsonify
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.api.app import NotFoundError, BadRequest, etag
from flexget.plugins.internal.api_tvmaze import APITVMaze as tvm
tvmaze_api = api.namespace('tvmaze', description='TVMaze Shows')
class ObjectsContainer(object):
actor_object = {
'type': 'object',
'properties': {
"last_update": {'type': 'string', 'format': 'date-time'},
"medium_image": {'type': 'string'},
"name": {'type': 'string'},
"original_image": {'type': 'string'},
"tvmaze_id": {'type': 'integer'},
"url": {'type': 'string'}
}
}
schedule_object = {
'type': 'object',
'properties': {
"days": {'type': 'array', 'items': {'type': 'string'}},
"time": {'type': 'string'}
}
}
tvmaze_series_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'status': {'type': 'string'},
'rating': {'type': 'number'},
'genres': {'type': 'array', 'items': {'type': 'string'}},
'weight': {'type': 'integer'},
'updated': {'type': 'string', 'format': 'date-time'},
'name': {'type': 'string'},
'language': {'type': 'string'},
'schedule': schedule_object,
'url': {'type': 'string', 'format': 'url'},
'original_image': {'type': 'string'},
'medium_image': {'type': 'string'},
'tvdb_id': {'type': 'integer'},
'tvrage_id': {'type': 'integer'},
'premiered': {'type': 'string', 'format': 'date-time'},
'year': {'type': 'integer'},
'summary': {'type': 'string'},
'webchannel': {'type': ['string', 'null']},
'runtime': {'type': 'integer'},
'show_type': {'type': 'string'},
'network': {'type': ['string', 'null']},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'status', 'rating', 'genres', 'weight', 'updated', 'name', 'language',
'schedule', 'url', 'original_image', 'medium_image', 'tvdb_id', 'tvrage_id', 'premiered', 'year',
'summary', 'webchannel', 'runtime', 'show_type', 'network', 'last_update'],
'additionalProperties': False
}
tvmaze_episode_object = {
'type': 'object',
'properties': {
'tvmaze_id': {'type': 'integer'},
'series_id': {'type': 'integer'},
'number': {'type': 'integer'},
'season_number': {'type': 'integer'},
'title': {'type': 'string'},
'airdate': {'type': 'string', 'format': 'date-time'},
'url': {'type': 'string'},
'original_image': {'type': ['string', 'null']},
'medium_image': {'type': ['string', 'null']},
'airstamp': {'type': 'string', 'format': 'date-time'},
'runtime': {'type': 'integer'},
'summary': {'type': 'string'},
'last_update': {'type': 'string', 'format': 'date-time'}
},
'required': ['tvmaze_id', 'series_id', 'number', 'season_number', 'title', 'airdate', 'url', 'original_image',
'medium_image', 'airstamp', 'runtime', 'summary', 'last_update'],
'additionalProperties': False
}
tvmaze_series_schema = api.schema_model('tvmaze_series_schema', ObjectsContainer.tvmaze_series_object)
tvmaze_episode_schema = api.schema_model('tvmaze_episode_schema', ObjectsContainer.tvmaze_episode_object)
@tvmaze_api.route('/series/<string:title>/')
@api.doc(params={'title': 'TV Show name or TVMaze ID'})
class TVDBSeriesSearchApi(APIResource):
@etag
@api.response(200, 'Successfully found show', model=tvmaze_series_schema)
@api.response(NotFoundError)
def get(self, title, session=None):
"""TVMaze series lookup"""
try:
tvmaze_id = int(title)
except ValueError:
tvmaze_id = None
try:
if tvmaze_id:
series = tvm.series_lookup(tvmaze_id=tvmaze_id, session=session)
else:
series = tvm.series_lookup(series_name=title, session=session)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(series.to_dict())
episode_parser = api.parser()
episode_parser.add_argument('season_num', type=int, help='Season number')
episode_parser.add_argument('ep_num', type=int, help='Episode number')
episode_parser.add_argument('air_date', type=inputs.date_from_iso8601, help="Air date in the format of '2012-01-01'")
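# Illustrative request (the tvmaze_id is hypothetical): either air_date alone,
# or both season_num and ep_num, must be supplied, otherwise BadRequest is raised:
#     GET /tvmaze/episode/73/?season_num=2&ep_num=5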
@tvmaze_api.route('/episode/<int:tvmaze_id>/')
@api.doc(params={'tvmaze_id': 'TVMaze ID of show'})
@api.doc(parser=episode_parser)
class TVDBEpisodeSearchAPI(APIResource):
@etag
@api.response(200, 'Successfully found episode', tvmaze_episode_schema)
@api.response(NotFoundError)
@api.response(BadRequest)
def get(self, tvmaze_id, session=None):
"""TVMaze episode lookup"""
args = episode_parser.parse_args()
air_date = args.get('air_date')
season_num = args.get('season_num')
ep_num = args.get('ep_num')
kwargs = {'tvmaze_id': tvmaze_id,
'session': session}
if air_date:
kwargs['series_id_type'] = 'date'
kwargs['series_date'] = air_date
elif season_num and ep_num:
kwargs['series_id_type'] = 'ep'
kwargs['series_season'] = season_num
kwargs['series_episode'] = ep_num
else:
raise BadRequest('not enough parameters sent for lookup')
try:
episode = tvm.episode_lookup(**kwargs)
except LookupError as e:
raise NotFoundError(e.args[0])
return jsonify(episode.to_dict())
|
import argparse
import sys, os
import numpy as np
from copy import copy
parser = argparse.ArgumentParser()
parser.add_argument('qubit', help='qubit name')
parser.add_argument('direction', help='direction (X or Y)')
parser.add_argument('numPulses', type=int, help='log2(n) of the longest sequence n')
parser.add_argument('amplitude', type=float, help='pulse amplitude')
args = parser.parse_args()
from QGL import *
q = QubitFactory(args.qubit)
if args.direction == 'X':
pPulse = Xtheta(q, amp=args.amplitude)
mPulse = X90m(q)
else:
pPulse = Ytheta(q, amp=args.amplitude)
mPulse = Y90m(q)
# Exponentially growing repetitions of the target pulse, e.g.
# (1, 2, 4, 8, 16, 32, 64, 128, ...) x X90
seqs = [[pPulse]*n for n in 2**np.arange(args.numPulses+1)]
# measure each along Z or X/Y
seqs = [s + m for s in seqs for m in [ [MEAS(q)], [mPulse, MEAS(q)] ]]
# tack on calibrations to the beginning
seqs = [[Id(q), MEAS(q)], [X(q), MEAS(q)]] + seqs
# repeat each
repeated_seqs = [copy(s) for s in seqs for _ in range(2)]
fileNames = compile_to_hardware(repeated_seqs, fileName='RepeatCal/RepeatCal')
# plot_pulse_files(fileNames)
|
import requests
import urllib2
import argparse
from bs4 import BeautifulSoup
def get_best_torrent(query):
query = urllib2.quote(query)
r = requests.get('http://kat.cr/usearch/{}/'.format(query))
soup = BeautifulSoup(r.content)
torrents = soup.find('table', class_='data').find_all(has_class_odd_or_even, limit=5)
for torrent in torrents:
name = torrent.find('a', class_='cellMainLink').text.encode('utf-8')
print "Name: {}".format(name)
size = torrent.find(class_='nobr center').text
print "Size: {}".format(size)
        verified = bool(torrent.find('i', class_='ka ka-verify'))
        print "Verified Uploader: {}".format(verified)
        seeds = torrent.find(class_='green center').text
        print "Seeds: {}".format(seeds)
leeches = torrent.find(class_='red lasttd center').text
print "Leeches: {}".format(leeches)
try:
seed_to_leech = float(seeds) / float(leeches)
except ZeroDivisionError:
seed_to_leech = int(seeds)
print "Seed to leech ratio: {}".format(seed_to_leech)
magnet = torrent.find(class_='iaconbox').find('a', class_='imagnet')['href']
print "Magnet: \n{}\n".format(magnet)
def has_class_odd_or_even(tag):
if tag.has_attr('class'):
if 'odd' in tag.attrs['class'] or 'even' in tag.attrs['class']:
return True
return False
def command_line_runner():
parser = argparse.ArgumentParser(description='Get magnet links for torrents from the CLI')
parser.add_argument('name', type=str, nargs='*', help='Name of the torrent you are looking for')
args = parser.parse_args()
if not args.name:
parser.print_help()
else:
get_best_torrent(' '.join(args.name))
if __name__ == '__main__':
command_line_runner()
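# Illustrative invocation (assuming this file is saved as kat_magnet.py):
#     python kat_magnet.py ubuntu 16.04 server iso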
|
from xml.dom import minidom
from object_classes import *
from helpers import timeToSeconds
class HindenburgInt(object):
def __init__(self, project_file, version="Hindenburg Journalist 1.26.1936", version_num="1.26.1936"):
self.projectFile = project_file
self.version = version
self.version_num = version_num
def get_session_name(self):
for i in self.projectFile.split("/"):
name = i
name = name.split(".")
return name[0]
def read(self):
projectXML = minidom.parse(self.projectFile)
projectObj = Session(self.get_session_name())
projectXML = projectXML.getElementsByTagName("Session")
project = projectXML[0]
projectObj.samplerate = project.getAttribute('Samplerate')
fileSourceInfo = project.getElementsByTagName("AudioPool")[0]
fileSourcePath = fileSourceInfo.getAttribute("Location") + "/" + fileSourceInfo.getAttribute("Path")
projectObj.audio_folder = fileSourceInfo.getAttribute('Path')
projectObj.folder_path = fileSourceInfo.getAttribute('Location')
audioFiles = project.getElementsByTagName("File")
for file in audioFiles:
projectObj.addFile(fileSourcePath + "/" + file.getAttribute("Name"), int(file.getAttribute('Id')))
markers = project.getElementsByTagName("Marker")
for marker in markers:
projectObj.addMarker(marker.getAttribute('Id'), marker.getAttribute('Name'), float(marker.getAttribute('Time')))
tracks = project.getElementsByTagName("Track")
for track in tracks:
current_track = projectObj.addTrack(track.getAttribute('Name'))
try:
current_track.pan = self.interpretPan(track.getAttribute('Pan'))
except:
current_track.pan = 0
try:
current_track.volume = track.getAttribute('Volume')
except:
current_track.volume = 0
try:
if track.getAttribute('Solo') == "1":
current_track.solo = True
except:
current_track.solo = False
try:
if track.getAttribute('Mute') == "1":
                    current_track.mute = True
except:
current_track.mute = False
try:
if track.getAttribute('Rec') == "1":
current_track.rec = True
except:
current_track.rec = False
trackItems = track.getElementsByTagName("Region")
for item in trackItems:
new_item = current_track.addItem(projectObj.getFileByID(int(item.getAttribute('Ref'))))
try:
start = float(item.getAttribute('Start'))
except:
start = 0
new_item.startTime = start
try:
startAt = float(item.getAttribute('Offset'))
except:
startAt = 0
new_item.startAt = startAt
length = timeToSeconds(item.getAttribute('Length'))
new_item.length = length
try:
gain = float(item.getAttribute('Gain'))
except:
gain = 0
new_item.gain = gain
new_item.name = item.getAttribute('Name')
fades = item.getElementsByTagName('Fade')
if fades:
autoEnv = current_track.getEnvelope('Volume')
if autoEnv == "Envelope Not Found":
autoEnv = current_track.addEnvelope('Volume')
firstFade = True
for fade in fades:
startTime = new_item.startTime + float(fade.getAttribute('Start'))
if firstFade:
startValue = new_item.gain
else:
startValue = autoEnv.points[-1].value
firstFade = False
endTime = startTime + float(fade.getAttribute('Length'))
try:
                            endValue = float(fade.getAttribute('Gain'))
except:
endValue = 0
autoEnv.addPoint(startTime, startValue)
autoEnv.addPoint(endTime, endValue)
plugins = track.getElementsByTagName("Plugin")
for plugin in plugins:
            if plugin.getAttribute('Name') == 'Compressor':
pluginType = "Native"
else:
pluginType = "Plugin"
new_plugin = current_track.addFX(plugin.getAttribute('Name'), pluginType, int(plugin.getAttribute('Id')))
if pluginType == "Native":
if plugin.getAttribute('Name') == 'Compressor':
new_plugin.addProperty('UID', plugin.getAttribute('UID'))
new_plugin.addProperty('Comp', plugin.getAttribute('Comp'))
return projectObj
#Notes: Need to develop the section that reads the plugins...include support for external plugins, and the native EQ plugin
def write(self, destinationFile):
print('This function still needs to be written')
def interpretPan(self, amount):
num = -float(amount)
num = num*90
return num
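    # Illustrative: interpretPan('1') == -90.0 and interpretPan('-0.5') == 45.0,
    # i.e. the -1..1 pan range is mapped (sign-flipped) onto +/-90.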
|
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import tmp_dnn
import tta
features = [
# "hu",
# "tutorial",
"haralick",
# "aaronmoments",
# "lbp",
# "pftas",
# "zernike_moments",
# "image_size",
]
batch_size = 128
chunk_size = 32768
num_chunks_train = 240
momentum = 0.9
learning_rate_schedule = {
0: 0.001,
100: 0.0001,
200: 0.00001,
}
validate_every = 40
save_every = 40
sdir = "/mnt/storage/users/avdnoord/git/kaggle-plankton/predictions/"
train_pred_file = sdir+""
valid_pred_file = sdir+""
test_pred_file = sdir+"test--sharding_blend_pl_blend4_convroll4_doublescale_fs5_no_dropout_33_66.npy"
data_loader = load.PredictionsWithFeaturesDataLoader(
features = features,
train_pred_file=train_pred_file,
valid_pred_file=valid_pred_file,
test_pred_file=test_pred_file,
num_chunks_train=num_chunks_train,
chunk_size=chunk_size)
create_train_gen = lambda: data_loader.create_random_gen()
create_eval_train_gen = lambda: data_loader.create_fixed_gen("train")
create_eval_valid_gen = lambda: data_loader.create_fixed_gen("valid")
create_eval_test_gen = lambda: data_loader.create_fixed_gen("test")
def build_model():
l0 = nn.layers.InputLayer((batch_size, data.num_classes))
l0_size = nn.layers.InputLayer((batch_size, 52))
l1_size = nn.layers.DenseLayer(l0_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
l2_size = nn.layers.DenseLayer(l1_size, num_units=80, W=nn_plankton.Orthogonal('relu'), b=nn.init.Constant(0.1))
    l3_size = nn.layers.DenseLayer(l2_size, num_units=data.num_classes, W=nn_plankton.Orthogonal(), b=nn.init.Constant(0.1), nonlinearity=None)
l1 = nn_plankton.NonlinLayer(l0, T.log)
ltot = nn.layers.ElemwiseSumLayer([l1, l3_size])
# norm_by_sum = lambda x: x / x.sum(1).dimshuffle(0, "x")
lout = nn_plankton.NonlinLayer(ltot, nonlinearity=T.nnet.softmax)
return [l0, l0_size], lout
def build_objective(l_ins, l_out):
reg_param = 0.0002
alpha = 0. # 0 -> L2 1-> L1
print "regu", reg_param, alpha
    # lambda_reg = 0.005
params = nn.layers.get_all_non_bias_params(l_out)
# reg_term = sum(T.sum(p**2) for p in params)
L2 = sum(T.sum(p**2) for p in params)
L1 = sum(T.sum(T.abs_(p)) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + reg_param*(alpha * L1 + (1-alpha) * L2)
    return nn.objectives.Objective(l_out, loss_function=loss)
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Web test-specific impl of the unexpected passes' queries module."""
import os
import posixpath
from blinkpy.web_tests.stale_expectation_removal import constants
from unexpected_passes_common import queries as queries_module
# The target number of results/rows per query when running in large query mode.
# Higher values = longer individual query times and higher chances of running
# out of memory in BigQuery. Lower values = more parallelization overhead and
# more issues with rate limit errors.
TARGET_RESULTS_PER_QUERY = 20000
# This query gets us all results for tests that have had results with a
# Failure, Timeout, or Crash expectation in the past |@num_samples| builds on
# |@builder_name|. Whether these are CI or try results depends on whether
# |builder_type| is "ci" or "try".
BQ_QUERY_TEMPLATE = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM `chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
WHERE
exported.realm = "chromium:{builder_type}"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT @num_builds
),
results AS (
SELECT
exported.id,
test_id,
status,
duration,
(
SELECT value
FROM tr.tags
WHERE key = "step_name") as step_name,
(
SELECT value
FROM tr.tags
WHERE key = "web_tests_base_timeout") as timeout,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "typ_tag") as typ_tags,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "web_tests_used_expectations_file") as expectation_files
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
{test_filter_clause}
)
SELECT *
FROM results
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
# Very similar to above, but used to get the names of tests that are of
# interest for use as a filter.
TEST_FILTER_QUERY_TEMPLATE = """\
WITH
builds AS (
SELECT
DISTINCT exported.id build_inv_id,
partition_time
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
WHERE
exported.realm = "chromium:{builder_type}"
AND STRUCT("builder", @builder_name) IN UNNEST(variant)
ORDER BY partition_time DESC
LIMIT 50
),
results AS (
SELECT
exported.id,
test_id,
ARRAY(
SELECT value
FROM tr.tags
WHERE key = "raw_typ_expectation") as typ_expectations
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr,
builds b
WHERE
exported.id = build_inv_id
AND status != "SKIP"
)
SELECT DISTINCT r.test_id
FROM results r
WHERE
"Failure" IN UNNEST(typ_expectations)
OR "Crash" IN UNNEST(typ_expectations)
OR "Timeout" IN UNNEST(typ_expectations)
"""
ACTIVE_BUILDER_QUERY_TEMPLATE = """\
WITH
builders AS (
SELECT
(
SELECT value
FROM tr.variant
WHERE key = "builder") as builder_name
FROM
`chrome-luci-data.chromium.blink_web_tests_{builder_type}_test_results` tr
)
SELECT DISTINCT builder_name
FROM builders
"""
KNOWN_TEST_ID_PREFIXES = [
'ninja://:blink_web_tests/',
'ninja://:webgpu_blink_web_tests',
]
# The default timeout of most web tests is 6 seconds, so use that if we happen
# to get a result that doesn't report its own timeout.
DEFAULT_TIMEOUT = 6
class WebTestBigQueryQuerier(queries_module.BigQueryQuerier):
def _ConvertJsonResultToResultObject(self, json_result):
result = super(WebTestBigQueryQuerier,
self)._ConvertJsonResultToResultObject(json_result)
result.SetDuration(json_result['duration'], json_result['timeout']
or DEFAULT_TIMEOUT)
return result
def _GetRelevantExpectationFilesForQueryResult(self, query_result):
# Files in the query are either relative to the web tests directory or
# are an absolute path. The paths are always POSIX-style. We don't
# handle absolute paths since those typically point to temporary files
        # which will not exist locally.
filepaths = []
for f in query_result.get('expectation_files', []):
if posixpath.isabs(f):
continue
f = f.replace('/', os.sep)
            f = os.path.join(constants.WEB_TEST_ROOT_DIR, f)
filepaths.append(f)
return filepaths
def _ShouldSkipOverResult(self, result):
# WebGPU web tests are currently unsupported for various reasons.
return 'webgpu/cts.html' in result['test_id']
def _GetQueryGeneratorForBuilder(self, builder, builder_type):
# Look for all tests.
if not self._large_query_mode:
return WebTestFixedQueryGenerator(builder_type, '')
query = TEST_FILTER_QUERY_TEMPLATE.format(builder_type=builder_type)
query_results = self._RunBigQueryCommandsForJsonOutput(
query, {'': {
'builder_name': builder
}})
test_ids = ['"%s"' % r['test_id'] for r in query_results]
if not test_ids:
return None
# Only consider specific test cases that were found to have active
# expectations in the above query. Also perform any initial query
# splitting.
target_num_ids = TARGET_RESULTS_PER_QUERY / self._num_samples
return WebTestSplitQueryGenerator(builder_type, test_ids,
target_num_ids)
def _StripPrefixFromTestId(self, test_id):
# Web test IDs provided by ResultDB are the test name known by the test
# runner prefixed by one of the following:
# "ninja://:blink_web_tests/"
# "ninja://:webgpu_blink_web_tests/"
for prefix in KNOWN_TEST_ID_PREFIXES:
if test_id.startswith(prefix):
return test_id.replace(prefix, '')
raise RuntimeError('Unable to strip prefix from test ID %s' % test_id)
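    # Illustrative (hypothetical test path):
    #     _StripPrefixFromTestId('ninja://:blink_web_tests/fast/css/a.html')
    #     -> 'fast/css/a.html'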
def _GetActiveBuilderQuery(self, builder_type):
return ACTIVE_BUILDER_QUERY_TEMPLATE.format(builder_type=builder_type)
class WebTestFixedQueryGenerator(queries_module.FixedQueryGenerator):
def GetQueries(self):
return QueryGeneratorImpl(self.GetClauses(), self._builder_type)
class WebTestSplitQueryGenerator(queries_module.SplitQueryGenerator):
def GetQueries(self):
return QueryGeneratorImpl(self.GetClauses(), self._builder_type)
def QueryGeneratorImpl(test_filter_clauses, builder_type):
queries = []
for tfc in test_filter_clauses:
queries.append(
BQ_QUERY_TEMPLATE.format(builder_type=builder_type,
test_filter_clause=tfc))
return queries
|
"""
Copyright 2017-present, Airbnb Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file is the entry point for AWS Lambda.
"""
from streamalert.scheduled_queries.command.application import ScheduledQueries
def handler(event, _):
    return ScheduledQueries().run(event)
|
""" Query modules mapping functions to their query strings
structured:
module_name { query_string: function_for_query }
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import *
import sys
import os
import math
import datetime
import logging
# logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=logging.DEBUG)
import random
from uuid import UUID
# Our imports
from emission.core.get_database import get_section_db, get_trip_db, get_routeCluster_db, get_alternatives_db
from . import trip_old as trip
# 0763de67-f61e-3f5d-90e7-518e69793954
# 0763de67-f61e-3f5d-90e7-518e69793954_20150421T230304-0700_0
# helper for getCanonicalTrips
def get_clusters_info(uid):
c_db = get_routeCluster_db()
s_db = get_section_db()
clusterJson = c_db.find_one({"clusters":{"$exists":True}, "user": uid})
if clusterJson is None:
return []
c_info = []
clusterSectionLists= list(clusterJson["clusters"].values())
logging.debug( "Number of section lists for user %s is %s" % (uid, len(clusterSectionLists)))
for sectionList in clusterSectionLists:
first = True
logging.debug( "Number of sections in sectionList for user %s is %s" % (uid, len(sectionList)))
if (len(sectionList) == 0):
# There's no point in returning this cluster, let's move on
continue
distributionArrays = [[] for _ in range(5)]
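        # Index map for distributionArrays (mirrors the appends below):
        # 0 = start datetimes, 1 = end datetimes, 2 = start points,
        # 3 = end points, 4 = confirmed modes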
for section in sectionList:
section_json = s_db.find_one({"_id":section})
if first:
representative_trip = section_json
first = False
appendIfPresent(distributionArrays[0], section_json, "section_start_datetime")
appendIfPresent(distributionArrays[1], section_json, "section_end_datetime")
appendIfPresent(distributionArrays[2], section_json, "section_start_point")
appendIfPresent(distributionArrays[3], section_json, "section_end_point")
appendIfPresent(distributionArrays[4], section_json, "confirmed_mode")
c_info.append((distributionArrays, representative_trip))
return c_info
def appendIfPresent(list,element,key):
if element is not None and key in element:
list.append(element[key])
else:
logging.debug("not appending element %s with key %s" % (element, key))
class AlternativesNotFound(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
#returns the top trips for the user, defaulting to the top 10 trips
def getCanonicalTrips(uid, get_representative=False): # number returned isn't used
"""
uid is a UUID object, not a string
"""
# canonical_trip_list = []
# x = 0
# if route clusters return nothing, then get common routes for user
#clusters = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
# c = get_routeCluster_db().find_one({'$and':[{'user':uid},{'method':'lcs'}]})
logging.debug('UUID for canonical %s' % uid)
info = get_clusters_info(uid)
cluster_json_list = []
for (cluster, rt) in info:
json_dict = dict()
json_dict["representative_trip"] = rt
json_dict["start_point_distr"] = cluster[2]
json_dict["end_point_distr"] = cluster[3]
json_dict["start_time_distr"] = cluster[0]
json_dict["end_time_distr"] = cluster[1]
json_dict["confirmed_mode_list"] = cluster[4]
cluster_json_list.append(json_dict)
toRet = cluster_json_list
return toRet.__iter__()
#returns all trips to the user
def getAllTrips(uid):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
query = {'user_id':uid, 'type':'move'}
return get_trip_db().find(query)
def getAllTrips_Date(uid, dys):
#trips = list(get_trip_db().find({"user_id":uid, "type":"move"}))
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
return get_trip_db().find(query)
#returns all trips with no alternatives to the user
def getNoAlternatives(uid):
# If pipelineFlags exists then we have started alternatives, and so have
# already scheduled the query. No need to reschedule unless the query fails.
# TODO: If the query fails, then remove the pipelineFlags so that we will
# reschedule.
query = {'user_id':uid, 'type':'move', 'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
def getNoAlternativesPastMonth(uid):
d = datetime.datetime.now() - datetime.timedelta(days=30)
query = {'user_id':uid, 'type':'move',
'trip_start_datetime':{"$gt":d},
'pipelineFlags': {'$exists': False}}
return get_trip_db().find(query)
# Returns the trips that are suitable for training
# Currently this is:
# - trips that have alternatives, and
# - have not yet been included in a training set
def getTrainingTrips(uid):
    return getTrainingTrips_Date(uid, 30)
    # Unreachable fallback retained from an earlier version:
    # query = {'user_id':uid, 'type':'move'}
    # return get_trip_db().find(query)
def getTrainingTrips_Date(uid, dys):
d = datetime.datetime.now() - datetime.timedelta(days=dys)
query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}, "pipelineFlags":{"$exists":True}}
#query = {'user_id':uid, 'type':'move','trip_start_datetime':{"$gt":d}}
#print get_trip_db().count_documents(query)
return get_trip_db().find(query)
def getAlternativeTrips(trip_id):
#TODO: clean up datetime, and queries here
#d = datetime.datetime.now() - datetime.timedelta(days=6)
#query = {'trip_id':trip_id, 'trip_start_datetime':{"$gt":d}}
query = {'trip_id':trip_id}
alternatives = get_alternatives_db().find(query)
    count = get_alternatives_db().count_documents(query)
    if count > 0:
        logging.debug("Number of alternatives for trip %s is %d" % (trip_id, count))
        return alternatives
raise AlternativesNotFound("No Alternatives Found")
def getRecentTrips(uid):
raise NotImplementedError()
def getTripsThroughMode(uid):
raise NotImplementedError()
modules = {
# Trip Module
'trips': {
'get_canonical': getCanonicalTrips,
'get_all': getAllTrips,
'get_no_alternatives': getNoAlternatives,
        'get_no_alternatives_past_month': getNoAlternativesPastMonth,
'get_most_recent': getRecentTrips,
'get_trips_by_mode': getTripsThroughMode},
# Utility Module
'utility': {
'get_training': getTrainingTrips
},
# Recommender Module
'recommender': {
        'get_improve': getCanonicalTrips
},
#Perturbation Module
'perturbation': {},
#Alternatives Module
# note: uses a different collection than section_db
'alternatives': {
'get_alternatives': getAlternativeTrips
}
}
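
# (Illustration only, not part of the original module.) A minimal sketch of
# driving the `modules` dispatch table above; it assumes `uid` is a UUID
# object, as getCanonicalTrips requires. The function name is hypothetical.
def _example_dispatch(uid):
    get_canonical = modules['trips']['get_canonical']
    # Each entry yielded is a dict built in getCanonicalTrips above.
    for cluster_json in get_canonical(uid):
        logging.debug("canonical cluster with %d start times"
                      % len(cluster_json["start_time_distr"]))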
|
import argparse
import datetime
import imutils
import numpy as np
import time
import csv
import cv2
import os.path
#define variable
click_frame = False
divide_x = 0
divide_y = 0
channel_A = 0
channel_B = 0
area_A = 0
area_B = 0
#division fuction (divide_frame)
def divide_frame(event,x,y,flags,param):
global click_frame
global divide_x,divide_y
global shape
if click_frame == False and event == cv2.EVENT_LBUTTONDOWN:
click_frame = True
divide_x = x
divide_y = y
print("First frame selected")
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=100, help="minimum area size")
#ap.add_argument("-s","--shape",type=str,default="rectangle",help="shape of test arena")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
camera = cv2.VideoCapture(0)
time.sleep(0.25)
else:
camera = cv2.VideoCapture(args.get("video", None))
fps = camera.get(cv2.cv.CV_CAP_PROP_FPS)
frame_count = 0
firstFrame = None
#Creating window and initializing mouse callback for division
cv2.namedWindow("Security Feed")
cv2.setMouseCallback("Security Feed",divide_frame)
# After selecting firstFrame no tracking should occur for 5s
#def relay(event,flags,param)
# while (frame_count/fps) < 5:
# break
while True:
# grab the current frame and initialize the occupied/unoccupied"rectangle"
# text
(grabbed, frame) = camera.read()
text = "Unoccupied"
# if the frame could not be grabbed, then we have reached the end
# of the video
if not grabbed:
break
# resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    # frame dimensions are needed for the putText positions further below
    (height, width) = frame.shape[:2]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if firstFrame is None:
firstFrame = gray
cv2.imshow("Security Feed", frame)
while click_frame == False:
print("Selected Image")
cv2.waitKey(25)
continue
frame_count += 1
# compute the absolute difference between the current frame and
# first frame
frameDelta = cv2.absdiff(firstFrame, gray)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
thresh = cv2.dilate(thresh, None, iterations=2)
(cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# loop over the contours
for c in cnts:
# if the contour is too small, ignore it
if cv2.contourArea(c) < args["min_area"]:
            continue
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = cv2.boundingRect(c)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
text = "Occupied"
fish_x = x+w/2
fish_y = y+h/2
if fish_x < divide_x and fish_y < divide_y:
channel_A += 1
        if fish_x > divide_x and fish_y < divide_y:
area_A += 1
if fish_x < divide_x and fish_y > divide_y:
channel_B += 1
if fish_x > divide_x and fish_y > divide_y:
area_B += 1
#division lines
#tags
fontsize = 1
thickness = 1
cv2.putText(frame,"{0:.2f}".format(fps)+" fps",(25,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,255)
cv2.putText(frame,"{0:.2f}".format(channel_A/fps),(divide_x-width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(channel_B/fps),(divide_x-width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(area_A/fps),(divide_x+width/4,divide_y-height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(area_B/fps),(divide_x+width/4,divide_y+height/4),cv2.FONT_HERSHEY_SIMPLEX,fontsize,(255,255,255),thickness)
cv2.putText(frame,"{0:.2f}".format(frame_count/fps)+" time (s)",(divide_x+width/4,25),cv2.FONT_HERSHEY_SIMPLEX,0.5,(0,255,0))
# show the frame and record if the user presses a key
cv2.imshow("Security Feed", frame)
# cv2.imshow("Thresh", thresh)
# cv2.imshow("Frame Delta", frameDelta)
key = cv2.waitKey(1) & 0xFF
# if the `q` key is pressed, break from the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
#print data
print("Total Time [s]: "+"{0:.2f}".format(frame_count/fps))
print("Channel_A [s]: "+"{0:.2f}".format(channel_A/fps))
print("Channel_B [s]: "+"{0:.2f}".format(channel_B/fps))
print("Area_A [s]: "+"{0:.2f}".format(area_A/fps))
print("Area_B [s]: "+"{0:.2f}".format(area_B/fps))
# Print data to file (data.csv)
# Write file and header if file does not already exist
# If file exists data is inserted in a new row and no header is added
# lineterminator = '\n' to remove blank line between rows when program is restarted
file_exists=os.path.isfile("data.csv")
with open('data.csv','a') as csvfile:
dw=csv.DictWriter(csvfile,delimiter=',',fieldnames=["File","Total Time","Channel_A","Channel_B","Area_A","Area_B"],lineterminator='\n')
writer=csv.writer(csvfile)
if file_exists == True:
writer.writerow([args.get("video"),frame_count/fps,channel_A/fps,channel_B/fps,area_A/fps,area_B/fps])
else:
dw.writeheader()
writer.writerow([args.get("video"),frame_count/fps,channel_A/fps,channel_B/fps,area_A/fps,area_B/fps])
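
# (Illustration only.) A hedged sketch of reading data.csv back with the csv
# module already imported above; the header row is skipped if present.
def _load_results(path="data.csv"):
    rows = []
    if os.path.isfile(path):
        with open(path) as csvfile:
            for row in csv.reader(csvfile):
                if row and row[0] != "File":
                    rows.append(row)
    return rows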
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Luis Rodriguez <luis.rodriguez@opendeusto.es>
#
class ConfigureError(Exception):
""" Configure error of any kind. """
pass
class PermanentConfigureError(ConfigureError):
""" Configure error that would most likely occur again should we retry """
def __str__(self):
return "PermanentConfigureError()"
class TemporaryConfigureError(ConfigureError):
""" Configure error that is likely to not be permanent. Server will retry whenever this is received. """
def __str__(self):
return "TemporaryConfigureError()"
class UserManager(object):
def __init__(self, cfg_manager):
"""
Creates the UserManager.
@param cfg_manager Config Manager which will be used to read configuration parameters
"""
self.cfg = cfg_manager
self.cancelled = False
def configure(self, sid):
"""
Configures the Virtual Machine for use.
        @note This method may block for a long time. It might hence be advisable to account for this delay
and to call it from a worker thread.
@note Implementations might require additional information, which should generally be provided
through the configuration script and accessed through the UserManager's config reader.
@param sid Unique session id of the user.
@return None
        @raise ConfigureError If the configure attempt failed. The ConfigureError raised should be either
        a PermanentConfigureError or a TemporaryConfigureError. Should a different kind of exception be
        raised, however, it will be considered permanent.
"""
pass
def cancel(self):
self.cancelled = True
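
# (Illustration only, not part of the distribution.) A minimal sketch of a
# concrete UserManager honouring the configure() contract documented above:
# transient failures raise TemporaryConfigureError so the server retries,
# unrecoverable ones raise PermanentConfigureError. _apply_session is a
# hypothetical helper.
class _ExampleUserManager(UserManager):
    def configure(self, sid):
        if self.cancelled:
            raise PermanentConfigureError("configuration was cancelled")
        try:
            self._apply_session(sid)  # hypothetical VM setup step
        except IOError:
            # e.g. the VM is still booting; worth retrying later
            raise TemporaryConfigureError("VM not reachable yet")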
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the
# OpenSSL library under certain conditions as described in each
# individual source file, and distribute linked combinations
# including the two.
# You must obey the GNU General Public License in all respects
# for all of the code used other than OpenSSL. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you
# do not wish to do so, delete this exception statement from your
# version. If you delete this exception statement from all source
# files in the program, then also delete it here.
import rpcServer
import up2dateErrors
import capabilities
import sys
import xmlrpclib
import OpenSSL
class _DoCallWrapper(object):
"""
A callable object that will handle multiple levels of attributes,
and catch exceptions.
"""
def __init__(self, server, method_name):
self._server = server
self._method_name = method_name
def __getattr__(self, method_name):
""" Recursively build up the method name to pass to the server. """
return _DoCallWrapper(self._server,
"%s.%s" % (self._method_name, method_name))
def __call__(self, *args, **kwargs):
""" Call the method. Catch faults and translate them. """
method = getattr(self._server, self._method_name)
try:
return rpcServer.doCall(method, *args, **kwargs)
except xmlrpclib.Fault:
raise (self.__exception_from_fault(sys.exc_info()[1]), None, sys.exc_info()[2])
except OpenSSL.SSL.Error:
# TODO This should probably be moved to rhnlib and raise an
# exception that subclasses OpenSSL.SSL.Error
# TODO Is there a better way to detect cert failures?
error = str(sys.exc_info()[1])
error = error.strip("[()]")
pieces = error.split(',')
message = ""
if len(pieces) > 2:
message = pieces[2]
elif len(pieces) == 2:
message = pieces[1]
message = message.strip(" '")
            if message == 'certificate verify failed':
raise (up2dateErrors.SSLCertificateVerifyFailedError(), None, sys.exc_info()[2])
else:
raise (up2dateErrors.NetworkError(message), None, sys.exc_info()[2])
def __exception_from_fault(self, fault):
if fault.faultCode == -3:
# This username is already taken, or the password is incorrect.
            exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -2:
# Invalid username and password combination.
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -110:
# Account is disabled
exception = up2dateErrors.AuthenticationOrAccountCreationError(fault.faultString)
elif fault.faultCode == -1:
exception = up2dateErrors.UnknownMethodException(fault.faultString)
elif fault.faultCode == -13:
# Username is too short.
exception = up2dateErrors.LoginMinLengthError(fault.faultString)
elif fault.faultCode == -14:
# too short password
exception = up2dateErrors.PasswordMinLengthError(
fault.faultString)
elif fault.faultCode == -15:
# bad chars in username
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -16:
# Invalid product registration code.
# TODO Should this really be a validation error?
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -19:
# invalid
exception = up2dateErrors.NoBaseChannelError(fault.faultString)
elif fault.faultCode == -31:
# No entitlement
exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
elif fault.faultCode == -36:
# rhnException.py says this means "Invalid action."
# TODO find out which is right
exception = up2dateErrors.PasswordError(fault.faultString)
elif abs(fault.faultCode) == 49:
exception = up2dateErrors.AbuseError(fault.faultString)
elif abs(fault.faultCode) == 60:
exception = up2dateErrors.AuthenticationTicketError(fault.faultString)
elif abs(fault.faultCode) == 74:
exception = up2dateErrors.RegistrationDeniedError()
elif abs(fault.faultCode) == 105:
exception = up2dateErrors.RhnUuidUniquenessError(fault.faultString)
elif fault.faultCode == 99:
exception = up2dateErrors.DelayError(fault.faultString)
elif abs(fault.faultCode) == 91:
exception = up2dateErrors.InsuffMgmntEntsError(fault.faultString)
elif fault.faultCode == -106:
# Invalid username.
exception = up2dateErrors.ValidationError(fault.faultString)
elif fault.faultCode == -600:
# Invalid username.
exception = up2dateErrors.InvalidRegistrationNumberError(fault.faultString)
elif fault.faultCode == -601:
# No entitlements associated with given hardware info
exception = up2dateErrors.NotEntitlingError(fault.faultString)
elif fault.faultCode == -602:
# No entitlements associated with reg num
exception = up2dateErrors.NotEntitlingError(fault.faultString)
elif fault.faultCode == -2001 or fault.faultCode == -700:
exception = up2dateErrors.AuthenticationOrAccountCreationError(
fault.faultString)
elif fault.faultCode == -701:
exception = up2dateErrors.PasswordMaxLengthError(
fault.faultString)
elif fault.faultCode == -61:
exception = up2dateErrors.ActivationKeyUsageLimitError(
fault.faultString)
elif fault.faultCode == -5:
exception = up2dateErrors.UnableToCreateUser(
fault.faultString)
else:
exception = up2dateErrors.CommunicationError(fault.faultString)
return exception
class RhnServer(object):
"""
An rpc server object that calls doCall for you, and catches lower
level exceptions
"""
def __init__(self, serverOverride=None, timeout=None):
self._server = rpcServer.getServer(serverOverride=serverOverride,
timeout=timeout)
self._capabilities = None
def __get_capabilities(self):
if self._capabilities is None:
headers = self._server.get_response_headers()
if headers is None:
self.registration.welcome_message()
headers = self._server.get_response_headers()
self._capabilities = capabilities.Capabilities()
self._capabilities.populate(headers)
return self._capabilities
capabilities = property(__get_capabilities)
def add_header(self, key, value):
self._server.add_header(key, value)
def __getattr__(self, method_name):
""" Return a callable object that will do the work for us. """
return _DoCallWrapper(self._server, method_name)
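
# (Illustration only; not in the original file.) A hedged sketch of how
# RhnServer's __getattr__ chaining is used: attribute access builds the dotted
# XML-RPC method name, and the final call goes through rpcServer.doCall with
# fault translation by _DoCallWrapper.
def _example_call():
    server = RhnServer()
    # Resolves to the remote method "registration.welcome_message"; faults
    # surface as up2dateErrors exceptions.
    return server.registration.welcome_message()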
|
from setuptools import setup
setup(
name="sgf",
version="0.5",
description="Python library for reading and writing Smart Game Format",
license="MIT",
url="http://github.com/jtauber/sgf",
author="James Tauber",
author_email="jtauber@jtauber.com",
py_modules=["sgf"],
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License", |
"Programming Language :: Python :: 2.7",
"Top | ic :: Games/Entertainment :: Board Games",
"Topic :: Utilities",
],
)
|
    detachMedium(ctx, mach.id, dvd)
return 0
def mountIsoCmd(ctx, args):
if (len(args) < 3):
print "usage: mountIso vm iso controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
loc = args[2]
try:
        dvd = vbox.openMedium(loc, ctx['global'].constants.DeviceType_DVD, ctx['global'].constants.AccessMode_ReadOnly, False)
except:
print "no DVD with path %s registered" % (loc)
return 0
if len(args) > 3:
ctr = args[3]
(port, slot) = args[4].split(":")
else:
# autodetect controller and location, just find first controller with media == DVD
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdExistingVm(ctx, mach, 'mountiso', [ctr, port, slot, dvd, True])
return 0
def unmountIsoCmd(ctx, args):
if (len(args) < 2):
print "usage: unmountIso vm controller port:slot"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
vbox = ctx['vb']
if len(args) > 3:
ctr = args[2]
(port, slot) = args[3].split(":")
else:
# autodetect controller and location, just find first controller with media == DVD
[ctr, port, slot] = findDevOfType(ctx, mach, ctx['global'].constants.DeviceType_DVD)
cmdExistingVm(ctx, mach, 'mountiso', [ctr, port, slot, None, True])
return 0
def attachCtr(ctx, mach, args):
[name, bus, ctrltype] = args
ctr = mach.addStorageController(name, bus)
if ctrltype != None:
ctr.controllerType = ctrltype
def attachCtrCmd(ctx, args):
if (len(args) < 4):
print "usage: attachCtr vm cname bus <type>"
return 0
if len(args) > 4:
ctrltype = enumFromString(ctx, 'StorageControllerType', args[4])
if ctrltype == None:
print "Controller type %s unknown" % (args[4])
return 0
else:
ctrltype = None
mach = argsToMach(ctx, args)
if mach is None:
return 0
bus = enumFromString(ctx, 'StorageBus', args[3])
if bus is None:
print "Bus type %s unknown" % (args[3])
return 0
name = args[2]
cmdClosedVm(ctx, mach, attachCtr, [name, bus, ctrltype])
return 0
def detachCtrCmd(ctx, args):
if (len(args) < 3):
print "usage: detachCtr vm name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
ctr = args[2]
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.removeStorageController(ctr))
return 0
def usbctr(ctx, mach, console, args):
if (args[0]):
console.attachUSBDevice(args[1])
else:
console.detachUSBDevice(args[1])
def attachUsbCmd(ctx, args):
if (len(args) < 3):
print "usage: attachUsb vm deviceuid"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
dev = args[2]
cmdExistingVm(ctx, mach, 'guestlambda', [usbctr, True, dev])
return 0
def detachUsbCmd(ctx, args):
if (len(args) < 3):
print "usage: detachUsb vm deviceuid"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
dev = args[2]
cmdExistingVm(ctx, mach, 'guestlambda', [usbctr, False, dev])
return 0
def guiCmd(ctx, args):
if (len(args) > 1):
print "usage: gui"
return 0
binDir = ctx['global'].getBinDir()
vbox = os.path.join(binDir, 'VirtualBox')
try:
os.system(vbox)
except KeyboardInterrupt:
# to allow interruption
pass
return 0
def shareFolderCmd(ctx, args):
if (len(args) < 4):
print "usage: shareFolder vm path name <writable> <persistent>"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
path = args[2]
name = args[3]
writable = False
persistent = False
if len(args) > 4:
for a in args[4:]:
if a == 'writable':
writable = True
if a == 'persistent':
persistent = True
if persistent:
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.createSharedFolder(name, path, writable), [])
else:
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: console.createSharedFolder(name, path, writable)])
return 0
def unshareFolderCmd(ctx, args):
if (len(args) < 3):
print "usage: unshareFolder vm name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
name = args[2]
found = False
for sf in ctx['global'].getArray(mach, 'sharedFolders'):
if sf.name == name:
cmdClosedVm(ctx, mach, lambda ctx, mach, args: mach.removeSharedFolder(name), [])
found = True
break
if not found:
cmdExistingVm(ctx, mach, 'guestlambda', [lambda ctx, mach, console, args: console.removeSharedFolder(name)])
return 0
def snapshotCmd(ctx, args):
if (len(args) < 2 or args[1] == 'help'):
print "Take snapshot: snapshot vm take name <description>"
print "Restore snapshot: snapshot vm restore name"
print "Merge snapshot: snapshot vm merge name"
return 0
mach = argsToMach(ctx, args)
if mach is None:
return 0
cmd = args[2]
if cmd == 'take':
if (len(args) < 4):
print "usage: snapshot vm take name <description>"
return 0
name = args[3]
if (len(args) > 4):
desc = args[4]
else:
desc = ""
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.takeSnapshot(name, desc)))
return 0
if cmd == 'restore':
if (len(args) < 4):
print "usage: snapshot vm restore name"
return 0
name = args[3]
snap = mach.findSnapshot(name)
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.restoreSnapshot(snap)))
return 0
if cmd == 'restorecurrent':
if (len(args) < 4):
print "usage: snapshot vm restorecurrent"
return 0
snap = mach.currentSnapshot()
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.restoreSnapshot(snap)))
return 0
if cmd == 'delete':
if (len(args) < 4):
print "usage: snapshot vm delete name"
return 0
name = args[3]
snap = mach.findSnapshot(name)
cmdAnyVm(ctx, mach, lambda ctx, mach, console, args: progressBar(ctx, console.deleteSnapshot(snap.id)))
return 0
print "Command '%s' is unknown" % (cmd)
return 0
def natAlias(ctx, mach, nicnum, nat, args=[]):
"""This command shows/alters NAT's alias settings.
usage: nat <vm> <nicnum> alias [default|[log] [proxyonly] [sameports]]
default - set settings to default values
log - switch on alias logging
proxyonly - switch proxyonly mode on
sameports - enforces NAT using the same ports
"""
alias = {
'log': 0x1,
'proxyonly': 0x2,
'sameports': 0x4
}
if len(args) == 1:
first = 0
msg = ''
for aliasmode, aliaskey in alias.iteritems():
if first == 0:
first = 1
else:
msg += ', '
            if int(nat.aliasMode) & aliaskey:
                msg += '%s: %s' % (aliasmode, 'on')
            else:
                msg += '%s: %s' % (aliasmode, 'off')
msg += ')'
        return (0, [msg])
else:
nat.aliasMode = 0
if 'default' not in args:
for a in range(1, len(args)):
if not alias.has_key(args[a]):
print 'Invalid alias mode: ' + args[a]
print natAlias.__doc__
                return (1, None)
nat.aliasMode = int(nat.aliasMode) | alias[args[a]]
return (0, None)
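
# (Illustration only.) A standalone sketch of the bitmask decoding natAlias
# performs on nat.aliasMode, using the same flag values as above.
def _decodeAliasMode(mode):
    flags = {'log': 0x1, 'proxyonly': 0x2, 'sameports': 0x4}
    return dict((name, bool(mode & bit)) for name, bit in flags.items())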
def natSettings(ctx, mach, nicnum, nat, args):
"""This command shows/alters NAT settings.
usage: nat <vm> <nicnum> settings [<mtu> [[<socsndbuf> <sockrcvbuf> [<tcpsndwnd> <tcprcvwnd>]]]]
|
import glob
json_files = glob.glob("tests/**/output/**/*.json", recursive=True)
html_files = glob.glob("tests/**/out | put/**/*.html", recursive=True)
html_list = ""
for f_ in html_files:
    html_list += '\t<li><a href="{}">{}</a></li>\n'.format(
f_[6:],
f_.split(".")[-2],
)
json_list = ""
for f_ in json_files:
    json_list += '\t<li><a href="{}">{}</a></li>\n'.format(
f_[6:],
f_.split(".")[-2],
)
html_file = """
<html>
<body>
<h3>HTML</h3>
<ul>
{}
</ul>
<br/><br/>
<h3>JSON</h3>
<ul>
{}
</ul>
</body>
</html>
""".format(
html_list, json_list
)
print(html_file)
|
from models import Comment
from ..user.serializers import UserProfileSerializer
from rest_framework import serializers
class CommentSerializer(serializers.ModelSerializer):
    username = serializers.SerializerMethodField()
class Meta:
model = Comment
fields = ("id","user","username", "topic","comment","comment_html", "action", "date","is_removed","is_modified","ip_address",
"modified_count","likes_count")
read_only_fields = ("user", | "comment_html","action","date","is_removed","is_modified","modified_count","likes_count")
def get_username(self,obj):
return obj.user.username
def create(self,**kwargs):
comment = Comment.objects.create(user = kwargs['user'],**self.validated_data)
return comment
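
# (Illustration only.) A hedged sketch of driving the serializer above from a
# view; `request` and `topic` are hypothetical stand-ins. Note that create()
# above expects the user passed as a keyword argument.
def _example_create_comment(request, topic):
    serializer = CommentSerializer(data={"topic": topic.pk, "comment": "hello"})
    if serializer.is_valid():
        return serializer.create(user=request.user)
    return None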
|
import os.path
import pkgutil
from unittest import TestCase, TestSuite
class TestImports(TestCase):
def __init__(self, mod_name, import_error):
name = f"test_{mod_name.replace('.', '_')}_import"
def run():
try:
__import__(mod_name)
except ImportError as e:
self.assertIsNotNone(import_error)
self.assertEqual(e.msg, import_error)
setattr(self, name, run)
        super().__init__(name)
def load_tests(*_args):
expected_exceptions = {
"blueman.main.NetworkManager": "NM python bindings not found.",
"blueman.main.PulseAudioUtils": "Could not load pulseaudio shared library",
}
test_cases = TestSuite()
home, subpath = os.path.dirname(__file__).rsplit("/test/", 1)
    for package in pkgutil.iter_modules([f"{home}/blueman/{subpath}"], f"blueman.{subpath.replace('/', '.')}."):
test_cases.addTest(TestImports(package.name, expected_exceptions.get(package.name)))
assert test_cases.countTestCases() > 0
return test_cases
|
# coding=utf-8
#
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
REST URI
``http://localhost/mgmt/cm/device/licensing/pool/regkey``
REST Kind
N/A -- HTTP GET returns an error
"""
from f5.bigiq.cm.device.licensing.pool import Pool
from f5.bigiq.resource import OrganizingCollection
class Licensing(OrganizingCollection):
    def __init__(self, device):
super(Licensing, self).__init__(device)
self._meta_data['allowed_lazy_attributes'] = [
Pool
]
|
# -*- coding: utf-8 -*-
#
# Copyright 2018 BhaaL
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert flat XML files to Gettext PO localization files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/flatxml2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import flatxml, po
class flatxml2po:
"""Convert a single XML file to a single PO file."""
SourceStoreClass = flatxml.FlatXMLFile
TargetStoreClass = po.pofile
TargetUnitClass = po.pounit
def __init__(self, inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Initialize the converter."""
self.inputfile = inputfile
self.outputfile = outputfile
self.source_store = self.SourceStoreClass(inputfile,
root_name=root,
value_name=value,
key_name=key,
namespace=ns)
self.target_store = self.TargetStoreClass()
def convert_unit(self, unit):
"""Convert a source format unit to a target format unit."""
target_unit = self.TargetUnitClass.buildfromunit(unit)
return target_unit
def convert_store(self):
"""Convert a single source file to a target format file."""
for source_unit in self.source_store.units:
self.target_store.addunit(self.convert_unit(source_unit))
def run(self):
"""Run the converter."""
self.convert_store()
if self.target_store.isempty():
return 0
self.target_store.serialize(self.outputfile)
return 1
def run_converter(inputfile, outputfile, templatefile=None,
root="root", value="str", key="key", ns=None):
"""Wrapper around the converter."""
return flatxml2po(inputfile, outputfile, templatefile,
root, value, key, ns).run()
formats = {
"xml": ("po", run_converter),
}
def main(argv=None):
    parser = convert.ConvertOptionParser(formats, description=__doc__)
    parser.add_option("-r", "--root", action="store", dest="root",
default="root",
help='name of the XML root element (default: "root")')
parser.add_option("-v", "--value", action="store", dest="value",
default="str",
help='name of the XML value element (default: "str")')
parser.add_option("-k", "--key", action="store", dest="key",
default="key",
help='name of the XML key attribute (default: "key")')
parser.add_option("-n", "--namespace", action="store", dest="ns",
default=None,
help="XML namespace uri (default: None)")
parser.passthrough.append("root")
parser.passthrough.append("value")
parser.passthrough.append("key")
parser.passthrough.append("ns")
parser.run(argv)
if __name__ == "__main__":
main()
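
# (Illustration only, not part of the converter; whether FlatXMLFile accepts
# in-memory file objects is an assumption here.) A minimal sketch of calling
# run_converter directly, with XML matching the default root/value/key names.
def _example_convert():
    from io import BytesIO
    source = BytesIO(b'<root><str key="greeting">Hello</str></root>')
    target = BytesIO()
    run_converter(source, target)
    return target.getvalue()  # serialized PO content, or b"" if no units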
|
# Made by mtrix
import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
qn = "345_MethodToRaiseTheDead"
ADENA = 57
VICTIMS_ARM_BONE = 4274
VICTIMS_THIGH_BONE = 4275
VICTIMS_SKULL = 4276
VICTIMS_RIB_BONE = 4277
VICTIMS_SPINE = 4278
USELESS_BONE_PIECES = 4280
POWDER_TO_SUMMON_DEAD_SOULS = 4281
BILL_OF_IASON_HEINE = 4310
CHANCE = 15
CHANCE2 = 50
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onEvent (self,event,st) :
htmltext = event
if event == "1" :
st.set("cond","1")
st.setState(STARTED)
htmltext = "30970-02.htm"
st.playSound("ItemSound.quest_accept")
elif event == "2" :
            st.set("cond","2")
htmltext = "30970-06.htm"
elif event == "3" :
if st.getQuestItemsCount(ADENA)>=1000 :
st.takeItems(ADENA,1000)
st.giveItems(POWDER_TO_SUMMON_DEAD_SOULS,1)
st.set("cond","3")
htmltext = "30912-03.htm"
st.playSound("ItemSound.quest_itemget")
else :
htmltext = "<html><body>You dont have enough adena!</body></html>"
        elif event == "4" :
htmltext = "30973-02.htm"
st.takeItems(POWDER_TO_SUMMON_DEAD_SOULS,-1)
st.takeItems(VICTIMS_ARM_BONE,-1)
st.takeItems(VICTIMS_THIGH_BONE,-1)
st.takeItems(VICTIMS_SKULL,-1)
st.takeItems(VICTIMS_RIB_BONE,-1)
st.takeItems(VICTIMS_SPINE,-1)
st.set("cond","6")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
if npcId != 30970 and id != STARTED : return htmltext
level = player.getLevel()
cond = st.getInt("cond")
amount = st.getQuestItemsCount(USELESS_BONE_PIECES)
if npcId==30970 :
if id == CREATED :
if level>=35 :
htmltext = "30970-01.htm"
else :
htmltext = "<html><body>(This is a quest that can only be performed by players of level 35 and above.)</body></html>"
st.exitQuest(1)
elif cond==1 and st.getQuestItemsCount(VICTIMS_ARM_BONE) and st.getQuestItemsCount(VICTIMS_THIGH_BONE) and st.getQuestItemsCount(VICTIMS_SKULL) and st.getQuestItemsCount(VICTIMS_RIB_BONE) and st.getQuestItemsCount(VICTIMS_SPINE) :
htmltext = "30970-05.htm"
elif cond==1 and (st.getQuestItemsCount(VICTIMS_ARM_BONE)+st.getQuestItemsCount(VICTIMS_THIGH_BONE)+st.getQuestItemsCount(VICTIMS_SKULL)+st.getQuestItemsCount(VICTIMS_RIB_BONE)+st.getQuestItemsCount(VICTIMS_SPINE)<5) :
htmltext = "30970-04.htm"
elif cond==7 :
htmltext = "30970-07.htm"
st.set("cond","1")
st.giveItems(ADENA,amount*238)
st.giveItems(BILL_OF_IASON_HEINE,st.getRandom(7)+1)
st.takeItems(USELESS_BONE_PIECES,-1)
if npcId==30912 :
if cond == 2 :
htmltext = "30912-01.htm"
st.playSound("ItemSound.quest_middle")
elif cond == 3 :
htmltext = "<html><body>What did the urn say?</body></html>"
elif cond == 6 :
htmltext = "30912-04.htm"
st.set("cond","7")
if npcId==30973 :
if cond==3 :
htmltext = "30973-01.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != STARTED : return
npcId = npc.getNpcId()
random = st.getRandom(100)
if random<=CHANCE :
if not st.getQuestItemsCount(VICTIMS_ARM_BONE) :
st.giveItems(VICTIMS_ARM_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_THIGH_BONE) :
st.giveItems(VICTIMS_THIGH_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_SKULL) :
st.giveItems(VICTIMS_SKULL,1)
elif not st.getQuestItemsCount(VICTIMS_RIB_BONE) :
st.giveItems(VICTIMS_RIB_BONE,1)
elif not st.getQuestItemsCount(VICTIMS_SPINE) :
st.giveItems(VICTIMS_SPINE,1)
if random<=CHANCE2 :
st.giveItems(USELESS_BONE_PIECES,st.getRandom(8)+1)
return
QUEST = Quest(345,qn,"Method To Raise The Dead")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30970)
QUEST.addTalkId(30970)
QUEST.addTalkId(30912)
QUEST.addTalkId(30973)
STARTED.addQuestDrop(30970,VICTIMS_ARM_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_THIGH_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_SKULL,1)
STARTED.addQuestDrop(30970,VICTIMS_RIB_BONE,1)
STARTED.addQuestDrop(30970,VICTIMS_SPINE,1)
STARTED.addQuestDrop(30912,POWDER_TO_SUMMON_DEAD_SOULS,1)
QUEST.addKillId(20789)
QUEST.addKillId(20791)
|
from django.db import models
from django.contrib.auth.models import User
from building.models import Building, Unit
# Create your models here.
class Listing(models.Model):
"""
An option to lease, rent, or sublease a specific Unit
"""
CYCLE_CHOICES = (
('year', 'Year'),
('month', 'Month'),
('week', 'Week'),
('day', 'Day'),
)
#who is listing the unit:
#person = models.ForeignKey(Person)
#might be better to just use a User account
#this should be required (setting blank and null to assist with migrations)
user = models.ForeignKey(User, blank=True, null=True)
#even though the building is available by way of the Unit
#it may be easier to look at building
#especially when limiting search results on a map
#
#also, it may be better to schedule a nightly task to update/cache
#the number of listings that are available in a building
#otherwise that could be an expensive search
#
#this should be required (setting blank and null to assist with migrations)
building = models.ForeignKey(Building, related_name="listings", blank=True, null=True)
#the unit available
#unit = models.ForeignKey(Unit, related_name="listings", blank=True, null=True)
unit = models.ForeignKey(Unit, related_name="listings")
#sublease, standard?
lease_type = models.CharField(max_length=200, default="Standard")
lease_term = models.CharField(max_length=200, default="12 Months")
active = models.BooleanField(default=True)
#duplicating available_start and rent on unit with current listing
#that will make database lookups simpler
#but it will require coordination when adding a new listing.
#optional
available_start = models.DateTimeField()
#might be useful for subleases:
available_end = models.DateTimeField()
#these may be duplicated at the unit level:
#aka rent? (previously cost)
rent = models.FloatField()
    rent_cycle = models.CharField(max_length=10, choices=CYCLE_CHOICES, default="month")
deposit = models.FloatField()
description = models.TextField()
#are pets allowed? if so what kind?
#pets = models.CharField(max_length=200)
#what utilities are included: (to help estimate total cost)
#
#this is set at the building level
#should be consistent within a building,
#and that makes things easier to read if it's not duplicated here:
#TODO:
#application (to apply for lease)
#link to a default one for manager if available
#otherwise allow one to be attached?
#application = models.ForeignKey(BuildingDocument)
#TODO:
#allow photos *(more than 1)* to be submitted for the listing
#but associate them with the unit
added = models.DateTimeField('date published', auto_now_add=True)
updated = models.DateTimeField('date updated', auto_now=True)
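
# (Illustration only.) A hedged sketch of the lookup the comments above have in
# mind: restricting map results to a building's active listings via the
# related_name="listings" reverse relation.
def _example_active_listings(building):
    return building.listings.filter(active=True).order_by("rent")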
|
import numpy as np
from scipy.io import netcdf_file
import bz2
import os
from fnmatch import fnmatch
from numba import jit
@jit
def binsum2D(data, i, j, Nx, Ny):
data_binned = np.zeros((Ny,Nx), dtype=data.dtype)
N = len(data)
for n in range(N):
data_binned[j[n],i[n]] += data[n]
return data_binned
class LatLonAggregator(object):
"""A class for aggregating L2 data into a gridded dataset."""
def __init__(self, dlon=1., dlat=1., lonlim=(-180,180), latlim=(-90,90)):
self.dlon = dlon
self.dlat = dlat
self.lonmin = lonlim[0]
self.lonmax = lonlim[1]
self.latmin = latlim[0]
self.latmax = latlim[1]
# define grids
self.lon = np.arange(self.lonmin, self.lonmax, dlon)
        self.lat = np.arange(self.latmin, self.latmax, dlat)
self.Nx, self.Ny = len(self.lon), len(self.lat)
        self.lonc = self.lon + self.dlon/2
self.latc = self.lat + self.dlat/2
def binsum(self, data, lon, lat):
"""Bin the data into the lat-lon grid.
Returns gridded dataset."""
i = np.digitize(lon.ravel(), self.lon)
j = np.digitize(lat.ravel(), self.lat)
return binsum2D(data.ravel(), i, j, self.Nx, self.Ny)
def zeros(self, dtype=np.dtype('f4')):
return np.zeros((self.Ny, self.Nx), dtype=dtype)
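
# (Illustration only.) A minimal sketch of aggregating synthetic L2 samples
# onto the default 1-degree grid; sample ranges stay inside the grid so
# np.digitize cannot index past the last bin.
def _example_aggregate():
    agg = LatLonAggregator()
    lon = np.random.uniform(-170, 170, 1000)
    lat = np.random.uniform(-80, 80, 1000)
    data = np.ones_like(lon, dtype='f4')
    return agg.binsum(data, lon, lat)  # (Ny, Nx) grid of per-cell sums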
|
# -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/usr/mips64-elf/share/gcc-4.8.4/python'
libdir = '/usr/mips64-elf/mips64-elf/lib/el'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
def test_upgrade_atac_alignment_enrichment_quality_metric_1_2(
    upgrader, atac_alignment_enrichment_quality_metric_1
):
value = upgrader.upgrade(
'atac_alignment_enrichment_quality_metric',
atac_alignment_enrichment_quality_metric_1,
current_version='1',
target_version='2',
)
assert value['schema_version'] == '2'
    assert 'fri_blacklist' not in value
assert value['fri_exclusion_list'] == 0.0013046877081284722
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?pornovoisines\.com/videos/show/(?P<id>\d+)/(?P<display_id>[^/.]+)'
_TEST = {
'url': 'http://www.pornovoisines.com/videos/show/919/recherche-appartement.html',
'md5': '6f8aca6a058592ab49fe701c8ba8317b',
'info_dict': {
'id': '919',
'display_id': 'recherche-appartement',
'ext': 'mp4',
'title': 'Recherche appartement',
'description': 'md5:fe10cb92ae2dd3ed94bb4080d11ff493',
'thumbnail': r're:^https?://.*\.jpg$',
'upload_date': '20140925',
'duration': 120,
'view_count': int,
'average_rating': float,
'categories': ['Débutante', 'Débutantes', 'Scénario', 'Sodomie'],
'age_limit': 18,
'subtitles': {
'fr': [{
'ext': 'vtt',
}]
},
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
settings_url = self._download_json(
            'http://www.pornovoisines.com/api/video/%s/getsettingsurl/' % video_id,
video_id, note='Getting settings URL')['video_settings_url']
settings = self._download_json(settings_url, video_id)['data']
formats = []
for kind, data in settings['variants'].items():
if kind == 'HLS':
                formats.extend(self._extract_m3u8_formats(
data, video_id, ext='mp4', entry_protocol='m3u8_native', m3u8_id='hls'))
elif kind == 'MP4':
for item in data:
formats.append({
'url': item['url'],
'height': item.get('height'),
'bitrate': item.get('bitrate'),
})
self._sort_formats(formats)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
# The webpage has a bug - there's no space between "thumb" and src=
thumbnail = self._html_search_regex(
r'<img[^>]+class=([\'"])thumb\1[^>]*src=([\'"])(?P<url>[^"]+)\2',
webpage, 'thumbnail', fatal=False, group='url')
upload_date = unified_strdate(self._search_regex(
r'Le\s*<b>([\d/]+)', webpage, 'upload date', fatal=False))
duration = settings.get('main', {}).get('duration')
view_count = int_or_none(self._search_regex(
r'(\d+) vues', webpage, 'view count', fatal=False))
average_rating = self._search_regex(
r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
if average_rating:
average_rating = float_or_none(average_rating.replace(',', '.'))
categories = self._html_search_regex(
r'(?s)Catégories\s*:\s*<b>(.+?)</b>', webpage, 'categories', fatal=False)
if categories:
categories = [category.strip() for category in categories.split(',')]
subtitles = {'fr': [{
'url': subtitle,
} for subtitle in settings.get('main', {}).get('vtt_tracks', {}).values()]}
return {
'id': video_id,
'display_id': display_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'average_rating': average_rating,
'categories': categories,
'age_limit': 18,
'subtitles': subtitles,
}
|
# -*- coding: utf-8 -*-
# Copyright 2015 AvanzOsc (http://www.avanzosc.es)
# Copyright 2015-2016 - Pedro M. Baeza <pedro.baeza@tecnativa.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import api, models
class PurchaseOrder(models.Model):
_inherit = 'purchase.order'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
make_po_conditions = {
'partner_id', 'state', 'picking_type_id', 'company_id',
'dest_address_id',
}
# Restrict the empty return for these conditions
if (self.env.context and
self.env.context.get('grouping', 'standard') == 'order' and
make_po_conditions.issubset(set(x[0] for x in args))):
            return self.browse()
        return super(PurchaseOrder, self).search(
args, offset=offset, limit=limit, order=order, count=count)
class PurchaseOrderLine(models.Model):
_inherit = 'purchase.order.line'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
# Restrict the empty return for these conditions
if (self.env.context and
self.env.context.get('grouping', 'standard') == 'line' and
len(args) == 1 and args[0][0] == 'order_id' and
args[0][1] == 'in'):
return self.browse()
return super(PurchaseOrderLine, self).search(
args, offset=offset, limit=limit, order=order, count=count)
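
# (Illustration only.) A hedged sketch of the context switch these overrides
# react to: with grouping='order' and every key in make_po_conditions present
# in the domain, PurchaseOrder.search() short-circuits to an empty recordset,
# forcing procurement to create a new PO instead of reusing one.
def _example_forced_new_po(env, partner, picking_type, company):
    return env['purchase.order'].with_context(grouping='order').search([
        ('partner_id', '=', partner.id),
        ('state', '=', 'draft'),
        ('picking_type_id', '=', picking_type.id),
        ('company_id', '=', company.id),
        ('dest_address_id', '=', False),
    ])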
|
from abc import abstractmethod
from typing import Callable, TypeVar, Protocol
from typing_extensions import runtime_checkable
TSource = TypeVar('TSource')
TResult = TypeVar('TResult')
@runtime_checkable
class Applicative(Protocol[TSource, TResult]):
"""Applicative.
Applicative functors are functors with some extra properties.
Most importantly, they allow you to apply functions inside the
functor (hence the name).
To learn more about Applicative functors:
* http://www.davesquared.net/2012/05/fp-newbie-learns-applicatives.html
"""
@abstractmethod
def apply(self, something):
"""Apply wrapped callable.
Python: apply(self: Applicative, something: Applicative[Callable[[A], B]]) -> Applicative
Haskell: (<*>) :: f (a -> b) -> f a -> f b.
Apply (<*>) is a beefed up fmap. It takes a functor value that
has a function in it and another functor, and extracts that
        function from the first functor and then maps it over the second
one.
"""
raise NotImplementedError
#def __mul__(self, something):
# """(<*>) :: f (a -> b) -> f a -> f b.
# Provide the * as an infix version of apply() since we cannot
# represent the Haskell's <*> operator in Python.
# """
# return self.apply(something)
#def lift_a2(self, func, b):
# """liftA2 :: (Applicative f) => (a -> b -> c) -> f a -> f b -> f c."""
    #    return func % self * b
@classmethod
@abstractmethod
def pure(cls, fn: Callable[[TSource], TResult]) -> 'Applicative[TSource, TResult]':
"""Applicative functor constructor.
Use pure if you're dealing with values in an applicative context
(using them with <*>); otherwise, stick to the default class
constructor.
"""
raise NotImplementedError
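
# (Illustration only, not part of the protocol module.) A hedged minimal
# instance of the protocol above: a box holding one value, where apply()
# unwraps a boxed callable and maps it over the boxed value, e.g.
# _Box(2).apply(_Box.pure(lambda x: x + 1)).value == 3.
class _Box:
    def __init__(self, value):
        self.value = value

    def apply(self, something):
        # something wraps a callable: f (a -> b) -> f a -> f b
        return _Box(something.value(self.value))

    @classmethod
    def pure(cls, fn):
        return cls(fn)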
|
levels = self.spectrum
# First we need to check for the tones.
for noise_pause in self.noise_pauses:
# Determine the indices of the tones in a noise pause
tone_indices, bandwidth_for_tone_criterion = determine_tone_lines(
levels,
self.frequency_resolution,
noise_pause.start,
noise_pause.end,
self.force_tone_without_pause,
self.force_bandwidth_criterion,
)
# If we have indices, ...
if np.any(tone_indices):
# ...then we create a tone object.
noise_pause.tone = create_tone(levels, tone_indices, bandwidth_for_tone_criterion,
weakref.proxy(noise_pause))
return self
def _determine_critical_bands(self):
"""Put a critical band around each of the determined tones."""
for tone in self.tones:
critical_band = self.critical_band_at(tone.center)
tone.critical_band = critical_band
critical_band.tone = weakref.proxy(tone)
return self
def analyse(self):
"""Analyse the noise pauses for tones and put critical bands around each of these tones.
The tones are available via :attr:`tones` and the critical bands via :attr:`critical_bands`.
Per frequency line results are available via :attr:`line_classifier`.
"""
# Determine tones. Puts noise pause starts/ends in classier as well as tone lines
# and lines that are neither tone nor noise.
self._determine_tones()
# Construct line classifier
self._construct_line_classifier()
# Determine critical bands.
self._determine_critical_bands()
return self
def critical_band_at(self, frequency):
"""Put at a critical band at `frequency`.
In order to use this function :attr:`line_classifier` needs to be available,
which means :meth:`analyse` needs to be used first.
"""
return create_critical_band(self.spectrum, self.line_classifier, frequency, self.frequency_resolution,
self.effective_analysis_bandwidth, self.regression_range_factor, self.window)
def plot_spectrum(self):
"""Plot power spectrum."""
spectrum = self.spectrum
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(spectrum.index, spectrum)
ax.set_xlabel('f in Hz')
ax.set_ylabel('L in dB')
return fig
@property
def dominant_tone(self):
"""Most dominant_tone tone.
The most dominant_tone tone is the tone with the highest tonal audibility :math:`L_{ta}`.
"""
try:
return sorted(self.tones, key=lambda x: x.critical_band.tonal_audibility, reverse=True)[0]
except IndexError:
return None
def plot_results(self, noise_pauses=False, tones=True, critical_bands=True):
"""Plot overview of results."""
df = self.frequency_resolution
levels = self.spectrum
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(levels.index, levels)
ax.set_xlabel("$f$ in Hz")
ax.set_ylabel("$L$ in dB")
if noise_pauses:
for pause in self.noise_pauses:
ax.axvspan(pause.start * df, pause.end * df, color='green', alpha=0.05)
if tones:
for tone in self.tones:
ax.axvline(tone.center, color='black', alpha=0.05)
if critical_bands:
for band in self.critical_bands:
ax.axvspan(band.start, band.end, color='yellow', alpha=0.05)
band = self.dominant_tone.critical_band
ax.axvline(band.start, color='red', linewidth=0.1)
ax.axvline(band.end, color='red', linewidth=0.1)
# Limit xrange
if noise_pauses:
_items = list(self.noise_pauses)
elif critical_bands:
_items = list(self.critical_bands)
ax.set_xlim(min(item.start for item in _items), max(item.end for item in _items))
return fig
def overview(self):
"""Print overview of results."""
try:
            cb = self.dominant_tone.critical_band
except AttributeError:
raise ValueError("Cannot show overview (yet). No tones have been determined.")
tones = [("Tone", "{:4.1f} Hz: {:4.1f} dB".format(tone.center, tone.tone_level)) for tone in self.tones]
table = [
("Critical band", "{:4.1f} to {:4.1f} Hz".format(cb.start, cb.end)),
("Masking noise level $L_{pn}$", "{:4.1f} dB | ".format(cb.masking_noise_level)),
("Tonal level $L_{pt}$", "{:4.1f} dB".format(cb.total_tone_level)),
("Dominant tone", "{:4.1f} Hz".format(cb.tone.center)),
("3 dB bandwidth of tone", "{:2.1f}% of {:4.1f}".format(cb.tone.bandwidth_3db / cb.bandwidth * 100.0,
cb.bandwidth)),
("Tonal audibility $L_{ta}$", "{:4.1f} dB".format(cb.tonal_audibility)),
("Adjustment $K_{t}$", "{:4.1f} dB".format(cb.adjustment)),
("Frequency resolution", "{:4.1f} Hz".format(self.frequency_resolution)),
("Effective analysis bandwidth", "{:4.1f} Hz".format(self.effective_analysis_bandwidth)),
]
table += tones
return tabulate(table)
def results_as_dataframe(self):
"""Return results in dataframe."""
data = ((tone.center, tone.tone_level, tone.bandwidth_3db, tone.critical_band.start, tone.critical_band.end,
tone.critical_band.bandwidth, tone.critical_band.regression_slope,
tone.critical_band.regression_intercept, tone.critical_band.masking_noise_level,
tone.critical_band.total_tone_level, tone.critical_band.tonal_audibility,
tone.critical_band.adjustment) for tone in self.tones)
columns = [
'center', 'tone_level', 'bandwidth_3db', 'critical_band_start', 'critical_band_end',
'critical_band_bandwidth', 'regression_slope', 'regression_intercept', 'masking_noise_level',
'total_tone_level', 'tonal_audibility', 'adjustment'
]
return pd.DataFrame(list(data), columns=columns)
class NoisePause:
def __init__(self, start, end, tone=None):
self.start = start
self.end = end
self.tone = tone
def __str__(self):
return "(start={},end={})".format(self.start, self.end)
def __repr__(self):
return "NoisePause{}".format(str(self))
def __iter__(self):
yield self.start
        yield self.end
def _repr_html_(self):
table = [("Start", self.start), ("End", self.end)]
return tabulate(table, tablefmt="html")
def create_tone(levels, tone_lines, bandwidth_for_tone_criterion, noise_pause):
"""Create an instance of Tone."""
center = levels.iloc[tone_lines].idxmax()
tone_level = tones_level(levels.iloc[tone_lines])
return Tone(center, tone_lines, tone_level, noise_pause, bandwidth_for_tone_criterion)
class Tone:
"""Tone."""
def __init__(self, center, tone_lines, tone_level, noise_pause, bandwidth_3db, critical_band=None):
self.center = center
self._tone_lines = tone_lines
self.tone_level = tone_level
self.noise_pause = noise_pause
self.bandwidth_3db = bandwidth_3db
self.critical_band = critical_band
def __str__(self):
return "(center={:4.1f}, levels={:4.1f})".format(self.center, self.tone_level)
def __repr__(self):
return "Tone{}".format(str(self))
def _repr_html_(self):
table = [("Center frequency", "{:4.1f} Hz".format(self.center)),
("Tone level", "{:4.1f} dB".format(self.tone_level))]
return tabulate(table, tablefmt='html')
def create_critical_band(
levels,
line_classifier,
frequency,
frequency_resolution,
effective_analysis_bandwidth,
regression_range_factor,
window,
|
]
step = _ast.Num( n=1 ) # TODO: should be an expression
elif len( call.args ) == 3:
start = call.args[0]
stop = call.args[1]
step = call.args[2]
else:
raise VerilogTranslationError(
'An invalid number of arguments provided to (x)range function!\n',
node.lineno
)
# Must know if the step is negative or positive in order to set the
# correct bound check. This is because of Python's range behavior.
try:
if hasattr( step, '_object' ): step_val = step._object
elif hasattr( step, 'n' ): step_val = step.n
assert step_val != 0
except (UnboundLocalError,AssertionError):
raise VerilogTranslationError(
'An error occurred when translating a "for loop"!\n'
'The "step" parameter to range must be a constant integer value != 0!',
node.lineno
)
node.iter = _ast.Slice( lower=start, upper=stop, step=step )
node.iter.lt_gt = '<' if step_val > 0 else '>'
return node
#-------------------------------------------------------------------------
# ConstantToSlice
#-------------------------------------------------------------------------
class ConstantToSlice( ast.NodeTransformer ):
def visit_Attribute( self, node ):
    self.generic_visit( node )
if isinstance( node._object, slice ):
if node._object.step:
raise VerilogTranslationError(
'Slices with steps ([start:stop:step]) are not translatable!\n',
node.lineno
)
new_node = ast.Slice( ast.Num( node._object.start ),
ast.Num( node._object.stop ),
None )
return ast.copy_location( new_node, node )
return node
def visit_Name( self, node ):
self.generic_visit( node )
if isinstance( node._object, slice ):
if node._object.step:
raise VerilogTranslationError(
'Slices with steps ([start:stop:step]) are not translatable!\n',
node.lineno
)
new_node = ast.Slice( ast.Num( node._object.start ),
ast.Num( node._object.stop ),
None )
return ast.copy_location( new_node, node )
return node
#-------------------------------------------------------------------------
# BitStructToSlice
#-------------------------------------------------------------------------
class BitStructToSlice( ast.NodeTransformer ):
def visit_Attribute( self, node ):
self.generic_visit( node )
if isinstance( node._object, _SignalSlice ):
if node._object.slice.step:
raise VerilogTranslationError(
'Slices with steps ([start:stop:step]) are not translatable!\n',
node.lineno
)
new_node = ast.Subscript( node.value,
ast.Slice( ast.Num( node._object.slice.start ),
ast.Num( node._object.slice.stop ),
None ),
None,
)
new_node._object = node._object
return ast.copy_location( new_node, node )
return node
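
#-------------------------------------------------------------------------
# Example: NodeTransformer pattern
#-------------------------------------------------------------------------
# (Illustration only; the transformers above depend on per-node _object
# annotations set elsewhere in the PyMTL pipeline.) A self-contained sketch of
# the same NodeTransformer + copy_location pattern on plain ASTs: rewriting a
# constant index x[1] into the one-element slice x[1:2].
class _IndexToSlice( ast.NodeTransformer ):
  def visit_Subscript( self, node ):
    self.generic_visit( node )
    if isinstance( node.slice, ast.Index ) and \
       isinstance( node.slice.value, ast.Num ):
      n = node.slice.value.n
      new_slice = ast.Slice( ast.Num( n ), ast.Num( n+1 ), None )
      node.slice = ast.copy_location( new_slice, node.slice )
    return node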
#-------------------------------------------------------------------------
# InferTemporaryTypes
#-------------------------------------------------------------------------
import copy
class InferTemporaryTypes( ast.NodeTransformer ):
last_model = None
func_id = 0
def __init__( self, model ):
self.model = model
self.infer_dict = {}
# Create unique ids for each function we visit in a given model.
# This ensures we can assign unique names to temporaries to provide
# 'scoping' behavior similar to Python.
if id(self.model) != InferTemporaryTypes.last_model:
InferTemporaryTypes.last_model = id(self.model)
InferTemporaryTypes.func_id = 0
else:
InferTemporaryTypes.func_id += 1
def _insert( self, node, value ):
node.targets[0]._object = value
self.infer_dict[ node.targets[0].id ] = value
def _uniq_name( self, node_id ):
return node_id + '__{}'.format( self.func_id )
def visit_Assign( self, node ):
# Catch untranslatable constructs
if len(node.targets) != 1:
raise VerilogTranslationError(
'Chained assignments are not supported!\n'
'Please modify "x = y = ..." to be two separate lines.',
node.lineno
)
if isinstance(node.targets[0], ast.Tuple):
raise VerilogTranslationError(
'Multiple items on the left of an assignment are not supported!\n'
'Please modify "x,y = ..." to be two separate lines.',
node.lineno
)
# First visit the RHS to update Name nodes that have been inferred
self.visit( node.value )
# Need this to visit potential temporaries used in slice indices!
self.visit( node.targets[0] )
# The LHS doesn't have a type, we need to infer it
if node.targets[0]._object == None:
# The LHS should be a Name node!
if not isinstance(node.targets[0], _ast.Name):
raise VerilogTranslationError(
'An internal error occured when performing type inference!\n'
'Please contact the PyMTL developers!',
node.lineno
)
# Assign unique name to this temporary in case the same temporary
# name is used in another concurrent block.
node.targets[0].id = self._uniq_name( node.targets[0].id )
# Copy the object returned by the RHS, set the name appropriately
if isinstance( node.value, ast.Name ):
if isinstance( node.value._object, int ):
self._insert( node, (node.targets[0].id, node.value._object ) )
else:
obj = copy.copy( node.value._object )
obj.name = node.targets[0].id
obj.parent = None
self._insert( node, obj )
elif isinstance( node.value, ast.Attribute ):
if isinstance( node.value._object, int ):
self._insert( node, (node.targets[0].id, node.value._object ) )
else:
obj = copy.copy( node.value._object )
obj.name = node.targets[0].id
obj.parent = None
self._insert( node, obj )
elif isinstance( node.value, ast.Num ):
self._insert( node, (node.targets[0].id, int( node.value.n )) )
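      # Boolean operations and comparisons evaluate to a single bit.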
elif isinstance( node.value, ast.BoolOp ):
obj = Wire( 1 )
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Compare ):
obj = Wire( 1 )
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Subscript ):
# TODO: assumes ast.Index does NOT contain a slice object
if not isinstance( node.value.slice, ast.Index ):
raise VerilogTranslationError(
'Type inference from slices > 1-bit is not currently supported!'
'\nCannot infer type of temporary variable "{}".'
.format( node.targets[0].id ),
node.lineno
)
if isinstance( node.value._object, Signal ):
obj = Wire( 1 )
elif isinstance( node.value._object, list ) and \
isinstance( node.value._object[0], Signal ):
obj = Wire( node.value._object[0].nbits )
else:
raise VerilogTranslationError(
'Type inference from unsupported list construct!'
'\nCannot infer type of temporary variable "{}".'
.format( node.targets[0].id ),
node.lineno
)
obj.name = node.targets[0].id
self._insert( node, obj )
elif isinstance( node.value, ast.Call ):
func_name = node.value.func.id
if func_name in ['sext', 'zext']:
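          # sext/zext return a value whose bitwidth is the second argument.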
nbits_arg = node.value.args[1]
if isinstance( nbits_arg, ast.Num ): nbits = nbits_arg.n
else: nbits = nbits_arg._object
if not isinstance( nbits, int ):
raise VerilogTranslationError(
'The second argument to function "{}" must be an int!'
.format( func_name ),
node.lineno
)
obj = Wire( nbits )
elif func_n |
"""
Test the Metropolis-Hastings algorithm.
"""
import numpy as np
import chronometer as gc
import matplotlib.pyplot as plt
import corner
import emcee
def model(par, x):
return par[0] + par[1]*x
def lnlike(par, x, y, yerr, par_inds):
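    # Gaussian log-likelihood up to an additive constant: -chi^2 / 2.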
y_mod = model(par, x)
    return sum(-.5*((y_mod - y)/yerr)**2)
def test_metropolis_hastings():
# Straight line model
x = np.arange(0, 10, .1)
err = 2.
yerr = np.ones_like(x) * err
y = .7 + 2.5*x + np.random.randn(len(x))*err
# Plot the data.
plt.clf()
plt.errorbar(x, y, yerr=yerr, fmt="k.")
plt.savefig("data")
print("Running Metropolis Hastings" | )
N = 1000000 # N samples
pars = np.array([.5, 2.5]) # initialisation
t = np.array([.01, .01])
par_inds = np.arange(len(pars))
args = [x, y, yerr, par_inds]
samples, par, probs = gc.MH(pars, lnlike, N, t, *args)
results = [np.percentile(samples[:, i], 50) for i in range(2)]
    upper = [np.percentile(samples[:, i], 84) for i in range(2)]  # 1-sigma upper
    lower = [np.percentile(samples[:, i], 16) for i in range(2)]  # 1-sigma lower
print(lower, "lower")
print(results, "results")
print(upper, "upper")
    # Compare per parameter; comparing the lists directly would only be a
    # lexicographic check.
    assert all(l < r for l, r in zip(lower, results))
    assert all(r < u for r, u in zip(results, upper))
plt.clf()
plt.errorbar(x, y, yerr=yerr, fmt="k.")
plt.plot(x, results[0] + results[1]*x)
plt.savefig("test")
fig = corner.corner(samples, truths=[.7, 2.5], labels=["m", "c"])
fig.savefig("corner_MH_test")
plt.clf()
plt.plot(probs)
plt.savefig("prob_test")
if __name__ == "__main__":
test_metropolis_hastings()
|
# -*- coding: utf-8 -*-
import pytest
from boussole.exceptions import FinderException
def test_001(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home/foo",
"/home/bar",
"/etc",
])
assert results == "plop"
def test_002(finder):
results = finder.get_relative_from_paths("/etc/plop.plip", [
"/home/foo",
"/home/bar",
"/etc",
])
assert results == "plop.plip"
def test_003(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/etc",
])
assert results == "plop"
def test_004(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/home/bar",
"/etc/ping",
])
assert results == "plop"
def test_005(finder):
results = finder.get_relative_from_paths("/home/foo/plop", [
"/home",
"/home/foo",
"/home/bar/pika",
"/etc/ping",
])
assert results == "plop"
def test_006(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/home",
"/home/foo",
"/home/bar/pika",
"/home/bar",
])
assert results == "pika/plop"
def test_007(finder):
results = finder.get_relative_from_paths("/home/foo/pika/plop", [
"/etc",
"/home/foo/pika",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_008(finder):
results = finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
| "/etc",
"/home/foo/pika/bim/bam",
"/home/foo/pika/bim/bom",
"/home/bar/pika",
"/home/bar",
])
assert results == "plop"
def test_009(finder):
"""
    Failing to find a relative path raises an exception.
"""
with pytest.raises(FinderException):
finder.get_relative_from_paths("/home/foo/pika/bim/bam/plop", [
"/etc",
"/home/foo/pika/bim/bom",
| "/home/bar/pika",
"/home/bar",
])
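# For reference, a minimal sketch (not necessarily boussole's actual
# implementation) of the behavior the tests above exercise: resolve the
# given path against the closest (longest) matching base directory and
# return the relative remainder, raising FinderException when no base
# directory contains the path. `get_relative_from_paths_sketch` is a
# hypothetical name.
import os

def get_relative_from_paths_sketch(path, paths):
    # A base matches only if it is a proper directory prefix of `path`.
    candidates = [p for p in paths if path.startswith(p.rstrip("/") + "/")]
    if not candidates:
        raise FinderException("No base path contains {}".format(path))
    # The longest base wins, so "/home/foo/pika" beats "/home/foo".
    return os.path.relpath(path, max(candidates, key=len))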
|
odels")
_models = []
for model in os.listdir(models_dir):
model_dir = os.path.join(models_dir, model)
if os.path.isdir(model_dir) and "__init__.py" in os.listdir(model_dir):
_models.append(model)
    # Get the models registered in the `transformers.models` submodule
models = [model for model in dir(transformers.models) if not model.startswith("__")]
missing_models = sorted(list(set(_models).difference(models)))
if missing_models:
raise Exception(
f"The following models should be included in {models_dir}/__init__.py: {','.join(missing_models)}."
)
# If some modeling modules should be ignored for all checks, they should be added in the nested list
# _ignore_modules of this function.
def get_model_modules():
"""Get the model modules inside the transformers library."""
_ignore_modules = [
"modeling_auto",
"modeling_encoder_decoder",
"modeling_marian",
"modeling_mmbt",
"modeling_outputs",
"modeling_retribert",
"modeling_utils",
"modeling_flax_auto",
"modeling_flax_encoder_decoder",
"modeling_flax_utils",
"modeling_speech_encoder_decoder",
"modeling_flax_speech_encoder_decoder",
"modeling_flax_vision_encoder_decoder",
"modeling_transfo_xl_utilities",
"modeling_tf_auto",
"modeling_tf_encoder_decoder",
"modeling_tf_outputs",
"modeling_tf_pytorch_utils",
"modeling_tf_utils",
"modeling_tf_transfo_xl_utilities",
"modeling_tf_vision_encoder_decoder",
"modeling_vision_encoder_decoder",
]
modules = []
for model in dir(transformers.models):
# There are some magic dunder attributes in the dir, we ignore them
if not model.startswith("__"):
            model_module = getattr(transformers.models, model)
for submodule in dir(model_module):
if submodule.startswith("modeling") and submodule not in _ignore_modules:
modeling_module = getattr(model_module, submodule)
if inspect.ismodule(modeling_module):
modules.append(modeling_module)
return modules
def get_models(module, include_pretrained=False):
| """Get the objects in module that are models."""
models = []
model_classes = (transformers.PreTrainedModel, transformers.TFPreTrainedModel, transformers.FlaxPreTrainedModel)
for attr_name in dir(module):
if not include_pretrained and ("Pretrained" in attr_name or "PreTrained" in attr_name):
continue
attr = getattr(module, attr_name)
if isinstance(attr, type) and issubclass(attr, model_classes) and attr.__module__ == module.__name__:
models.append((attr_name, attr))
return models
def is_a_private_model(model):
"""Returns True if the model should not be in the main init."""
if model in PRIVATE_MODELS:
return True
# Wrapper, Encoder and Decoder are all privates
if model.endswith("Wrapper"):
return True
if model.endswith("Encoder"):
return True
if model.endswith("Decoder"):
return True
return False
def check_models_are_in_init():
"""Checks all models defined in the library are in the main init."""
models_not_in_init = []
dir_transformers = dir(transformers)
for module in get_model_modules():
models_not_in_init += [
model[0] for model in get_models(module, include_pretrained=True) if model[0] not in dir_transformers
]
# Remove private models
models_not_in_init = [model for model in models_not_in_init if not is_a_private_model(model)]
if len(models_not_in_init) > 0:
raise Exception(f"The following models should be in the main init: {','.join(models_not_in_init)}.")
# If some test_modeling files should be ignored when checking models are all tested, they should be added in the
# nested list _ignore_files of this function.
def get_model_test_files():
"""Get the model test files."""
_ignore_files = [
"test_modeling_common",
"test_modeling_encoder_decoder",
"test_modeling_flax_encoder_decoder",
"test_modeling_flax_speech_encoder_decoder",
"test_modeling_marian",
"test_modeling_tf_common",
"test_modeling_tf_encoder_decoder",
]
test_files = []
for file_or_dir in os.listdir(PATH_TO_TESTS):
path = os.path.join(PATH_TO_TESTS, file_or_dir)
if os.path.isdir(path):
filenames = [os.path.join(file_or_dir, file) for file in os.listdir(path)]
else:
filenames = [file_or_dir]
for filename in filenames:
if (
os.path.isfile(os.path.join(PATH_TO_TESTS, filename))
and "test_modeling" in filename
and not os.path.splitext(filename)[0] in _ignore_files
):
test_files.append(filename)
return test_files
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the tester class
# for the all_model_classes variable.
def find_tested_models(test_file):
"""Parse the content of test_file to detect what's in all_model_classes"""
# This is a bit hacky but I didn't find a way to import the test_file as a module and read inside the class
with open(os.path.join(PATH_TO_TESTS, test_file), "r", encoding="utf-8", newline="\n") as f:
content = f.read()
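    # Matches the double-parenthesis form first, e.g.
    # `all_model_classes = ((FooModel, FooForMaskedLM), ...)`;
    # the group captures the class names inside the inner tuple.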
all_models = re.findall(r"all_model_classes\s+=\s+\(\s*\(([^\)]*)\)", content)
# Check with one less parenthesis as well
all_models += re.findall(r"all_model_classes\s+=\s+\(([^\)]*)\)", content)
if len(all_models) > 0:
model_tested = []
for entry in all_models:
for line in entry.split(","):
name = line.strip()
if len(name) > 0:
model_tested.append(name)
return model_tested
def check_models_are_tested(module, test_file):
"""Check models defined in module are tested in test_file."""
# XxxPreTrainedModel are not tested
defined_models = get_models(module)
tested_models = find_tested_models(test_file)
if tested_models is None:
if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:
return
return [
f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
+ "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
+ "`utils/check_repo.py`."
]
failures = []
for model_name, _ in defined_models:
if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
failures.append(
f"{model_name} is defined in {module.__name__} but is not tested in "
+ f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
+ "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
+ "in the file `utils/check_repo.py`."
)
return failures
def check_all_models_are_tested():
"""Check all models are properly tested."""
modules = get_model_modules()
test_files = get_model_test_files()
failures = []
for module in modules:
test_file = [file for file in test_files if f"test_{module.__name__.split('.')[-1]}.py" in file]
if len(test_file) == 0:
failures.append(f"{module.__name__} does not have its corresponding test file {test_file}.")
elif len(test_file) > 1:
failures.append(f"{module.__name__} has several test files: {test_file}.")
else:
test_file = test_file[0]
new_failures = check_models_are_tested(module, test_file)
if new_failures is not None:
failures += new_failures
if len(failures) > 0:
raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
def get_all_auto_configured_models():
"""Return the list of all models in at |
from django.http import HttpResponse
from django.shortcuts import render
def video_calling(request):
    return render(request, 'video_calling.html')
sion = Revision.objects.get(id=r)
if request.POST.__contains__('change'):
article.current_revision = revision
article.save()
elif request.POST.__contains__('view'):
redirectURL = wiki_reverse('wiki_view_revision', course=course,
kwargs={'revision_number': revision.counter, 'article_path': article.get_path()})
        # The rest of these are admin functions
elif request.POST.__contains__('delete') and request.user.is_superuser:
if (revision.deleted == 0):
revision.adminSetDeleted(2)
elif request.POST.__contains__('restore') and request.user.is_superuser:
if (revision.deleted == 2):
revision.adminSetDeleted(0)
elif request.POST.__contains__('delete_all') and request.user.is_superuser:
Revision.objects.filter(article__exact=article, deleted=0).update(deleted=2)
elif request.POST.__contains__('lock_article'):
article.locked = not article.locked
article.save()
except Exception as e:
print str(e)
pass
finally:
return HttpResponseRedirect(redirectURL)
#
#
# <input type="submit" name="delete" value="Delete revision"/>
# <input type="submit" name="restore" value="Restore revision"/>
# <input type="submit" name="delete_all" value="Delete all revisions">
# %else:
# <input type="submit" name="delete_article" value="Delete all revisions">
#
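    # Ceiling division: number of pages needed at `page_size` items each
    # (integer division under Python 2).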
page_count = (history.count() + (page_size - 1)) / page_size
if p > page_count:
p = 1
beginItem = (p - 1) * page_size
next_page = p + 1 if page_count > p else None
prev_page = p - 1 if p > 1 else None
d = {'wiki_page': p,
'wiki_next_page': next_page,
'wiki_prev_page': prev_page,
'wiki_history': history[beginItem:beginItem + page_size],
'show_delete_revision': request.user.is_superuser}
update_template_dictionary(d, request, course, article)
return render_to_response('simplewiki/simplewiki_history.html', d)
def revision_feed(request, page=1, namespace=None, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
page_size = 10
if page is None:
page = 1
try:
p = int(page)
except ValueError:
p = 1
history = Revision.objects.order_by('-revision_date').select_related('revision_user', 'article', 'previous_revision')
page_count = (history.count() + (page_size - 1)) / page_size
if p > page_count:
p = 1
beginItem = (p - 1) * page_size
next_page = p + 1 if page_count > p else None
prev_page = p - 1 if p > 1 else None
d = {'wiki_page': p,
'wiki_next_page': next_page,
'wiki_prev_page': prev_page,
'wiki_history': history[beginItem:beginItem + page_size],
'show_delete_revision': request.user.is_superuser,
'namespace': namespace}
update_template_dictionary(d, request, course)
return render_to_response('simplewiki/simplewiki_revision_feed.html', d)
def search_articles(request, namespace=None, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
# blampe: We should check for the presence of other popular django search
# apps and use those if possible. Only fall back on this as a last resort.
# Adding some context to results (eg where matches were) would also be nice.
# todo: maybe do some perm checking here
if request.method == 'GET':
querystring = request.GET.get('value', '').strip()
else:
querystring = ""
results = Article.objects.all()
if namespace:
results = results.filter(namespace__name__exact=namespace)
if request.user.is_superuser:
results = results.order_by('current_revision__deleted')
else:
results = results.filter(current_revision__deleted=0)
if querystring:
for queryword in querystring.split():
# Basic negation is as fancy as we get right now
if queryword[0] == '-' and len(queryword) > 1:
results._search = lambda x: results.exclude(x)
queryword = queryword[1:]
else:
results._search = lambda x: results.filter(x)
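            # The lambda chosen above makes the call below either narrow
            # (filter) or negate (exclude) the same Q expression.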
results = results._search(Q(current_revision__contents__icontains=queryword) | \
Q(title__icontains=queryword))
results = results.select_related('current_revision__deleted', 'namespace')
    results = sorted(results, key=lambda article: (article.current_revision.deleted, article.get_path().lower()))
if len(results) == 1 and querystring:
return HttpResponseRedirect(wiki_reverse('wiki_view', article=results[0], course=course))
else:
d = {'wiki_search_results': results,
'wiki_search_query': querystring,
'namespace': namespace}
update_template_dictionary(d, request, course)
        return render_to_response('simplewiki/simplewiki_searchresults.html', d)
def search_add_related(request, course_id, slug, namespace):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_read=True)
if perm_err:
return perm_err
search_string = request.GET.get('query', None)
self_pk = request.GET.get('self', None)
if search_string:
results = []
related = Article.objects.filter(title__istartswith=search_string)
others = article.related.all()
if self_pk:
related = related.exclude(pk=self_pk)
if others:
related = related.exclude(related__in=others)
related = related.order_by('title')[:10]
for item in related:
results.append({'id': str(item.id),
'value': item.title,
'info': item.get_url()})
else:
results = []
json = simplejson.dumps({'results': results})
return HttpResponse(json, mimetype='application/json')
def add_related(request, course_id, slug, namespace):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
try:
related_id = request.POST['id']
rel = Article.objects.get(id=related_id)
has_already = article.related.filter(id=related_id).count()
if has_already == 0 and not rel == article:
article.related.add(rel)
article.save()
    except Exception:
pass
finally:
return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def remove_related(request, course_id, namespace, slug, related_id):
course = get_opt_course_with_access(request.user, course_id, 'load')
(article, err) = get_article(request, slug, namespace if namespace else course_id)
if err:
return err
perm_err = check_permissions(request, article, course, check_write=True, check_locked=True)
if perm_err:
return perm_err
try:
rel_id = int(related_id)
rel = Article.objects.get(id=rel_id)
article.related.remove(rel)
article.save()
    except Exception:
pass
finally:
return HttpResponseRedirect(reverse('wiki_view', args=(article.get_url(),)))
def random_article(request, course_id=None):
course = get_opt_course_with_access(request.user, course_id, 'load')
from random import randin |
plicates={},
index=index, tree=tree, base_dir='/')
# Make sure all required docstrings are present.
self.assertTrue(inspect.getdoc(TestClass) in docs)
self.assertTrue(inspect.getdoc(TestClass.a_method) in docs)
self.assertTrue(inspect.getdoc(TestClass.a_property) in docs)
# Make sure that the signature is extracted properly and omits self.
self.assertTrue('a_method(arg=\'default\')' in docs)
# Make sure there is a link to the child class and it points the right way.
self.assertTrue('[`class ChildClass`](./TestClass/ChildClass.md)' in docs)
# Make sure CLASS_MEMBER is mentioned.
self.assertTrue('CLASS_MEMBER' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_module(self):
module = sys.modules[__name__]
index = {
'TestModule': module,
'TestModule.test_function': test_function,
'TestModule.test_function_with_args_kwargs':
test_function_with_args_kwargs,
'TestModule.TestClass': TestClass,
}
tree = {
'TestModule': ['TestClass', 'test_function',
'test_function_with_args_kwargs']
}
docs = parser.generate_markdown(full_name='TestModule', py_object=module,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure all required docstrings are present.
self.assertTrue(inspect.getdoc(module) in docs)
# Make sure that links to the members are there (not asserting on exact link
# text for functions).
self.assertTrue('./TestModule/test_function.md' in docs)
self.assertTrue('./TestModule/test_function_with_args_kwargs.md' in docs)
# Make sure there is a link to the child class and it points the right way.
self.assertTrue('[`class TestClass`](./TestModule/TestClass.md)' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
def test_generate_markdown_for_function(self):
index = {
'test_function': test_function
}
tree = {
'': ['test_function']
}
docs = parser.generate_markdown(full_name='test_function',
py_object=test_function,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up.
self.assertTrue(inspect.getdoc(test_function) in docs)
# Make sure the extracted signature is good.
self.assertTrue(
'test_function(unused_arg, unused_kwarg=\'default\')' in docs)
# Make sure this file is contained as the definition location.
self.assertTrue(os.path.relpath(__file__, '/') in docs)
  def test_generate_markdown_for_function_with_kwargs(self):
index = {
'test_function_with_args_kwargs': test_function_with_args_kwargs
}
tree = {
'': ['test_function_with_args_kwargs']
}
docs = parser.generate_markdown(full_name='test_function_with_args_kwargs',
py_object=test_function_with_args_kwargs,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up.
self.assertTrue(inspect.getdoc(test_function_with_args_kwargs) in docs)
# Make sure the extracted signature is good.
self.assertTrue(
'test_function_with_args_kwargs(unused_arg,'
' *unused_args, **unused_kwargs)' in docs)
def test_references_replaced_in_generated_markdown(self):
index = {
'test_function_for_markdown_reference':
test_function_for_markdown_reference
}
tree = {
'': ['test_function_for_markdown_reference']
}
docs = parser.generate_markdown(
full_name='test_function_for_markdown_reference',
py_object=test_function_for_markdown_reference,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
# Make sure docstring shows up and is properly processed.
expected_docs = parser.replace_references(
inspect.getdoc(test_function_for_markdown_reference),
relative_path_to_root='.', duplicate_of={})
self.assertTrue(expected_docs in docs)
def test_docstring_special_section(self):
index = {
'test_function': test_function_with_fancy_docstring
}
tree = {
'': 'test_function'
}
docs = parser.generate_markdown(
full_name='test_function',
py_object=test_function_with_fancy_docstring,
duplicate_of={}, duplicates={},
index=index, tree=tree, base_dir='/')
expected = '\n'.join([
'Function with a fancy docstring.',
'',
'#### Args:',
'',
'* <b>`arg`</b>: An argument.',
'',
'',
'#### Returns:',
'',
'* <b>`arg`</b>: the input, and',
'* <b>`arg`</b>: the input, again.',
''])
self.assertTrue(expected in docs)
def test_generate_index(self):
module = sys.modules[__name__]
index = {
'TestModule': module,
'test_function': test_function,
'TestModule.test_function': test_function,
'TestModule.TestClass': TestClass,
'TestModule.TestClass.a_method': TestClass.a_method,
'TestModule.TestClass.a_property': TestClass.a_property,
'TestModule.TestClass.ChildClass': TestClass.ChildClass,
}
duplicate_of = {
'TestModule.test_function': 'test_function'
}
docs = parser.generate_global_index('TestLibrary', 'test',
index=index,
duplicate_of=duplicate_of)
# Make sure duplicates and non-top-level symbols are in the index, but
# methods and properties are not.
self.assertTrue('a_method' not in docs)
self.assertTrue('a_property' not in docs)
self.assertTrue('TestModule.TestClass' in docs)
self.assertTrue('TestModule.TestClass.ChildClass' in docs)
self.assertTrue('TestModule.test_function' in docs)
# Leading backtick to make sure it's included top-level.
# This depends on formatting, but should be stable.
self.assertTrue('`test_function' in docs)
  def test_argspec_for_functools_partial(self):
# pylint: disable=unused-argument
def test_function_for_partial1(arg1, arg2, kwarg1=1, kwarg2=2):
pass
def test_function_for_partial2(arg1, arg2, *my_args, **my_kwargs):
pass
# pylint: enable=unused-argument
# pylint: disable=protected-access
# Make sure everything works for regular functions.
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1', 'kwarg2'], None, None,
(1, 2))
self.assertEqual(expected, parser._get_arg_spec(test_function_for_partial1))
# Make sure doing nothing works.
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1', 'kwarg2'], None, None,
(1, 2))
partial = functools.partial(test_function_for_partial1)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting args from the front works.
expected = inspect.ArgSpec(['arg2', 'kwarg1', 'kwarg2'], None, None, (1, 2))
partial = functools.partial(test_function_for_partial1, 1)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = inspect.ArgSpec(['kwarg2',], None, None, (2,))
partial = functools.partial(test_function_for_partial1, 1, 2, 3)
self.assertEqual(expected, parser._get_arg_spec(partial))
# Make sure setting kwargs works.
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg2'], None, None, (2,))
partial = functools.partial(test_function_for_partial1, kwarg1=0)
self.assertEqual(expected, parser._get_arg_spec(partial))
expected = inspect.ArgSpec(['arg1', 'arg2', 'kwarg1'], None, None, (1,))
partial = functools.partial(test_function_f |