text
stringlengths
29
850k
# pylint: disable=missing-docstring from lettuce import step from lettuce import world from lettuce import before from pymongo import MongoClient from nose.tools import assert_equals from nose.tools import assert_in REQUIRED_EVENT_FIELDS = [ 'agent', 'event', 'event_source', 'event_type', 'host', 'ip', 'page', 'time', 'username' ] @before.all def connect_to_mongodb(): world.mongo_client = MongoClient() world.event_collection = world.mongo_client['track']['events'] @before.each_scenario def reset_captured_events(_scenario): world.event_collection.drop() @before.outline def reset_between_outline_scenarios(_scenario, order, outline, reasons_to_fail): world.event_collection.drop() @step(r'[aA]n? course url "(.*)" event is emitted$') def course_url_event_is_emitted(_step, url_regex): event_type = url_regex.format(world.scenario_dict['COURSE'].id) n_events_are_emitted(_step, 1, event_type, "server") @step(r'([aA]n?|\d+) "(.*)" (server|browser) events? is emitted$') def n_events_are_emitted(_step, count, event_type, event_source): # Ensure all events are written out to mongo before querying. world.mongo_client.fsync() # Note that splinter makes 2 requests when you call browser.visit('/foo') # the first just checks to see if the server responds with a status # code of 200, the next actually uses the browser to submit the request. # We filter out events associated with the status code checks by ignoring # events that come directly from splinter. criteria = { 'event_type': event_type, 'event_source': event_source, 'agent': { '$ne': 'python/splinter' } } cursor = world.event_collection.find(criteria) try: number_events = int(count) except ValueError: number_events = 1 assert_equals(cursor.count(), number_events) event = cursor.next() expected_field_values = { "username": world.scenario_dict['USER'].username, "event_type": event_type, } for key, value in expected_field_values.iteritems(): assert_equals(event[key], value) for field in REQUIRED_EVENT_FIELDS: assert_in(field, event)
This case is crafted from high-grade genuine leather, which features vintage look and exquisite hand feel, protecting your iPhone against daily damage and strutting your style as well. It is perfect for those who look for something that can dress up and stand out in a crowd. Hand-made curved edges with perfect cutouts are for unhindered operation.
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth.plugins import mapped from keystone.contrib.federation import utils as mapping_utils from keystone import exception from keystone.tests import unit from keystone.tests.unit import mapping_fixtures class MappingRuleEngineTests(unit.BaseTestCase): """A class for testing the mapping rule engine.""" def assertValidMappedUserObject(self, mapped_properties, user_type='ephemeral', domain_id=None): """Check whether mapped properties object has 'user' within. According to today's rules, RuleProcessor does not have to issue user's id or name. What's actually required is user's type and for ephemeral users that would be service domain named 'Federated'. """ self.assertIn('user', mapped_properties, message='Missing user object in mapped properties') user = mapped_properties['user'] self.assertIn('type', user) self.assertEqual(user_type, user['type']) self.assertIn('domain', user) domain = user['domain'] domain_name_or_id = domain.get('id') or domain.get('name') domain_ref = domain_id or 'Federated' self.assertEqual(domain_ref, domain_name_or_id) def test_rule_engine_any_one_of_and_direct_mapping(self): """Should return user's name and group id EMPLOYEE_GROUP_ID. The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE. They will test the case where `any_one_of` is valid, and there is a direct mapping for the users name. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.ADMIN_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) fn = assertion.get('FirstName') ln = assertion.get('LastName') full_name = '%s %s' % (fn, ln) group_ids = values.get('group_ids') user_name = values.get('user', {}).get('name') self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids) self.assertEqual(full_name, user_name) def test_rule_engine_no_regex_match(self): """Should deny authorization, the email of the tester won't match. This will not match since the email in the assertion will fail the regex test. It is set to match any @example.com address. But the incoming value is set to eviltester@example.org. RuleProcessor should return list of empty group_ids. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.BAD_TESTER_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) self.assertValidMappedUserObject(mapped_properties) self.assertIsNone(mapped_properties['user'].get('name')) self.assertListEqual(list(), mapped_properties['group_ids']) def test_rule_engine_regex_many_groups(self): """Should return group CONTRACTOR_GROUP_ID. The TESTER_ASSERTION should successfully have a match in MAPPING_TESTER_REGEX. This will test the case where many groups are in the assertion, and a regex value is used to try and find a match. """ mapping = mapping_fixtures.MAPPING_TESTER_REGEX assertion = mapping_fixtures.TESTER_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids) def test_rule_engine_any_one_of_many_rules(self): """Should return group CONTRACTOR_GROUP_ID. The CONTRACTOR_ASSERTION should successfully have a match in MAPPING_SMALL. This will test the case where many rules must be matched, including an `any_one_of`, and a direct mapping. """ mapping = mapping_fixtures.MAPPING_SMALL assertion = mapping_fixtures.CONTRACTOR_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids) def test_rule_engine_not_any_of_and_direct_mapping(self): """Should return user's name and email. The CUSTOMER_ASSERTION should successfully have a match in MAPPING_LARGE. This will test the case where a requirement has `not_any_of`, and direct mapping to a username, no group. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.CUSTOMER_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertEqual([], group_ids,) def test_rule_engine_not_any_of_many_rules(self): """Should return group EMPLOYEE_GROUP_ID. The EMPLOYEE_ASSERTION should successfully have a match in MAPPING_SMALL. This will test the case where many remote rules must be matched, including a `not_any_of`. """ mapping = mapping_fixtures.MAPPING_SMALL assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids) def test_rule_engine_not_any_of_regex_verify_pass(self): """Should return group DEVELOPER_GROUP_ID. The DEVELOPER_ASSERTION should successfully have a match in MAPPING_DEVELOPER_REGEX. This will test the case where many remote rules must be matched, including a `not_any_of`, with regex set to True. """ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX assertion = mapping_fixtures.DEVELOPER_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids) def test_rule_engine_not_any_of_regex_verify_fail(self): """Should deny authorization. The email in the assertion will fail the regex test. It is set to reject any @example.org address, but the incoming value is set to evildeveloper@example.org. RuleProcessor should return list of empty group_ids. """ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) self.assertValidMappedUserObject(mapped_properties) self.assertIsNone(mapped_properties['user'].get('name')) self.assertListEqual(list(), mapped_properties['group_ids']) def _rule_engine_regex_match_and_many_groups(self, assertion): """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID. A helper function injecting assertion passed as an argument. Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results. """ mapping = mapping_fixtures.MAPPING_LARGE rp = mapping_utils.RuleProcessor(mapping['rules']) values = rp.process(assertion) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertValidMappedUserObject(values) self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids) self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids) def test_rule_engine_regex_match_and_many_groups(self): """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID. The TESTER_ASSERTION should successfully have a match in MAPPING_LARGE. This will test a successful regex match for an `any_one_of` evaluation type, and will have many groups returned. """ self._rule_engine_regex_match_and_many_groups( mapping_fixtures.TESTER_ASSERTION) def test_rule_engine_discards_nonstring_objects(self): """Check whether RuleProcessor discards non string objects. Despite the fact that assertion is malformed and contains non string objects, RuleProcessor should correctly discard them and successfully have a match in MAPPING_LARGE. """ self._rule_engine_regex_match_and_many_groups( mapping_fixtures.MALFORMED_TESTER_ASSERTION) def test_rule_engine_fails_after_discarding_nonstring(self): """Check whether RuleProcessor discards non string objects. Expect RuleProcessor to discard non string object, which is required for a correct rule match. RuleProcessor will result with empty list of groups. """ mapping = mapping_fixtures.MAPPING_SMALL rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION mapped_properties = rp.process(assertion) self.assertValidMappedUserObject(mapped_properties) self.assertIsNone(mapped_properties['user'].get('name')) self.assertListEqual(list(), mapped_properties['group_ids']) def test_rule_engine_returns_group_names(self): """Check whether RuleProcessor returns group names with their domains. RuleProcessor should return 'group_names' entry with a list of dictionaries with two entries 'name' and 'domain' identifying group by its name and domain. """ mapping = mapping_fixtures.MAPPING_GROUP_NAMES rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) reference = { mapping_fixtures.DEVELOPER_GROUP_NAME: { "name": mapping_fixtures.DEVELOPER_GROUP_NAME, "domain": { "name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME } }, mapping_fixtures.TESTER_GROUP_NAME: { "name": mapping_fixtures.TESTER_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) def test_rule_engine_whitelist_and_direct_groups_mapping(self): """Should return user's groups Developer and Contractor. The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist' correctly filters out Manager and only allows Developer and Contractor. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.DEVELOPER_GROUP_NAME: { "name": mapping_fixtures.DEVELOPER_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } }, mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_blacklist_and_direct_groups_mapping(self): """Should return user's group Developer. The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist' correctly filters out Manager and Developer and only allows Contractor. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self): """Tests matching multiple values before the blacklist. Verifies that the local indexes are correct when matching multiple remote values for a field when the field occurs before the blacklist entry in the remote rules. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": { "id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID } } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self): """Test if the local rule is rejected upon missing domain value This is a variation with a ``whitelist`` filter. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self): """Test if the local rule is rejected upon missing domain value This is a variation with a ``blacklist`` filter. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_no_groups_allowed(self): """Should return user mapped to no groups. The EMPLOYEE_ASSERTION should successfully have a match in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out the group values from the assertion and thus map to no groups. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertListEqual(mapped_properties['group_names'], []) self.assertListEqual(mapped_properties['group_ids'], []) self.assertEqual('tbo', mapped_properties['user']['name']) def test_mapping_federated_domain_specified(self): """Test mapping engine when domain 'ephemeral' is explicitely set. For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion EMPLOYEE_ASSERTION """ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) def test_create_user_object_with_bad_mapping(self): """Test if user object is created even with bad mapping. User objects will be created by mapping engine always as long as there is corresponding local rule. This test shows, that even with assertion where no group names nor ids are matched, but there is 'blind' rule for mapping user, such object will be created. In this test MAPPING_EHPEMERAL_USER expects UserName set to jsmith whereas value from assertion is 'tbo'. """ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) self.assertNotIn('id', mapped_properties['user']) self.assertNotIn('name', mapped_properties['user']) def test_set_ephemeral_domain_to_ephemeral_users(self): """Test auto assigning service domain to ephemeral users. Test that ephemeral users will always become members of federated service domain. The check depends on ``type`` value which must be set to ``ephemeral`` in case of ephemeral user. """ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) def test_local_user_local_domain(self): """Test that local users can have non-service domains assigned.""" mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject( mapped_properties, user_type='local', domain_id=mapping_fixtures.LOCAL_DOMAIN) def test_user_identifications_name(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has property type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if user's id is properly set and equal to name, as it was not explicitely specified in the mapping. """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) mapped.setup_username({}, mapped_properties) self.assertEqual('jsmith', mapped_properties['user']['id']) self.assertEqual('jsmith', mapped_properties['user']['name']) def test_user_identifications_name_and_federated_domain(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has propert type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if user's id is properly set and equal to name, as it was not explicitely specified in the mapping. """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) mapped.setup_username({}, mapped_properties) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual('abc123%40example.com', mapped_properties['user']['id']) def test_user_identification_id(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has propert type set ('ephemeral') - Check if user's id is properly mapped from the assertion - Check if user's name is properly set and equal to id, as it was not explicitely specified in the mapping. """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(mapping['rules']) assertion = mapping_fixtures.ADMIN_ASSERTION mapped_properties = rp.process(assertion) context = {'environment': {}} self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) mapped.setup_username(context, mapped_properties) self.assertEqual('bob', mapped_properties['user']['name']) self.assertEqual('bob', mapped_properties['user']['id']) def test_user_identification_id_and_name(self): """Test varius mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has proper type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if user's id is properly set and and equal to value hardcoded in the mapping This test does two iterations with different assertions used as input for the Mapping Engine. Different assertions will be matched with different rules in the ruleset, effectively issuing different user_id (hardcoded values). In the first iteration, the hardcoded user_id is not url-safe and we expect Keystone to make it url safe. In the latter iteration, provided user_id is already url-safe and we expect server not to change it. """ testcases = [(mapping_fixtures.CUSTOMER_ASSERTION, 'bwilliams'), (mapping_fixtures.EMPLOYEE_ASSERTION, 'tbo')] for assertion, exp_user_name in testcases: mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(mapping['rules']) mapped_properties = rp.process(assertion) context = {'environment': {}} self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) mapped.setup_username(context, mapped_properties) self.assertEqual(exp_user_name, mapped_properties['user']['name']) self.assertEqual('abc123%40example.com', mapped_properties['user']['id'])
DOTS ARE INVADING THE SPACE. GO FORTH AND DESTROY THEM ALL. Get a high score while you're at it, too. Ever played one of those unforgiving Spectrum/BBC Micro games with its unwieldy controls, lack of instructions and harsh five life restrictions? Well this game is like a bad version of that. This is my first ever submission, in fact it's my first ever complete game. Please don't go easy on me! Music by Boxheadreplica, used with permission. A game of aerial battle. Short, c. 3 min shmup challenge. Inspired by early Toaplan games. Just discovered, that I forgot to put it in the kart section, so I added some enemies, changed a bit the title screen and here it is. spent the better part of the past week porting bulletml to lua. And then I made this horrible game with it in a few minutes. There are no win screens. Or lose screens.
import asyncio import numpy as np import pygame from A3CBootcampGame.BaseGame import BaseGame from A3CBootcampGame.ShootingGrounds.Targets import TargetHandler from A3CBootcampGame.ShootingGrounds.GameHandler import GameHandler from A3CBootcampGame.ShootingGrounds.Player import Player from A3CBootcampGame.ShootingGrounds.world import World from A3CBootcampGame.physics import physicsHandler WHITE = 255, 255, 255 # Class for the shooting grounds level in A3CBootCamp class ShootingGrounds(BaseGame): def __init__(self, settings, gameDataQueue, playerActionQueue): BaseGame.__init__(self, settings, gameDataQueue, playerActionQueue) def initGame(self): # This function was created to do init in the run function when this class was a process self.baseInit() self.world = World(self.settings) self.player = Player(self.settings, "./Assets/Player.png") self.targetHandler = TargetHandler(self.settings, self.player) self.gameHandler = GameHandler(self.player, self.targetHandler) collisionGroups = 2 boxes = [] self.player.collisionGroup = 0 boxes.append(self.player) for bullet in self.player.ws.bullets: bullet.collisionGroup = 0 boxes.append(bullet) self.targetHandler.target.collisionGroup = 1 boxes.append(self.targetHandler.target) self.physics = physicsHandler(self.world.walls, boxes, collisionGroups, self.settings) async def run(self): while True: if not self.episodeInProgress: self.endEpisode() # Render stuff self.gameScreen.fill(WHITE) self.world.draw(self.gameScreen) self.targetHandler.draw(self.gameScreen) self.player.draw(self.gameScreen) if self.window: self.drawWindow() if not self.gameCounter % self.settings.deepRLRate: self.sendFrameToWorker() while self.playerActionQueue.empty(): # Yield to let other games run, to prevent blocking on the queue await asyncio.sleep(0.005) self.getActionFromWorker() # Update stuff self.player.update(self.playerAction, self.timeStep) self.physics.update(self.timeStep) self.targetHandler.update(self.gameCounter) self.episodeInProgress = self.gameHandler.update(self.physics.events, self.gameCounter, self.episodeData, self.bootStrapCounter, self.settings.bootStrapCutOff) if self.window: self.handleWindow() self.gameCounter += 1
Lonicera is a moderately large genus of ca. 200 species, mostly native to the northern hemisphere. Two are native in Belgium as well, Lonicera periclymenum L. and L. xylosteum L. About half of the species of the genus are cultivated for ornament. Their fruits (berries) are much eaten by birds and therefore easily dispersed. In addition to the taxa treated in detail in this account several others possibly also occur as escapes from cultivation. Clement & Foster (1994) cite several additional species. A cultivar of native Lonicera periclymenum with corollas that are pinkish-purplish outside is increasingly found in the wild, for instance in coastal scrub in Koksijde since several years (pers. obs.). In 2011 it was also observed near Verviers (see: http://waarnemingen.be/waarneming/view/55955784). Such plants possibly belong with cv 'Belgica'. Lonicera xylosteum is a rare and local native species. It is naturally confined to calcareous soils, especially in the area between rivers Meuse and Sambre in Wallonia. However, outside its native distribution range it is increasingly found as an escape from cultivation in many, widely scattered locations, primarily in Flanders (disused railway yards, seadunes, abandoned cemeteries,…) (see also Duvigneaud 1988). Bradshaw D. (1991) Climbing honeysuckles. The Plantsman 13(2): 106-110. Bradshaw D. (1995) Know your honeysuckles. The Garden 120: 406-411. Clement E.J. & Foster M.C. (1994) Alien plants of the British Isles. BSBI, London: XVIII + 590 p. De Boer E. (1999) Een wonderlijke vondst van een gekweekte kamperfoelie, Lonicera maackii (Rupr.) Max., op Gewone vlier. Gorteria 25: 28-29. Duvigneaud J. (1988) La réserve naturelle domaniale de Poilvache à Houx (Yvoir). Mise en évidence de sa vocation didactique. Nat. Mosana 41(4): 113-136. Li D.Z. (2000) Lonicera. In: Cullen J. & al. (eds.), The European Garden Flora, vol. 6. Cambridge University Press, Cambridge: 436-452. Rehder A. (1903) Synopsis of the genus Lonicera. Annual Rep. Missouri Bot. Gard. 14: 27-232. Stace C. (1997) New flora of the British Isles, 2nd ed.: XXVII + 1130 p. Cambridge University Press. Van de Laar H.J. (1988) Lonicera. Dendroflora 25: 37-54. Whitehouse C. (2012) Trial of climbing Lonicera (subgenus Lonicera). The Plantsman N.S. 11(1): 10-15. Wright D. (1983) Climbing honeysuckles. The Plantsman 4(4): 236-252. Yang Q., Landrein S., Osborne J. & Borosova R. (2011) Caprifoliaceae. In: Flora of China Editorial Committee (ed.), Flora of China, vol. 19. Science Press, Beijing & Missouri Botanical Garden Press, St. Louis: 616-641.
""" The constants used in extract_msg. If you modify any of these without explicit instruction to do so from one of the contributers, please do not complain about bugs. """ import datetime import struct import sys if sys.version_info[0] >= 3: BYTES = bytes STRING = str else: BYTES = str STRING = unicode # DEFINE CONSTANTS # WARNING DO NOT CHANGE ANY OF THESE VALUES UNLESS YOU KNOW # WHAT YOU ARE DOING! FAILURE TO FOLLOW THIS INSTRUCTION # CAN AND WILL BREAK THIS SCRIPT! # Constants used by named.py NUMERICAL_NAMED = 0 STRING_NAMED = 1 GUID_PS_MAPI = '{00020328-0000-0000-C000-000000000046}' GUID_PS_PUBLIC_STRINGS = '{00020329-0000-0000-C000-000000000046}' GUID_PSETID_COMMON = '{00062008-0000-0000-C000-000000000046}' GUID_PSETID_ADDRESS = '{00062004-0000-0000-C000-000000000046}' GUID_PS_INTERNET_HEADERS = '{00020386-0000-0000-C000-000000000046}' GUID_PSETID_APPOINTMENT = '{00062002-0000-0000-C000-000000000046}' GUID_PSETID_MEETING = '{6ED8DA90-450B-101B-98DA-00AA003F1305}' GUID_PSETID_LOG = '{0006200A-0000-0000-C000-000000000046}' GUID_PSETID_MESSAGING = '{41F28F13-83F4-4114-A584-EEDB5A6B0BFF}' GUID_PSETID_NOTE = '{0006200E-0000-0000-C000-000000000046}' GUID_PSETID_POSTRSS = '{00062041-0000-0000-C000-000000000046}' GUID_PSETID_TASK = '{00062003-0000-0000-C000-000000000046}' GUID_PSETID_UNIFIEDMESSAGING = '{4442858E-A9E3-4E80-B900-317A210CC15B}' GUID_PSETID_AIRSYNC = '{71035549-0739-4DCB-9163-00F0580DBBDF}' GUID_PSETID_SHARING = '{00062040-0000-0000-C000-000000000046}' GUID_PSETID_XMLEXTRACTEDENTITIES = '{23239608-685D-4732-9C55-4C95CB4E8E33}' GUID_PSETID_ATTACHMENT = '{96357F7F-59E1-47D0-99A7-46515C183B54}' FIXED_LENGTH_PROPS = ( 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x000A, 0x000B, 0x0014, 0x0040, 0x0048, ) FIXED_LENGTH_PROPS_STRING = ( '0000', '0001', '0002', '0003', '0004', '0005', '0006', '0007', '000A', '000B', '0014', '0040', '0048', ) VARIABLE_LENGTH_PROPS = ( 0x000D, 0x001E, 0x001F, 0x00FB, 0x00FD, 0x00FE, 0X0102, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1014, 0x101E, 0x101F, 0x1040, 0x1048, 0x1102, ) VARIABLE_LENGTH_PROPS_STRING = ( '000D', '001E', '001F', '00FB', '00FD', '00FE', '0102', '1002', '1003', '1004', '1005', '1006', '1007', '1014', '101E', '101F', '1040', '1048', '1102', ) CODE_PAGES = { 37: 'IBM037', # IBM EBCDIC US-Canada 437: 'IBM437', # OEM United States 500: 'IBM500', # IBM EBCDIC International 708: 'ASMO-708', # Arabic (ASMO 708) 709: '', # Arabic (ASMO-449+, BCON V4) 710: '', # Arabic - Transparent Arabic 720: 'DOS-720', # Arabic (Transparent ASMO); Arabic (DOS) 737: 'ibm737', # OEM Greek (formerly 437G); Greek (DOS) 775: 'ibm775', # OEM Baltic; Baltic (DOS) 850: 'ibm850', # OEM Multilingual Latin 1; Western European (DOS) 852: 'ibm852', # OEM Latin 2; Central European (DOS) 855: 'IBM855', # OEM Cyrillic (primarily Russian) 857: 'ibm857', # OEM Turkish; Turkish (DOS) 858: 'IBM00858', # OEM Multilingual Latin 1 + Euro symbol 860: 'IBM860', # OEM Portuguese; Portuguese (DOS) 861: 'ibm861', # OEM Icelandic; Icelandic (DOS) 862: 'DOS-862', # OEM Hebrew; Hebrew (DOS) 863: 'IBM863', # OEM French Canadian; French Canadian (DOS) 864: 'IBM864', # OEM Arabic; Arabic (864) 865: 'IBM865', # OEM Nordic; Nordic (DOS) 866: 'cp866', # OEM Russian; Cyrillic (DOS) 869: 'ibm869', # OEM Modern Greek; Greek, Modern (DOS) 870: 'IBM870', # IBM EBCDIC Multilingual/ROECE (Latin 2); IBM EBCDIC Multilingual Latin 2 874: 'windows-874', # ANSI/OEM Thai (ISO 8859-11); Thai (Windows) 875: 'cp875', # IBM EBCDIC Greek Modern 932: 'shift_jis', # ANSI/OEM Japanese; Japanese (Shift-JIS) 936: 'gb2312', # ANSI/OEM Simplified Chinese (PRC, Singapore); Chinese Simplified (GB2312) 949: 'ks_c_5601-1987', # ANSI/OEM Korean (Unified Hangul Code) 950: 'big5', # ANSI/OEM Traditional Chinese (Taiwan; Hong Kong SAR, PRC); Chinese Traditional (Big5) 1026: 'IBM1026', # IBM EBCDIC Turkish (Latin 5) 1047: 'IBM01047', # IBM EBCDIC Latin 1/Open System 1140: 'IBM01140', # IBM EBCDIC US-Canada (037 + Euro symbol); IBM EBCDIC (US-Canada-Euro) 1141: 'IBM01141', # IBM EBCDIC Germany (20273 + Euro symbol); IBM EBCDIC (Germany-Euro) 1142: 'IBM01142', # IBM EBCDIC Denmark-Norway (20277 + Euro symbol); IBM EBCDIC (Denmark-Norway-Euro) 1143: 'IBM01143', # IBM EBCDIC Finland-Sweden (20278 + Euro symbol); IBM EBCDIC (Finland-Sweden-Euro) 1144: 'IBM01144', # IBM EBCDIC Italy (20280 + Euro symbol); IBM EBCDIC (Italy-Euro) 1145: 'IBM01145', # IBM EBCDIC Latin America-Spain (20284 + Euro symbol); IBM EBCDIC (Spain-Euro) 1146: 'IBM01146', # IBM EBCDIC United Kingdom (20285 + Euro symbol); IBM EBCDIC (UK-Euro) 1147: 'IBM01147', # IBM EBCDIC France (20297 + Euro symbol); IBM EBCDIC (France-Euro) 1148: 'IBM01148', # IBM EBCDIC International (500 + Euro symbol); IBM EBCDIC (International-Euro) 1149: 'IBM01149', # IBM EBCDIC Icelandic (20871 + Euro symbol); IBM EBCDIC (Icelandic-Euro) 1200: 'utf-16', # Unicode UTF-16, little endian byte order (BMP of ISO 10646); available only to managed applications 1201: 'unicodeFFFE', # Unicode UTF-16, big endian byte order; available only to managed applications 1250: 'windows-1250', # ANSI Central European; Central European (Windows) 1251: 'windows-1251', # ANSI Cyrillic; Cyrillic (Windows) 1252: 'windows-1252', # ANSI Latin 1; Western European (Windows) 1253: 'windows-1253', # ANSI Greek; Greek (Windows) 1254: 'windows-1254', # ANSI Turkish; Turkish (Windows) 1255: 'windows-1255', # ANSI Hebrew; Hebrew (Windows) 1256: 'windows-1256', # ANSI Arabic; Arabic (Windows) 1257: 'windows-1257', # ANSI Baltic; Baltic (Windows) 1258: 'windows-1258', # ANSI/OEM Vietnamese; Vietnamese (Windows) 1361: 'Johab', # Korean (Johab) 10000: 'macintosh', # MAC Roman; Western European (Mac) 10001: 'x-mac-japanese', # Japanese (Mac) 10002: 'x-mac-chinesetrad', # MAC Traditional Chinese (Big5); Chinese Traditional (Mac) 10003: 'x-mac-korean', # Korean (Mac) 10004: 'x-mac-arabic', # Arabic (Mac) 10005: 'x-mac-hebrew', # Hebrew (Mac) 10006: 'x-mac-greek', # Greek (Mac) 10007: 'x-mac-cyrillic', # Cyrillic (Mac) 10008: 'x-mac-chinesesimp', # MAC Simplified Chinese (GB 2312); Chinese Simplified (Mac) 10010: 'x-mac-romanian', # Romanian (Mac) 10017: 'x-mac-ukrainian', # Ukrainian (Mac) 10021: 'x-mac-thai', # Thai (Mac) 10029: 'x-mac-ce', # MAC Latin 2; Central European (Mac) 10079: 'x-mac-icelandic', # Icelandic (Mac) 10081: 'x-mac-turkish', # Turkish (Mac) 10082: 'x-mac-croatian', # Croatian (Mac) 12000: 'utf-32', # Unicode UTF-32, little endian byte order; available only to managed applications 12001: 'utf-32BE', # Unicode UTF-32, big endian byte order; available only to managed applications 20000: 'x-Chinese_CNS', # CNS Taiwan; Chinese Traditional (CNS) 20001: 'x-cp20001', # TCA Taiwan 20002: 'x_Chinese-Eten', # Eten Taiwan; Chinese Traditional (Eten) 20003: 'x-cp20003', # IBM5550 Taiwan 20004: 'x-cp20004', # TeleText Taiwan 20005: 'x-cp20005', # Wang Taiwan 20105: 'x-IA5', # IA5 (IRV International Alphabet No. 5, 7-bit); Western European (IA5) 20106: 'x-IA5-German', # IA5 German (7-bit) 20107: 'x-IA5-Swedish', # IA5 Swedish (7-bit) 20108: 'x-IA5-Norwegian', # IA5 Norwegian (7-bit) 20127: 'us-ascii', # US-ASCII (7-bit) 20261: 'x-cp20261', # T.61 20269: 'x-cp20269', # ISO 6937 Non-Spacing Accent 20273: 'IBM273', # IBM EBCDIC Germany 20277: 'IBM277', # IBM EBCDIC Denmark-Norway 20278: 'IBM278', # IBM EBCDIC Finland-Sweden 20280: 'IBM280', # IBM EBCDIC Italy 20284: 'IBM284', # IBM EBCDIC Latin America-Spain 20285: 'IBM285', # IBM EBCDIC United Kingdom 20290: 'IBM290', # IBM EBCDIC Japanese Katakana Extended 20297: 'IBM297', # IBM EBCDIC France 20420: 'IBM420', # IBM EBCDIC Arabic 20423: 'IBM423', # IBM EBCDIC Greek 20424: 'IBM424', # IBM EBCDIC Hebrew 20833: 'x-EBCDIC-KoreanExtended', # IBM EBCDIC Korean Extended 20838: 'IBM-Thai', # IBM EBCDIC Thai 20866: 'koi8-r', # Russian (KOI8-R); Cyrillic (KOI8-R) 20871: 'IBM871', # IBM EBCDIC Icelandic 20880: 'IBM880', # IBM EBCDIC Cyrillic Russian 20905: 'IBM905', # IBM EBCDIC Turkish 20924: 'IBM00924', # IBM EBCDIC Latin 1/Open System (1047 + Euro symbol) 20932: 'EUC-JP', # Japanese (JIS 0208-1990 and 0212-1990) 20936: 'x-cp20936', # Simplified Chinese (GB2312); Chinese Simplified (GB2312-80) 20949: 'x-cp20949', # Korean Wansung 21025: 'cp1025', # IBM EBCDIC Cyrillic Serbian-Bulgarian 21027: '', # (deprecated) 21866: 'koi8-u', # Ukrainian (KOI8-U); Cyrillic (KOI8-U) 28591: 'iso-8859-1', # ISO 8859-1 Latin 1; Western European (ISO) 28592: 'iso-8859-2', # ISO 8859-2 Central European; Central European (ISO) 28593: 'iso-8859-3', # ISO 8859-3 Latin 3 28594: 'iso-8859-4', # ISO 8859-4 Baltic 28595: 'iso-8859-5', # ISO 8859-5 Cyrillic 28596: 'iso-8859-6', # ISO 8859-6 Arabic 28597: 'iso-8859-7', # ISO 8859-7 Greek 28598: 'iso-8859-8', # ISO 8859-8 Hebrew; Hebrew (ISO-Visual) 28599: 'iso-8859-9', # ISO 8859-9 Turkish 28603: 'iso-8859-13', # ISO 8859-13 Estonian 28605: 'iso-8859-15', # ISO 8859-15 Latin 9 29001: 'x-Europa', # Europa 3 38598: 'iso-8859-8-i', # ISO 8859-8 Hebrew; Hebrew (ISO-Logical) 50220: 'iso-2022-jp', # ISO 2022 Japanese with no halfwidth Katakana; Japanese (JIS) 50221: 'csISO2022JP', # ISO 2022 Japanese with halfwidth Katakana; Japanese (JIS-Allow 1 byte Kana) 50222: 'iso-2022-jp', # ISO 2022 Japanese JIS X 0201-1989; Japanese (JIS-Allow 1 byte Kana - SO/SI) 50225: 'iso-2022-kr', # ISO 2022 Korean 50227: 'x-cp50227', # ISO 2022 Simplified Chinese; Chinese Simplified (ISO 2022) 50229: '', # ISO 2022 Traditional Chinese 50930: '', # EBCDIC Japanese (Katakana) Extended 50931: '', # EBCDIC US-Canada and Japanese 50933: '', # EBCDIC Korean Extended and Korean 50935: '', # EBCDIC Simplified Chinese Extended and Simplified Chinese 50936: '', # EBCDIC Simplified Chinese 50937: '', # EBCDIC US-Canada and Traditional Chinese 50939: '', # EBCDIC Japanese (Latin) Extended and Japanese 51932: 'euc-jp', # EUC Japanese 51936: 'EUC-CN', # EUC Simplified Chinese; Chinese Simplified (EUC) 51949: 'euc-kr', # EUC Korean 51950: '', # EUC Traditional Chinese 52936: 'hz-gb-2312', # HZ-GB2312 Simplified Chinese; Chinese Simplified (HZ) 54936: 'GB18030', # Windows XP and later: GB18030 Simplified Chinese (4 byte); Chinese Simplified (GB18030) 57002: 'x-iscii-de', # ISCII Devanagari 57003: 'x-iscii-be', # ISCII Bangla 57004: 'x-iscii-ta', # ISCII Tamil 57005: 'x-iscii-te', # ISCII Telugu 57006: 'x-iscii-as', # ISCII Assamese 57007: 'x-iscii-or', # ISCII Odia 57008: 'x-iscii-ka', # ISCII Kannada 57009: 'x-iscii-ma', # ISCII Malayalam 57010: 'x-iscii-gu', # ISCII Gujarati 57011: 'x-iscii-pa', # ISCII Punjabi 65000: 'utf-7', # Unicode (UTF-7) 65001: 'utf-8', # Unicode (UTF-8) } INTELLIGENCE_DUMB = 0 INTELLIGENCE_SMART = 1 INTELLIGENCE_TUPLE = ( 'INTELLIGENCE_DUMB', 'INTELLIGENCE_SMART', ) TYPE_MESSAGE = 0 TYPE_MESSAGE_EMBED = 1 TYPE_ATTACHMENT = 2 TYPE_RECIPIENT = 3 TYPE_TUPLE = ( 'TYPE_MESSAGE', 'TYPE_MESSAGE_EMBED', 'TYPE_ATTACHMENT', 'TYPE_RECIPIENT', ) RECIPIENT_SENDER = 0 RECIPIENT_TO = 1 RECIPIENT_CC = 2 RECIPIENT_BCC = 3 RECIPIENT_TUPLE = ( 'RECIPIENT_SENDER', 'RECIPIENT_TO', 'RECIPIENT_CC', 'RECIPIENT_BCC', ) # PidTagImportance IMPORTANCE_LOW = 0 IMPORTANCE_MEDIUM = 1 IMPORTANCE_HIGH = 2 IMPORTANCE_TUPLE = ( 'IMPORTANCE_LOW', 'IMPORTANCE_MEDIUM', 'IMPORTANCE_HIGH', ) # PidTagSensitivity SENSITIVITY_NORMAL = 0 SENSITIVITY_PERSONAL = 1 SENSITIVITY_PRIVATE = 2 SENSITIVITY_CONFIDENTIAL = 3 SENSITIVITY_TUPLE = ( 'SENSITIVITY_NORMAL', 'SENSITIVITY_PERSONAL', 'SENSITIVITY_PRIVATE', 'SENSITIVITY_CONFIDENTIAL', ) # PidTagPriority PRIORITY_URGENT = 0x00000001 PRIORITY_NORMAL = 0x00000000 PRIORITY_NOT_URGENT = 0xFFFFFFFF PYTPFLOATINGTIME_START = datetime.datetime(1899, 12, 30) # Constants used for argparse stuff KNOWN_FILE_FLAGS = [ '--out-name', ] NEEDS_ARG = [ '--out-name', ] MAINDOC = "extract_msg:\n\tExtracts emails and attachments saved in Microsoft Outlook's .msg files.\n\n" \ "https://github.com/mattgwwalker/msg-extractor" # Define pre-compiled structs to make unpacking slightly faster # General structs ST1 = struct.Struct('<8x4I') ST2 = struct.Struct('<H2xI8x') ST3 = struct.Struct('<Q') # Structs used by named.py STNP_NAM = struct.Struct('<i') STNP_ENT = struct.Struct('<IHH') # Struct used for unpacking the entries in the entry stream # Structs used by prop.py STFIX = struct.Struct('<8x8s') STVAR = struct.Struct('<8xi4s') # Structs to help with email type to python type conversions STI16 = struct.Struct('<h6x') STI32 = struct.Struct('<I4x') STI64 = struct.Struct('<q') STF32 = struct.Struct('<f4x') STF64 = struct.Struct('<d') STUI32 = struct.Struct('<I4x') STMI16 = struct.Struct('<h') STMI32 = struct.Struct('<i') STMI64 = struct.Struct('<q') STMF32 = struct.Struct('<f') STMF64 = struct.Struct('<d') # PermanentEntryID parsing struct STPEID = struct.Struct('<B3x16s4xI') PTYPES = { 0x0000: 'PtypUnspecified', 0x0001: 'PtypNull', 0x0002: 'PtypInteger16', # Signed short 0x0003: 'PtypInteger32', # Signed int 0x0004: 'PtypFloating32', # Float 0x0005: 'PtypFloating64', # Double 0x0006: 'PtypCurrency', 0x0007: 'PtypFloatingTime', 0x000A: 'PtypErrorCode', 0x000B: 'PtypBoolean', 0x000D: 'PtypObject/PtypEmbeddedTable/Storage', 0x0014: 'PtypInteger64', # Signed longlong 0x001E: 'PtypString8', 0x001F: 'PtypString', 0x0040: 'PtypTime', # Use msgEpoch to convert to unix time stamp 0x0048: 'PtypGuid', 0x00FB: 'PtypServerId', 0x00FD: 'PtypRestriction', 0x00FE: 'PtypRuleAction', 0x0102: 'PtypBinary', 0x1002: 'PtypMultipleInteger16', 0x1003: 'PtypMultipleInteger32', 0x1004: 'PtypMultipleFloating32', 0x1005: 'PtypMultipleFloating64', 0x1006: 'PtypMultipleCurrency', 0x1007: 'PtypMultipleFloatingTime', 0x1014: 'PtypMultipleInteger64', 0x101E: 'PtypMultipleString8', 0x101F: 'PtypMultipleString', 0x1040: 'PtypMultipleTime', 0x1048: 'PtypMultipleGuid', 0x1102: 'PtypMultipleBinary', } # Display types DT_MAILUSER = 0x0000 DT_DISTLIST = 0x0001 DT_FORUM = 0x0002 DT_AGENT = 0x0003 DT_ORGANIZATION = 0x0004 DT_PRIVATE_DISTLIST = 0x0005 DT_REMOTE_MAILUSER = 0x0006 DT_CONTAINER = 0x0100 DT_TEMPLATE = 0x0101 DT_ADDRESS_TEMPLATE = 0x0102 DT_SEARCH = 0x0200 # Rule action types RA_OP_MOVE = 0x01 RA_OP_COPY = 0x02 RA_OP_REPLY = 0x03 RA_OP_OOF_REPLY = 0x04 RA_OP_DEFER_ACTION = 0x05 RA_OP_BOUNCE = 0x06 RA_OP_FORWARD = 0x07 RA_OP_DELEGATE = 0x08 RA_OP_TAG = 0x09 RA_OP_DELETE = 0x0A RA_OP_MARK_AS_READ = 0x0B # Recipiet Row Types RR_NOTYPE = 0x0 RR_X500DN = 0x1 RR_MSMAIL = 0x2 RR_SMTP = 0x3 RR_FAX = 0x4 RR_PROFESSIONALOFFICESYSTEM = 0x5 RR_PERSONALDESTRIBUTIONLIST1 = 0x6 RR_PERSONALDESTRIBUTIONLIST2 = 0x7 # This property information was sourced from # http://www.fileformat.info/format/outlookmsg/index.htm # on 2013-07-22. # It was extended by The Elemental of Destruction on 2018-10-12 PROPERTIES = { '00010102': 'Template data', '0002000B': 'Alternate recipient allowed', '0004001F': 'Auto forward comment', '00040102': 'Script data', '0005000B': 'Auto forwarded', '000F000F': 'Deferred delivery time', '00100040': 'Deliver time', '00150040': 'Expiry time', '00170003': 'Importance', '001A001F': 'Message class', '0023001F': 'Originator delivery report requested', '00250102': 'Parent key', '00260003': 'Priority', '0029000B': 'Read receipt requested', '002A0040': 'Receipt time', '002B000B': 'Recipient reassignment prohibited', '002E0003': 'Original sensitivity', '00300040': 'Reply time', '00310102': 'Report tag', '00320040': 'Report time', '00360003': 'Sensitivity', '0037001F': 'Subject', '00390040': 'Client Submit Time', '003A001F': '', '003B0102': '', '003D001F': 'Subject prefix', '003F0102': '', '0040001F': 'Received by name', '00410102': '', '0042001F': 'Sent repr name', '00430102': '', '0044001F': 'Rcvd repr name', '00450102': '', '0046001F': '', '00470102': '', '0049001F': '', '004B001F': '', '004C0102': '', '004D001F': 'Org author name', '004E0040': '', '004F0102': '', '0050001F': 'Reply rcipnt names', '00510102': '', '00520102': '', '00530102': '', '00540102': '', '00550040': '', '0057000B': '', '0058000B': '', '0059000B': '', '005A001F': 'Org sender name', '005B0102': '', '005C0102': '', '005D001F': '', '005E0102': '', '005F0102': '', '00600040': '', '00610040': '', '00620003': '', '0063000B': '', '0064001F': 'Sent repr adrtype', '0065001F': 'Sent repr email', '0066001F': '', '00670102': '', '0068001F': '', '0069001F': '', '0070001F': 'Topic', '00710102': '', '0072001F': '', '0073001F': '', '0074001F': '', '0075001F': 'Rcvd by adrtype', '0076001F': 'Rcvd by email', '0077001F': 'Repr adrtype', '0078001F': 'Repr email', '007D001F': 'Message header', '007F0102': '', '0080001F': '', '0081001F': '', '08070003': '', '0809001F': '', '0C040003': '', '0C050003': '', '0C06000B': '', '0C08000B': '', '0C150003': '', '0C17000B': '', '0C190102': '', '0C1A001F': 'Sender name', '0C1B001F': '', '0C1D0102': '', '0C1E001F': 'Sender adr type', '0C1F001F': 'Sender email', '0C200003': '', '0C21001F': '', '0E01000B': '', '0E02001F': 'Display BCC', '0E03001F': 'Display CC', '0E04001F': 'Display To', '0E060040': '', '0E070003': '', '0E080003': '', '0E080014': '', '0E090102': '', '0E0F000B': '', '0E12000D': '', '0E13000D': '', '0E170003': '', '0E1B000B': '', '0E1D001F': 'Subject (normalized)', '0E1F000B': '', '0E200003': '', '0E210003': '', '0E28001F': 'Recvd account1 (uncertain)', '0E29001F': 'Recvd account2 (uncertain)', '1000001F': 'Message body', '1008': 'RTF sync body tag', # Where did this come from ??? It's not listed in the docs '10090102': 'Compressed RTF body', '1013001F': 'HTML body', '1035001F': 'Message ID (uncertain)', '1046001F': 'Sender email (uncertain)', '3001001F': 'Display name', '3002001F': 'Address type', '3003001F': 'Email address', '30070040': 'Creation date', '39FE001F': '7-bit email (uncertain)', '39FF001F': '7-bit display name', # Attachments (37xx) '37010102': 'Attachment data', '37020102': '', '3703001F': 'Attachment extension', '3704001F': 'Attachment short filename', '37050003': 'Attachment attach method', '3707001F': 'Attachment long filename', '370E001F': 'Attachment mime tag', '3712001F': 'Attachment ID (uncertain)', # Address book (3Axx): '3A00001F': 'Account', '3A02001F': 'Callback phone no', '3A05001F': 'Generation', '3A06001F': 'Given name', '3A08001F': 'Business phone', '3A09001F': 'Home phone', '3A0A001F': 'Initials', '3A0B001F': 'Keyword', '3A0C001F': 'Language', '3A0D001F': 'Location', '3A11001F': 'Surname', '3A15001F': 'Postal address', '3A16001F': 'Company name', '3A17001F': 'Title', '3A18001F': 'Department', '3A19001F': 'Office location', '3A1A001F': 'Primary phone', '3A1B101F': 'Business phone 2', '3A1C001F': 'Mobile phone', '3A1D001F': 'Radio phone no', '3A1E001F': 'Car phone no', '3A1F001F': 'Other phone', '3A20001F': 'Transmit dispname', '3A21001F': 'Pager', '3A220102': 'User certificate', '3A23001F': 'Primary Fax', '3A24001F': 'Business Fax', '3A25001F': 'Home Fax', '3A26001F': 'Country', '3A27001F': 'Locality', '3A28001F': 'State/Province', '3A29001F': 'Street address', '3A2A001F': 'Postal Code', '3A2B001F': 'Post Office Box', '3A2C001F': 'Telex', '3A2D001F': 'ISDN', '3A2E001F': 'Assistant phone', '3A2F001F': 'Home phone 2', '3A30001F': 'Assistant', '3A44001F': 'Middle name', '3A45001F': 'Dispname prefix', '3A46001F': 'Profession', '3A47001F': '', '3A48001F': 'Spouse name', '3A4B001F': 'TTYTTD radio phone', '3A4C001F': 'FTP site', '3A4E001F': 'Manager name', '3A4F001F': 'Nickname', '3A51001F': 'Business homepage', '3A57001F': 'Company main phone', '3A58101F': 'Childrens names', '3A59001F': 'Home City', '3A5A001F': 'Home Country', '3A5B001F': 'Home Postal Code', '3A5C001F': 'Home State/Provnce', '3A5D001F': 'Home Street', '3A5F001F': 'Other adr City', '3A60': 'Other adr Country', '3A61': 'Other adr PostCode', '3A62': 'Other adr Province', '3A63': 'Other adr Street', '3A64': 'Other adr PO box', '3FF7': 'Server (uncertain)', '3FF8': 'Creator1 (uncertain)', '3FFA': 'Creator2 (uncertain)', '3FFC': 'To email (uncertain)', '403D': 'To adrtype (uncertain)', '403E': 'To email (uncertain)', '5FF6': 'To (uncertain)', } # END CONSTANTS def int_to_data_type(integer): """ Returns the name of the data type constant that has the value of :param integer: """ return TYPE_TUPLE[integer] def int_to_intelligence(integer): """ Returns the name of the intelligence level constant that has the value of :param integer: """ return INTELLIGENCE_TUPLE[integer] def int_to_recipient_type(integer): """ Returns the name of the recipient type constant that has the value of :param integer: """ return RECIPIENT_TUPLE[integer]
The Food and Drug Administration (FDA) is recommending against the routine use of bone graft substitutes containing recombinant proteins or synthetic peptides in patients <18 years old. The FDA has received reports of serious injuries including excess bone growth, fluid accumulation, inhibited bone healing, and swelling. These adverse events are similar to those observed in patients >18 years old but they are more concerning in this age group because of their overall smaller size and bones that are still growing. If the bone grafts are implanted near open growth plates, there is potential for negative bone formation and growth. Manufacturers are required to submit premarket approval application (PMA) with clinical data supporting the safety and efficacy of these Class III high-risk medical devices. However, the FDA has not reviewed or approved the safety and efficacy in patients <18 years old. The FDA recommends first considering the use of alternatives such as autograft bone, allograft bone, and bone graft substitutes that do not contain recombinant proteins or synthetic peptides in patients <18 years old. If bone graft substitutes that contain recombinant proteins or synthetic peptides are considered the best or only option, then parents/guardians and patients should be informed about the risks and benefits of using such products. Patients <18 years old should be monitored closely and be referred for appropriate corrective treatment if necessary.
# pylint: disable=wildcard-import,unused-wildcard-import,missing-docstring # pylint: disable=undefined-variable,line-too-long,invalid-name from __future__ import unicode_literals from nose.tools import * from dear_astrid.formatter import * from dear_astrid.parser import * from dear_astrid.test.helpers import * # shortcut def one_task(fragment): return parse_xml( '<astrid format="2">{0}</astrid>'.format(fragment) )[0] class TestFormatXML(TestCase): # pylint: disable=too-many-public-methods,no-member def assert_task_parses(self, xml, exp): with timezone('UTC'): self.assert_equal(one_task(xml), exp) def assert_round_trip(self, task): xml = format_task(task) tags = ['astrid'] tags.extend(task['tags']) task['tags'] = tags self.assert_task_parses(xml, task) def test_round_trip(self): self.assert_round_trip({ 'title': 'squid', 'priority': 2, 'due_date': dtu(2014, 5, 10, 19, 0, 0, 402000), 'recurrence': None, 'repeat_until': None, 'completed': None, 'deleted': None, 'estimated': 0, 'elapsed': 0, 'tags': [], 'notes': None, }) self.assert_round_trip({ 'title': 'squidly', 'priority': 3, 'due_date': dtu(2014, 5, 10, 19, 0, 0, 402000), 'recurrence': {"FREQ": "DAILY", "INTERVAL": 12}, 'repeat_until': None, 'completed': dtu(2014, 6, 10, 19, 0, 0, 402000), 'deleted': None, 'estimated': 0, 'elapsed': 0, 'tags': ["taggy"], 'notes': "foo", })
TripIt has long dabbled in the “automagical,” and our latest update is no different. Starting today, TripIt’s iPhone app introduces a brand-new card view, which displays the most relevant trip information right when you need it. Heading to the airport? Open the app, and TripIt will show you flight and gate details to guide you onto the plane. Landed at your destination? Hotel directions and rental car pick-up information now appear to make your arrival a seamless one. TripIt now anticipates what information you need to access and shows it upfront, making our mobile travel companion even more helpful to those on the go. TripIt’s upgraded design is flatter and card-centric. Now, to review your upcoming trip items, you swipe fluidly between cards. Want to jump back to the main TripIt dashboard? No problem, it’s just one click away. We also made Google Maps our primary source for maps and directions, and TripIt Pro members will enjoy streamlined, easier-to-read notifications in the alerts center. Be sure to check out our favorite, fun new feature – the clouds we’ve tucked in the background gently drift anytime your device moves, thanks to the iPhone’s gyroscope. What info do you find most important to have at hand when traveling?
from django.shortcuts import render_to_response from django.template import RequestContext import pdb from openpds.visualization.internal import getInternalDataStore from openpds.core.models import Profile, FluQuestions, ProfileStartEnd, FB_Connection, Emoji, Emoji2, emoji_choices, QuestionInstance, QuestionType, FirebaseToken, IPReferral import facebook import json, datetime, time, re, math, pytz from django.http import HttpResponse, Http404, HttpResponseRedirect, HttpResponseForbidden from django.core.urlresolvers import reverse from django.shortcuts import get_object_or_404 from calendar import monthrange from openpds.questions.tasks import checkForProfileReferral from django.views.decorators.cache import cache_page from pymongo import Connection import random from django.conf import settings from django.utils import timezone def dupEmojis(request): for e in Emoji.objects.all(): Emoji2.objects.create(profile=e.profile, emoji=e.emoji, created=e.created, lat=e.lat, lng=e.lng) return HttpResponse("success") def getLength(request): connection = Connection( host=random.choice(getattr(settings, "MONGODB_HOST", None)), port=getattr(settings, "MONGODB_PORT", None), readPreference='nearest' ) for p in Profile.objects.all(): pse = ProfileStartEnd.objects.filter(profile=p) if len(pse) == 0: dbName = p.getDBName().strip() try: db = connection[dbName] collection = db["funf"] try: count = db.command("collstats", "funf")["count"] if count == 0: ProfileStartEnd.objects.create(profile=p) else: start = collection.find({"key": {"$ne": "edu.mit.media.funf.probe.builtin.WifiProbe"}}).sort("time", 1)[0]["time"] end = collection.find({"key": {"$ne": "edu.mit.media.funf.probe.builtin.WifiProbe"}}).sort("time", -1)[0]["time"] # TODO NExt time don't use wifiProbe timestamps (or SMS) it's unreliable - probably ActivityProbe or screenProbe # also must check to make sure the start time is after profile.created days = end - start days = int(days / 60.0 / 60.0/ 24.0) ProfileStartEnd.objects.create(profile=p, start=datetime.datetime.fromtimestamp(start), end=datetime.datetime.fromtimestamp(end), days=days) except: pass except: pass connection.close() return HttpResponse("success") def removeEmptyPSE(request): pses = ProfileStartEnd.objects.filter(start__isnull=True) | ProfileStartEnd.objects.filter(end__isnull=True) def getEmoji(n): for x in EMOJI_PERCENTAGE_CUMULATIVE: if n < x[1]: return x[0] return 'h' EMOJI_PERCENTAGE = { 'h': .16, # 'healthy': 's': .08, # 'sick': 'y': .13, # 'sleepy': 'c': .05, # 'cough': 'f': .05, # 'fever': 'u': .015, # 'flu': 'n': .04, # 'nauseous': 'l': .04, # 'sore throat': 'r': .08, # 'runnynose': 'b': .01, # 'body ache': 'a': .08, #'calm': 'd': .065, #'down': 'e': .1, #'energized': 'm': .03, #'motivated': 't': .07, #'trouble concentrating': } EMOJI_PERCENTAGE_CUMULATIVE = [ ( 'h', .16, ), # 'healthy', ( 's', .24, ), # 'sick', ( 'y', .37, ), # 'sleepy', ( 'c', .42, ), # 'cough', ( 'f', .47, ), # 'fever', ( 'u', .485, ), # 'flu', ( 'n', .525, ), # 'nauseous', ( 'l', .565, ), # 'sore throat', ( 'r', .645, ), # 'runnynose', ( 'b', .655, ), # 'body ache', ( 'a', .735, ), #'calm', ( 'd', .8, ), #'down', ( 'e', .9, ), #'energized', ( 'm', .93, ), #'motivated', ( 't', 1.0, ), #'trouble concentrating', ] def randEmojis(request): pses = ProfileStartEnd.objects.filter(days__gt=2) totalcreated = 0 for pse in pses: start = pse.start new_start = timezone.now() new_start = new_start.replace(year=2017, month=3, day=1) if new_start > start: start = new_start end = pse.end if start > end: continue randint = random.randint(3,5) count = 1 start = start + datetime.timedelta(days=randint) while start < end: try: emojinum = random.random() emoji = getEmoji(emojinum) #todo Hard - should be some correlation Emoji2.objects.create(profile=pse.profile, created=start, emoji=emoji) totalcreated += 1 randint = random.randint(3,5 + count) count += 1 start = start + datetime.timedelta(days=randint) rmin = random.randint(0, 59) rsec = random.randint(0, 59) msec = random.randint(0, 999999) rhour = random.randint(9, 18) start = start.replace(hour=rhour, minute=rmin, second=rsec, microsecond=msec) except: pass return HttpResponse(str(totalcreated)) def fluQuestionSet(request): ps = Profile.objects.all() for p in ps: if FluQuestions.objects.filter(profile=p).count() == 0: r1 = random.random() < .05 # get flu this season yet r2 = random.random() < .17 # get flu last season r3 = random.random() < .35 # vaccine this season if Emoji2.objects.filter(emoji='u', profile=p).count() > 0: r3 = False FluQuestions.objects.create(profile=p, fluThisSeason=r1, fluLastSeason=r2, vaccineThisSeason=r3) return HttpResponse("Success") def csv(request): response = "" for e in Emoji2.objects.all(): response += "%s,%s,%s,%s,%s,%s<br/>" % (str(e.pk), str(e.profile.pk), e.emoji, str(e.created), "", "") return HttpResponse(response)
We offer one piecemonolithic lined Jars and Ball mills, Carbon Steel lined, Stainless counters, automatic timers, electric brakes, special height pedestals. Aug 11, 2017 These particles have special properties because of their size, as their surface Top-Down Method: Production of Nanoparticles with Ball Mills. Ball Milling in Organic Synthesis: Solutions and Challanges. Achim Stolle The chemistry in ball mills is not restricted to a special type of chemical synthesis, if. Dec 16, 2016 MSE Supplies is excited to announce the new compact ball mills that fit in a glove box, and the jar Special Offer: $4265 USD Original $5200. PATTERSON 40" 1.22m dia. x 50" 1.52m long Jacketed Steel Ball Mill for This Mill has a special polished inside finish for a pharmaceutical application. Dry grinding. AEROFALL mills. Ball mills. Rod mills. Separators. Dryers. POLYCOM® Industrial Solutions equips the mills with special corrosion preven-.
""" Helper for generating the .travis.yml file for """ __copyright__ = """ Copyright (C) 2016 Potential Ventures Ltd This file is part of theopencorps <https://github.com/theopencorps/theopencorps/> """ __license__ = """ This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import os from jinja2 import Environment, FileSystemLoader # Annoyingly GAE is jinja 2.6 which doesn't support lstrip_blocks=True _env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True) class TravisYML(object): """ Convenience wrapper for our Travis YML generation """ def __init__(self, *args, **kwargs): for name, value in kwargs.iteritems(): setattr(self, name, value) def render(self): _template = _env.get_template('travis.yml.tpl') return _template.render(**self.__dict__)
Originally from Baltimore, Richard DeCore moved to coastal Delaware in 2014 and currently makes his home in the "Nation's Summer Capital" of Rehoboth Beach. He obtained his real estate license in 2016 and began his career with the Oldfather Group in early October, 2017. He specializes in residential real estate sales in Delaware's coastal region and takes great pride in helping current and new residents of southern Delaware achieve their own personal part of the American Dream. Call Richard today at 302-260-2000 and see what he can do for you.
"""WebSocket protocol versions 13 and 8.""" import asyncio import collections import json import random import re import sys import zlib from enum import IntEnum from struct import Struct from typing import Any, Callable, List, Optional, Tuple, Union from .base_protocol import BaseProtocol from .helpers import NO_EXTENSIONS from .log import ws_logger from .streams import DataQueue __all__ = ('WS_CLOSED_MESSAGE', 'WS_CLOSING_MESSAGE', 'WS_KEY', 'WebSocketReader', 'WebSocketWriter', 'WSMessage', 'WebSocketError', 'WSMsgType', 'WSCloseCode') class WSCloseCode(IntEnum): OK = 1000 GOING_AWAY = 1001 PROTOCOL_ERROR = 1002 UNSUPPORTED_DATA = 1003 INVALID_TEXT = 1007 POLICY_VIOLATION = 1008 MESSAGE_TOO_BIG = 1009 MANDATORY_EXTENSION = 1010 INTERNAL_ERROR = 1011 SERVICE_RESTART = 1012 TRY_AGAIN_LATER = 1013 ALLOWED_CLOSE_CODES = {int(i) for i in WSCloseCode} class WSMsgType(IntEnum): # websocket spec types CONTINUATION = 0x0 TEXT = 0x1 BINARY = 0x2 PING = 0x9 PONG = 0xa CLOSE = 0x8 # aiohttp specific types CLOSING = 0x100 CLOSED = 0x101 ERROR = 0x102 text = TEXT binary = BINARY ping = PING pong = PONG close = CLOSE closing = CLOSING closed = CLOSED error = ERROR WS_KEY = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11' UNPACK_LEN2 = Struct('!H').unpack_from UNPACK_LEN3 = Struct('!Q').unpack_from UNPACK_CLOSE_CODE = Struct('!H').unpack PACK_LEN1 = Struct('!BB').pack PACK_LEN2 = Struct('!BBH').pack PACK_LEN3 = Struct('!BBQ').pack PACK_CLOSE_CODE = Struct('!H').pack MSG_SIZE = 2 ** 14 DEFAULT_LIMIT = 2 ** 16 _WSMessageBase = collections.namedtuple('_WSMessageBase', ['type', 'data', 'extra']) class WSMessage(_WSMessageBase): def json(self, *, # type: ignore loads: Callable[[Any], Any]=json.loads) -> None: """Return parsed JSON data. .. versionadded:: 0.22 """ return loads(self.data) WS_CLOSED_MESSAGE = WSMessage(WSMsgType.CLOSED, None, None) WS_CLOSING_MESSAGE = WSMessage(WSMsgType.CLOSING, None, None) class WebSocketError(Exception): """WebSocket protocol parser error.""" def __init__(self, code: int, message: str) -> None: self.code = code super().__init__(message) class WSHandshakeError(Exception): """WebSocket protocol handshake error.""" native_byteorder = sys.byteorder # Used by _websocket_mask_python _XOR_TABLE = [bytes(a ^ b for a in range(256)) for b in range(256)] def _websocket_mask_python(mask: bytes, data: bytearray) -> None: """Websocket masking function. `mask` is a `bytes` object of length 4; `data` is a `bytearray` object of any length. The contents of `data` are masked with `mask`, as specified in section 5.3 of RFC 6455. Note that this function mutates the `data` argument. This pure-python implementation may be replaced by an optimized version when available. """ assert isinstance(data, bytearray), data assert len(mask) == 4, mask if data: a, b, c, d = (_XOR_TABLE[n] for n in mask) data[::4] = data[::4].translate(a) data[1::4] = data[1::4].translate(b) data[2::4] = data[2::4].translate(c) data[3::4] = data[3::4].translate(d) if NO_EXTENSIONS: # pragma: no cover _websocket_mask = _websocket_mask_python else: try: from ._websocket import _websocket_mask_cython # type: ignore _websocket_mask = _websocket_mask_cython except ImportError: # pragma: no cover _websocket_mask = _websocket_mask_python _WS_DEFLATE_TRAILING = bytes([0x00, 0x00, 0xff, 0xff]) _WS_EXT_RE = re.compile(r'^(?:;\s*(?:' r'(server_no_context_takeover)|' r'(client_no_context_takeover)|' r'(server_max_window_bits(?:=(\d+))?)|' r'(client_max_window_bits(?:=(\d+))?)))*$') _WS_EXT_RE_SPLIT = re.compile(r'permessage-deflate([^,]+)?') def ws_ext_parse(extstr: str, isserver: bool=False) -> Tuple[int, bool]: if not extstr: return 0, False compress = 0 notakeover = False for ext in _WS_EXT_RE_SPLIT.finditer(extstr): defext = ext.group(1) # Return compress = 15 when get `permessage-deflate` if not defext: compress = 15 break match = _WS_EXT_RE.match(defext) if match: compress = 15 if isserver: # Server never fail to detect compress handshake. # Server does not need to send max wbit to client if match.group(4): compress = int(match.group(4)) # Group3 must match if group4 matches # Compress wbit 8 does not support in zlib # If compress level not support, # CONTINUE to next extension if compress > 15 or compress < 9: compress = 0 continue if match.group(1): notakeover = True # Ignore regex group 5 & 6 for client_max_window_bits break else: if match.group(6): compress = int(match.group(6)) # Group5 must match if group6 matches # Compress wbit 8 does not support in zlib # If compress level not support, # FAIL the parse progress if compress > 15 or compress < 9: raise WSHandshakeError('Invalid window size') if match.group(2): notakeover = True # Ignore regex group 5 & 6 for client_max_window_bits break # Return Fail if client side and not match elif not isserver: raise WSHandshakeError('Extension for deflate not supported' + ext.group(1)) return compress, notakeover def ws_ext_gen(compress: int=15, isserver: bool=False, server_notakeover: bool=False) -> str: # client_notakeover=False not used for server # compress wbit 8 does not support in zlib if compress < 9 or compress > 15: raise ValueError('Compress wbits must between 9 and 15, ' 'zlib does not support wbits=8') enabledext = ['permessage-deflate'] if not isserver: enabledext.append('client_max_window_bits') if compress < 15: enabledext.append('server_max_window_bits=' + str(compress)) if server_notakeover: enabledext.append('server_no_context_takeover') # if client_notakeover: # enabledext.append('client_no_context_takeover') return '; '.join(enabledext) class WSParserState(IntEnum): READ_HEADER = 1 READ_PAYLOAD_LENGTH = 2 READ_PAYLOAD_MASK = 3 READ_PAYLOAD = 4 class WebSocketReader: def __init__(self, queue: DataQueue[WSMessage], max_msg_size: int, compress: bool=True) -> None: self.queue = queue self._max_msg_size = max_msg_size self._exc = None # type: Optional[BaseException] self._partial = bytearray() self._state = WSParserState.READ_HEADER self._opcode = None # type: Optional[int] self._frame_fin = False self._frame_opcode = None # type: Optional[int] self._frame_payload = bytearray() self._tail = b'' self._has_mask = False self._frame_mask = None # type: Optional[bytes] self._payload_length = 0 self._payload_length_flag = 0 self._compressed = None # type: Optional[bool] self._decompressobj = None # type: Any # zlib.decompressobj actually self._compress = compress def feed_eof(self) -> None: self.queue.feed_eof() def feed_data(self, data: bytes) -> Tuple[bool, bytes]: if self._exc: return True, data try: return self._feed_data(data) except Exception as exc: self._exc = exc self.queue.set_exception(exc) return True, b'' def _feed_data(self, data: bytes) -> Tuple[bool, bytes]: for fin, opcode, payload, compressed in self.parse_frame(data): if compressed and not self._decompressobj: self._decompressobj = zlib.decompressobj(wbits=-zlib.MAX_WBITS) if opcode == WSMsgType.CLOSE: if len(payload) >= 2: close_code = UNPACK_CLOSE_CODE(payload[:2])[0] if (close_code < 3000 and close_code not in ALLOWED_CLOSE_CODES): raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'Invalid close code: {}'.format(close_code)) try: close_message = payload[2:].decode('utf-8') except UnicodeDecodeError as exc: raise WebSocketError( WSCloseCode.INVALID_TEXT, 'Invalid UTF-8 text message') from exc msg = WSMessage(WSMsgType.CLOSE, close_code, close_message) elif payload: raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'Invalid close frame: {} {} {!r}'.format( fin, opcode, payload)) else: msg = WSMessage(WSMsgType.CLOSE, 0, '') self.queue.feed_data(msg, 0) elif opcode == WSMsgType.PING: self.queue.feed_data( WSMessage(WSMsgType.PING, payload, ''), len(payload)) elif opcode == WSMsgType.PONG: self.queue.feed_data( WSMessage(WSMsgType.PONG, payload, ''), len(payload)) elif opcode not in ( WSMsgType.TEXT, WSMsgType.BINARY) and self._opcode is None: raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, "Unexpected opcode={!r}".format(opcode)) else: # load text/binary if not fin: # got partial frame payload if opcode != WSMsgType.CONTINUATION: self._opcode = opcode self._partial.extend(payload) if (self._max_msg_size and len(self._partial) >= self._max_msg_size): raise WebSocketError( WSCloseCode.MESSAGE_TOO_BIG, "Message size {} exceeds limit {}".format( len(self._partial), self._max_msg_size)) else: # previous frame was non finished # we should get continuation opcode if self._partial: if opcode != WSMsgType.CONTINUATION: raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'The opcode in non-fin frame is expected ' 'to be zero, got {!r}'.format(opcode)) if opcode == WSMsgType.CONTINUATION: assert self._opcode is not None opcode = self._opcode self._opcode = None self._partial.extend(payload) if (self._max_msg_size and len(self._partial) >= self._max_msg_size): raise WebSocketError( WSCloseCode.MESSAGE_TOO_BIG, "Message size {} exceeds limit {}".format( len(self._partial), self._max_msg_size)) # Decompress process must to be done after all packets # received. if compressed: self._partial.extend(_WS_DEFLATE_TRAILING) payload_merged = self._decompressobj.decompress( self._partial, self._max_msg_size) if self._decompressobj.unconsumed_tail: left = len(self._decompressobj.unconsumed_tail) raise WebSocketError( WSCloseCode.MESSAGE_TOO_BIG, "Decompressed message size exceeds limit {}". format(self._max_msg_size + left, self._max_msg_size)) else: payload_merged = bytes(self._partial) self._partial.clear() if opcode == WSMsgType.TEXT: try: text = payload_merged.decode('utf-8') self.queue.feed_data( WSMessage(WSMsgType.TEXT, text, ''), len(text)) except UnicodeDecodeError as exc: raise WebSocketError( WSCloseCode.INVALID_TEXT, 'Invalid UTF-8 text message') from exc else: self.queue.feed_data( WSMessage(WSMsgType.BINARY, payload_merged, ''), len(payload_merged)) return False, b'' def parse_frame(self, buf: bytes) -> List[Tuple[bool, Optional[int], bytearray, Optional[bool]]]: """Return the next frame from the socket.""" frames = [] if self._tail: buf, self._tail = self._tail + buf, b'' start_pos = 0 buf_length = len(buf) while True: # read header if self._state == WSParserState.READ_HEADER: if buf_length - start_pos >= 2: data = buf[start_pos:start_pos+2] start_pos += 2 first_byte, second_byte = data fin = (first_byte >> 7) & 1 rsv1 = (first_byte >> 6) & 1 rsv2 = (first_byte >> 5) & 1 rsv3 = (first_byte >> 4) & 1 opcode = first_byte & 0xf # frame-fin = %x0 ; more frames of this message follow # / %x1 ; final frame of this message # frame-rsv1 = %x0 ; # 1 bit, MUST be 0 unless negotiated otherwise # frame-rsv2 = %x0 ; # 1 bit, MUST be 0 unless negotiated otherwise # frame-rsv3 = %x0 ; # 1 bit, MUST be 0 unless negotiated otherwise # # Remove rsv1 from this test for deflate development if rsv2 or rsv3 or (rsv1 and not self._compress): raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'Received frame with non-zero reserved bits') if opcode > 0x7 and fin == 0: raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'Received fragmented control frame') has_mask = (second_byte >> 7) & 1 length = second_byte & 0x7f # Control frames MUST have a payload # length of 125 bytes or less if opcode > 0x7 and length > 125: raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'Control frame payload cannot be ' 'larger than 125 bytes') # Set compress status if last package is FIN # OR set compress status if this is first fragment # Raise error if not first fragment with rsv1 = 0x1 if self._frame_fin or self._compressed is None: self._compressed = True if rsv1 else False elif rsv1: raise WebSocketError( WSCloseCode.PROTOCOL_ERROR, 'Received frame with non-zero reserved bits') self._frame_fin = bool(fin) self._frame_opcode = opcode self._has_mask = bool(has_mask) self._payload_length_flag = length self._state = WSParserState.READ_PAYLOAD_LENGTH else: break # read payload length if self._state == WSParserState.READ_PAYLOAD_LENGTH: length = self._payload_length_flag if length == 126: if buf_length - start_pos >= 2: data = buf[start_pos:start_pos+2] start_pos += 2 length = UNPACK_LEN2(data)[0] self._payload_length = length self._state = ( WSParserState.READ_PAYLOAD_MASK if self._has_mask else WSParserState.READ_PAYLOAD) else: break elif length > 126: if buf_length - start_pos >= 8: data = buf[start_pos:start_pos+8] start_pos += 8 length = UNPACK_LEN3(data)[0] self._payload_length = length self._state = ( WSParserState.READ_PAYLOAD_MASK if self._has_mask else WSParserState.READ_PAYLOAD) else: break else: self._payload_length = length self._state = ( WSParserState.READ_PAYLOAD_MASK if self._has_mask else WSParserState.READ_PAYLOAD) # read payload mask if self._state == WSParserState.READ_PAYLOAD_MASK: if buf_length - start_pos >= 4: self._frame_mask = buf[start_pos:start_pos+4] start_pos += 4 self._state = WSParserState.READ_PAYLOAD else: break if self._state == WSParserState.READ_PAYLOAD: length = self._payload_length payload = self._frame_payload chunk_len = buf_length - start_pos if length >= chunk_len: self._payload_length = length - chunk_len payload.extend(buf[start_pos:]) start_pos = buf_length else: self._payload_length = 0 payload.extend(buf[start_pos:start_pos+length]) start_pos = start_pos + length if self._payload_length == 0: if self._has_mask: assert self._frame_mask is not None _websocket_mask(self._frame_mask, payload) frames.append(( self._frame_fin, self._frame_opcode, payload, self._compressed)) self._frame_payload = bytearray() self._state = WSParserState.READ_HEADER else: break self._tail = buf[start_pos:] return frames class WebSocketWriter: def __init__(self, protocol: BaseProtocol, transport: asyncio.Transport, *, use_mask: bool=False, limit: int=DEFAULT_LIMIT, random: Any=random.Random(), compress: int=0, notakeover: bool=False) -> None: self.protocol = protocol self.transport = transport self.use_mask = use_mask self.randrange = random.randrange self.compress = compress self.notakeover = notakeover self._closing = False self._limit = limit self._output_size = 0 self._compressobj = None # type: Any # actually compressobj async def _send_frame(self, message: bytes, opcode: int, compress: Optional[int]=None) -> None: """Send a frame over the websocket with message as its payload.""" if self._closing: ws_logger.warning('websocket connection is closing.') rsv = 0 # Only compress larger packets (disabled) # Does small packet needs to be compressed? # if self.compress and opcode < 8 and len(message) > 124: if (compress or self.compress) and opcode < 8: if compress: # Do not set self._compress if compressing is for this frame compressobj = zlib.compressobj(wbits=-compress) else: # self.compress if not self._compressobj: self._compressobj = zlib.compressobj(wbits=-self.compress) compressobj = self._compressobj message = compressobj.compress(message) message = message + compressobj.flush( zlib.Z_FULL_FLUSH if self.notakeover else zlib.Z_SYNC_FLUSH) if message.endswith(_WS_DEFLATE_TRAILING): message = message[:-4] rsv = rsv | 0x40 msg_length = len(message) use_mask = self.use_mask if use_mask: mask_bit = 0x80 else: mask_bit = 0 if msg_length < 126: header = PACK_LEN1(0x80 | rsv | opcode, msg_length | mask_bit) elif msg_length < (1 << 16): header = PACK_LEN2(0x80 | rsv | opcode, 126 | mask_bit, msg_length) else: header = PACK_LEN3(0x80 | rsv | opcode, 127 | mask_bit, msg_length) if use_mask: mask = self.randrange(0, 0xffffffff) mask = mask.to_bytes(4, 'big') message = bytearray(message) _websocket_mask(mask, message) self.transport.write(header + mask + message) self._output_size += len(header) + len(mask) + len(message) else: if len(message) > MSG_SIZE: self.transport.write(header) self.transport.write(message) else: self.transport.write(header + message) self._output_size += len(header) + len(message) if self._output_size > self._limit: self._output_size = 0 await self.protocol._drain_helper() async def pong(self, message: bytes=b'') -> None: """Send pong message.""" if isinstance(message, str): message = message.encode('utf-8') await self._send_frame(message, WSMsgType.PONG) async def ping(self, message: bytes=b'') -> None: """Send ping message.""" if isinstance(message, str): message = message.encode('utf-8') await self._send_frame(message, WSMsgType.PING) async def send(self, message: Union[str, bytes], binary: bool=False, compress: Optional[int]=None) -> None: """Send a frame over the websocket with message as its payload.""" if isinstance(message, str): message = message.encode('utf-8') if binary: await self._send_frame(message, WSMsgType.BINARY, compress) else: await self._send_frame(message, WSMsgType.TEXT, compress) async def close(self, code: int=1000, message: bytes=b'') -> None: """Close the websocket, sending the specified code and message.""" if isinstance(message, str): message = message.encode('utf-8') try: await self._send_frame( PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE) finally: self._closing = True
Shampoo your hair while reparing it from damage caused by your daily routine with Eversleek Keratin Shampoo. Enriched with natural Sunflower botanical, this sulfate-free shampoo gently smoothens hair as it repairs damage. Discover vibrant and soft hair. SMOOTHENS AND REPAIRS CHEMICALLY TREATED HAIR IN THE MOST CARING WAY. EVER. This is an affordable sulfate-free shampoo that helps maintain salon treatments. It's very effective. Love this shampoo! It makes me hair smooth and straight. Maintains my keratin treatment as well.
""" Overview ======== This plugin implements range selection. Key-Commands ============ Namespace: range-sel Mode: NORMAL Event: <Control-k> Description: Add/remove selection one line up from the initial selection mark. Mode: NORMAL Event: <Control-j> Description: Add/remove selection one line down from the initial selection mark. Mode: NORMAL Event: <Control-l> Description: Add/remove selection one character right from the initial selection mark. Mode: NORMAL Event: <Control-h> Description: Add/remove selection one character left from the initial selection mark. Mode: NORMAL Event: <Control-v> Description: Drop a selection mark. """ from vyapp.app import root class RangeSel: def __init__(self, area): area.install('range-sel', ('NORMAL', '<Control-k>', self.sel_up), ('NORMAL', '<Control-j>', self.sel_down), ('NORMAL', '<Control-h>', self.sel_left), ('NORMAL', '<Control-l>', self.sel_right), ('NORMAL', '<Control-v>', self.start_selection)) area.mark_set('(RANGE_SEL_MARK)', '1.0') self.area = area def start_selection(self, event): """ Start range selection. """ self.area.mark_set('(RANGE_SEL_MARK)', 'insert') root.status.set_msg('Dropped selection mark.') def sel_up(self, event): """ It adds 'sel' one line up the 'insert' position and sets the cursor one line up. """ self.area.rmsel('(RANGE_SEL_MARK)', 'insert') self.area.up() self.area.addsel('(RANGE_SEL_MARK)', 'insert') def sel_down(self, event): """ It adds or removes selection one line down. """ self.area.rmsel('(RANGE_SEL_MARK)', 'insert') self.area.down() self.area.addsel('(RANGE_SEL_MARK)', 'insert') def sel_right(self, event): """ It adds or removes selection one character right. """ self.area.rmsel('(RANGE_SEL_MARK)', 'insert') self.area.right() self.area.addsel('(RANGE_SEL_MARK)', 'insert') def sel_left(self, event): """ It adds or removes selection one character left. """ self.area.rmsel('(RANGE_SEL_MARK)', 'insert') self.area.left() self.area.addsel('(RANGE_SEL_MARK)', 'insert') install = RangeSel
The Linen Dust Ruffle by Bella Notte Linens is a classic, 3-panel gathered dust ruffle that attaches to the box spring with t-pins. Each panel has a generous 22-inch drop so that it will graciously accent any bed, and the height of the dust ruffle can be adjusted to find the perfect length for your bedroom decor. This 100% signature linen design is a perfect example of Bella Notte Linens' perfection in design and versatility, as it is intended to complement just about every other luxury bedding collection.
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-04-05 00:44 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('recipes', '0007_auto_20160330_1637'), ] operations = [ migrations.AlterModelOptions( name='lesson', options={'ordering': ['date']}, ), migrations.AddField( model_name='lesson', name='parsed', field=models.TextField(null=True), ), migrations.AlterField( model_name='lesson', name='date', field=models.DateField(blank=True, null=True), ), migrations.AlterField( model_name='lesson', name='problems', field=models.TextField(blank=True, null=True), ), migrations.AlterField( model_name='lesson', name='title', field=models.CharField(blank=True, max_length=255, null=True), ), ]
Model A1339, purchased 2011, glass cracked on the top. Is there anyway to change out the glass without having to just live with it being cracked? I just cracked mine too, dropped it accidentally (couch surfing) but I'm gonna opt for a sticker/skin to cover it up. Out of sight out of mind. Cracked Trackpad Glass: Proceed with Caution! You'll be typing an email or working in a Word doc or just browsing the Internet when the trackpad will start taking action on its own. It will interpret the slightest touch in the center as a click, or even a click-and-move, or a click/delete. As you hover the mouse over a clickable item, the trackpad will click it and off you'll go to an unwanted website. "As you hover the mouse over a clickable item, the trackpad will click it and off you'll go to an unwanted website." The teardown and guides are all great but I've not found the parts. Anyone? Im also looking for a new glass, any url? Gerry J будет вечно благодарен.
import pandas as pd import odmanalysis as odm from odmanalysis import gui import os import argparse def main(): parser = argparse.ArgumentParser(description='Pivots the odmanalysis output file to produce and excel file with all cycles in different columns') parser.add_argument('filename', nargs='?', default="", help="The odmanalysis.csv file to tabulate", type=str) parser.add_argument('--split-direction','-d', type=bool, help='split the actuation directions into different columns', metavar='') args = parser.parse_args() if not os.path.isfile(args.filename): args.filename = gui.get_path("*.csv",defaultFile="odmanalysis.csv") commonPath = os.path.abspath(os.path.split(args.filename)[0]) df = odm.readAnalysisData(args.filename) cycleFrames = [] keys = ['cycleNumber'] if args.split_direction == True: keys.append('direction') grouped = df.groupby(keys) for keys, group in grouped: if not hasattr(keys, '__iter__'): keys = tuple([keys]) dfTemp = group[['actuatorVoltage','displacement']] dfTemp = dfTemp.reset_index().drop('timestamp',axis=1) name = 'cycle_%i' % keys[0] for k in keys[1:]: name += "_%s" % k cycleFrames.append(pd.concat({name: dfTemp}, axis=1)) dfCombined = pd.concat(cycleFrames,axis=1) dfCombined.to_excel(os.path.join(commonPath,'odmanalysis_tabulated.xlsx'),index=False) print os.path.join(commonPath,'odmanalysis_tabulated.xlsx') if __name__=="__main__": main()
So, I’m including it as a money-making option on Work at Home Adventures. If you’re already spending your time texting or chatting in online message boards, you could be doing it and getting paid for it instead.When it comes to making money from home, the sky’s the limit! You can chat for money by talking to men online or in paid chat rooms (which, let’s face it, is much safer than some alternative options! Let’s find out some more about this interesting opportunity!Lip Service currently accepts chatters in Canada and the United States. Then, if it’s approved, someone from Lip Service will give you a call so he or she can hear your phone-speaking voice.Lip Service also allows you to be flexible, logging in and working when you’re able to. Chatters for this company can expect to earn per minute of chat time.Usually, a user had to sign up for a subscription or pay per chat, and a portion of that payment goes to you.It all depends on chat method used (actual chat or text messages) and how long the conversation lasts.This one mostly focuses on women chatting with men via a chat console. You must be at least 18 years old and have a reliable computer hooked to the internet.
#!/usr/bin/env python # -*- coding: UTF-8 -*- # # Copyright (c) 2020 ASMlover. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list ofconditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materialsprovided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from typing import Generator, Union Numeric = Union[int, float] def add(x: Numeric, y: Numeric) -> Generator[Numeric, None, None]: yield x + y def main() -> Generator[Numeric, None, None]: r = yield add(2, 2) print(f"result is: {r}") yield def run() -> None: m = main() sub = m.send(None) result = sub.send(None) m.send(result) if __name__ == '__main__': run()
Earlier this year I got the Budd RDC made by Rapido Trains, Southern Pacific #10 for the Randall Museum layout. It’s a beautiful engine, with a great sound. They use a LokSound decoder with a iPhone 4-like sound speaker (the long type, not the sugar cubes ones). One thing we wanted for the layout automation was to install a current keeper. ESU makes the PowerPack Mini, which should fit in there. First order of business was thus to find out how to remove the shell. To open it: on a cradle, remove the coupler screw and pull the coupler box out. Repeat for both couplers. It all stayed together, which was nice. Approximately under the 3rd window on each side is a little plastic tab in the outer shell (see position indicated in the next picture). You'll need to pry the shell apart at that point, on all 4 sides. With a 5th hand, pull on extremities. I used the trick of leaving a coupler screw and pulling on that with some pliers, but after a while I realized it was almost easier to pull and rock the staircases out of the front and back shells, very slowly and extra careful not to break them. Once I got the shell out, I marked the front end immediately, but in fact it's not needed: in the outer shell casing there's a big F and R inside the roof, and on the mother board you can see FoF and FoR printed. Also as explained in the Rapido doc, the front is the one with the 5 windows and the rear with the 6 windows. Now let’s look at how to add the ESU PowerPack (a.k.a. current keeper) to the Rapido RDC SP-10. I’ve done it and after testing it I was really satisfied with the outcome, however there is a little important twist so I would like to explain how to do it. I used the ESU PowerPack Mini, which is that green capacitor as seen on the picture above, and there's more than enough space to add it in the RDC. It's a fairly easy install too. First I had to open the shell, as explained above. At first I was thinking of placing the keeper in the toilets (geez, these guys really model everything... there's got to be some kind of joke in there!) but the keeper was too large by just a millimeter. Oh well. At the top, there's the long “motherboard” with the LokSound and in the middle there's a small bunch of red/black wires held by a tie. That location is right under the bulbous fans/exhaust ports and is the perfect spot to place the keeper. So first the plastic tie had to go. I used a small screwdriver to untie it, or of course you can carefully cut it open. I then used some kapton tape to hold the wires in place. Once I was satisfied with the location, I soldered the 3 wires to the decoder in the red/white/black orientation as indicated in the PowerPack manual. I had to borrow a Weller with a fine tip at work because my normal one looked gigantic next to the LokSound tiny solder pads. Not the best looking job here, it looked better the first time I soldered them; oh well I'm never satisfied anyway; I checked it out for shorts and it came clean, that's what matter. After I soldered the wires it occurred to me I could have trimmed them a bit. Some more kapton tape keeps everything tight in place. Now here comes the little trick. I put everything on the track yet when I tried I couldn't get any effect from the keeper! I redid the soldering to make sure, checked the red/black wires had power, made sure the CV 113 had a non-zero value, didn't matter, it clearly wasn't powering anything at all. Eventually I asked ESU for some insight. So here's the trick, according to ESU, on this LokSound, AUX6 is shared between the AUX6 function and the white wire that activates the PowerPack. On this Rapido RDC, AUX6 is used for the rear ditch lights and the factory setting is for them to be programmed as Gyra Lights. Using DecoderPro, go to the "Function Outputs" page and change "Aux6 Mode" from Gyra Light to Disabled. Using CVs: CV31 = 16, CV32 = 0, CV315 = 0. Note that the “aisles” screws and the motor ones are not the same, so store them appropriately. Eventually the RDC at Randall stopped working. One truck was clearly not running anymore, at least not consistently. The symptom is that one truck runs fine, and the other one does not run at slow speed but sometimes runs at full speed. First things first: if for some reason one or both of the original motors failed, contact Rapido’s support. It’s apparently a known issue in their first motor batch. Even trying to use contact cleaner such as CRC 2-26 on the collector did not fix the issue with mine. They sent me new motors as well as new supports. The new supports are much easier to use than the previous ones, they clip on the motor frame. “I can’t find the + side on the motor”. That was my main issue and I proceeded by elimination -- solder it and see if the motors turn in the right direction. For that I made a mark on the flywheel with a black marker on the original motors and made sure the new ones were turning in the same direction. I got one right and one wrong. ⇒ Turns out there is a “+” mark on the motor, on the vertical end wall next to the flywheel. It’s a very very tiny mark which I mistook for a scratch at first. It matches the red wire. Out of 4 new motors, one didn’t turn smoothly at all, and another one the flywheel was 1 mm too far and was rubbing against the plastic well. This was only apparent once the cover was screwed in place. Route the red/black wires and solder them in the direction that matches the cover with the seats. Otherwise they are not long enough to be routed back against the sides. Reassembly is basically following the steps in reverse direction. It’s a tad tricky to put back the seats cover and pull all the red/black wires at the same time. Just go slowly and check the drive runs smoothly before and after screwing the seats cover.
# -*- coding: utf-8 -*- """ Django settings for ordo_electro project. For more information on this file, see https://docs.djangoproject.com/en/dev/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/dev/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os from os.path import join, dirname, expanduser from configurations import Configuration, values from ConfigParser import RawConfigParser BASE_DIR = dirname(dirname(__file__)) ## for connecting to mongo from mongoengine import connect class Common(Configuration): # APP CONFIGURATION DJANGO_APPS = ( # Default Django apps: 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Useful template tags: # 'django.contrib.humanize', # Admin 'django.contrib.admin', ) THIRD_PARTY_APPS = ( 'crispy_forms', # Form layouts 'avatar', # for user avatars 'allauth', # registration 'allauth.account', # registration 'allauth.socialaccount', # registration 'twython', #twython twitter API app 'rest_framework', # for the django rest framework 'mongoengine', ) # Apps specific for this project go here. LOCAL_APPS = ( 'users', # custom users app # Your stuff: custom apps go here 'social', #tools for managing social content ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS # END APP CONFIGURATION # MIDDLEWARE CONFIGURATION MIDDLEWARE_CLASSES = ( # Make sure djangosecure.middleware.SecurityMiddleware is listed first 'djangosecure.middleware.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) # END MIDDLEWARE CONFIGURATION # MIGRATIONS CONFIGURATION MIGRATION_MODULES = { 'sites': 'contrib.sites.migrations' } # END MIGRATIONS CONFIGURATION # DEBUG # See: https://docs.djangoproject.com/en/dev/ref/settings/#debug DEBUG = values.BooleanValue(True) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug TEMPLATE_DEBUG = DEBUG # END DEBUG # SECRET CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key # Note: This key only used for development and testing. # In production, this is changed to a values.SecretValue() setting SECRET_KEY = 'CHANGEME!!!' # END SECRET CONFIGURATION # FIXTURE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS FIXTURE_DIRS = ( join(BASE_DIR, 'fixtures'), ) # END FIXTURE CONFIGURATION # EMAIL CONFIGURATION #EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend') EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend') # END EMAIL CONFIGURATION # MANAGER CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#admins ADMINS = ( ("""Ordo""", 'jimmykpost@gmail.com'), ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#managers MANAGERS = ADMINS # END MANAGER CONFIGURATION # DATABASE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#databases DATABASES = values.DatabaseURLValue('mysql://root:@localhost/ordo_electro') # END DATABASE CONFIGURATION # CACHING # Do this here because thanks to django-pylibmc-sasl and pylibmc # memcacheify (used on heroku) is painful to install on windows. CACHES = { 'default': { # 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', # 'LOCATION': '' "BACKEND": "django_redis.cache.RedisCache", "LOCATION": "redis://127.0.0.1:6379/0", "OPTIONS": { "CLIENT_CLASS": "django_redis.client.DefaultClient", } } } # END CACHING # GENERAL CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone TIME_ZONE = 'UTC' # See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code LANGUAGE_CODE = 'en-us' # See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id SITE_ID = 1 # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n USE_I18N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n USE_L10N = True # See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz USE_TZ = True # END GENERAL CONFIGURATION # TEMPLATE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors TEMPLATE_CONTEXT_PROCESSORS = ( 'django.contrib.auth.context_processors.auth', 'allauth.account.context_processors.account', 'allauth.socialaccount.context_processors.socialaccount', 'django.core.context_processors.debug', 'django.core.context_processors.i18n', 'django.core.context_processors.media', 'django.core.context_processors.static', 'django.core.context_processors.tz', 'django.contrib.messages.context_processors.messages', 'django.core.context_processors.request', # Your stuff: custom template context processers go here ) # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs TEMPLATE_DIRS = ( join(BASE_DIR, 'templates'), ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) # See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs CRISPY_TEMPLATE_PACK = 'bootstrap3' # END TEMPLATE CONFIGURATION # STATIC FILE CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'static') # See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url STATIC_URL = '/static/' # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS STATICFILES_DIRS = ( join(BASE_DIR, 'static'), ) # See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) # END STATIC FILE CONFIGURATION # MEDIA CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root MEDIA_ROOT = join(BASE_DIR, 'media') # See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url MEDIA_URL = '/media/' # END MEDIA CONFIGURATION # URL Configuration ROOT_URLCONF = 'urls' # See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application WSGI_APPLICATION = 'wsgi.application' # End URL Configuration # AUTHENTICATION CONFIGURATION AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) # Some really nice defaults ACCOUNT_AUTHENTICATION_METHOD = 'username' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' # END AUTHENTICATION CONFIGURATION # Custom user app defaults # Select the correct user model AUTH_USER_MODEL = 'users.User' LOGIN_REDIRECT_URL = 'users:redirect' LOGIN_URL = 'account_login' # END Custom user app defaults # SLUGLIFIER AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify' # END SLUGLIFIER # LOGGING CONFIGURATION # See: https://docs.djangoproject.com/en/dev/ref/settings/#logging # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'DEBUG', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, 'file': { 'level': 'DEBUG', 'class': 'logging.FileHandler', 'filename': '/tmp/python.log', 'formatter': 'verbose', }, }, 'formatters': { 'verbose': { 'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s", 'datefmt' : "%d/%b/%Y %H:%M:%S" }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'loggers': { 'django': { 'handlers': ['mail_admins','file'], 'level': 'DEBUG', 'propagate': True, }, } } # LOGGING = { # 'version': 1, # 'disable_existing_loggers': False, # 'filters': { # 'require_debug_false': { # '()': 'django.utils.log.RequireDebugFalse' # } # }, # 'handlers': { # 'file': { # 'level': 'DEBUG', # 'class': 'logging.FileHandler', # 'filename': '/tmp/python.log', # }, # }, # 'loggers': { # 'django.request': { # 'handlers': ['file'], # 'level': 'DEBUG', # 'propagate': True, # }, # }, # } # END LOGGING CONFIGURATION @classmethod def post_setup(cls): cls.DATABASES['default']['ATOMIC_REQUESTS'] = True # Your common stuff: Below this line define 3rd party library settings #### get the users personal settings config = RawConfigParser() config.read(expanduser('~') + '/.ordo_electro/settings.ini') TWITTER_KEY = config.get('secrets', 'TWITTER_KEY') TWITTER_SECRET = config.get('secrets', 'TWITTER_SECRET') OAUTH_TOKEN = config.get('secrets', 'OAUTH_TOKEN') OAUTH_TOKEN_SECRET = config.get('secrets', 'OAUTH_TOKEN_SECRET') LOGOUT_URL='account_logout' LOGOUT_REDIRECT_URL='/' # DRF REST_FRAMEWORK = { # Use Django's standard `django.contrib.auth` permissions, # or allow read-only access for unauthenticated users. 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly' ], 'PAGINATE_BY': 10 } connect('ordo_electro')
Where is relevance without vulture politics? We don’t care, we have our carcasses to feed. It’s all over, take care and good bye. Or, is it an ouroboros with no start and end at all? Why break them first and then try to restore? Why so many feelings of separation and patch up? Is it why or is it how many times we go through? What damage it wroughts on the ones separated?
"""API for working with Nvim tabpages.""" from .common import Remote, RemoteMap, RemoteSequence __all__ = ('Tabpage') class Tabpage(Remote): """A remote Nvim tabpage.""" def __init__(self, session, code_data): """Initialize from session and code_data immutable object. The `code_data` contains serialization information required for msgpack-rpc calls. It must be immutable for Tabpage equality to work. """ self._session = session self.code_data = code_data self.windows = RemoteSequence(session, 'tabpage_get_windows', self) self.vars = RemoteMap(session, 'tabpage_get_var', 'tabpage_set_var', self) @property def window(self): """Get the `Window` currently focused on the tabpage.""" return self._session.request('tabpage_get_window', self) @property def valid(self): """Return True if the tabpage still exists.""" return self._session.request('tabpage_is_valid', self)
These two are the kind of people that would never let anyone feel left out or irrelevant. Melissa and Sky are so genuine and compassionate individually, so when you bring them together they are this couple from out of a fairytale. They listened to my ridiculous direction like whipping the hair in each other's faces and whispering stupid words in one another's ear. All I can say is, expect crazy when you shoot with me.
# -*- coding: utf-8 -*- # # Manticore documentation build configuration file, created by # sphinx-quickstart on Fri Mar 10 18:04:51 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = "1.0" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["sphinx.ext.autodoc"] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = "Manticore" copyright = "2019, Trail of Bits" author = "Trail of Bits" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "0.3.6" # The full version, including alpha/beta/rc tags. release = "0.3.6" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "alabaster" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = "Manticoredoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, "Manticore.tex", "Manticore Documentation", "Trail of Bits", "manual") ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "manticore", "Manticore Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "Manticore", "Manticore Documentation", author, "Manticore", "One line description of project.", "Miscellaneous", ) ] # -- Custom # our setup.py does not install z3-solver when on rtd because # for some reason z3 is built during the dep install process, # and rtd ooms (something related to `python setup.py --force`) # so because z3-solver is not installed as a dep, but # rtd still does `import manticore`, we need to mock the environment # enough to make Manticore importable. specifically, we need to mock # things so a Z3Solver can be constructed. import subprocess class MockZ3Fd: def readline(self, *args, **kwargs): return '(:version "4.5.1")\n' def flush(self, *args, **kwargs): return def write(self, *args, **kwargs): return class MockPopen: def __init__(self, *args, **kwargs): self.stdout = MockZ3Fd() self.stdin = MockZ3Fd() subprocess.Popen = MockPopen
John has been in association with Ray Quinney & Nebeker since 1968. Much of that time has involved providing services to First Security Bank and later Wells Fargo Bank after their merger. That work has involved reviewing loans, leases and other financial transactions. The past 15 years has been devoted almost exclusively to leveraged equipment leases for Wells Fargo as trustee. John has a conflict resolution certificate and has acted as a mediator. He is a past member of The University of Utah Board of Trustees and the University Hospital board. He maintains an AV Preeminent (5.0) rating with Martindale-Hubbell, which is the highest rating awarded to attorneys for professional competence and ethics.
# -*- coding: utf-8 -*- """ Created on Sun Jan 14 09:35:33 2017 @author: galad-loth """ import mxnet as mx import feat_net def metric_net_2ch(): data = mx.sym.Variable("data") conv_weight = [] conv_bias = [] for i in range(3): conv_weight.append(mx.sym.Variable('conv' + str(i) + '_weight')) conv_bias.append(mx.sym.Variable('conv' + str(i) + '_bias')) conv_res= feat_net.featnet1(data,conv_weight, conv_bias,"") conv_res = mx.sym.Flatten(data=conv_res) net = mx.sym.FullyConnected(data=conv_res,num_hidden=256, name="fc1") net = mx.sym.Activation(data=net, act_type="relu", name="relu3") net = mx.sym.FullyConnected(data=net,num_hidden=1, name="fc2") return net def metric_net_2ch_cs(): datas= mx.sym.Variable("datas") datac= mx.sym.Variable("datac") conv_weight_s = [] conv_bias_s = [] conv_weight_c = [] conv_bias_c = [] for i in range(4): conv_weight_s.append(mx.sym.Variable('conv' + str(i) + '_weight_s')) conv_bias_s.append(mx.sym.Variable('conv' + str(i) + '_bias_s')) conv_weight_c.append(mx.sym.Variable('conv' + str(i) + '_weight_c')) conv_bias_c.append(mx.sym.Variable('conv' + str(i) + '_bias_c')) conv_res_s=feat_net.featnet2(datas,conv_weight_s, conv_bias_s,"bs_") conv_res_c=feat_net.featnet2(datac,conv_weight_c, conv_bias_c,"bc_") conv_res=mx.sym.Concat(conv_res_s,conv_res_c,dim=1, name='conv_res') net = mx.sym.FullyConnected(data=conv_res,num_hidden=768, name="fc1") net = mx.sym.Activation(data=net, act_type="relu", name="relu1") net = mx.sym.FullyConnected(data=net,num_hidden=1, name="fc2") return net if __name__=="__main__": net=metric_net_2ch_cs() ex=net.simple_bind(ctx=mx.cpu(), datas=(50,2,64,64),datac=(50,2,64,64))
DD S13 Kouki Bumper Extensions (PAIR) are high quality replicas of the discontinued OEM S13 ABS Bumper Extensions pieces. DD S13 OE Kouki Bumper Extensions are produced in black polyurethane. Minor sanding may be required for exact fitment. All photos shown are of the parts you will receive mounted to a customers car utilizing the factory 1996-1998 bumper setup. All polyurethane aero parts are processed immediately upon receiving and usually take 3 to 5 business days to be sent out. Packages are sent via FedEX or Freight depending on size. Tracking will be emailed to your Paypal provided email address. All sales are final. Items will be replaced if damaged upon shipping. No refunds will be issued for buyers remorse or for incorrectly fitting your own parts.
''' 监控linux目录变化(可观察的监控源) Create on : 2014-12-23 Author:liangzonghua ''' from core.MonitorSource import ObserableMonitorSource from core.MetricValue import MultiMetricValue from core import regist_monitor_source from utils.Logger import info import os import pyinotify class DiectoryChangleHanlder(pyinotify.ProcessEvent): _monitor_source = None def __init__(self,ms): self._monitor_source = ms def process_default(self,event): f = event.pathname info(event.maskname+":"+f) metricValue = MultiMetricValue(self._monitor_source.getMonitorSourceName()) metricValue.addMetricValue("type", event.maskname) metricValue.addMetricValue("monitor_path",event.path) metricValue.addMetricValue("monitor_file",event.file) metricValue.addMetricValue("is_directory",event.dir) if hasattr(event, "src_pathname"): metricValue.addMetricValue("src_pathname",event.src_pathname) if os.path.exists(f): metricValue.addMetricValue("atime", os.path.getatime(f)) metricValue.addMetricValue("ctime", os.path.getctime(f)) metricValue.addMetricValue("mtime", os.path.getmtime(f)) metricValue.addMetricValue("size", os.path.getsize(f)) self._monitor_source.notify(metricValue) DEFAULT_MONITOR_MASK = pyinotify.IN_DELETE | pyinotify.IN_CREATE | pyinotify.IN_MODIFY | pyinotify.IN_MOVED_TO | pyinotify.IN_MOVED_FROM class LiuxDirectoryMonitorSource(ObserableMonitorSource): _monitor_dir = "." _monitor_mask= DEFAULT_MONITOR_MASK _is_rec = True def __init__(self,monitorSourceName, directory=None, mask=DEFAULT_MONITOR_MASK, rec = True): self.monitorSourceName(monitorSourceName) if dir != None: self._monitor_dir = directory if mask != None: self._monitor_mask = mask self._is_rec = rec def start(self): ObserableMonitorSource.start(self) if not os.path.exists(self._monitor_dir): os.makedirs(self._monitor_dir,exist_ok=True) info("create monitor dir:%s"%(self._monitor_dir)) wm= pyinotify.WatchManager() eventHandler= DiectoryChangleHanlder(self) self._notifier= pyinotify.Notifier(wm, eventHandler) wm.add_watch(self._monitor_dir, self._monitor_mask,self._is_rec) info('now starting monitor: %s'%(self._monitor_dir)) while True: try: if self._notifier.is_alive(): self._notifier.process_events() if self._notifier.check_events(): self._notifier.read_events() else: break except KeyboardInterrupt: self.stop() break def stop(self): if hasattr(self, "_notifier") and self._notifier != None: self._notifier.stop() ObserableMonitorSource.stop(self) def linux_dir_stat(name="linux_dir_stat",monitor_dir=".",rec=True,mask=DEFAULT_MONITOR_MASK): monitorSource = LiuxDirectoryMonitorSource(monitorSourceName=name,directory=monitor_dir,mask=mask,rec=rec) regist_monitor_source(monitorSource) return monitorSource
The Parent’s Night Out & Kid’s Movie Night will be Friday, June 2 from 6-9 pm for children over the age of 5. We will be showing Cloudy with a Chance of Meatballs. A suggested donation of $5 per child will help cover the cost. A second adult is required to stay for the event as per our church policy. A sign-up sheet is in Immanuel Parlor. The Parents’ Night Out & Kids’ Movie Night will be Friday, May 5 from 6-9 pm for children over the age of 5. We will be showing Kubo and the Two Strings. A suggested donation of $5 per child will help cover the cost. A second adult is required to stay for the event as per our church policy. Please let Brian know if you are able to volunteer. Teen Movie Night for middle school or high school kids only that was scheduled for March 24th will be rescheduled for May. Please contact Brian Love if you have any questions. The Parent’s Night Out & Kid’s Movie Night will be this Friday, March 3 from 6-9 pm for children over the age of 5. We will be showing a DINO double feature: Trolls. A suggested donation of $5 per child will help cover the cost. A second adult is required to stay for the event as per our church policy. A sign-up sheet is in Immanuel Parlor.
import functools import re class Event(object): def __init__(self, evt_type, body, source=None): self.evt_type = evt_type self.body = body self.source = source def __getitem__(self, key): return self.body[key] def __iter__(self): return iter(self.body) @classmethod def deserialize(cls, data): return cls(data.get('type'), data.get('body', {}), source=data.get('source')) def serialize(self): return { 'type': self.evt_type, 'body': self.body, 'source': self.source, } class EventDispatcher(object): wildcards = { '#': r'[\w.]*(?=\.|$)', '*': r'\w+', } def __init__(self, patterns=()): self.patterns = list(patterns) def compile(self, key): words = (self.wildcards.get(word, re.escape(word)) for word in key.split('.')) return re.compile('^%s$' % r'\.'.join(words)) def register(self, pattern, handler): self.patterns.append(( self.compile(pattern), pattern, handler, )) def __iter__(self): for regex, pattern, handler in self.patterns: yield pattern, handler def update(self, other): for pattern, handler in other: self.register(pattern, handler) def dispatch(self, evt_type): for regex, pattern, handler in self.patterns: if regex.match(evt_type): yield pattern, handler def __call__(self, obj, event): handlers = set() for pattern, handler in self.dispatch(event.evt_type): if handler not in handlers: handlers.add(handler) handler(obj, event) return bool(handlers) def __get__(self, obj, cls): if obj is None: return self return functools.partial(self, obj)
Deal-4 1 x Large Traditional Pizza, 2 x Garlic Bread, 2 x Soft Drinks 1.25 Lts. Deal 5 5 x Large Tradtional Pizza, 2 x Garlic Bread, 2 x Soft Drinks 1.25 Lts. Cheesy Chips With optional sauce ( BBQ-Tomato-Chili). Wedges Served with chilli sauce. Special Cheesy Chips With Mushrooms and Capsicums. Calamari Rings Crumbled calamari rings served with garlic sauce and lemon. Fetta & Oregano Chips Fetta,Oregano, Lemon. Capricciosa Cheese, ham, mushroom and olives. Aussie Cheese, ham, bacon and egg. Meat Lovers Cheese, Ham, Salami, Bacon and BBQ Sauce. Hawaiian Cheese, Ham and PMediumineapple. American (Pepperoni) Cheese, Salami and Herbs. Volcano Cheese, salami, capsicum, olives, jalapeno, chili sauce. Mexican - Traditional Pizza Cheese, salami, ham, mushrooms, capsicum, olives, salsa sauce. Prosciutto Tomato base, cheese, rocket, prosciutto, parmesan cheese and black pepper. Vegidelight Cheese, pumpkin, capsicum, corn, olives, feta, olive oil, garlic, fresh basil (hallal)cheese, basil, garlic. The Lot Cheese,ham, salami, onion, capsicum, mushroom, olive , bacon chicken,pineapple, anchovies. Greek Lamb Tomato base, cheese, capsicum, red onions, olives, feta cheese, garlic, oregano, and zatziki sauce. Carbonara Bacon, garlic, cream, parmesan, spring onion, black pepper. Matriciana Bacon, capsicum, onion, garlic, cream, parmesan, black pepper. Mexican Salami, garlic, mushroom, capsicum, onion, chilli, rich tomato, black pepper. Vege Work Mushroom, seasonal vegetables, onion, cream, spinach, garlic, parmesan, black pepper. Seafood Seafood mix, garlic, chilli, spring onion, rich tomato, cherry tomato, parmesan, black pepper. Polo Fungi Chicken, mushroom, garlic, cream, rice, cherry tomato, spring onion, black pepper. Chicken Pesto Chicken, mushroom, garlic, cream, rice, cherry tomato, spring onion, black pepper. Sea Food Seafood mix, garlic,rich tomato,jalopeno, chilli, cream, rice, cherry tomato, spring onion, black pepper. Wege Work Mushroom, seasonal vegetables, onion, spinach, black pepper, spring onion, garlic rice. Roasted Pumpkin Pumpkin, mushroom, spinach, pesto, cream, spring onion, feta cheese, parmesan, rice, black pepper. Chicken Shish Meal (12 Pcs) Served with rice, turkish bread, garden salad, tzatziki sauce and hummus. Lamb Shish Meal (12 Pcs) Served with rice, turkish bread, garden salad, tzatziki sauce and hummus. Mix Shish Meal (12 Pcs) Comes with rice, tzatziki and hummus dips and Turkish bread and garden salad. Lamb Cutlets Meal (6 Pcs) Served with rice, turkish bread, garden salad, tzatziki sauce and hummus. Chicken Wings (6 Pcs) Marinated chicken plus chips. Grilled Veggie Burger Vege patty layered with salsa, salad leaves, gherkin slow, pickled onion and rocket served with chips. Grilled Beef Burger Beef patty layered with cream cheese, roasted crumbled cashew, caramelised onion and rocket served with chips. Messy Chicken Burger Slow cooked chicken layered with smoked sour cream mixed with herbs, caramelized apple and onion, rocket served with chips. Lasagne Beef bolognese, bechamel sauce, parmesan cheese and basil. Caesar Salad Lettuce, croutons, parmesan cheese, bacon, with Caesar dressing. Nutella Fruitilla Pizza Banana,Strawberry, Nutella, Powdered sugar. Ben $ Cherry Ice Cream Nutella base with seasonal fruit and sugar icing.
# -*- coding: utf-8 -*- from sqlalchemy import Column, Integer, String from sqlalchemy.schema import ForeignKey from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class HostTable(Base): __tablename__ = 'hosts' id = Column(Integer, primary_key=True) name = Column(String, unique=True, nullable=False) address = Column(String, nullable=False) user = Column(String) password = Column(String) group = Column(Integer, ForeignKey('groups.id'), nullable=True) def __repr__(self): return "<name='%s', address='%s', user='%s', password='%s'>" \ % (self.name, self.address, self.user, self.password) class GroupsTable(Base): __tablename__ = 'groups' id = Column(Integer, primary_key=True) name = Column(String, nullable=False, unique=True) default_user_name = Column(String) default_password = Column(String) def __repr__(self): return "<id='%s', name='%s', default_user_name='%s', default_password='%s'>" \ % (self.id, self.name, self.default_user_name, self.default_password)
Knee pain can be the result of a wide variety of underlying causes. One such cause of knee pain that is often overlooked as even a possibility is entrapment of the saphenous nerve. The saphenous nerve is the largest sensory nerve in the human body, branching from the femoral nerve up the leg into the adductor canal in the hip. Nerve entrapment can happen when the nerve pierces membranes inside the leg or when the nerve is sheered at some point. Some activities are known for stretching or rubbing the nerve, and the repeated friction can result in nerve entrapment. Marathon running is one such activity that can cause nerve entrapment. Many runners experience muscle fatigue or excessive tightness in their hip rotators, as well as tension of the saphenous nerve through the adductor canal. Typically, nerve entrapment like this means surgery or injections for the patient’s knee pain. In the interest of testing a more conservative, less-invasive approach, a recent case study highlighted the efficacy of manual therapy and exercise rehabilitation on nerve entrapment. The patient was a 29-year-old female runner who competed in ultra-marathons and suffered from constant, nonspecific knee pain. She rated her pain as a 6 on a 10-point pain scale. Unfortunately, she reported that running exacerbated her knee pain. An examination revealed that the woman had hip external rotator fatigue and pelvic instability. Ultimately, she was diagnosed with saphenous nerve entrapment. A doctor of chiropractic utilized a conservative treatment called the Active Release Technique (ART). This process is done by actively placing tension on the surrounding tissues and the membrane that is pierced by the nerve. The hope is that this manual therapy on the soft tissues will release the nerve from its entrapment. During the first ART session, a release was felt and even heard, followed by a brief increase in the patient’s pain. However, the pain almost immediately decreased, and she rated her pain a 2 out of 10 after this treatment session. The effect lasted a week until her next ART session. Again, she felt a release, and this time she reported the pain was fully resolved. The woman was able to return to her ultra-marathon training. The second part of the treatment plan in this case study involved at-home exercise rehabilitation. The patient was instructed and followed through with exercises involving squats with both legs in front of an exercise ball against a wall, standing on one leg with the hip holding an exercise ball against a wall, and squats from a variety of angles on just the left leg, which is where the knee pain was located. The patient continued using these rehabilitative exercises periodically as needed, and at her 16-month follow-up she continued to be free of knee symptoms. Although this study was limited to a single patient, it was the first of its kind to investigate a noninvasive approach to saphenous nerve entrapment, and its success is good news for knee pain patients with this diagnosis who desire conservative management of the condition. Other causes of knee pain have been shown by previous research to respond well to conservative treatment options, such as trigger point therapy, exercise, and a form of acupressure called collateral meridian therapy. Settergren R. Conservative management of a saphenous nerve entrapment in a female ultra-marathon runner. Journal of Bodywork & Movement Therapies 2013; 17: 297-301. Written by: Kelly Johnson on July 26, 2013. Last revised by: Marissa LuckJuly 29, 2013.
# ------------------------------------------------------------------------- # Copyright (C) 2005-2013 Martin Strohalm <www.mmass.org> # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # Complete text of GNU GPL can be found in the file LICENSE.TXT in the # main directory of the program. # ------------------------------------------------------------------------- # load libs import numpy # load stopper from mod_stopper import CHECK_FORCE_QUIT # load modules import calculations # SIGNAL PROCESSING FUNCTIONS # --------------------------- def locate(signal, x): """Find nearest higher index of searched x-value. signal (numpy array) - signal data points x (float) - x value """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return 0 # locate x return calculations.signal_locate_x(signal, float(x)) # ---- def basepeak(signal): """Locate highest y-value in signal. Point index is returned. signal (numpy array) - signal data points """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: raise ValueError, "Signal contains no data!" # locate x return calculations.signal_locate_max_y(signal) # ---- def interpolate(p1, p2, x=None, y=None): """Calculates inner point between two points by linear interpolation. p1 (tuple of floats) - point 1 p2 (tuple of floats) - point 2 x (float) - x value (to interpolate y) y (float) - y value (to interpolate x) """ # interpolate y point if x != None: return calculations.signal_interpolate_y(float(p1[0]), float(p1[1]), float(p2[0]), float(p2[1]), float(x)) # interpolate x point elif y != None: return calculations.signal_interpolate_x(float(p1[0]), float(p1[1]), float(p2[0]), float(p2[1]), float(y)) # no value else: raise ValueError, "No x/y value provided for interpolation!" # ---- def boundaries(signal): """Calculates signal minima and maxima as (minX, minY, maxX, maxY). signal (numpy array) - signal data points """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: raise ValueError, "Signal contains no data!" # calculate boundaries return calculations.signal_box(signal) # ---- def maxima(signal): """Find local maxima in signal. signal (numpy array) - signal data points """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return numpy.array([]) # determine intensity return calculations.signal_local_maxima(signal) # ---- def intensity(signal, x): """Find corresponding y-value for searched x-value. signal (numpy array) - signal data points x (float) - x-value """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: raise ValueError, "Signal contains no data!" # determine intensity return calculations.signal_intensity(signal, float(x)) # ---- def centroid(signal, x, height): """Find peak centroid for searched x-value measured at y-value. signal (numpy array) - signal data points x (float) - x-value height (float) - y-value for width determination """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: raise ValueError, "Signal contains no data!" # determine centroid return calculations.signal_centroid(signal, float(x), float(height)) # ---- def width(signal, x, height): """Find peak width for searched x-value measured at y-value. signal (numpy array) - signal data points x (float) - x-value height (float) - y-value for width determination """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: raise ValueError, "Signal contains no data!" # determine width return calculations.signal_width(signal, float(x), float(height)) # ---- def area(signal, minX=None, maxX=None, baseline=None): """Return area under signal curve. signal (numpy array) - signal data points minX (float) - starting m/z value maxX (float) - ending m/z value baseline (numpy array) - signal baseline """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check baseline type if baseline != None: if not isinstance(baseline, numpy.ndarray): raise TypeError, "Baseline must be NumPy array!" if baseline.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return 0.0 # check range if minX != None and maxX != None and minX == maxX: return 0.0 # crop data if minX != None and maxX != None: signal = crop(signal, minX, maxX) # subtract baseline if baseline != None: signal = subbase(signal, baseline) # calculate area return calculations.signal_area(signal) # ---- def noise(signal, minX=None, maxX=None, x=None, window=0.1): """Calculates signal noise level and width. signal (numpy array) - signal data points minX, maxX (float) - x-axis range to use for calculation x (float) - x-value for which to calculate the noise +- window window (float) - x-axis range used for calculation, relative to given x (in %/100) """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return (0.0, 0.0) # use specified signal range if minX != None and maxX != None: i1 = locate(signal, minX) i2 = locate(signal, maxX) # use specified x +- window elif x != None and window != None: window = x*window i1 = locate(signal, x-window) i2 = locate(signal, x+window) # use whole signal range else: i1 = 0 i2 = len(signal) # get data from signal signal = signal[i1:i2] # check signal data if len(signal) == 0: return (0.0, 0.0) # calculate noise return calculations.signal_noise(signal) # ---- def baseline(signal, window=0.1, offset=0.): """Return baseline data. signal (numpy array) - signal data points window (float or None) - noise calculation window (%/100) offset (float) - baseline offset, relative to noise width (in %/100) """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: raise ValueError, "Signal contains no data!" # single segment baseline if window == None: noiseLevel, noiseWidth = noise(signal) noiseLevel -= noiseWidth*offset return numpy.array([ [signal[0][0], noiseLevel, noiseWidth], [signal[-1][0], noiseLevel, noiseWidth] ]) # make raster raster = [] minimum = max(0, signal[0][0]) x = signal[-1][0] while x > minimum: raster.append(x) x -= max(50, x*window) raster.append(minimum) raster.sort() # calc baseline data levels = [] widths = [] for i, x in enumerate(raster): i1 = locate(signal, x-x*window) i2 = locate(signal, x+x*window) if i1 == i2: noiseLevel = signal[i1][1] noiseWidth = 0.0 else: noiseLevel, noiseWidth = noise(signal[i1:i2]) levels.append([x, noiseLevel]) widths.append([x, noiseWidth]) # smooth baseline data swindow = 5 * window * (signal[-1][0] - signal[0][0]) levels = smooth(numpy.array(levels), 'GA', swindow, 2) widths = smooth(numpy.array(widths), 'GA', swindow, 2) # make baseline and apply offset buff = [] for i, x in enumerate(raster): width = abs(widths[i][1]) level = max(0, levels[i][1] - width*offset) buff.append([x, level, width]) return numpy.array(buff) # ---- def crop(signal, minX, maxX): """Crop signal to given x-range. New array is returned. signal (numpy array) - signal data points minX (float) - minimum x-value maxX (float) - maximum x-value """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" # check limits if minX > maxX: minX, maxX = maxX, minX # check signal data if len(signal) == 0 or signal[-1][0] < minX or signal[0][0] > maxX: return numpy.array([]) # crop data return calculations.signal_crop(signal, float(minX), float(maxX)) # ---- def offset(signal, x=0.0, y=0.0): """Shift signal by offset. New array is returned. signal (numpy array) - signal data points x (float) - x-axis offset y (float) - y-axis offset """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return numpy.array([]) # offset signal return calculations.signal_offset(signal, float(x), float(y)) # ---- def multiply(signal, x=1.0, y=1.0): """Multiply signal values by factor. New array is returned. signal (numpy array) - signal data points x (float) - x-axis multiplicator y (float) - y-axis multiplicator """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return numpy.array([]) # multiply signal return calculations.signal_multiply(signal, float(x), float(y)) # ---- def normalize(signal): """Normalize y-values of the signal to max 1. New array is returned. signal (numpy array) - signal data points """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return numpy.array([]) # offset signal return calculations.signal_normalize(signal) # ---- def smooth(signal, method, window, cycles=1): """Smooth signal by moving average filter. New array is returned. signal (numpy array) - signal data points method (MA GA SG) - smoothing method: MA - moving average, GA - Gaussian, SG - Savitzky-Golay window (float) - m/z window size for smoothing cycles (int) - number of repeating cycles """ # check signal type if not isinstance(signal, numpy.ndarray): raise TypeError, "Signal must be NumPy array!" if signal.dtype.name != 'float64': raise TypeError, "Signal data must be float64!" # check signal data if len(signal) == 0: return numpy.array([]) # apply moving average filter if method == 'MA': return movaver(signal, window, cycles, style='flat') # apply gaussian filter elif method == 'GA': return movaver(signal, window, cycles, style='gaussian') # apply savitzky-golay filter elif method == 'SG': return savgol(signal, window, cycles) # unknown smoothing method else: raise KeyError, "Unknown smoothing method! -->", method # ---- def movaver(signal, window, cycles=1, style='flat'): """Smooth signal by moving average filter. New array is returned. signal (numpy array) - signal data points window (float) - m/z window size for smoothing cycles (int) - number of repeating cycles """ # approximate number of points within window window = int(window*len(signal)/(signal[-1][0]-signal[0][0])) window = min(window, len(signal)) if window < 3: return signal.copy() if not window % 2: window -= 1 # unpack mz and intensity xAxis, yAxis = numpy.hsplit(signal,2) xAxis = xAxis.flatten() yAxis = yAxis.flatten() # smooth the points while cycles: CHECK_FORCE_QUIT() if style == 'flat': w = numpy.ones(window,'f') elif style == 'gaussian': r = numpy.array([(i-(window-1)/2.) for i in range(window)]) w = numpy.exp(-(r**2/(window/4.)**2)) else: w = eval('numpy.'+style+'(window)') s = numpy.r_[yAxis[window-1:0:-1], yAxis, yAxis[-2:-window-1:-1]] y = numpy.convolve(w/w.sum(), s, mode='same') yAxis = y[window-1:-window+1] cycles -=1 # return smoothed data xAxis.shape = (-1,1) yAxis.shape = (-1,1) data = numpy.concatenate((xAxis,yAxis), axis=1) return data.copy() # ---- def savgol(signal, window, cycles=1, order=3): """Smooth signal by Savitzky-Golay filter. New array is returned. signal (numpy array) - signal data points window (float) - m/z window size for smoothing cycles (int) - number of repeating cycles order (int) - order of polynom used """ # approximate number of points within window window = int(window*len(signal)/(signal[-1][0]-signal[0][0])) if window <= order: return signal.copy() # unpack axes xAxis, yAxis = numpy.hsplit(signal,2) yAxis = yAxis.flatten() # coeficients orderRange = range(order+1) halfWindow = (window-1) // 2 b = numpy.mat([[k**i for i in orderRange] for k in range(-halfWindow, halfWindow+1)]) m = numpy.linalg.pinv(b).A[0] window = len(m) halfWindow = (window-1) // 2 # precompute the offset values for better performance offsets = range(-halfWindow, halfWindow+1) offsetData = zip(offsets, m) # smooth the data while cycles: smoothData = list() yAxis = numpy.concatenate((numpy.zeros(halfWindow)+yAxis[0], yAxis, numpy.zeros(halfWindow)+yAxis[-1])) for i in range(halfWindow, len(yAxis) - halfWindow): CHECK_FORCE_QUIT() value = 0.0 for offset, weight in offsetData: value += weight * yAxis[i + offset] smoothData.append(value) yAxis = smoothData cycles -=1 # return smoothed data yAxis = numpy.array(yAxis) yAxis.shape = (-1,1) data = numpy.concatenate((xAxis,yAxis), axis=1) return data.copy() # ---- def combine(signalA, signalB): """Unify x-raster and combine two arrays (y=yA+yB). New array is returned. signalA (numpy array) - signal A data points signalB (numpy array) - signal B data points """ # check signal type if not isinstance(signalA, numpy.ndarray) or not isinstance(signalB, numpy.ndarray): raise TypeError, "Signals must be NumPy arrays!" if signalA.dtype.name != 'float64' or signalB.dtype.name != 'float64': raise TypeError, "Signals data must be float64!" # check signal data if len(signalA) == 0 and len(signalB) == 0: return numpy.array([]) # subtract signals return calculations.signal_combine(signalA, signalB) # ---- def overlay(signalA, signalB): """Unify x-raster and overlay two arrays (y=max(yA,yB)). New array is returned. signalA (numpy array) - signal A data points signalB (numpy array) - signal B data points """ # check signal type if not isinstance(signalA, numpy.ndarray) or not isinstance(signalB, numpy.ndarray): raise TypeError, "Signals must be NumPy arrays!" if signalA.dtype.name != 'float64' or signalB.dtype.name != 'float64': raise TypeError, "Signals data must be float64!" # check signal data if len(signalA) == 0 and len(signalB) == 0: return numpy.array([]) # subtract signals return calculations.signal_overlay(signalA, signalB) # ---- def subtract(signalA, signalB): """Unify x-raster and subtract two arrays (y=yA-yB). New array is returned. signalA (numpy array) - signal A data points signalB (numpy array) - signal B data points """ # check signal type if not isinstance(signalA, numpy.ndarray) or not isinstance(signalB, numpy.ndarray): raise TypeError, "Signals must be NumPy arrays!" if signalA.dtype.name != 'float64' or signalB.dtype.name != 'float64': raise TypeError, "Signals data must be float64!" # check signal data if len(signalA) == 0 and len(signalB) == 0: return numpy.array([]) # subtract signals return calculations.signal_subtract(signalA, signalB) # ---- def subbase(signal, baseline): """Subtract baseline from signal withou chaning x-raster. New array is returned. signal (numpy array) - signal data points baseline (numpy array) - baseline data points """ # check signal type if not isinstance(signal, numpy.ndarray) or not isinstance(baseline, numpy.ndarray): raise TypeError, "Signals must be NumPy arrays!" if signal.dtype.name != 'float64' or baseline.dtype.name != 'float64': raise TypeError, "Signals data must be float64!" # check signal data if len(signal) == 0: return numpy.array([]) # check baseline data if len(baseline) == 0: return signal.copy() # check baseline shape if baseline.shape[1] > 2: baseline = numpy.hsplit(baseline, (2,6))[0].copy() # subtract signals return calculations.signal_subbase(signal, baseline) # ----
Buddleja pichinchensis is endemic to the southern Cordillera Central of Colombia and the northern and central highlands of Ecuador, where it grows with Escallonia in páramo and subparamo regions at altitudes of 3,300 – 4,200 m. The species was first named and described by Kunth in 1818. Buddleja pichinchensis is a dioecious shrub or small tree 3 – 6 m tall in the wild, with a blackish fissured bark, becoming increasingly gnarled with age. The young branches are terete and covered with a thick tomentum, bearing sessile or subsessile lanceolate coriaceous or subcoriaceous leaves, glabrescent above, with dense felt-like tomentum below. The faintly scented golden yellow inflorescences are 3 – 12 cm long, with 1 – 2 orders of branches, usually with 3 – 6 pairs of pendent pedunculate heads 1.2 – 2 cm in diameter, each head with 12 – 18 flowers, the corollas 3 – 5 mm long. Pollination is possibly by hummingbirds. The species was briefly in commerce in the UK (Fillpots Nursery, Colchester) circa 2003, and some specimens may survive there. This page was last edited on 3 June 2018, at 03:56 (UTC).
import logging log = logging.getLogger(__name__) import requests, json from nuevo.drivers.neo4j.commands import JSONCommandEncoder, Neo4jBatchedCommand from nuevo.drivers.neo4j.content import Neo4jContent from nuevo.core.exceptions import NuevoException class RESTException(NuevoException): def __init__(self, message, status): self.status = status super(RESTException, self).__init__(message) class Neo4jAtomicREST(object): def __init__(self, base_url): self.session = requests.session( headers = { 'Accept':'application/json', 'Content-Type':'application/json' } ) self.base_url = base_url.rstrip('/') def execute(self, cmd, resp_cls=Neo4jContent): log.debug("EXEC: %s", cmd) response = self.send(cmd) if response is not None: return resp_cls(cid=None, response=response) else: return None def send(self, cmd): url = self.base_url + cmd.resource data = json.dumps(cmd, cls=JSONCommandEncoder) log.debug("SEND: %s %s %s", cmd.method, url, data) try: resp = self.session.request(cmd.method, url, data=data) code = resp.status_code cont = resp.content resp.raise_for_status() if cont: cont = json.loads(cont) else: cont = None log.debug("RECV: %s %s", code, cont) return cont except requests.exceptions.HTTPError as ex: raise RESTException(cont, code) class Neo4jBatchedREST(object): def __init__(self, base_url): self.session = requests.session( headers = { 'Accept':'application/json', 'Content-Type':'application/json' } ) self.base_url = base_url.rstrip('/') self._cid = 0 self.batch = [] self.futures = [] @property def next_cid(self): _cid = self._cid self._cid = self._cid + 1 return _cid def execute(self, cmd, resp_cls=Neo4jContent): cid = self.next_cid cmd = Neo4jBatchedCommand(cmd, cid) fut = resp_cls(cid=cid, response=None) self.batch.append(cmd) self.futures.append(fut) return fut def flush(self): url = "%s/batch" % self.base_url data = json.dumps(self.batch, cls=JSONCommandEncoder) log.debug("SEND: %s", data) resp = self.session.request("POST", url, data=data) try: resp.raise_for_status() responses = json.loads(resp.content) log.debug("RECV: %s", responses) self.materialize(responses) self._cid = 0 self.batch = [] self.futures = [] except requests.HTTPError as err: c = resp.content if 'message' in c: raise RESTException(c.message) else: raise RESTException(c.exception) def materialize(self, responses): for fut, response in zip(self.futures, responses): fut.__materialize__(response['body'])
Forum Discription: những ca khúc vượt thời đại, ca khúc bất tử vượt thời gian. The joy and utter fulfillment of being part of a committed relationship can only be surpassed by a love for God. Diamonds are forever and relationships should be that way also, you should Make It Mine.
from intelmq.lib.bot import Bot, sys from intelmq.lib.message import Event from intelmq.lib.harmonization import DateTime from intelmq.lib import utils class ArborParserBot(Bot): def process(self): report = self.receive_message() if not report.contains("raw"): self.acknowledge_message() raw_report = utils.base64_decode(report.value("raw")) for row in raw_report.split('\n'): row = row.strip() if len(row) == 0 or row.startswith('other'): continue event = Event() time_observation = DateTime().generate_datetime_now() event.add('time.observation', time_observation, sanitize=True) event.add('feed.name', u'arbor') event.add('feed.url', u'http://atlas-public.ec2.arbor.net/public/ssh_attackers') event.add('classification.type', u'brute-force') event.add("raw", row, sanitize=True) columns = ["source.ip"] row = row.split() for key, value in zip(columns, row): event.add(key, value, sanitize=True) self.send_message(event) self.acknowledge_message() if __name__ == "__main__": bot = ArborParserBot(sys.argv[1]) bot.start()
1. Preheat the oven to 180C/gas 4. Line a baking tray with baking parchment. 2. Cut the tomatoes in half lengthways and place on the tray, cut side up. Drizzle with the olive oil, season lightly and bake for about 1½ hours, until they colour and shrink a little (if after an hour they start to colour too much - cover with foil). 3. About 30 minutes before the tomatoes are due to be ready; season the chicken breasts and steam over a high heat for about 12-18 minutes, depending on the thickness of the breasts. Remove from the steamer, cover with cling film or foil and leave to cool to room temperature. 4. Thinly shave the fennel and place in iced water to crisp up. 5. Mix the beans with the sliced onion, bean sprouts, parsley and lemon juice and season lightly. 6. For the dressing, mix together the soy sauce, olive oil, truffle oil and lemon juice. Taste and adjust the seasoning as necessary. 7. Divide the bean salad between 4 plates and sit the tomato halves on top. Slice each chicken breast into 4 pieces and arrange over the bean salad. Toss the fennel with the dressing, scatter over the chicken and serve.
#!/usr/bin/env python # # A python package for interpreting METAR and SPECI weather reports. # # US conventions for METAR/SPECI reports are described in chapter 12 of # the Federal Meteorological Handbook No.1. (FMH-1 1995), issued by NOAA. # See <http://metar.noaa.gov/> # # International conventions for the METAR and SPECI codes are specified in # the WMO Manual on Codes, vol I.1, Part A (WMO-306 I.i.A). # # This module handles a reports that follow the US conventions, as well # the more general encodings in the WMO spec. Other regional conventions # are not supported at present. # # The current METAR report for a given station is available at the URL # http://weather.noaa.gov/pub/data/observations/metar/stations/<station>.TXT # where <station> is the four-letter ICAO station code. # # The METAR reports for all reporting stations for any "cycle" (i.e., hour) # in the last 24 hours is available in a single file at the URL # http://weather.noaa.gov/pub/data/observations/metar/cycles/<cycle>Z.TXT # where <cycle> is a 2-digit cycle number (e.g., "00", "05" or "23"). # # Copyright 2004 Tom Pollard # """ This module defines the Metar class. A Metar object represents the weather report encoded by a single METAR code. """ __author__ = "Tom Pollard" __email__ = "pollard@alum.mit.edu" __version__ = "1.2" __LICENSE__ = """ Copyright (c) 2004-2016, %s All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ % __author__ import datetime from Datatypes import * ## Exceptions class ParserError(Exception): """Exception raised when an unparseable group is found in body of the report.""" pass ## regular expressions to decode various groups of the METAR code MISSING_RE = re.compile(r"^[M/]+$") TYPE_RE = re.compile(r"^(?P<type>METAR|SPECI)\s+") STATION_RE = re.compile(r"^(?P<station>[A-Z][A-Z0-9]{3})\s+") TIME_RE = re.compile(r"""^(?P<day>\d\d) (?P<hour>\d\d) (?P<min>\d\d)Z?\s+""", re.VERBOSE) MODIFIER_RE = re.compile(r"^(?P<mod>AUTO|FINO|NIL|TEST|CORR?|RTD|CC[A-G])\s+") WIND_RE = re.compile(r"""^(?P<dir>[\dO]{3}|[0O]|///|MMM|VRB) (?P<speed>P?[\dO]{2,3}|[/M]{2,3}) (G(?P<gust>P?(\d{1,3}|[/M]{1,3})))? (?P<units>KTS?|LT|K|T|KMH|MPS)? (\s+(?P<varfrom>\d\d\d)V (?P<varto>\d\d\d))?\s+""", re.VERBOSE) VISIBILITY_RE = re.compile(r"""^(?P<vis>(?P<dist>(M|P)?\d\d\d\d|////) (?P<dir>[NSEW][EW]? | NDV)? | (?P<distu>(M|P)?(\d+|\d\d?/\d\d?|\d+\s+\d/\d)) (?P<units>SM|KM|M|U) | CAVOK )\s+""", re.VERBOSE) RUNWAY_RE = re.compile(r"""^(RVRNO | R(?P<name>\d\d(RR?|LL?|C)?)/ (?P<low>(M|P)?\d\d\d\d) (V(?P<high>(M|P)?\d\d\d\d))? (?P<unit>FT)?[/NDU]*)\s+""", re.VERBOSE) WEATHER_RE = re.compile(r"""^(?P<int>(-|\+|VC)*) (?P<desc>(MI|PR|BC|DR|BL|SH|TS|FZ)+)? (?P<prec>(DZ|RA|SN|SG|IC|PL|GR|GS|UP|/)*) (?P<obsc>BR|FG|FU|VA|DU|SA|HZ|PY)? (?P<other>PO|SQ|FC|SS|DS|NSW|/+)? (?P<int2>[-+])?\s+""", re.VERBOSE) SKY_RE= re.compile(r"""^(?P<cover>VV|CLR|SKC|SCK|NSC|NCD|BKN|SCT|FEW|[O0]VC|///) (?P<height>[\dO]{2,4}|///)? (?P<cloud>([A-Z][A-Z]+|///))?\s+""", re.VERBOSE) TEMP_RE = re.compile(r"""^(?P<temp>(M|-)?\d+|//|XX|MM)/ (?P<dewpt>(M|-)?\d+|//|XX|MM)?\s+""", re.VERBOSE) PRESS_RE = re.compile(r"""^(?P<unit>A|Q|QNH|SLP)? (?P<press>[\dO]{3,4}|////) (?P<unit2>INS)?\s+""", re.VERBOSE) RECENT_RE = re.compile(r"""^RE(?P<desc>MI|PR|BC|DR|BL|SH|TS|FZ)? (?P<prec>(DZ|RA|SN|SG|IC|PL|GR|GS|UP)*)? (?P<obsc>BR|FG|FU|VA|DU|SA|HZ|PY)? (?P<other>PO|SQ|FC|SS|DS)?\s+""", re.VERBOSE) WINDSHEAR_RE = re.compile(r"^(WS\s+)?(ALL\s+RWY|RWY(?P<name>\d\d(RR?|L?|C)?))\s+") COLOR_RE = re.compile(r"""^(BLACK)?(BLU|GRN|WHT|RED)\+? (/?(BLACK)?(BLU|GRN|WHT|RED)\+?)*\s*""", re.VERBOSE) RUNWAYSTATE_RE = re.compile(r"""((?P<name>\d\d) | R(?P<namenew>\d\d)(RR?|LL?|C)?/?) ((?P<special> SNOCLO|CLRD(\d\d|//)) | (?P<deposit>(\d|/)) (?P<extent>(\d|/)) (?P<depth>(\d\d|//)) (?P<friction>(\d\d|//)))\s+""", re.VERBOSE) TREND_RE = re.compile(r"^(?P<trend>TEMPO|BECMG|FCST|NOSIG)\s+") TRENDTIME_RE = re.compile(r"(?P<when>(FM|TL|AT))(?P<hour>\d\d)(?P<min>\d\d)\s+") REMARK_RE = re.compile(r"^(RMKS?|NOSPECI|NOSIG)\s+") ## regular expressions for remark groups AUTO_RE = re.compile(r"^AO(?P<type>\d)\s+") SEALVL_PRESS_RE = re.compile(r"^SLP(?P<press>\d\d\d)\s+") PEAK_WIND_RE = re.compile(r"""^P[A-Z]\s+WND\s+ (?P<dir>\d\d\d) (?P<speed>P?\d\d\d?)/ (?P<hour>\d\d)? (?P<min>\d\d)\s+""", re.VERBOSE) WIND_SHIFT_RE = re.compile(r"""^WSHFT\s+ (?P<hour>\d\d)? (?P<min>\d\d) (\s+(?P<front>FROPA))?\s+""", re.VERBOSE) PRECIP_1HR_RE = re.compile(r"^P(?P<precip>\d\d\d\d)\s+") PRECIP_24HR_RE = re.compile(r"""^(?P<type>6|7) (?P<precip>\d\d\d\d)\s+""", re.VERBOSE) PRESS_3HR_RE = re.compile(r"""^5(?P<tend>[0-8]) (?P<press>\d\d\d)\s+""", re.VERBOSE) TEMP_1HR_RE = re.compile(r"""^T(?P<tsign>0|1) (?P<temp>\d\d\d) ((?P<dsign>0|1) (?P<dewpt>\d\d\d))?\s+""", re.VERBOSE) TEMP_6HR_RE = re.compile(r"""^(?P<type>1|2) (?P<sign>0|1) (?P<temp>\d\d\d)\s+""", re.VERBOSE) TEMP_24HR_RE = re.compile(r"""^4(?P<smaxt>0|1) (?P<maxt>\d\d\d) (?P<smint>0|1) (?P<mint>\d\d\d)\s+""", re.VERBOSE) UNPARSED_RE = re.compile(r"(?P<group>\S+)\s+") LIGHTNING_RE = re.compile(r"""^((?P<freq>OCNL|FRQ|CONS)\s+)? LTG(?P<type>(IC|CC|CG|CA)*) ( \s+(?P<loc>( OHD | VC | DSNT\s+ | \s+AND\s+ | [NSEW][EW]? (-[NSEW][EW]?)* )+) )?\s+""", re.VERBOSE) TS_LOC_RE = re.compile(r"""TS(\s+(?P<loc>( OHD | VC | DSNT\s+ | \s+AND\s+ | [NSEW][EW]? (-[NSEW][EW]?)* )+))? ( \s+MOV\s+(?P<dir>[NSEW][EW]?) )?\s+""", re.VERBOSE) ## translation of weather location codes loc_terms = [ ("OHD", "overhead"), ("DSNT", "distant"), ("AND", "and"), ("VC", "nearby") ] def xlate_loc( loc ): """Substitute English terms for the location codes in the given string.""" for code, english in loc_terms: loc = loc.replace(code,english) return loc ## translation of the sky-condition codes into english SKY_COVER = { "SKC":"clear", "CLR":"clear", "NSC":"clear", "NCD":"clear", "FEW":"a few ", "SCT":"scattered ", "BKN":"broken ", "OVC":"overcast", "///":"", "VV":"indefinite ceiling" } CLOUD_TYPE = { "TCU":"towering cumulus", "CU":"cumulus", "CB":"cumulonimbus", "SC":"stratocumulus", "CBMAM":"cumulonimbus mammatus", "ACC":"altocumulus castellanus", "SCSL":"standing lenticular stratocumulus", "CCSL":"standing lenticular cirrocumulus", "ACSL":"standing lenticular altocumulus" } ## translation of the present-weather codes into english WEATHER_INT = { "-":"light", "+":"heavy", "-VC":"nearby light", "+VC":"nearby heavy", "VC":"nearby" } WEATHER_DESC = { "MI":"shallow", "PR":"partial", "BC":"patches of", "DR":"low drifting", "BL":"blowing", "SH":"showers", "TS":"thunderstorm", "FZ":"freezing" } WEATHER_PREC = { "DZ":"drizzle", "RA":"rain", "SN":"snow", "SG":"snow grains", "IC":"ice crystals", "PL":"ice pellets", "GR":"hail", "GS":"snow pellets", "UP":"unknown precipitation", "//":"" } WEATHER_OBSC = { "BR":"mist", "FG":"fog", "FU":"smoke", "VA":"volcanic ash", "DU":"dust", "SA":"sand", "HZ":"haze", "PY":"spray" } WEATHER_OTHER = { "PO":"sand whirls", "SQ":"squalls", "FC":"funnel cloud", "SS":"sandstorm", "DS":"dust storm" } WEATHER_SPECIAL = { "+FC":"tornado" } COLOR = { "BLU":"blue", "GRN":"green", "WHT":"white" } ## translation of various remark codes into English PRESSURE_TENDENCY = { "0":"increasing, then decreasing", "1":"increasing more slowly", "2":"increasing", "3":"increasing more quickly", "4":"steady", "5":"decreasing, then increasing", "6":"decreasing more slowly", "7":"decreasing", "8":"decreasing more quickly" } LIGHTNING_FREQUENCY = { "OCNL":"occasional", "FRQ":"frequent", "CONS":"constant" } LIGHTNING_TYPE = { "IC":"intracloud", "CC":"cloud-to-cloud", "CG":"cloud-to-ground", "CA":"cloud-to-air" } REPORT_TYPE = { "METAR":"routine report", "SPECI":"special report", "AUTO":"automatic report", "COR":"manually corrected report" } ## Helper functions def _report_match(handler,match): """Report success or failure of the given handler function. (DEBUG)""" if match: print(handler.__name__," matched '"+match+"'") else: print(handler.__name__," didn't match...") def _unparsedGroup( self, d ): """ Handle otherwise unparseable main-body groups. """ self._unparsed_groups.append(d['group']) ## METAR report objects debug = False class Metar(object): """METAR (aviation meteorology report)""" def __init__( self, metarcode, month=None, year=None, utcdelta=None): """Parse raw METAR code.""" self.code = metarcode # original METAR code self.type = 'METAR' # METAR (routine) or SPECI (special) self.mod = "AUTO" # AUTO (automatic) or COR (corrected) self.station_id = None # 4-character ICAO station code self.time = None # observation time [datetime] self.cycle = None # observation cycle (0-23) [int] self.wind_dir = None # wind direction [direction] self.wind_speed = None # wind speed [speed] self.wind_gust = None # wind gust speed [speed] self.wind_dir_from = None # beginning of range for win dir [direction] self.wind_dir_to = None # end of range for wind dir [direction] self.vis = None # visibility [distance] self.vis_dir = None # visibility direction [direction] self.max_vis = None # visibility [distance] self.max_vis_dir = None # visibility direction [direction] self.temp = None # temperature (C) [temperature] self.dewpt = None # dew point (C) [temperature] self.press = None # barometric pressure [pressure] self.runway = [] # runway visibility (list of tuples) self.weather = [] # present weather (list of tuples) self.recent = [] # recent weather (list of tuples) self.sky = [] # sky conditions (list of tuples) self.windshear = [] # runways w/ wind shear (list of strings) self.wind_speed_peak = None # peak wind speed in last hour self.wind_dir_peak = None # direction of peak wind speed in last hour self.peak_wind_time = None # time of peak wind observation [datetime] self.wind_shift_time = None # time of wind shift [datetime] self.max_temp_6hr = None # max temp in last 6 hours self.min_temp_6hr = None # min temp in last 6 hours self.max_temp_24hr = None # max temp in last 24 hours self.min_temp_24hr = None # min temp in last 24 hours self.press_sea_level = None # sea-level pressure self.precip_1hr = None # precipitation over the last hour self.precip_3hr = None # precipitation over the last 3 hours self.precip_6hr = None # precipitation over the last 6 hours self.precip_24hr = None # precipitation over the last 24 hours self._trend = False # trend groups present (bool) self._trend_groups = [] # trend forecast groups self._remarks = [] # remarks (list of strings) self._unparsed_groups = [] self._unparsed_remarks = [] self._now = datetime.datetime.utcnow() if utcdelta: self._utcdelta = utcdelta else: self._utcdelta = datetime.datetime.now() - self._now self._month = month self._year = year code = self.code+" " # (the regexps all expect trailing spaces...) try: ngroup = len(Metar.handlers) igroup = 0 ifailed = -1 while igroup < ngroup and code: pattern, handler, repeatable = Metar.handlers[igroup] if debug: print(handler.__name__,":",code) m = pattern.match(code) while m: ifailed = -1 if debug: _report_match(handler,m.group()) handler(self,m.groupdict()) code = code[m.end():] if self._trend: code = self._do_trend_handlers(code) if not repeatable: break if debug: print(handler.__name__,":",code) m = pattern.match(code) if not m and ifailed < 0: ifailed = igroup igroup += 1 if igroup == ngroup and not m: # print("** it's not a main-body group **") pattern, handler = (UNPARSED_RE, _unparsedGroup) if debug: print(handler.__name__,":",code) m = pattern.match(code) if debug: _report_match(handler,m.group()) handler(self,m.groupdict()) code = code[m.end():] igroup = ifailed ifailed = -2 # if it's still -2 when we run out of main-body # groups, we'll try parsing this group as a remark if pattern == REMARK_RE or self.press: while code: for pattern, handler in Metar.remark_handlers: if debug: print(handler.__name__,":",code) m = pattern.match(code) if m: if debug: _report_match(handler,m.group()) handler(self,m.groupdict()) code = pattern.sub("",code,1) break except Exception as err: raise ParserError(handler.__name__+" failed while processing '"+code+"'\n"+" ".join(err.args)) raise err if self._unparsed_groups: code = ' '.join(self._unparsed_groups) raise ParserError("Unparsed groups in body '"+code+"' while processing '"+metarcode+"'") def _do_trend_handlers(self, code): for pattern, handler, repeatable in Metar.trend_handlers: if debug: print(handler.__name__,":",code) m = pattern.match(code) while m: if debug: _report_match(handler, m.group()) self._trend_groups.append(m.group().strip()) handler(self,m.groupdict()) code = code[m.end():] if not repeatable: break m = pattern.match(code) return code def __str__(self): return self.string() def _handleType( self, d ): """ Parse the code-type group. The following attributes are set: type [string] """ self.type = d['type'] def _handleStation( self, d ): """ Parse the station id group. The following attributes are set: station_id [string] """ self.station_id = d['station'] def _handleModifier( self, d ): """ Parse the report-modifier group. The following attributes are set: mod [string] """ mod = d['mod'] if mod == 'CORR': mod = 'COR' if mod == 'NIL' or mod == 'FINO': mod = 'NO DATA' self.mod = mod def _handleTime( self, d ): """ Parse the observation-time group. The following attributes are set: time [datetime] cycle [int] _day [int] _hour [int] _min [int] """ self._day = int(d['day']) if not self._month: self._month = self._now.month if self._day > self._now.day: if self._month == 1: self._month = 12 else: self._month = self._month - 1 if not self._year: self._year = self._now.year if self._month > self._now.month: self._year = self._year - 1 elif self._month == self._now.month and self._day > self._now.day: self._year = self._year - 1 self._hour = int(d['hour']) self._min = int(d['min']) self.time = datetime.datetime(self._year, self._month, self._day, self._hour, self._min) if self._min < 45: self.cycle = self._hour else: self.cycle = self._hour+1 def _handleWind( self, d ): """ Parse the wind and variable-wind groups. The following attributes are set: wind_dir [direction] wind_speed [speed] wind_gust [speed] wind_dir_from [int] wind_dir_to [int] """ wind_dir = d['dir'].replace('O','0') if wind_dir != "VRB" and wind_dir != "///" and wind_dir != "MMM": self.wind_dir = direction(wind_dir) wind_speed = d['speed'].replace('O','0') units = d['units'] if units == 'KTS' or units == 'K' or units == 'T' or units == 'LT': units = 'KT' if wind_speed.startswith("P"): self.wind_speed = speed(wind_speed[1:], units, ">") elif not MISSING_RE.match(wind_speed): self.wind_speed = speed(wind_speed, units) if d['gust']: wind_gust = d['gust'] if wind_gust.startswith("P"): self.wind_gust = speed(wind_gust[1:], units, ">") elif not MISSING_RE.match(wind_gust): self.wind_gust = speed(wind_gust, units) if d['varfrom']: self.wind_dir_from = direction(d['varfrom']) self.wind_dir_to = direction(d['varto']) def _handleVisibility( self, d ): """ Parse the minimum and maximum visibility groups. The following attributes are set: vis [distance] vis_dir [direction] max_vis [distance] max_vis_dir [direction] """ vis = d['vis'] vis_less = None vis_dir = None vis_units = "M" vis_dist = "10000" if d['dist'] and d['dist'] != '////': vis_dist = d['dist'] if d['dir'] and d['dir'] != 'NDV': vis_dir = d['dir'] elif d['distu']: vis_dist = d['distu'] if d['units'] and d['units'] != "U": vis_units = d['units'] if vis_dist == "9999": vis_dist = "10000" vis_less = ">" if self.vis: if vis_dir: self.max_vis_dir = direction(vis_dir) self.max_vis = distance(vis_dist, vis_units, vis_less) else: if vis_dir: self.vis_dir = direction(vis_dir) self.vis = distance(vis_dist, vis_units, vis_less) def _handleRunway( self, d ): """ Parse a runway visual range group. The following attributes are set: range [list of tuples] . name [string] . low [distance] . high [distance] """ if d['name']: name = d['name'] low = distance(d['low']) if d['high']: high = distance(d['high']) else: high = low self.runway.append((name,low,high)) def _handleWeather( self, d ): """ Parse a present-weather group. The following attributes are set: weather [list of tuples] . intensity [string] . description [string] . precipitation [string] . obscuration [string] . other [string] """ inteni = d['int'] if not inteni and d['int2']: inteni = d['int2'] desci = d['desc'] preci = d['prec'] obsci = d['obsc'] otheri = d['other'] self.weather.append((inteni,desci,preci,obsci,otheri)) def _handleSky( self, d ): """ Parse a sky-conditions group. The following attributes are set: sky [list of tuples] . cover [string] . height [distance] . cloud [string] """ height = d['height'] if not height or height == "///": height = None else: height = height.replace('O','0') height = distance(int(height)*100,"FT") cover = d['cover'] if cover == 'SCK' or cover == 'SKC' or cover == 'CL': cover = 'CLR' if cover == '0VC': cover = 'OVC' cloud = d['cloud'] if cloud == '///': cloud = "" self.sky.append((cover,height,cloud)) def _handleTemp( self, d ): """ Parse a temperature-dewpoint group. The following attributes are set: temp temperature (Celsius) [float] dewpt dew point (Celsius) [float] """ temp = d['temp'] dewpt = d['dewpt'] if temp and temp != "//" and temp != "XX" and temp != "MM" : self.temp = temperature(temp) if dewpt and dewpt != "//" and dewpt != "XX" and dewpt != "MM" : self.dewpt = temperature(dewpt) def _handlePressure( self, d ): """ Parse an altimeter-pressure group. The following attributes are set: press [int] """ press = d['press'] if press != '////': press = float(press.replace('O','0')) if d['unit']: if d['unit'] == 'A' or (d['unit2'] and d['unit2'] == 'INS'): self.press = pressure(press/100,'IN') elif d['unit'] == 'SLP': if press < 500: press = press/10 + 1000 else: press = press/10 + 900 self.press = pressure(press,'MB') self._remarks.append("sea-level pressure %.1fhPa" % press) else: self.press = pressure(press,'MB') elif press > 2500: self.press = pressure(press/100,'IN') else: self.press = pressure(press,'MB') def _handleRecent( self, d ): """ Parse a recent-weather group. The following attributes are set: weather [list of tuples] . intensity [string] . description [string] . precipitation [string] . obscuration [string] . other [string] """ desci = d['desc'] preci = d['prec'] obsci = d['obsc'] otheri = d['other'] self.recent.append(("",desci,preci,obsci,otheri)) def _handleWindShear( self, d ): """ Parse wind-shear groups. The following attributes are set: windshear [list of strings] """ if d['name']: self.windshear.append(d['name']) else: self.windshear.append("ALL") def _handleColor( self, d ): """ Parse (and ignore) the color groups. The following attributes are set: trend [list of strings] """ pass def _handleRunwayState( self, d ): """ Parse (and ignore) the runway state. The following attributes are set: """ pass def _handleTrend( self, d ): """ Parse (and ignore) the trend groups. """ if 'trend' in d: self._trend_groups.append(d['trend']) self._trend = True def _startRemarks( self, d ): """ Found the start of the remarks section. """ self._remarks = [] def _handleSealvlPressRemark( self, d ): """ Parse the sea-level pressure remark group. """ value = float(d['press'])/10.0 if value < 50: value += 1000 else: value += 900 if not self.press: self.press = pressure(value,"MB") self.press_sea_level = pressure(value,"MB") def _handlePrecip24hrRemark( self, d ): """ Parse a 3-, 6- or 24-hour cumulative preciptation remark group. """ value = float(d['precip'])/100.0 if d['type'] == "6": if self.cycle == 3 or self.cycle == 9 or self.cycle == 15 or self.cycle == 21: self.precip_3hr = precipitation(value,"IN") else: self.precip_6hr = precipitation(value,"IN") else: self.precip_24hr = precipitation(value,"IN") def _handlePrecip1hrRemark( self, d ): """Parse an hourly precipitation remark group.""" value = float(d['precip'])/100.0 self.precip_1hr = precipitation(value,"IN") def _handleTemp1hrRemark( self, d ): """ Parse a temperature & dewpoint remark group. These values replace the temp and dewpt from the body of the report. """ value = float(d['temp'])/10.0 if d['tsign'] == "1": value = -value self.temp = temperature(value) if d['dewpt']: value2 = float(d['dewpt'])/10.0 if d['dsign'] == "1": value2 = -value2 self.dewpt = temperature(value2) def _handleTemp6hrRemark( self, d ): """ Parse a 6-hour maximum or minimum temperature remark group. """ value = float(d['temp'])/10.0 if d['sign'] == "1": value = -value if d['type'] == "1": self.max_temp_6hr = temperature(value,"C") else: self.min_temp_6hr = temperature(value,"C") def _handleTemp24hrRemark( self, d ): """ Parse a 24-hour maximum/minimum temperature remark group. """ value = float(d['maxt'])/10.0 if d['smaxt'] == "1": value = -value value2 = float(d['mint'])/10.0 if d['smint'] == "1": value2 = -value2 self.max_temp_24hr = temperature(value,"C") self.min_temp_24hr = temperature(value2,"C") def _handlePress3hrRemark( self, d ): """ Parse a pressure-tendency remark group. """ value = float(d['press'])/10.0 descrip = PRESSURE_TENDENCY[d['tend']] self._remarks.append("3-hr pressure change %.1fhPa, %s" % (value,descrip)) def _handlePeakWindRemark( self, d ): """ Parse a peak wind remark group. """ peak_dir = int(d['dir']) peak_speed = int(d['speed']) self.wind_speed_peak = speed(peak_speed, "KT") self.wind_dir_peak = direction(peak_dir) peak_min = int(d['min']) if d['hour']: peak_hour = int(d['hour']) else: peak_hour = self._hour self.peak_wind_time = datetime.datetime(self._year, self._month, self._day, peak_hour, peak_min) if self.peak_wind_time > self.time: if peak_hour > self._hour: self.peak_wind_time -= datetime.timedelta(hours=24) else: self.peak_wind_time -= datetime.timedelta(hours=1) self._remarks.append("peak wind %dkt from %d degrees at %d:%02d" % \ (peak_speed, peak_dir, peak_hour, peak_min)) def _handleWindShiftRemark( self, d ): """ Parse a wind shift remark group. """ if d['hour']: wshft_hour = int(d['hour']) else: wshft_hour = self._hour wshft_min = int(d['min']) self.wind_shift_time = datetime.datetime(self._year, self._month, self._day, wshft_hour, wshft_min) if self.wind_shift_time > self.time: if wshft_hour > self._hour: self.wind_shift_time -= datetime.timedelta(hours=24) else: self.wind_shift_time -= datetime.timedelta(hours=1) text = "wind shift at %d:%02d" % (wshft_hour, wshft_min) if d['front']: text += " (front)" self._remarks.append(text) def _handleLightningRemark( self, d ): """ Parse a lightning observation remark group. """ parts = [] if d['freq']: parts.append(LIGHTNING_FREQUENCY[d['freq']]) parts.append("lightning") if d['type']: ltg_types = [] group = d['type'] while group: ltg_types.append(LIGHTNING_TYPE[group[:2]]) group = group[2:] parts.append("("+",".join(ltg_types)+")") if d['loc']: parts.append(xlate_loc(d['loc'])) self._remarks.append(" ".join(parts)) def _handleTSLocRemark( self, d ): """ Parse a thunderstorm location remark group. """ text = "thunderstorm" if d['loc']: text += " "+xlate_loc(d['loc']) if d['dir']: text += " moving %s" % d['dir'] self._remarks.append(text) def _handleAutoRemark( self, d ): """ Parse an automatic station remark group. """ if d['type'] == "1": self._remarks.append("Automated station") elif d['type'] == "2": self._remarks.append("Automated station (type 2)") def _unparsedRemark( self, d ): """ Handle otherwise unparseable remark groups. """ self._unparsed_remarks.append(d['group']) ## the list of handler functions to use (in order) to process a METAR report handlers = [ (TYPE_RE, _handleType, False), (STATION_RE, _handleStation, False), (TIME_RE, _handleTime, False), (MODIFIER_RE, _handleModifier, False), (WIND_RE, _handleWind, False), (VISIBILITY_RE, _handleVisibility, True), (RUNWAY_RE, _handleRunway, True), (WEATHER_RE, _handleWeather, True), (SKY_RE, _handleSky, True), (TEMP_RE, _handleTemp, False), (PRESS_RE, _handlePressure, True), (RECENT_RE,_handleRecent, True), (WINDSHEAR_RE, _handleWindShear, True), (COLOR_RE, _handleColor, True), (RUNWAYSTATE_RE, _handleRunwayState, True), (TREND_RE, _handleTrend, False), (REMARK_RE, _startRemarks, False) ] trend_handlers = [ (TRENDTIME_RE, _handleTrend, True), (WIND_RE, _handleTrend, True), (VISIBILITY_RE, _handleTrend, True), (WEATHER_RE, _handleTrend, True), (SKY_RE, _handleTrend, True), (COLOR_RE, _handleTrend, True)] ## the list of patterns for the various remark groups, ## paired with the handler functions to use to record the decoded remark. remark_handlers = [ (AUTO_RE, _handleAutoRemark), (SEALVL_PRESS_RE, _handleSealvlPressRemark), (PEAK_WIND_RE, _handlePeakWindRemark), (WIND_SHIFT_RE, _handleWindShiftRemark), (LIGHTNING_RE, _handleLightningRemark), (TS_LOC_RE, _handleTSLocRemark), (TEMP_1HR_RE, _handleTemp1hrRemark), (PRECIP_1HR_RE, _handlePrecip1hrRemark), (PRECIP_24HR_RE, _handlePrecip24hrRemark), (PRESS_3HR_RE, _handlePress3hrRemark), (TEMP_6HR_RE, _handleTemp6hrRemark), (TEMP_24HR_RE, _handleTemp24hrRemark), (UNPARSED_RE, _unparsedRemark) ] ## functions that return text representations of conditions for output def string( self ): """ Return a human-readable version of the decoded report. """ lines = [] lines.append("station: %s" % self.station_id) # if self.type: # lines.append("type: %s" % self.report_type()) if self.time: lines.append("time: %s" % self.time.ctime()) if self.temp: lines.append("temperature: %s" % self.temp.string("C")) if self.dewpt: lines.append("dew point: %s" % self.dewpt.string("C")) if self.wind_speed: lines.append("wind: %s" % self.wind()) if self.wind_speed_peak: lines.append("peak wind: %s" % self.peak_wind()) if self.wind_shift_time: lines.append("wind shift: %s" % self.wind_shift()) if self.vis: lines.append("visibility: %s" % self.visibility()) if self.runway: lines.append("visual range: %s" % self.runway_visual_range()) if self.press: lines.append("pressure: %s" % self.press.string("mb")) if self.weather: lines.append("weather: %s" % self.present_weather()) if self.sky: lines.append("sky: %s" % self.sky_conditions("\n ")) if self.press_sea_level: lines.append("sea-level pressure: %s" % self.press_sea_level.string("mb")) if self.max_temp_6hr: lines.append("6-hour max temp: %s" % str(self.max_temp_6hr)) if self.max_temp_6hr: lines.append("6-hour min temp: %s" % str(self.min_temp_6hr)) if self.max_temp_24hr: lines.append("24-hour max temp: %s" % str(self.max_temp_24hr)) if self.max_temp_24hr: lines.append("24-hour min temp: %s" % str(self.min_temp_24hr)) if self.precip_1hr: lines.append("1-hour precipitation: %s" % str(self.precip_1hr)) if self.precip_3hr: lines.append("3-hour precipitation: %s" % str(self.precip_3hr)) if self.precip_6hr: lines.append("6-hour precipitation: %s" % str(self.precip_6hr)) if self.precip_24hr: lines.append("24-hour precipitation: %s" % str(self.precip_24hr)) if self._remarks: lines.append("remarks:") lines.append("- "+self.remarks("\n- ")) if self._unparsed_remarks: lines.append("- "+' '.join(self._unparsed_remarks)) ## lines.append("METAR: "+self.code) return "\n".join(lines) def report_type( self ): """ Return a textual description of the report type. """ if self.type == None: text = "unknown report type" elif self.type in REPORT_TYPE: text = REPORT_TYPE[self.type] else: text = self.type+" report" if self.cycle: text += ", cycle %d" % self.cycle if self.mod: if self.mod in REPORT_TYPE: text += " (%s)" % REPORT_TYPE[self.mod] else: text += " (%s)" % self.mod return text def wind( self, units="KT" ): """ Return a textual description of the wind conditions. Units may be specified as "MPS", "KT", "KMH", or "MPH". """ if self.wind_speed == None: return "missing" elif self.wind_speed.value() == 0.0: text = "calm" else: wind_speed = self.wind_speed.string(units) if not self.wind_dir: text = "variable at %s" % wind_speed elif self.wind_dir_from: text = "%s to %s at %s" % \ (self.wind_dir_from.compass(), self.wind_dir_to.compass(), wind_speed) else: text = "%s at %s" % (self.wind_dir.compass(), wind_speed) if self.wind_gust: text += ", gusting to %s" % self.wind_gust.string(units) return text def peak_wind( self, units="KT" ): """ Return a textual description of the peak wind conditions. Units may be specified as "MPS", "KT", "KMH", or "MPH". """ if self.wind_speed_peak == None: return "missing" elif self.wind_speed_peak.value() == 0.0: text = "calm" else: wind_speed = self.wind_speed_peak.string(units) if not self.wind_dir_peak: text = wind_speed else: text = "%s at %s" % (self.wind_dir_peak.compass(), wind_speed) if not self.peak_wind_time == None: text += " at %s" % self.peak_wind_time.strftime('%H:%M') return text def wind_shift( self, units="KT" ): """ Return a textual description of the wind shift time Units may be specified as "MPS", "KT", "KMH", or "MPH". """ if self.wind_shift_time == None: return "missing" else: return self.wind_shift_time.strftime('%H:%M') def visibility( self, units=None ): """ Return a textual description of the visibility. Units may be statute miles ("SM") or meters ("M"). """ if self.vis == None: return "missing" if self.vis_dir: text = "%s to %s" % (self.vis.string(units), self.vis_dir.compass()) else: text = self.vis.string(units) if self.max_vis: if self.max_vis_dir: text += "; %s to %s" % (self.max_vis.string(units), self.max_vis_dir.compass()) else: text += "; %s" % self.max_vis.string(units) return text def runway_visual_range( self, units=None ): """ Return a textual description of the runway visual range. """ lines = [] for name,low,high in self.runway: if low != high: lines.append("on runway %s, from %d to %s" % (name, low.value(units), high.string(units))) else: lines.append("on runway %s, %s" % (name, low.string(units))) return "; ".join(lines) def present_weather( self ): """ Return a textual description of the present weather. """ return self._weather( self.weather ) def recent_weather( self ): """ Return a textual description of the recent weather. """ return self._weather( self.recent ) def _weather( self, weather ): """ Return a textual description of weather. """ text_list = [] for weatheri in weather: (inteni,desci,preci,obsci,otheri) = weatheri text_parts = [] code_parts = [] if inteni: code_parts.append(inteni) text_parts.append(WEATHER_INT[inteni]) if desci: code_parts.append(desci) if desci != "SH" or not preci: text_parts.append(WEATHER_DESC[desci[0:2]]) if len(desci) == 4: text_parts.append(WEATHER_DESC[desci[2:]]) if preci: code_parts.append(preci) if len(preci) == 2: precip_text = WEATHER_PREC[preci] elif len(preci) == 4: precip_text = WEATHER_PREC[preci[:2]]+" and " precip_text += WEATHER_PREC[preci[2:]] elif len(preci) == 6: precip_text = WEATHER_PREC[preci[:2]]+", " precip_text += WEATHER_PREC[preci[2:4]]+" and " precip_text += WEATHER_PREC[preci[4:]] if desci == "TS": text_parts.append("with") text_parts.append(precip_text) if desci == "SH": text_parts.append(WEATHER_DESC[desci]) if obsci: code_parts.append(obsci) text_parts.append(WEATHER_OBSC[obsci]) if otheri: code_parts.append(otheri) text_parts.append(WEATHER_OTHER[otheri]) code = " ".join(code_parts) if code in WEATHER_SPECIAL: text_list.append(WEATHER_SPECIAL[code]) else: text_list.append(" ".join(text_parts)) return "; ".join(text_list) def sky_conditions( self, sep="; " ): """ Return a textual description of the sky conditions. """ text_list = [] for skyi in self.sky: (cover,height,cloud) = skyi if cover in ["SKC", "CLR", "NSC"]: text_list.append(SKY_COVER[cover]) else: if cloud: what = CLOUD_TYPE[cloud] elif SKY_COVER[cover].endswith(" "): what = "clouds" else: what = "" if cover == "VV": text_list.append("%s%s, vertical visibility to %s" % (SKY_COVER[cover],what,str(height))) else: text_list.append("%s%s at %s" % (SKY_COVER[cover],what,str(height))) return sep.join(text_list) def trend( self ): """ Return the trend forecast groups """ return " ".join(self._trend_groups) def remarks( self, sep="; "): """ Return the decoded remarks. """ return sep.join(self._remarks)
Saugeen Valley Conservation Authority is a non-for-profit organization, established in 1950 under the Province's Conservation Authorities Act, to further the conservation, restoration, development and management of renewable resources in partnership with its 15 member municipalities. The office of Saugeen Conservation is located at the south end of the Hamlet of Formosa. Saugeen Conservation is governed by its Authority Members who are 15 individuals representing the municipalities within the Saugeen watershed jurisdiction. Approximately eight meetings are held on an annual basis. (see below for meeting schedule) and these meetings are open to the public. Meetings of the Executive Committee are held on an 'as required' basis, depending on the urgency and importance of various issues and developments as they relate to the business of the Authority. Administrative office hours are from 8:30am to 4:30pm., Monday to Friday. 1078 Bruce Rd. #12, P.O. Box 150, Formosa, ON N0G 1W0.
""" Matplotlib Animation Example author: Jake Vanderplas email: vanderplas@astro.washington.edu website: http://jakevdp.github.com license: BSD Please feel free to use and modify this, but keep the above information. Thanks! """ import numpy as np from matplotlib import pyplot as plt from matplotlib import animation import matplotlib import random import sys import csv sys.path.append('../../../') from util import getParetoFront filename = sys.argv[1] data = [] for i in range(1, int(sys.argv[2])): tmp_filename = filename + str(i) + ".csv" csvfile = open(tmp_filename, 'r') spamreader = csv.reader(csvfile, delimiter=',') tmp_data = [] for row in spamreader: tmp_data.append(row) # tmp_data.append([float(row[0]), float(row[1]), float(row[2])]) data.append(tmp_data) data = np.array(data) up = 2 low = 0 # First set up the figure, the axis, and the plot element we want to animate fig = plt.figure() ax = fig.add_subplot(111) ax.set_ylim([0.4,0.8]) ax.set_xlim([0.65,1]) ax.set_xlabel('Efficiency') ax.set_ylabel('Effort') ax.set_title('Pareto Optimal Front Estimation with ORCA') # ax = plt.axes(xlim=(low, up), ylim=(low, up)) # ax = plt.axes(xlim=(0.9, 1.0), ylim=(0, 1)) scat1 = ax.scatter([3], [4], c="b") line, = ax.plot([], [], lw=2, c='g') # initialization function: plot the background of each frame def init(): print "paths" # print scat.get_paths() # sys.exit() # scat.set_paths(matplotlib.path.Path([[2, 3]])) return scat1, line # animation function. This is called sequentially def animate(i): # scat1.set_paths(np.array(tmp_data)) # scat.set_paths([x, y]) # print scat.get_offsets() tmp_data=np.array(data[i], dtype='d') # print tmp_data[:, 1:3] # scat1.set_paths(tmp_data) scat1.set_offsets(tmp_data) fitnesses = tmp_data[:,0:2] parameters = tmp_data[:,2:] # print tmp_data # print fitnesses # print parameters front = getParetoFront(fitnesses, parameters) line.set_data(front[:,0], front[:,1]) return scat1, line # call the animator. blit=True means only re-draw the parts that have changed. anim = animation.FuncAnimation(fig, animate, init_func=init, frames=200, interval=100, blit=True) # save the animation as an mp4. This requires ffmpeg or mencoder to be # installed. The extra_args ensure that the x264 codec is used, so that # the video can be embedded in html5. You may need to adjust this for # your system: for more information, see # http://matplotlib.sourceforge.net/api/animation_api.html anim.save('basic_animation.mp4', fps=10, extra_args=['-vcodec', 'libx264']) plt.show()
* In addition to the stated delivery time outside Europe, you should also calculate up to 2 weeks for customs clearance. Swimrunshop.com has no influence on processing time, and we are not able to contact customs in your country. We aim to deliver your order as fast as possible. In case your ordered items are temporarily out of stock, they will be delivered within 10-14 days. Your delivery will be sent as an package with Post Denmark or GLS. If you wish to have your order delivered to your work please fill in your company address under ‘Ship to a different address’. Please remember both your company name and your own name.
#!/usr/bin/env python # -*- coding: UTF-8 -*- from models.anki import AnkiModel from models.kanji import Kanji from models.kanji_word import KanjiWord from utf8_helper import force_UTF8 import kana import settings import argparse import sys from collections import Counter def parse(args=None): parser = argparse.ArgumentParser(description='Returns a random word') parser.add_argument( '-n', '--num', dest='count', action='store', type=int, default=10, help='The number of words to display' ) out = parser.parse_args(args) return out if __name__ == '__main__': force_UTF8() args = parse() # Find all the kanji that are in the deck all_kanji = set() for word in KanjiWord.all(): for kanji in word.kanji: all_kanji.add(kanji) for kanji in Kanji.all(): all_kanji.add(kanji) # Count which kanji the input data has data = Counter(unicode(sys.stdin.read())) for char, count in data.most_common(): # we don't want kana if kana.is_kana(char): del data[char] # Nor do we want kanji we know if char in all_kanji: del data[char] # Nor any non-kanji chars if not kana.is_kanji(char): del data[char] for char, count in data.most_common(args.count): print char, count
Digital vs. Print: Different, Not Worse? Could the Same Genes Shape Math Skills As Reading Ability? Countdown to Your First Year: What Are Students Supposed to Learn? MzTeachuh: Need A Dose Of Walt Whitman? Need A Dose Of Walt Whitman? There is something so appealing about Walt Whitman; the poet willing to publish his own radiating, pulsating poetry in the middle of Victorianism. He chose to go into the Civil War hospitals, nursing the mutilated soldiers. Walt caught the essence of Christian-good-works America without the Puritan judgment and hostility. Walt did not recognized class distinctions. He honored the folks who actually built America. I like the whoopee; maybe because I'm from the West; marinated in creative California. Optimism lives. Amazement lives. Joy lives. O amazement of things--even the least particle! Kick your shoes off, relax, it's been a long day. Give your left brain time off. This is a very smooth relaxing tune performed Azymuth. "Voo Sobre O Horizonte (Fly Over The Horizon)" Can strong parental bond protect infants down to their DNA? Are Learning Disabilities Holding You Back? Those lines are awfully straight. Like classroom rules. These colors vivid, bright. Like kids' ideas. That arrangement is logical, but with surprises. Like a class discussion with student input. This medium is controlled by the artist. Like a teacher with a lesson plan. Talk about the perfection of order and creativity: our friend, Mozart. The artists are Aleksey Igudesman and Hyung-ki Joo. Their Show Igudesman & Joo includes various and hilarious music by Mozart, Rach­mani­nov, Bach, Vivaldi, Strauss, Beethoven and their own. The two classical musicians kibitzing about the playing of this music is so great! Which Universities Are Effectively Using Social Media? They Can Text & Tweet. Can They Speak? To have the wish I wish tonight. Music for when your wish comes true.
#Embedded file name: ext/toolchain/commands1.py import sys, os, ConfigParser, shutil, re, ftputil, zipfile, glob, commands from generators import VisualStudioGenerator, EclipseGenerator, XcodeGenerator, MakefilesGenerator from getopt import gnu_getopt if sys.version_info >= (2, 4): import subprocess class Toolchain(): requiredMajor = 2 requiredMinor = 6 globalOptions = 'v' globalOptionsLong = ['no-prompts', 'verbose', 'skip-gui', 'skip-core'] cmd_opt_dict = {'about': ['', []], 'setup': ['g:', ['generator=']], 'configure': ['g:dr', ['generator=', 'debug', 'release', 'mac-sdk=', 'mac-identity=']], 'build': ['dr', ['debug', 'release']], 'clean': ['dr', ['debug', 'release']], 'update': ['', []], 'install': ['', []], 'doxygen': ['', []], 'dist': ['', ['vcredist-dir=', 'qt-dir=']], 'distftp': ['', ['host=', 'user=', 'pass=', 'dir=']], 'kill': ['', []], 'usage': ['', []], 'revision': ['', []], 'reformat': ['', []], 'open': ['', []], 'genlist': ['', []], 'reset': ['', []], 'signwin': ['', ['pfx=', 'pwd=', 'dist']], 'signmac': ['', []]} cmd_alias_dict = {'info': 'about', 'help': 'usage', 'package': 'dist', 'docs': 'doxygen', 'make': 'build', 'cmake': 'configure'} def complete_command(self, arg): completions = [] for cmd, optarg in self.cmd_opt_dict.iteritems(): if cmd == arg: return [cmd] if cmd.startswith(arg): completions.append(cmd) for alias, cmd in self.cmd_alias_dict.iteritems(): if alias == arg: return [alias] if alias.startswith(arg): completions.append(alias) return completions def start_cmd(self, argv): cmd_arg = '' if len(argv) > 1: cmd_arg = argv[1] if cmd_arg in ('--help', '-h', '--usage', '-u', '/?'): cmd_arg = 'usage' completions = self.complete_command(cmd_arg) if cmd_arg and len(completions) > 0: if len(completions) == 1: cmd = completions[0] cmd_map = list() if cmd_arg != cmd: cmd_map.append(cmd_arg) cmd_map.append(cmd) if cmd in self.cmd_alias_dict.keys(): alias = cmd if cmd_arg == cmd: cmd_map.append(alias) cmd = self.cmd_alias_dict[cmd] cmd_map.append(cmd) if len(cmd_map) != 0: print 'Mapping command: %s' % ' -> '.join(cmd_map) self.run_cmd(cmd, argv[2:]) return 0 print 'Command `%s` too ambiguous, could mean any of: %s' % (cmd_arg, ', '.join(completions)) else: if len(argv) == 1: print 'No command specified, showing usage.\n' else: print 'Command not recognised: %s\n' % cmd_arg self.run_cmd('usage') return 1 def run_cmd(self, cmd, argv = []): verbose = False try: options_pair = self.cmd_opt_dict[cmd] options = self.globalOptions + options_pair[0] options_long = [] options_long.extend(self.globalOptionsLong) options_long.extend(options_pair[1]) opts, args = gnu_getopt(argv, options, options_long) for o, a in opts: if o in ('-v', '--verbose'): verbose = True handler = CommandHandler(argv, opts, args, verbose) cmd_func = getattr(handler, cmd) cmd_func() except: if not verbose: sys.stderr.write('Error: ' + sys.exc_info()[1].__str__() + '\n') sys.exit(1) else: raise def run(self, argv): if sys.version_info < (self.requiredMajor, self.requiredMinor): print 'Python version must be at least ' + str(self.requiredMajor) + '.' + str(self.requiredMinor) + ', but is ' + str(sys.version_info[0]) + '.' + str(sys.version_info[1]) sys.exit(1) try: self.start_cmd(argv) except KeyboardInterrupt: print '\n\nUser aborted, exiting.' class InternalCommands(): project = 'synergy' setup_version = 5 website_url = 'http://synergy-project.org/' this_cmd = 'hm' cmake_cmd = 'cmake' qmake_cmd = 'qmake' make_cmd = 'make' xcodebuild_cmd = 'xcodebuild' w32_make_cmd = 'mingw32-make' w32_qt_version = '4.6.2' defaultTarget = 'release' cmake_dir = 'res' gui_dir = 'src/gui' doc_dir = 'doc' extDir = 'ext' sln_filename = '%s.sln' % project xcodeproj_filename = '%s.xcodeproj' % project configDir = 'build' configFilename = '%s/%s.cfg' % (configDir, this_cmd) qtpro_filename = 'gui.pro' doxygen_filename = 'doxygen.cfg' cmake_url = 'http://www.cmake.org/cmake/resources/software.html' prevdir = '' generator_id = None no_prompts = False enableMakeCore = True enableMakeGui = True macSdk = None macIdentity = None gtestDir = 'gtest-1.6.0' gmockDir = 'gmock-1.6.0' win32_generators = {1: VisualStudioGenerator('10'), 2: VisualStudioGenerator('10 Win64'), 3: VisualStudioGenerator('9 2008'), 4: VisualStudioGenerator('9 2008 Win64'), 5: VisualStudioGenerator('8 2005'), 6: VisualStudioGenerator('8 2005 Win64')} unix_generators = {1: MakefilesGenerator(), 2: EclipseGenerator()} darwin_generators = {1: MakefilesGenerator(), 2: XcodeGenerator(), 3: EclipseGenerator()} def getBuildDir(self, target = ''): return self.getGenerator().getBuildDir(target) def getBinDir(self, target = ''): return self.getGenerator().getBinDir(target) def sln_filepath(self): return '%s\\%s' % (self.getBuildDir(), self.sln_filename) def xcodeproj_filepath(self, target = ''): return '%s/%s' % (self.getBuildDir(target), self.xcodeproj_filename) def usage(self): app = sys.argv[0] print 'Usage: %s <command> [-g <index>|-v|--no-prompts|<command-options>]\n\nReplace [command] with one of:\n about Show information about this script\n setup Runs the initial setup for this script\n conf Runs cmake (generates project files)\n open Attempts to open the generated project file\n build Builds using the platform build chain\n clean Cleans using the platform build chain\n kill Kills all synergy processes (run as admin)\n update Updates the source code from repository\n revision Display the current source code revision\n package Create a distribution package (e.g. tar.gz)\n install Installs the program\n doxygen Builds doxygen documentation\n reformat Reformat .cpp and .h files using AStyle\n genlist Shows the list of available platform generators\n usage Shows the help screen\n\nExample: %s build -g 3' % (app, app) def configureAll(self, targets, extraArgs = ''): if len(targets) == 0: targets += [self.defaultTarget] for target in targets: self.configure(target) def checkGTest(self): dir = self.extDir + '/' + self.gtestDir if os.path.isdir(dir): return zipFilename = dir + '.zip' if not os.path.exists(zipFilename): raise Exception('GTest zip not found at: ' + zipFilename) if not os.path.exists(dir): os.mkdir(dir) zip = zipfile.ZipFile(zipFilename) self.zipExtractAll(zip, dir) def checkGMock(self): dir = self.extDir + '/' + self.gmockDir if os.path.isdir(dir): return zipFilename = dir + '.zip' if not os.path.exists(zipFilename): raise Exception('GMock zip not found at: ' + zipFilename) if not os.path.exists(dir): os.mkdir(dir) zip = zipfile.ZipFile(zipFilename) self.zipExtractAll(zip, dir) def zipExtractAll(self, z, dir): if not dir.endswith('/'): dir += '/' for f in z.namelist(): if f.endswith('/'): os.makedirs(dir + f) else: z.extract(f, dir) def configure(self, target = '', extraArgs = ''): self.ensure_setup_latest() if sys.platform == 'darwin': config = self.getConfig() if self.macSdk: config.set('hm', 'macSdk', self.macSdk) elif config.has_option('hm', 'macSdk'): self.macSdk = config.get('hm', 'macSdk') if self.macIdentity: config.set('hm', 'macIdentity', self.macIdentity) elif config.has_option('hm', 'macIdentity'): self.macIdentity = config.get('hm', 'macIdentity') self.write_config(config) if not self.macSdk: raise Exception('Arg missing: --mac-sdk <version>') if not self.macIdentity: raise Exception('Arg missing: --mac-identity <name>') sdkDir = self.getMacSdkDir() if not os.path.exists(sdkDir): raise Exception('Mac SDK not found at: ' + sdkDir) os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.macSdk if target == '': print 'Defaulting target to: ' + self.defaultTarget target = self.defaultTarget if self.enableMakeCore: self.configureCore(target, extraArgs) if self.enableMakeGui: self.configureGui(target, extraArgs) self.setConfRun(target) def configureCore(self, target = '', extraArgs = ''): _cmake_cmd = self.persist_cmake() generator = self.getGenerator() if generator != self.findGeneratorFromConfig(): print 'Generator changed, running setup.' self.setup(target) cmake_args = '' #if generator.cmakeName != '': # cmake_args += ' -DCMAKE_TOOLCHAIN_FILE=/Users/diego/hghome/ios-cmake/toolchain/iOS.cmake' print generator.cmakeName if generator.cmakeName.find('Unix Makefiles') != -1: print "UNIX MAKEFILES!" cmake_args += ' -DCMAKE_BUILD_TYPE=' + target.capitalize() if sys.platform == 'darwin': macSdkMatch = re.match('(\\d+)\\.(\\d+)', self.macSdk) if not macSdkMatch: raise Exception('unknown osx version: ' + self.macSdk) sdkDir = self.getMacSdkDir() sysroot = "/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk" cmake_args += ' -DCMAKE_OSX_ARCHITECTURE="armv7s"' cmake_args += ' -DCMAKE_OSX_ARCHITECTURES="armv7s"' cmake_args += ' -DCMAKE_TRY_COMPILE_OSX_ARCHITECTURES=""' cmake_args += ' -DCMAKE_OSX_DEPLOYMENT_TARGET=""' cmake_args += ' -DCMAKE_BUILD_TYPE=debug' cmake_args += ' -DCMAKE_OSX_SYSROOT=%s' % ("/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS9.2.sdk",) cmake_args += ' -DCMAKE_CXX_FLAGS="-g -arch armv7 -arch armv7s -arch arm64 -isysroot %s -DHAVE_INET_ATON -DHAVE_POSIX_SIGWAIT -DHAVE_SYS_SOCKET_H -DTYPE_OF_SIZE_1=char -DHAVE_SYS_TYPES_H -DHAVE_PTHREAD -I/Users/diego/tarballs/iOScURL/iOScURL/"' % (sysroot, ) cmake_args += ' -DCMAKE_C_FLAGS="-g -arch armv7 -arch armv7s -arch arm64 -isysroot %s -DHAVE_INET_ATON -DHAVE_POSIX_SIGWAIT -DTYPE_OF_SIZE_1=char -DHAVE_SYS_SOCKET_H -DHAVE_SYS_TYPES_H -DHAVE_PTHREAD -I/Users/diego/tarballs/iOScURL/iOScURL/"' % (sysroot, ) sourceDir = generator.getSourceDir() self.checkGTest() self.checkGMock() if extraArgs != '': cmake_args += ' ' + extraArgs cmake_cmd_string = _cmake_cmd + cmake_args + ' ' + sourceDir self.try_chdir(self.getBuildDir(target)) print 'CMake command: ' + cmake_cmd_string err = os.system(cmake_cmd_string) self.restore_chdir() if generator.cmakeName.find('Eclipse') != -1: self.fixCmakeEclipseBug() if err != 0: raise Exception('CMake encountered error: ' + str(err)) def configureGui(self, target = '', extraArgs = ''): self.persist_qmake() qmake_cmd_string = self.qmake_cmd + ' ' + self.qtpro_filename + ' -r' if sys.platform == 'darwin': qmake_cmd_string += ' -spec macx-g++' major, minor = self.getMacVersion() if major == 10 and minor <= 4: qmake_cmd_string += ' CONFIG+="ppc i386"' libs = '-framework ApplicationServices -framework Security -framework cocoa' if major == 10 and minor >= 6: libs += ' -framework ServiceManagement' qmake_cmd_string += ' "MACX_LIBS=%s" ' % libs sdkDir = self.getMacSdkDir() shortForm = 'macosx' + self.macSdk version = str(major) + '.' + str(minor) qmake_cmd_string += ' QMAKE_MACOSX_DEPLOYMENT_TARGET=' + version qMajor, qMinor, qRev = self.getQmakeVersion() if qMajor <= 4: qmake_cmd_string += ' QMAKE_MAC_SDK=' + sdkDir else: qmake_cmd_string += ' QMAKE_MAC_SDK=' + shortForm qmake_cmd_string += ' QMAKE_MAC_SDK.' + shortForm + '.path=' + sdkDir print 'QMake command: ' + qmake_cmd_string self.try_chdir(self.gui_dir) err = os.system(qmake_cmd_string) self.restore_chdir() if err != 0: raise Exception('QMake encountered error: ' + str(err)) def getQmakeVersion(self): version = commands.getoutput('qmake --version') result = re.search('(\\d+)\\.(\\d+)\\.(\\d)', version) if not result: raise Exception('Could not get qmake version.') major = int(result.group(1)) minor = int(result.group(2)) rev = int(result.group(3)) return (major, minor, rev) def getMacSdkDir(self): sdkName = 'macosx' + self.macSdk status, sdkPath = commands.getstatusoutput('xcrun --show-sdk-path --sdk ' + sdkName) if status == 0 and sdkPath: return sdkPath developerDir = os.getenv('DEVELOPER_DIR') if not developerDir: developerDir = '/Applications/Xcode.app/Contents/Developer' sdkDirName = sdkName.replace('macosx', 'MacOSX') sdkPath = developerDir + '/Platforms/MacOSX.platform/Developer/SDKs/' + sdkDirName + '.sdk' sdkPath = '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS8.4.sdk' if os.path.exists(sdkPath): return sdkPath return '/Developer/SDKs/' + sdkDirName + '.sdk' def fixCmakeEclipseBug(self): print 'Fixing CMake Eclipse bugs...' file = open('.project', 'r+') content = file.read() pattern = re.compile('\\s+<linkedResources>.+</linkedResources>', re.S) content = pattern.sub('', content) file.seek(0) file.write(content) file.truncate() file.close() def persist_cmake(self): err = os.system('%s --version' % self.cmake_cmd) if err != 0: print 'Could not find `%s` in system path.\nDownload the latest version from:\n %s' % (self.cmake_cmd, self.cmake_url) raise Exception('Cannot continue without CMake.') else: return self.cmake_cmd def persist_qt(self): self.persist_qmake() def persist_qmake(self): if sys.version_info < (2, 4): return try: p = subprocess.Popen([self.qmake_cmd, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) except: print >> sys.stderr, 'Error: Could not find qmake.' if sys.platform == 'win32': print 'Suggestions:\n1. Ensure that qmake.exe exists in your system path.\n2. Try to download Qt (check our dev FAQ for links):\n qt-sdk-win-opensource-2010.02.exe' raise Exception('Cannot continue without qmake.') stdout, stderr = p.communicate() if p.returncode != 0: raise Exception('Could not test for cmake: %s' % stderr) else: m = re.search('.*Using Qt version (\\d+\\.\\d+\\.\\d+).*', stdout) if m: if sys.platform == 'win32': ver = m.group(1) if ver != self.w32_qt_version: print >> sys.stderr, 'Warning: Not using supported Qt version %s (your version is %s).' % (self.w32_qt_version, ver) else: raise Exception('Could not find qmake version.') def ensureConfHasRun(self, target, skipConfig): if self.hasConfRun(target): print 'Skipping config for target: ' + target skipConfig = True if not skipConfig: self.configure(target) def build(self, targets = [], skipConfig = False): if len(targets) == 0: targets += [self.defaultTarget] self.ensure_setup_latest() self.loadConfig() if self.enableMakeCore: self.makeCore(targets) if self.enableMakeGui: self.makeGui(targets) def loadConfig(self): config = self.getConfig() if config.has_option('hm', 'macSdk'): self.macSdk = config.get('hm', 'macSdk') if config.has_option('hm', 'macIdentity'): self.macIdentity = config.get('hm', 'macIdentity') def makeCore(self, targets): generator = self.getGeneratorFromConfig().cmakeName if self.macSdk: os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.macSdk if generator.find('Unix Makefiles') != -1: for target in targets: self.runBuildCommand(self.make_cmd, target) else: for target in targets: if generator.startswith('Visual Studio'): self.run_vcbuild(generator, target, self.sln_filepath()) elif generator == 'Xcode': cmd = self.xcodebuild_cmd + ' -configuration ' + target.capitalize() self.runBuildCommand(cmd, target) else: raise Exception('Build command not supported with generator: ' + generator) def makeGui(self, targets, args = ''): for target in targets: if sys.platform == 'win32': gui_make_cmd = self.w32_make_cmd + ' ' + target + args print 'Make GUI command: ' + gui_make_cmd self.try_chdir(self.gui_dir) err = os.system(gui_make_cmd) self.restore_chdir() if err != 0: raise Exception(gui_make_cmd + ' failed with error: ' + str(err)) elif sys.platform in ('linux2', 'sunos5', 'freebsd7', 'darwin'): gui_make_cmd = self.make_cmd + ' -w' + args print 'Make GUI command: ' + gui_make_cmd targetDir = self.getGenerator().getBinDir(target) bundleTargetDir = targetDir + '/Synergy.app' if os.path.exists(bundleTargetDir): shutil.rmtree(bundleTargetDir) binDir = self.getGenerator().binDir bundleTempDir = binDir + '/Synergy.app' if os.path.exists(bundleTempDir): shutil.rmtree(bundleTempDir) self.try_chdir(self.gui_dir) err = os.system(gui_make_cmd) self.restore_chdir() if err != 0: raise Exception(gui_make_cmd + ' failed with error: ' + str(err)) if sys.platform == 'darwin' and 'clean' not in args: self.macPostGuiMake(target) self.fixQtFrameworksLayout(target) else: raise Exception('Unsupported platform: ' + sys.platform) def macPostGuiMake(self, target): bundle = 'Synergy.app' binDir = self.getGenerator().binDir targetDir = self.getGenerator().getBinDir(target) bundleTempDir = binDir + '/' + bundle bundleTargetDir = targetDir + '/' + bundle if os.path.exists(bundleTempDir): shutil.move(bundleTempDir, bundleTargetDir) if self.enableMakeCore: bundleBinDir = bundleTargetDir + '/Contents/MacOS/' shutil.copy(targetDir + '/synergyc', bundleBinDir) shutil.copy(targetDir + '/synergys', bundleBinDir) shutil.copy(targetDir + '/syntool', bundleBinDir) bundlePluginDir = bundleBinDir + 'plugins' pluginDir = targetDir + '/plugins' print 'Copying plugins dirtree: ' + pluginDir if os.path.isdir(pluginDir): print 'Copying to: ' + bundlePluginDir shutil.copytree(pluginDir, bundlePluginDir) else: print "pluginDir doesn't exist, skipping" self.loadConfig() if not self.macIdentity: raise Exception('run config with --mac-identity') if self.enableMakeGui: bin = 'macdeployqt Synergy.app -verbose=2' self.try_chdir(targetDir) err = os.system(bin) self.restore_chdir() print bundleTargetDir if err != 0: raise Exception(bin + ' failed with error: ' + str(err)) qMajor, qMinor, qRev = self.getQmakeVersion() if qMajor <= 4: frameworkRootDir = '/Library/Frameworks' else: frameworkRootDir = '/Developer/Qt5.2.1/5.2.1/clang_64/lib' frameworkRootDir = '/usr/local/Cellar/qt/4.8.6/Frameworks' target = bundleTargetDir + '/Contents/Frameworks' for root, dirs, files in os.walk(target): for dir in dirs: if dir.startswith('Qt'): shutil.copy(frameworkRootDir + '/' + dir + '/Contents/Info.plist', target + '/' + dir + '/Resources/') def symlink(self, source, target): if not os.path.exists(target): os.symlink(source, target) def move(self, source, target): if os.path.exists(source): shutil.move(source, target) def fixQtFrameworksLayout(self, target): targetDir = self.getGenerator().getBinDir(target) target = targetDir + '/Synergy.app/Contents/Frameworks' major, minor = self.getMacVersion() if major == 10: if minor >= 9: for root, dirs, files in os.walk(target): for dir in dirs: if dir.startswith('Qt'): self.try_chdir(target + '/' + dir + '/Versions') self.symlink('5', 'Current') self.move('../Resources', '5') self.restore_chdir() self.try_chdir(target + '/' + dir) dot = dir.find('.') frameworkName = dir[:dot] self.symlink('Versions/Current/' + frameworkName, frameworkName) self.symlink('Versions/Current/Resources', 'Resources') self.restore_chdir() def signmac(self): self.loadConfig() if not self.macIdentity: raise Exception('run config with --mac-identity') self.try_chdir('bin/Release/') err = os.system('codesign --deep -fs "' + self.macIdentity + '" Synergy.app') self.restore_chdir() if err != 0: raise Exception('codesign failed with error: ' + str(err)) def signwin(self, pfx, pwdFile, dist): generator = self.getGeneratorFromConfig().cmakeName if not generator.startswith('Visual Studio'): raise Exception('only windows is supported') f = open(pwdFile) lines = f.readlines() f.close() pwd = lines[0] if dist: self.signFile(pfx, pwd, 'bin/Release', self.getDistFilename('win')) else: self.signFile(pfx, pwd, 'bin/Release', 'synergy.exe') self.signFile(pfx, pwd, 'bin/Release', 'synergyc.exe') self.signFile(pfx, pwd, 'bin/Release', 'synergys.exe') self.signFile(pfx, pwd, 'bin/Release', 'synergyd.exe') self.signFile(pfx, pwd, 'bin/Release', 'syntool.exe') self.signFile(pfx, pwd, 'bin/Release', 'synwinhk.dll') def signFile(self, pfx, pwd, dir, file): self.try_chdir(dir) err = os.system('signtool sign /f ' + pfx + ' /p ' + pwd + ' /t http://timestamp.verisign.com/scripts/timstamp.dll ' + file) self.restore_chdir() if err != 0: raise Exception('signtool failed with error: ' + str(err)) def runBuildCommand(self, cmd, target): print 'Running: %s %s' % (cmd, target) self.try_chdir(self.getBuildDir(target)) err = os.system(cmd) self.restore_chdir() if err != 0: raise Exception(cmd + ' failed: ' + str(err)) def clean(self, targets = []): if len(targets) == 0: targets += [self.defaultTarget] if self.enableMakeCore: self.cleanCore(targets) if self.enableMakeGui: self.cleanGui(targets) def cleanCore(self, targets): generator = self.getGeneratorFromConfig().cmakeName if generator.startswith('Visual Studio'): if generator.startswith('Visual Studio 10'): for target in targets: self.run_vcbuild(generator, target, self.sln_filepath(), '/target:clean') elif generator.startswith('Visual Studio'): for target in targets: self.run_vcbuild(generator, target, self.sln_filepath(), '/clean') else: cmd = '' if generator == 'Unix Makefiles': print 'Cleaning with GNU Make...' cmd = self.make_cmd elif generator == 'Xcode': print 'Cleaning with Xcode...' cmd = self.xcodebuild_cmd else: raise Exception('Not supported with generator: ' + generator) for target in targets: self.try_chdir(self.getBuildDir(target)) err = os.system(cmd + ' clean') self.restore_chdir() if err != 0: raise Exception('Clean failed: ' + str(err)) def cleanGui(self, targets): self.makeGui(targets, ' clean') def open(self): generator = self.getGeneratorFromConfig().cmakeName if generator.startswith('Visual Studio'): print 'Opening with %s...' % generator self.open_internal(self.sln_filepath()) elif generator.startswith('Xcode'): print 'Opening with %s...' % generator self.open_internal(self.xcodeproj_filepath(), 'open') else: raise Exception('Not supported with generator: ' + generator) def update(self): print 'Running Subversion update...' err = os.system('svn update') if err != 0: raise Exception('Could not update from repository with error code code: ' + str(err)) def revision(self): print self.find_revision() def find_revision(self): return self.getGitRevision() def getGitRevision(self): if sys.version_info < (2, 4): raise Exception('Python 2.4 or greater required.') p = subprocess.Popen(['git', 'log', '--pretty=format:%h', '-n', '1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: raise Exception('Could not get revision, git error: ' + str(p.returncode)) return stdout.strip() def getGitBranchName(self): if sys.version_info < (2, 4): raise Exception('Python 2.4 or greater required.') p = subprocess.Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: raise Exception('Could not get branch name, git error: ' + str(p.returncode)) result = stdout.strip() result = re.sub('heads/', '', result) return result def find_revision_svn(self): if sys.version_info < (2, 4): stdout = commands.getoutput('svn info') else: p = subprocess.Popen(['svn', 'info'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: raise Exception('Could not get revision - svn info failed with code: ' + str(p.returncode)) m = re.search('.*Revision: (\\d+).*', stdout) if not m: raise Exception('Could not find revision number in svn info output.') return m.group(1) def kill(self): if sys.platform == 'win32': return os.system('taskkill /F /FI "IMAGENAME eq synergy*"') raise Exception('Not implemented for platform: ' + sys.platform) def doxygen(self): self.enableMakeGui = False self.configure(self.defaultTarget, '-DCONF_DOXYGEN:BOOL=TRUE') err = os.system('doxygen %s/%s' % (self.doc_dir, self.doxygen_filename)) if err != 0: raise Exception('doxygen failed with error code: ' + str(err)) def dist(self, type, vcRedistDir, qtDir): package_unsupported = False unixTarget = self.defaultTarget if type == '' or type == None: self.dist_usage() return moveExt = '' if type == 'src': self.distSrc() elif type == 'rpm': if sys.platform == 'linux2': self.distRpm() else: package_unsupported = True elif type == 'deb': if sys.platform == 'linux2': self.distDeb() else: package_unsupported = True elif type == 'win': if sys.platform == 'win32': self.distWix() else: package_unsupported = True elif type == 'mac': if sys.platform == 'darwin': self.distMac() else: package_unsupported = True else: raise Exception('Package type not supported: ' + type) if moveExt != '': self.unixMove(self.getGenerator().buildDir + '/release/*.' + moveExt, self.getGenerator().binDir) if package_unsupported: raise Exception("Package type, '%s' is not supported for platform, '%s'" % (type, sys.platform)) def distRpm(self): rpmDir = self.getGenerator().buildDir + '/rpm' if os.path.exists(rpmDir): shutil.rmtree(rpmDir) os.makedirs(rpmDir) templateFile = open(self.cmake_dir + '/synergy.spec.in') template = templateFile.read() template = template.replace('${in:version}', self.getVersionNumber()) specPath = rpmDir + '/synergy.spec' specFile = open(specPath, 'w') specFile.write(template) specFile.close() target = '../../bin/synergy-%s-%s.rpm' % (self.getVersionForFilename(), self.getLinuxPlatform()) try: self.try_chdir(rpmDir) cmd = 'rpmbuild -bb --define "_topdir `pwd`" synergy.spec' print 'Command: ' + cmd err = os.system(cmd) if err != 0: raise Exception('rpmbuild failed: ' + str(err)) self.unixMove('RPMS/*/*.rpm', target) cmd = 'rpmlint ' + target print 'Command: ' + cmd err = os.system(cmd) if err != 0: raise Exception('rpmlint failed: ' + str(err)) finally: self.restore_chdir() def distDeb(self): buildDir = self.getGenerator().buildDir binDir = self.getGenerator().binDir resDir = self.cmake_dir package = '%s-%s-%s' % (self.project, self.getVersionForFilename(), self.getLinuxPlatform()) debDir = '%s/deb' % buildDir if os.path.exists(debDir): shutil.rmtree(debDir) metaDir = '%s/%s/DEBIAN' % (debDir, package) os.makedirs(metaDir) templateFile = open(resDir + '/deb/control.in') template = templateFile.read() template = template.replace('${in:version}', self.getVersionNumber()) template = template.replace('${in:arch}', self.getDebianArch()) controlPath = '%s/control' % metaDir controlFile = open(controlPath, 'w') controlFile.write(template) controlFile.close() targetBin = '%s/%s/usr/bin' % (debDir, package) targetPlugin = '%s/%s/usr/lib/synergy/plugins' % (debDir, package) targetShare = '%s/%s/usr/share' % (debDir, package) targetApplications = '%s/applications' % targetShare targetIcons = '%s/icons' % targetShare targetDocs = '%s/doc/%s' % (targetShare, self.project) os.makedirs(targetBin) os.makedirs(targetPlugin) os.makedirs(targetApplications) os.makedirs(targetIcons) os.makedirs(targetDocs) for root, dirs, files in os.walk(debDir): for d in dirs: os.chmod(os.path.join(root, d), 493) binFiles = ['synergy', 'synergyc', 'synergys', 'synergyd', 'syntool'] for f in binFiles: shutil.copy('%s/%s' % (binDir, f), targetBin) target = '%s/%s' % (targetBin, f) os.chmod(target, 493) err = os.system('strip ' + target) if err != 0: raise Exception('strip failed: ' + str(err)) pluginDir = '%s/plugins' % binDir pluginFiles = ['libns.so'] for f in pluginFiles: shutil.copy('%s/%s' % (pluginDir, f), targetPlugin) target = '%s/%s' % (targetPlugin, f) os.chmod(target, 420) err = os.system('strip ' + target) if err != 0: raise Exception('strip failed: ' + str(err)) shutil.copy('%s/synergy.desktop' % resDir, targetApplications) shutil.copy('%s/synergy.ico' % resDir, targetIcons) docTarget = '%s/doc/%s' % (targetShare, self.project) copyrightPath = '%s/deb/copyright' % resDir shutil.copy(copyrightPath, docTarget) shutil.copy('%s/deb/changelog' % resDir, docTarget) os.system('gzip -9 %s/changelog' % docTarget) if err != 0: raise Exception('gzip failed: ' + str(err)) for root, dirs, files in os.walk(targetShare): for f in files: os.chmod(os.path.join(root, f), 420) target = '../../bin/%s.deb' % package try: self.try_chdir(debDir) cmd = 'fakeroot dpkg-deb --build %s' % package print 'Command: ' + cmd err = os.system(cmd) if err != 0: raise Exception('dpkg-deb failed: ' + str(err)) cmd = 'lintian %s.deb' % package print 'Command: ' + cmd err = os.system(cmd) if err != 0: raise Exception('lintian failed: ' + str(err)) self.unixMove('*.deb', target) finally: self.restore_chdir() def distSrc(self): name = '%s-%s-%s' % (self.project, self.getVersionForFilename(), 'Source') exportPath = self.getGenerator().buildDir + '/' + name if os.path.exists(exportPath): print 'Removing existing export...' shutil.rmtree(exportPath) os.mkdir(exportPath) cmd = 'git archive %s | tar -x -C %s' % (self.getGitBranchName(), exportPath) print 'Exporting repository to: ' + exportPath err = os.system(cmd) if err != 0: raise Exception('Repository export failed: ' + str(err)) packagePath = '../' + self.getGenerator().binDir + '/' + name + '.tar.gz' try: self.try_chdir(self.getGenerator().buildDir) print 'Packaging to: ' + packagePath err = os.system('tar cfvz ' + packagePath + ' ' + name) if err != 0: raise Exception('Package failed: ' + str(err)) finally: self.restore_chdir() def unixMove(self, source, dest): print 'Moving ' + source + ' to ' + dest err = os.system('mv ' + source + ' ' + dest) if err != 0: raise Exception('Package failed: ' + str(err)) def distMac(self): self.loadConfig() binDir = self.getGenerator().getBinDir('Release') name = 'Synergy' dist = binDir + '/' + name if os.path.exists(dist): shutil.rmtree(dist) os.makedirs(dist) shutil.move(binDir + '/' + name + '.app', dist + '/' + name + '.app') self.try_chdir(dist) err = os.system('ln -s /Applications') self.restore_chdir() fileName = '%s-%s-%s.dmg' % (self.project, self.getVersionForFilename(), self.getMacPackageName()) cmd = 'hdiutil create ' + fileName + ' -srcfolder ./' + name + '/ -ov' self.try_chdir(binDir) err = os.system(cmd) self.restore_chdir() def distWix(self): generator = self.getGeneratorFromConfig().cmakeName arch = 'x86' if generator.endswith('Win64'): arch = 'x64' version = self.getVersionNumber() args = '/p:DefineConstants="Version=%s"' % version self.run_vcbuild(generator, 'release', 'synergy.sln', args, 'src/setup/win32/', 'x86') filename = '%s-%s-Windows-%s.msi' % (self.project, self.getVersionForFilename(), arch) old = 'bin/Release/synergy.msi' new = 'bin/Release/%s' % filename try: os.remove(new) except OSError: pass os.rename(old, new) def distNsis(self, vcRedistDir, qtDir): if vcRedistDir == '': raise Exception('VC++ redist dir path not specified (--vcredist-dir).') if qtDir == '': raise Exception('QT SDK dir path not specified (--qt-dir).') generator = self.getGeneratorFromConfig().cmakeName arch = 'x86' installDirVar = '$PROGRAMFILES32' if generator.endswith('Win64'): arch = 'x64' installDirVar = '$PROGRAMFILES64' templateFile = open(self.cmake_dir + '\\Installer.nsi.in') template = templateFile.read() template = template.replace('${in:version}', self.getVersionNumber()) template = template.replace('${in:arch}', arch) template = template.replace('${in:vcRedistDir}', vcRedistDir) template = template.replace('${in:qtDir}', qtDir) template = template.replace('${in:installDirVar}', installDirVar) nsiPath = self.getGenerator().buildDir + '\\Installer.nsi' nsiFile = open(nsiPath, 'w') nsiFile.write(template) nsiFile.close() command = 'makensis ' + nsiPath print 'NSIS command: ' + command err = os.system(command) if err != 0: raise Exception('Package failed: ' + str(err)) def getVersionNumber(self): cmakeFile = open('CMakeLists.txt') cmake = cmakeFile.read() majorRe = re.search('VERSION_MAJOR (\\d+)', cmake) major = majorRe.group(1) minorRe = re.search('VERSION_MINOR (\\d+)', cmake) minor = minorRe.group(1) revRe = re.search('VERSION_REV (\\d+)', cmake) rev = revRe.group(1) return '%s.%s.%s' % (major, minor, rev) def getVersionStage(self): cmakeFile = open('CMakeLists.txt') cmake = cmakeFile.read() stageRe = re.search('VERSION_STAGE (\\w+)', cmake) return stageRe.group(1) def getVersionForFilename(self): versionStage = self.getVersionStage() gitBranch = self.getGitBranchName() gitRevision = self.getGitRevision() return '%s-%s-%s' % (gitBranch, versionStage, gitRevision) def distftp(self, type, ftp): if not type: raise Exception('Platform type not specified.') self.loadConfig() binDir = self.getGenerator().getBinDir('Release') filename = self.getDistFilename(type) packageSource = binDir + '/' + filename packageTarget = filename ftp.upload(packageSource, packageTarget) if type != 'src': pluginsDir = binDir + '/plugins' nsPluginSource = self.findLibraryFile(type, pluginsDir, 'ns') if nsPluginSource: nsPluginTarget = self.getLibraryDistFilename(type, pluginsDir, 'ns') ftp.upload(nsPluginSource, nsPluginTarget, 'plugins') def getLibraryDistFilename(self, type, dir, name): platform, packageExt, libraryExt = self.getDistributePlatformInfo(type) firstPart = '%s-%s-%s' % (name, self.getVersionForFilename(), platform) filename = '%s.%s' % (firstPart, libraryExt) if type == 'rpm' or type == 'deb': filename = '%s-%s.%s' % (firstPart, packageExt, libraryExt) return filename def findLibraryFile(self, type, dir, name): if not os.path.exists(dir): return None platform, packageExt, libraryExt = self.getDistributePlatformInfo(type) ext = libraryExt pattern = name + '\\.' + ext for filename in os.listdir(dir): if re.search(pattern, filename): return dir + '/' + filename def getDistributePlatformInfo(self, type): ext = None libraryExt = None platform = None if type == 'src': ext = 'tar.gz' platform = 'Source' elif type == 'rpm' or type == 'deb': ext = type libraryExt = 'so' platform = self.getLinuxPlatform() elif type == 'win': ext = 'msi' libraryExt = 'dll' generator = self.getGeneratorFromConfig().cmakeName if generator.find('Win64') != -1: platform = 'Windows-x64' else: platform = 'Windows-x86' elif type == 'mac': ext = 'dmg' libraryExt = 'dylib' platform = self.getMacPackageName() if not platform: raise Exception('Unable to detect distributable platform.') return (platform, ext, libraryExt) def getDistFilename(self, type): pattern = self.getVersionForFilename() for filename in os.listdir(self.getBinDir('Release')): if re.search(pattern, filename): return filename raise Exception('Could not find package name with pattern: ' + pattern) def getDebianArch(self): if os.uname()[4][:3] == 'arm': return 'armhf' import platform os_bits, other = platform.architecture() if os_bits == '32bit': return 'i386' if os_bits == '64bit': return 'amd64' raise Exception('unknown os bits: ' + os_bits) def getLinuxPlatform(self): if os.uname()[4][:3] == 'arm': return 'Linux-armv6l' import platform os_bits, other = platform.architecture() if os_bits == '32bit': return 'Linux-i686' if os_bits == '64bit': return 'Linux-x86_64' raise Exception('unknown os bits: ' + os_bits) def dist_usage(self): print 'Usage: %s package [package-type]\n\nReplace [package-type] with one of:\n src .tar.gz source (Posix only)\n rpm .rpm package (Red Hat)\n deb .deb paclage (Debian)\n win .exe installer (Windows)\n mac .dmg package (Mac OS X)\n\nExample: %s package src-tgz' % (self.this_cmd, self.this_cmd) def about(self): print 'Help Me script, from the Synergy project.\n%s\n\nFor help, run: %s help' % (self.website_url, self.this_cmd) def try_chdir(self, dir): global prevdir if dir == '': prevdir = '' return if not os.path.exists(dir): print 'Creating dir: ' + dir os.makedirs(dir) prevdir = os.path.abspath(os.curdir) print 'Entering dir: ' + dir os.chdir(dir) def restore_chdir(self): if prevdir == '': return print 'Going back to: ' + prevdir os.chdir(prevdir) def open_internal(self, project_filename, application = ''): if not os.path.exists(project_filename): raise Exception('Project file (%s) not found, run hm conf first.' % project_filename) else: path = project_filename if application != '': path = application + ' ' + path err = os.system(path) if err != 0: raise Exception('Could not open project with error code code: ' + str(err)) def setup(self, target = ''): print 'Running setup...' oldGenerator = self.findGeneratorFromConfig() if not oldGenerator == None: for target in ['debug', 'release']: buildDir = oldGenerator.getBuildDir(target) cmakeCacheFilename = 'CMakeCache.txt' if buildDir != '': cmakeCacheFilename = buildDir + '/' + cmakeCacheFilename if os.path.exists(cmakeCacheFilename): print 'Removing %s, since generator changed.' % cmakeCacheFilename os.remove(cmakeCacheFilename) generator = self.get_generator_from_prompt() config = self.getConfig() config.set('hm', 'setup_version', self.setup_version) config.set('cmake', 'generator', generator) self.write_config(config) self.setConfRun('all', False) self.setConfRun('debug', False) self.setConfRun('release', False) print 'Setup complete.' def getConfig(self): if os.path.exists(self.configFilename): config = ConfigParser.ConfigParser() config.read(self.configFilename) else: config = ConfigParser.ConfigParser() if not config.has_section('hm'): config.add_section('hm') if not config.has_section('cmake'): config.add_section('cmake') return config def write_config(self, config, target = ''): if not os.path.isdir(self.configDir): os.mkdir(self.configDir) configfile = open(self.configFilename, 'wb') config.write(configfile) def getGeneratorFromConfig(self): generator = self.findGeneratorFromConfig() if generator: return generator raise Exception('Could not find generator: ' + name) def findGeneratorFromConfig(self): config = ConfigParser.RawConfigParser() config.read(self.configFilename) if not config.has_section('cmake'): return None name = config.get('cmake', 'generator') generators = self.get_generators() keys = generators.keys() keys.sort() for k in keys: if generators[k].cmakeName == name: return generators[k] def min_setup_version(self, version): if os.path.exists(self.configFilename): config = ConfigParser.RawConfigParser() config.read(self.configFilename) try: return config.getint('hm', 'setup_version') >= version except: return False else: return False def hasConfRun(self, target): if self.min_setup_version(2): config = ConfigParser.RawConfigParser() config.read(self.configFilename) try: return config.getboolean('hm', 'conf_done_' + target) except: return False else: return False def setConfRun(self, target, hasRun = True): if self.min_setup_version(3): config = ConfigParser.RawConfigParser() config.read(self.configFilename) config.set('hm', 'conf_done_' + target, hasRun) self.write_config(config) else: raise Exception('User does not have correct setup version.') def get_generators(self): if sys.platform == 'win32': return self.win32_generators if sys.platform in ('linux2', 'sunos5', 'freebsd7', 'aix5'): return self.unix_generators if sys.platform == 'darwin': return self.darwin_generators raise Exception('Unsupported platform: ' + sys.platform) def get_generator_from_prompt(self): return self.getGenerator().cmakeName def getGenerator(self): generators = self.get_generators() if len(generators.keys()) == 1: return generators[generators.keys()[0]] if self.generator_id: return generators[int(self.generator_id)] conf = self.findGeneratorFromConfig() if conf: return conf raise Exception('Generator not specified, use -g arg ' + '(use `hm genlist` for a list of generators).') def setup_generator_prompt(self, generators): if self.no_prompts: raise Exception('User prompting is disabled.') prompt = 'Enter a number:' print prompt, generator_id = raw_input() if generator_id in generators: print 'Selected generator:', generators[generator_id] else: print 'Invalid number, try again.' self.setup_generator_prompt(generators) return generators[generator_id] def get_vcvarsall(self, generator): import platform, _winreg os_bits, other = platform.architecture() if os_bits == '64bit': key_name = 'SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\VS7' else: key_name = 'SOFTWARE\\Microsoft\\VisualStudio\\SxS\\VC7' try: key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, key_name) except: raise Exception('Unable to open Visual Studio registry key. Application may not be installed.') if generator.startswith('Visual Studio 8'): value, type = _winreg.QueryValueEx(key, '8.0') elif generator.startswith('Visual Studio 9'): value, type = _winreg.QueryValueEx(key, '9.0') elif generator.startswith('Visual Studio 10'): value, type = _winreg.QueryValueEx(key, '10.0') else: raise Exception('Cannot determine vcvarsall.bat location for: ' + generator) if os_bits == '64bit': path = value + 'vc\\vcvarsall.bat' else: path = value + 'vcvarsall.bat' if not os.path.exists(path): raise Exception("'%s' not found." % path) return path def run_vcbuild(self, generator, mode, solution, args = '', dir = '', config32 = 'Win32'): import platform os_bits, other = platform.architecture() if generator.find('Win64') != -1: if os_bits == '32bit': vcvars_platform = 'x86_amd64' else: vcvars_platform = 'amd64' config_platform = 'x64' else: vcvars_platform = 'x86' config_platform = config32 if mode == 'release': config = 'Release' else: config = 'Debug' if generator.startswith('Visual Studio 10'): cmd = '@echo off\ncall "%s" %s \ncd "%s"\nmsbuild /nologo %s /p:Configuration="%s" /p:Platform="%s" "%s"' % (self.get_vcvarsall(generator), vcvars_platform, dir, args, config, config_platform, solution) else: config = config + '|' + config_platform cmd = '@echo off\ncall "%s" %s \ncd "%s"\nvcbuild /nologo %s "%s" "%s"' % (self.get_vcvarsall(generator), vcvars_platform, dir, args, solution, config) temp_bat = self.getBuildDir() + '\\vcbuild.bat' file = open(temp_bat, 'w') file.write(cmd) file.close() err = os.system(temp_bat) if err != 0: raise Exception('Microsoft compiler failed with error code: ' + str(err)) def ensure_setup_latest(self): if not self.min_setup_version(self.setup_version): self.setup() def reformat(self): err = os.system('tool\\astyle\\AStyle.exe --quiet --suffix=none --style=java --indent=force-tab=4 --recursive lib/*.cpp lib/*.h cmd/*.cpp cmd/*.h') if err != 0: raise Exception('Reformat failed with error code: ' + str(err)) def printGeneratorList(self): generators = self.get_generators() keys = generators.keys() keys.sort() for k in keys: print str(k) + ': ' + generators[k].cmakeName def getMacVersion(self): if not self.macSdk: raise Exception('Mac OS X SDK not set.') result = re.search('(\\d+)\\.(\\d+)', self.macSdk) if not result: print versions raise Exception('Could not find Mac OS X version.') major = int(result.group(1)) minor = int(result.group(2)) return (major, minor) def getMacPackageName(self): major, minor = self.getMacVersion() if major == 10: if minor <= 4: arch = 'Universal' elif minor <= 6: arch = 'i386' else: arch = 'x86_64' else: raise Exception('Mac OS major version unknown: ' + str(major)) version = str(major) + str(minor) return 'MacOSX%s-%s' % (version, arch) def reset(self): if os.path.exists('build'): shutil.rmtree('build') if os.path.exists('bin'): shutil.rmtree('bin') if os.path.exists('lib'): shutil.rmtree('lib') if os.path.exists('src/gui/tmp'): shutil.rmtree('src/gui/tmp') for filename in glob.glob('src/gui/ui_*'): os.remove(filename) class CommandHandler(): ic = InternalCommands() build_targets = [] vcRedistDir = '' qtDir = '' def __init__(self, argv, opts, args, verbose): self.ic.verbose = verbose self.opts = opts self.args = args for o, a in self.opts: if o == '--no-prompts': self.ic.no_prompts = True elif o in ('-g', '--generator'): self.ic.generator_id = a elif o == '--skip-gui': self.ic.enableMakeGui = False elif o == '--skip-core': self.ic.enableMakeCore = False elif o in ('-d', '--debug'): self.build_targets += ['debug'] elif o in ('-r', '--release'): self.build_targets += ['release'] elif o == '--vcredist-dir': self.vcRedistDir = a elif o == '--qt-dir': self.qtDir = a elif o == '--mac-sdk': self.ic.macSdk = a elif o == '--mac-identity': self.ic.macIdentity = a def about(self): self.ic.about() def setup(self): self.ic.setup() def configure(self): self.ic.configureAll(self.build_targets) def build(self): self.ic.build(self.build_targets) def clean(self): self.ic.clean(self.build_targets) def update(self): self.ic.update() def install(self): print 'Not yet implemented: install' def doxygen(self): self.ic.doxygen() def dist(self): type = None if len(self.args) > 0: type = self.args[0] self.ic.dist(type, self.vcRedistDir, self.qtDir) def distftp(self): type = None host = None user = None password = None dir = None if len(self.args) > 0: type = self.args[0] for o, a in self.opts: if o == '--host': host = a elif o == '--user': user = a elif o == '--pass': password = a elif o == '--dir': dir = a if not host: raise Exception('FTP host was not specified.') ftp = ftputil.FtpUploader(host, user, password, dir) self.ic.distftp(type, ftp) def destroy(self): self.ic.destroy() def kill(self): self.ic.kill() def usage(self): self.ic.usage() def revision(self): self.ic.revision() def reformat(self): self.ic.reformat() def open(self): self.ic.open() def genlist(self): self.ic.printGeneratorList() def reset(self): self.ic.reset() def signwin(self): pfx = None pwd = None dist = False for o, a in self.opts: if o == '--pfx': pfx = a elif o == '--pwd': pwd = a elif o == '--dist': dist = True self.ic.signwin(pfx, pwd, dist) def signmac(self): self.ic.signmac()
Usually, superior midnight poppy girl fantasy abstract 3d and desktop picture can help for you to get through your working day. The unusual, brightly tuned midnight poppy girl fantasy abstract 3d and desktop picture can change your feelings and produce sence for you to be umbelievable. There are a lot of chooses in screen pictures, but you can choose each as claimed by your charm and attraction. Why not to receive desktop picture, ready for unlucky days or very terrible afternoons and receive selection of temper changing screen pictures to have for your smart phone? We got abnormal amount of top class, super high definition screen pictures to choose from so you presumably will be interested in.
from datetime import datetime from django.db import models from tagging.models import Tag, TaggedItem class PostImageManager(models.Manager): """ Post Image Manager """ # use for related fields use_for_related_fields = True def get_gallery_images(self): """ Get gallery images Gallery images are PostImages that have a non-null gallery position """ return self.get_query_set().filter(gallery_position__isnull=False) class PostManager(models.Manager): """ Post Manager """ # use for related fields use_for_related_fields = True def build_query(self, require_published=True, year=None, month=None, category_slug=None, series_slug=None, tag=None, require_featured=False): # Initial posts by require published indicator if require_published: posts = self.get_query_set().filter(is_published=True, publish_date__lt=datetime.now) else: posts = self.get_query_set() # featured if require_featured == True: posts = posts.filter(is_featured=True) # date if year: posts = posts.filter(publish_date__year=year) if month: posts = posts.filter(publish_date__month=month) #category and series if category_slug: posts = posts.filter(categories__slug=category_slug) if series_slug: posts = posts.filter(series__slug=series_slug) # tag if tag: # return posts filtered by the tag return TaggedItem.objects.get_by_model(posts, [tag,]) else: return posts def get_published_posts(self): """ Get published posts """ return self.build_query(require_published=True) def get_featured_posts(self): """ Get featured posts """ return self.build_query(require_published=True, require_featured=True) def get_post_archive(self, require_published=True, year=None, month=None, category_slug=None, tag=None): """ Return a Post Archive A blog post archive is a tuple of (year, months[]), each month containing a tuple of (month, days[]), each day containing a tuple of (day, posts[]) """ # This was originally done as a dictionary # but python dictionaries can't guarantee sort order. posts = self.build_query(require_published=require_published, year=year, month=month, category_slug=category_slug, tag=tag) post_archive = {} for post in posts.order_by('-publish_date'): if not post_archive.has_key(post.publish_date.year): post_archive[post.publish_date.year] = {} if not post_archive[post.publish_date.year].has_key(post.publish_date.month): post_archive[post.publish_date.year][post.publish_date.month] = {} if not post_archive[post.publish_date.year][post.publish_date.month].has_key(post.publish_date.day): post_archive[post.publish_date.year][post.publish_date.month][post.publish_date.day] = [] post_archive[post.publish_date.year][post.publish_date.month][post.publish_date.day].append(post) # Now that all of that lifting is done, convert the dictionaries into tuples with lists sorted_years = [(k,[]) for k in sorted(post_archive.keys(), reverse=True)] for sorted_year in sorted_years: sorted_months = [(k,[]) for k in sorted(post_archive[sorted_year[0]], reverse=True)] sorted_year[1].extend(sorted_months) for sorted_month in sorted_months: sorted_days = [(k,[]) for k in sorted( post_archive[sorted_year[0]][sorted_month[0]], reverse=True)] sorted_month[1].extend(sorted_days) for sorted_day in sorted_days: sorted_day[1].extend( post_archive[sorted_year[0]][sorted_month[0]][sorted_day[0]]) return sorted_years @classmethod def get_tags_in_use(cls): """ Return the tags in use """ return Tag.objects.filter( id__in=TaggedItem.objects.filter( content_type=ContentType.objects.get( app_label='blogyall', model=cls ) ).values('tag_id') ) class PublishedPostManager(PostManager): """ Published Post Manager """ def get_query_set(self): return super(PublishedPostManager, self).get_query_set().filter(is_published=True)
Published 5 febrer, 2018 at 640 × 480 in IMPLEMENTEM EL PROGRAMA TEI A L’ESCOLA. ED. PRIMÀRIA I A ED. INFANTIL. Trackbacks are closed, but you can post a comment.
""" Many-to-one relationships To define a many-to-one relationship, use ``ForeignKey()``. """ from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible @python_2_unicode_compatible class Reporter(models.Model): first_name = models.CharField(max_length=30) last_name = models.CharField(max_length=30) email = models.EmailField() def __str__(self): return "%s %s" % (self.first_name, self.last_name) @python_2_unicode_compatible class Article(models.Model): headline = models.CharField(max_length=100) pub_date = models.DateField() reporter = models.ForeignKey(Reporter, models.CASCADE) def __str__(self): return self.headline class Meta: ordering = ('headline',) # If ticket #1578 ever slips back in, these models will not be able to be # created (the field names being lower-cased versions of their opposite # classes is important here). class First(models.Model): second = models.IntegerField() class Second(models.Model): first = models.ForeignKey(First, models.CASCADE, related_name='the_first') # Protect against repetition of #1839, #2415 and #2536. class Third(models.Model): name = models.CharField(max_length=20) third = models.ForeignKey('self', models.SET_NULL, null=True, related_name='child_set') class Parent(models.Model): name = models.CharField(max_length=20, unique=True) bestchild = models.ForeignKey('Child', models.SET_NULL, null=True, related_name='favored_by') class Child(models.Model): name = models.CharField(max_length=20) parent = models.ForeignKey(Parent, models.CASCADE) class ToFieldChild(models.Model): parent = models.ForeignKey(Parent, models.CASCADE, to_field='name') # Multiple paths to the same model (#7110, #7125) @python_2_unicode_compatible class Category(models.Model): name = models.CharField(max_length=20) def __str__(self): return self.name class Record(models.Model): category = models.ForeignKey(Category, models.CASCADE) @python_2_unicode_compatible class Relation(models.Model): left = models.ForeignKey(Record, models.CASCADE, related_name='left_set') right = models.ForeignKey(Record, models.CASCADE, related_name='right_set') def __str__(self): return "%s - %s" % (self.left.category.name, self.right.category.name) # Test related objects visibility. class SchoolManager(models.Manager): def get_queryset(self): return super(SchoolManager, self).get_queryset().filter(is_public=True) class School(models.Model): is_public = models.BooleanField(default=False) objects = SchoolManager() class Student(models.Model): school = models.ForeignKey(School, models.CASCADE)
Sharing my recent OD look. A bright and cheerful plaid coat paired with my favorite pom pom beanie over an all-black outfit. The colors in this coat are so vibrant that it can make any simple attire look put-together and chic. In this cold weather it becomes bit challenging to make all the covering-ups look fun and creative, so I am always looking for inspirations. Sometimes hats, beanies and colorful scarfs do the magic for me but there are days when I feel completely out of ideas. What is your favorite look for the weekends? I would loved to know how you make your cold weather outfit refreshing and fun! Love the colorful coat! Super like!! Oh my goodness, I just love your coat and those boots together! Very vibrant. That’s a beautiful coat and you styled it perfectly!!!! I agree on the challenge part of making an outfit look interesting this weather…I consider my riding boots and fedoras as my go-to style add ons!
""" desisim.pixelsplines ==================== Pixel-integrated spline utilities. Written by A. Bolton, U. of Utah, 2010-2013. """ from __future__ import absolute_import, division, print_function import numpy as n from scipy import linalg as la from scipy import sparse as sp from scipy import special as sf def compute_duck_slopes(pixbound, flux): """ Compute the slope of the illuminating quadratic spline at the locations of the 'ducks', i.e., the pixel boundaries, given the integrated flux per unit baseline within the pixels. ARGUMENTS: pixbound: (npix + 1) ndarray of pixel boundaries, in units of wavelength or log-wavelength or frequency or whatever you like. flux: (npix) ndarray of spectral flux (energy or counts) per abscissa unit, averaged over the extent of the pixel RETURNS: an (npix+1) ndarray of the slope of the underlying/illuminating flux per unit abscissa spectrum at the position of the pixel boundaries, a.k.a. 'ducks'. The end conditions are taken to be zero slope, so the exterior points of the output are zeros. """ npix = len(flux) # Test for correct argument dimensions: if (len(pixbound) - npix) != 1: print('Need one more element in pixbound than in flux!') return 0 # The array of "delta-x" values: dxpix = pixbound[1:] - pixbound[:-1] # Test for monotonif increase: if dxpix.min() <= 0.: print('Pixel boundaries not monotonically increasing!') return 0 # Encode the tridiagonal matrix that needs to be solved: maindiag = (dxpix[:-1] + dxpix[1:]) / 3. offdiag = dxpix[1:-1] / 6. upperdiag = n.append(0., offdiag) lowerdiag = n.append(offdiag, 0.) band_matrix = n.vstack((upperdiag, maindiag, lowerdiag)) # The right-hand side: rhs = flux[1:] - flux[:-1] # Solve the banded matrix and return: acoeff = la.solve_banded((1,1), band_matrix, rhs) acoeff = n.append(n.append(0., acoeff), 0.) return acoeff def cen2bound(pixelcen): """ Convenience function to do the obvious thing to transform pixel centers to pixel boundaries. """ pixbound = 0.5 * (pixelcen[1:] + pixelcen[:-1]) lo_val = 2. * pixbound[0] - pixbound[1] hi_val = 2. * pixbound[-1] - pixbound[-2] pixbound = n.append(n.append(lo_val, pixbound), hi_val) return pixbound def gauss_blur_matrix(pixbound, sig_conv): """ Function to generate a Gaussian blurring matrix for a pixelized spectrum, from specified pixel boundaries and 'sigma' vector. The matrix will be flux-conserving if the spectrum to which it is applied has units of 'counts per unit x', and pixbound and sig_conv both have units of x. pixbound should have one more element than sig_conv. Output is a scipy sparse matrix that can implement the blurring as: blurflux = gauss_blur_matrix * flux where 'flux' has the same dimensions as 'sig_conv'. """ # Derived values and error checks: npix = len(pixbound) - 1 if (len(sig_conv) != npix): raise PixSplineError('Need one more element in pixbound than in \ sig_conv!') if (sig_conv.min() <= 0.): raise PixSplineError('sig_conv must be > 0 everywhere!') xcen = 0.5 * (pixbound[1:] + pixbound[:-1]) dxpix = pixbound[1:] - pixbound[:-1] if (dxpix.min() <= 0.): raise PixSplineError('Pixel boundaries not monotonically increasing!') # Which "new" pixels does each "old" pixel touch? # Let's go +/- 6 sigma for all: sig_width = 6.0 # A minor correction factor to preserve flux conservation: cfact = 1./sf.erf(sig_width / n.sqrt(2.)) xblur_lo = xcen - sig_width * sig_conv xblur_hi = xcen + sig_width * sig_conv bin_lo = n.digitize(xblur_lo, pixbound) - 1 bin_hi = n.digitize(xblur_hi, pixbound) - 1 # Restrict the ranges: #xblur_lo = n.where((xblur_lo > pixbound[0]), xblur_lo, pixbound[0]) #xblur_lo = n.where((xblur_lo < pixbound[-1]), xblur_lo, pixbound[-1]) #xblur_hi = n.where((xblur_hi > pixbound[0]), xblur_hi, pixbound[0]) #xblur_hi = n.where((xblur_hi < pixbound[-1]), xblur_hi, pixbound[-1]) bin_lo = n.where((bin_lo >= 0), bin_lo, 0) #bin_lo = n.where((bin_lo < npix), bin_lo, npix-1) #bin_hi = n.where((bin_hi >= 0), bin_hi, 0) bin_hi = n.where((bin_hi < npix), bin_hi, npix-1) # Compute total number of non-zero elements in the broadening matrix: n_each = bin_hi - bin_lo + 1 n_entries = n_each.sum() ij = n.zeros((2, n_entries), dtype=int) v_vec = n.zeros(n_entries, dtype=float) # Loop over pixels in the "old" spectrum: pcount = 0 roottwo = n.sqrt(2.) bin_vec = n.arange(npix, dtype=int) for k in range(npix): xbound = pixbound[bin_lo[k]:bin_hi[k]+2] # Gaussian integral in terms of error function: erf_terms = cfact * 0.5 * sf.erf((xbound - xcen[k]) / (roottwo * sig_conv[k])) erf_int = (erf_terms[1:] - erf_terms[:-1]) * \ dxpix[k] / dxpix[bin_lo[k]:bin_hi[k]+1] ij[0,pcount:pcount+n_each[k]] = bin_vec[bin_lo[k]:bin_hi[k]+1] ij[1,pcount:pcount+n_each[k]] = k v_vec[pcount:pcount+n_each[k]] = erf_int pcount += n_each[k] conv_matrix = sp.coo_matrix((v_vec, ij), shape=(npix,npix)) return conv_matrix.tocsr() class PixSplineError(Exception): def __init__(self, value): self.value = value def __str__(self): return repr(self.value) class PixelSpline: """ Pixel Spline object class. Initialize as follows: PS = PixelSpline(pixbound, flux) where pixbound = array of pixel boundaries in baseline units and flux = array of specific flux values in baseline units. Assumptions: 'pixbound' should have one more element than 'flux', and units of 'flux' are -per-unit-baseline, for the baseline units in which pixbound is expressed, averaged over the extent of each pixel. """ def __init__(self, pixbound, flux): npix = len(flux) # Test for correct argument dimensions: if (len(pixbound) - npix) != 1: raise PixSplineError('Need one more element in pixbound \ than in flux!') # The array of "delta-x" values: dxpix = pixbound[1:] - pixbound[:-1] # Test for monotonic increase: if dxpix.min() <= 0.: raise PixSplineError('Pixel boundaries not monotonically \ increasing!') self.npix = npix self.pixbound = pixbound.copy() self.dxpix = dxpix.copy() self.xcen = 0.5 * (pixbound[1:] + pixbound[:-1]).copy() self.flux = flux.copy() maindiag = (dxpix[:-1] + dxpix[1:]) / 3. offdiag = dxpix[1:-1] / 6. upperdiag = n.append(0., offdiag) lowerdiag = n.append(offdiag, 0.) band_matrix = n.vstack((upperdiag, maindiag, lowerdiag)) # The right-hand side: rhs = flux[1:] - flux[:-1] # Solve the banded matrix for the slopes at the ducks: acoeff = la.solve_banded((1,1), band_matrix, rhs) self.duckslopes = n.append(n.append(0., acoeff), 0.) def point_evaluate(self, xnew, missing=0.): """ Evaluate underlying pixel spline at array of points BUG: input currently needs to be at least 1D array. """ # Initialize output array: outflux = 0. * self.flux[0] * xnew + missing # Digitize into bins: bin_idx = n.digitize(xnew, self.pixbound) # Find the indices of those that are actually in-bounds: wh_in = n.where((bin_idx > 0) * (bin_idx < len(self.pixbound))) if len(wh_in[0]) == 0: return outflux xnew_in = xnew[wh_in] idx_in = bin_idx[wh_in] - 1 # The pixel centers as per the algorithm in use: adiff = self.duckslopes[idx_in+1] - self.duckslopes[idx_in] asum = self.duckslopes[idx_in+1] + self.duckslopes[idx_in] xdiff = xnew_in - self.xcen[idx_in] fluxvals = adiff * xdiff**2 / (2. * self.dxpix[idx_in]) + asum * xdiff \ / 2. + self.flux[idx_in] - adiff * self.dxpix[idx_in] / 24. outflux[wh_in] = fluxvals return outflux def find_extrema(self, minima=False): # Find the formal extrema positions: x_ext = self.xcen - 0.5 * self.dxpix * \ (self.duckslopes[1:] + self.duckslopes[:-1]) / \ (self.duckslopes[1:] - self.duckslopes[:-1]) # Digitize these into bins: bin_ext = n.digitize(x_ext, self.pixbound) - 1 # The second derivatives, flipped in sign if minima is set: curvat = (-1)**(minima == True) * (self.duckslopes[1:] - self.duckslopes[:-1]) / self.dxpix # Find in-bin maxima: wh_ext = n.where((bin_ext == n.arange(self.npix)) * (curvat < 0)) if len(wh_ext[0]) < 1: return n.array([]) x_ext = x_ext[wh_ext] return x_ext def subpixel_average(self, ipix, xlo, xhi): adiff = self.duckslopes[ipix+1] - self.duckslopes[ipix] asum = self.duckslopes[ipix+1] + self.duckslopes[ipix] xlo_c = xlo - self.xcen[ipix] xhi_c = xhi - self.xcen[ipix] outval = adiff * ((xhi-xlo)**2 / 6. + xhi_c * xlo_c / 2.) / \ self.dxpix[ipix] + asum * (xhi_c + xlo_c) / 4. - adiff * \ self.dxpix[ipix] / 24. + self.flux[ipix] return outval def resample(self, pb_new): """ Method to resample a pixelspline analytically onto a new set of pixel boundaries. """ npix_new = len(pb_new) - 1 xnew_lo = pb_new[:-1].copy() xnew_hi = pb_new[1:].copy() # Test for monotonic: new_fulldx = xnew_hi - xnew_lo if new_fulldx.min() <= 0.: raise PixSplineError('New pixel boundaries not monotonically \ increasing!') # Digitize the new boundaries into the original bins: bin_idx = n.digitize(pb_new, self.pixbound) - 1 bin_lo = bin_idx[:-1].copy() bin_hi = bin_idx[1:].copy() # Array for accumulating new counts: new_counts = n.zeros(npix_new, dtype=self.flux.dtype) # Array for accumulating new pixel widths by pieces. # Only used for debugging so far, but may be useful in future. #new_dxpix = n.zeros(npix_new, dtype=self.flux.dtype) # For convenience, we define the following. # Careful not to modify them... they are views, not copies! xold_lo = self.pixbound[:-1] xold_hi = self.pixbound[1:] # 4 cases to cover: # Case 1: both bin_hi and bin_lo in the same bin: wh_this = n.where((bin_hi == bin_lo) * (bin_lo >= 0) * \ (bin_hi < self.npix)) if (len(wh_this[0]) > 0): dx_this = xnew_hi[wh_this] - xnew_lo[wh_this] avgval_this = self.subpixel_average(bin_lo[wh_this], xnew_lo[wh_this], xnew_hi[wh_this]) #new_dxpix[wh_this] += dx_this new_counts[wh_this] += avgval_this * dx_this # Case 2: more than one bin, lower segment: wh_this = n.where((bin_hi > bin_lo) * (bin_lo >= 0)) if (len(wh_this[0]) > 0): dx_this = xold_hi[bin_lo[wh_this]] - xnew_lo[wh_this] avgval_this = self.subpixel_average(bin_lo[wh_this], xnew_lo[wh_this], xold_hi[bin_lo[wh_this]]) #new_dxpix[wh_this] += dx_this new_counts[wh_this] += avgval_this * dx_this # Case 3: more than one bin, upper segment: wh_this = n.where((bin_hi > bin_lo) * (bin_hi < self.npix)) if (len(wh_this[0]) > 0): dx_this = xnew_hi[wh_this] - xold_lo[bin_hi[wh_this]] avgval_this = self.subpixel_average(bin_hi[wh_this], xold_lo[bin_hi[wh_this]], xnew_hi[wh_this]) #new_dxpix[wh_this] += dx_this new_counts[wh_this] += avgval_this * dx_this # Case 4: enire bins covered, whole pixels: wh_this = n.where(bin_hi > (bin_lo+1)) nwhole = len(wh_this[0]) if (nwhole > 0): pcounts = self.flux * self.dxpix icounts_this = n.array([pcounts[bin_lo[wh_this[0][ii]]+1:\ bin_hi[wh_this[0][ii]]].sum() for ii in range(nwhole)]) #new_dxpix[wh_this] += dx_this new_counts[wh_this] += icounts_this # Divide out for average and return: return new_counts / new_fulldx class WeightedRebinCoadder: """ Objet class for weighted rebinning and coaddition of spectra Initialize as follows: WRC = WeighedRebinCoadder(fluxes, invvars, pixbounds) where fluxes = list of arrays of specific flux values invvars = list of arrays of associated inverse variances pixbounds = list of arrays of pixel boundaries in baseline units """ def __init__(self, fluxes, invvars, pixbounds): # Determine minimum and maximum values of independent variable: self.min_indep = [this_bound.min() for this_bound in pixbounds] self.max_indep = [this_bound.max() for this_bound in pixbounds] self._n_input = len(fluxes) # Compute pixel widths: dpixes = [this_bound[1:] - this_bound[:-1] for this_bound in pixbounds] # Compute "specific inverse variances": sp_invvars = [invvars[i] / dpixes[i] for i in range(self._n_input)] # Compute pixelspline objects for fluxes: self._PXS_fluxes = [PixelSpline(pixbounds[i], fluxes[i]) for i in \ range(self._n_input)] # Compute pixelspline objects for specific inverse variances: self._PXS_sp_invvars = [PixelSpline(pixbounds[i], sp_invvars[i]) for \ i in range(self._n_input)] def coadd(self, pixbound_out): # Compute coverage masks: masks = [(pixbound_out[:-1] > self.min_indep[i]) * (pixbound_out[1:] < self.max_indep[i]) for i in \ range(self._n_input)] # Compute output pixel widths: dpix_out = pixbound_out[1:] - pixbound_out[:-1] # Compute interpolated fluxes: new_fluxes = [this_PXS.resample(pixbound_out) for this_PXS in \ self._PXS_fluxes] # Compute interpolated specific inverse variances (converted # to inverse variances): new_invvars = [dpix_out * this_PXS.resample(pixbound_out) for \ this_PXS in self._PXS_sp_invvars] # Compute coadded flux and inverse variance and return: flux_coadd = 0. invvar_coadd = 0. for i in range(self._n_input): flux_coadd += new_fluxes[i] * new_invvars[i] * masks[i] invvar_coadd += new_invvars[i] * masks[i] is_good = n.where(invvar_coadd > 0.) flux_coadd[is_good] /= invvar_coadd[is_good] return flux_coadd, invvar_coadd
When we think of idolatry we usually think of a primitive pagan in a mud hut bowing down to a little god on the ground, or we imagine a pagan temple, very elaborate and ornate with a lot of burning incense. But idolatry goes beyond the idea of creating a false God. Fundamentally, idolatry is thinking thoughts about God that are untrue of Him, or entertaining thoughts about Him that are unworthy of Him. In that sense, many evangelicals are guilty of idolatry. I am appalled at what some Christians assume God to be. God is appalled, too, when He says in Psalm 50:21, “You thought that I was just like you; I will reprove you, and state the case in order before your eyes.” Contemporary Christianity has lowered God to its level, robbing Him of majesty and holiness. That is as idolatrous as worshiping a rock. Yet that is precisely what many have done. They have made a false god in their own likeness. Their thoughts about Him come from the imaginations of their own minds, and have nothing to do with what He really is like. The history of mankind will probably show that no people has ever risen above its religion, and man’s spiritual history will positively demonstrate that no religion has ever been greater than its idea of God. Worship is pure or base, as the worshiper entertains high or low thoughts of God. For this reason the gravest question before the Church is always God Himself, and the most portentous fact about any man is not what he at a given time may say or do but what he in his deep heart conceives God to be like. A. W. Tozer, The Knowledge of the Holy (Harper & Row, 1961), 9. The most basic truth in worship, then, is the worshiper’s understanding of God. In Hosea 6:6 the Lord says, “I delight in loyalty rather than sacrifice, and in the knowledge of God rather than burnt offerings.” That statement elevates the knowledge of God to a position of supreme importance. Proverbs 9:10 says, “The fear of the Lord is the beginning of wisdom, and the knowledge of the Holy One is understanding.” No one is wise until he knows God; no one has even the slightest understanding until he has the knowledge of the Holy One. Without the knowledge of God, all worship is unacceptable worship, not any different from the grossest idolatry. We get into trouble when we try to make God too much like what we know. When we use human symbols to describe God, we must remember that He is the ultimate, infinite pattern and not the copy. No metaphor can fully explain God. For example, we understand God’s love because we know human love. But when God’s love behaves unlike our love we must not assume that God’s love is faulty. That is making human love the absolute pattern and judging God’s love by it. It is often easier to think of God in negative terms. We live in a world that is so opposite God that we frequently have to grasp what God is like by saying what He is not like, because He is unlike anything we understand. For example, when we say God is holy, we mean He has no sin. We cannot conceive of the essence of absolute holiness—all we have experienced is sin. We cannot comprehend eternality or infinity, but we understand boundaries, so we say that God doesn’t have any limitations. But can we understand God? The Bible says we can. We can never fully comprehend Him but we can certainly understand true things about Him. That is because God has revealed Himself to us not only in His creation, but more specifically in His Word. It is our duty to understand His self-revelation accurately. Yet the temptation is always strong to conform God’s character to our pattern of thinking. And that’s fraught with danger when we live and function in a world that is constantly changing. The very concept of an unchanging God is incompatible with a world shaped and driven by scientific discovery, constantly evolving technology, and self-determined morality. The cultural expectation to “change with the times” is invariably applied to God as well. His justice is expected to shift and slide with the standards of our times—a presumed leniency that accommodates our sinful preferences and propensities. But those are dangerous assumptions that offer false comfort. They fly in the face of Scripture’s clear testimony about God’s unchanging character and nature—in theological terms, His immutability. In the days ahead we’ll explore the biblical record concerning God’s immutability. Moreover, we’ll consider the great assurance we can draw from knowing the fixed nature of God’s irreversible promises—the ultimate comfort that comes from worshiping the One true unchanging God.
"Parser for Maya.env" import sys, os, os.path, logging #import external.ply.lex as lex try: from pymel.util.external.ply import lex except ImportError: from ply import lex from pymel.mayautils import getMayaAppDir _logger = logging.getLogger(__name__) # lexer and parser for the Maya.env file # first level lexer : form LVAR ASSIGN VALUE, then second level parsing of VALUE # variables substitution are done as in Maya, taking only into account already defined vars # when line is encountered class EnvLex : """ ply.lex lexer class to parse Maya.env file """ def __init__(self): self.states = ( ('left','exclusive'), ('right','exclusive'), ('end','exclusive'), ('cancel','exclusive') ) self.line = '' def build(self, **kwargs): self.lexer = lex.lex(object=self,**kwargs) tokens = ( 'COMMENT', 'ASSIGN', 'VAR', 'VALUE', 'OK', 'CANCEL', 'newline' ) # First level parsing : form LVAR ASSIGN VALUE t_ANY_ignore_COMMENT = r'\#[^\n]*' # Ignore starting spaces only t_INITIAL_ignore = '^[ \t]+' t_left_ignore = '[ \t]+' t_right_ignore = '[ \t]+' # careful, there seems to be a nasty bug where ply.lex takes $ as its literal value instead of in the 'end of line' meaning ? t_end_ignore = '[ \t]+$' t_cancel_ignore = '[^\n]+' # Valid l-values are env var names, must come first in line (INITIAL sate) def t_VAR(self, t) : r'[^\\^\/^\:^\*^\"^\<^\>^\|^=^ ^\t^\n^#]+' # VAR can only be on left side of ASSIGN (INITIAL parser state) self.lexer.begin('left') self.line += t.value return t # Assignation sign, ignore spaces around it def t_left_ASSIGN(self, t): r'[ \t]*=[ \t]*' self.lexer.begin('right') t.value = t.value.strip() self.line += t.value return t # r-values will be parsed again depending on os name def t_right_VALUE(self, t): r'[^=^\n^#]+' # one and only one VALUE on right side of ASSIGN self.lexer.begin('end') self.line += t.value return t # More than one equal sign per line would be an error def t_right_ASSIGN(self, t): r'[ \t]*=[ \t]*' warnings.warn ( "Double '=' at line %i, format for a Maya.env line is <VAR> = <value>, line ignored" % (self.lexer.lineno), ExecutionWarning) # skip whole line self.lexer.begin('cancel') while self.lexer.lexpos<self.lexer.lexlen and self.lexer.lexdata[self.lexer.lexpos] != '\n' : self.lexer.skip(1) def t_end_ASSIGN(self, t): r'[ \t]*=[ \t]*' warnings.warn ( "More than one '=' at line %i, format for a Maya.env line is <VAR> = <value>, line ignored" % (self.lexer.lineno), ExecutionWarning) # skip whole line self.lexer.begin('cancel') while self.lexer.lexpos<self.lexer.lexlen and self.lexer.lexdata[self.lexer.lexpos] != '\n' : self.lexer.skip(1) # r-values will be parsed again depending on os name def t_end_VALUE(self, t): r'[^=^\n^#]+' # one and only one VALUE on right side of ASSIGN warnings.warn ( "More than one value at line %i, format for a Maya.env line is <VAR> = <value>, line ignored" % (self.lexer.lineno), ExecutionWarning) # skip whole line self.lexer.begin('cancel') while self.lexer.lexpos<self.lexer.lexlen and self.lexer.lexdata[self.lexer.lexpos] != '\n' : self.lexer.skip(1) # Ignore ending spaces and count line no def t_ANY_newline(self, t): r'[ \t]*\n+' st = self.lexer.current_state() if st == 'end' : t.type = 'OK' t.value = self.line elif st == 'INITIAL' : pass else : t.type = 'CANCEL' v = '' i = self.lexer.lexpos-2 while i>0 and self.lexer.lexdata[i] != '\n' : v = self.lexer.lexdata[i] + v i -= 1 t.value = v self.lexer.begin('INITIAL') self.line = '' # Cound nb of new lines, removing white space self.lexer.lineno += len(t.value.lstrip(' \t')) return t # Error handling rules def t_ANY_error(self, t): warnings.warn ( "Illegal character '%s' at line %i, ignored" % (t.value[0], self.lexer.lineno), ExecutionWarning) self.lexer.skip(1) def t_INITIAL_error(self, t): warnings.warn ( "Invalid VAR name '%s' at line %i, line ignored" % (t.value[0], self.lexer.lineno), ExecutionWarning) # skip whole line while self.lexer.lexpos<self.lexer.lexlen and self.lexer.lexdata[self.lexer.lexpos] != '\n' : self.lexer.skip(1) def t_left_error(self, t): warnings.warn ( "Illegal value '%s' at line %i, format for a Maya.env line is <VAR> = <value>, line ignored" % (t.value[0], self.lexer.lineno), ExecutionWarning) # skip whole line while self.lexer.lexpos<self.lexer.lexlen and self.lexer.lexdata[self.lexer.lexpos] != '\n' : self.lexer.skip(1) def t_right_error(self, t): warnings.warn ( "Illegal value '%s' at line %i, line ignored" % (t.value[0], self.lexer.lineno), ExecutionWarning) # skip whole line while self.lexer.lexpos<self.lexer.lexlen and self.lexer.lexdata[self.lexer.lexpos] != '\n' : self.lexer.skip(1) # Test it def test(self,data): self.lexer.input(data) while 1: tok = self.lexer.token() if not tok: break print tok # second level lexer : os dependant parsing of values and variable substitution class ValueLex : """ second level lexer to parse right-values depending on os name """ class Warn : """ a ValueLex subclass to reset warning count """ def __init__(self): self.SEP = False self.VAR = False self.PATH = False def __init__(self, symbols, osname = os.name): self.os = osname self.symbols = symbols self.line = 0 self.warn = ValueLex.Warn() def build(self, **kwargs): self.lexer = lex.lex(object=self,**kwargs) tokens = ( 'SEP', 'RVAR1', 'RVAR2', 'PATHSEP', 'VALUE' ) # ignore ending space t_ignore = '^[ \t]+' def t_SEP(self, t): r':;' if t.value==';' and self.os != 'nt' : # t.value = ':' if not self.warn.SEP : warnings.warn ( "Line %i: the ';' separator should only be used on nt os, on linux or osx use ':' rather" % self.lexer.lineno, ExecutionWarning) self.warn.SEP = True return t # Valid l-values are env var names, must come first in line (INITIAL sate) def t_RVAR1(self, t) : r'\$[^\\^/^:^*^"^<^>^|^=^ ^\t^\n^#^$]+' if self.os == 'nt' : if not self.warn.VAR : warnings.warn ( "Line %i: $VAR should be used on linux or osx, \%VAR\% on nt" % self.lexer.lineno, ExecutionWarning) self.warn.VAR = True v = t.value.lstrip('$') if self.symbols.has_key(v) : t.value = self.symbols[v] return t def t_RVAR2(self, t) : r'\%[^\\^/^:^*^"^<^>^|^=^ ^\t^\n^#]+\%' if self.os != 'nt' : if not self.warn.VAR : warnings.warn ( "Line %i: $VAR should be used on linux or osx, \%VAR\% on nt" % self.lexer.lineno, ExecutionWarning) self.warn.VAR = True v = t.value.strip('%') if self.symbols.has_key(v) : t.value = self.symbols[v] return t # Assignation sign, ignore spaces around it def t_PATHSEP(self, t) : r'\/|\\' if self.os != 'nt' and t.value == '\\': if not self.warn.PATH : warnings.warn ( "Line %i: the '\\' path separator should only be used on nt, on linux or osx use '/' rather" % self.lexer.lineno, ExecutionWarning) self.warn.PATH = True return t # we just return the rest as-is # TODO: warnings if it's a path and path doesn't exist ? # Would need to differentiate % or $ wether we are on nt or not but py.lex # handles definitions strangely, like they are static / source time evaluated # removed % from the list of excluded characters as some definitions seem to use it : # $RMSTREE/icons/%B # TODO : Never seen it elsewhere, must check it doesn't collide with %VARNAME% on NT def t_VALUE(self, t): r'[^=^\n^#^$]+' return t def t_error(self, t): warnings.warn ( "Illegal character '%s' at line %i, ignored" % (t.value[0], self.lexer.lineno), ExecutionWarning) self.lexer.skip(1) # Test it def test(self,data): self.lexer.input(data) while 1: tok = self.lexer.token() if not tok: break print tok # Do the 2 level parse of a Maya.env format text and return a symbol table of the declared env vars def parse(text, environ=os.environ, osname=os.name): symbols = environ.copy() newsymbols = {} # first level lexer envLex = EnvLex() envLex.build() sep = os.path.pathsep # easier if we have a closing newline before eof if not text.endswith('\n') : text += '\n' envLex.lexer.input(text) # second level lexer for values valueLex = ValueLex(symbols, osname) valueLex.build() tok = 'dummy' while tok: tok = envLex.lexer.token() if tok is not None : if tok.type=='VAR' : var = tok.value elif tok.type=='VALUE' : value = tok.value elif tok.type=='OK' : # secondary parsing on value depending on os # update defined env vars up to now if var is not None : # It's quite hard to guess what Maya does with pre-existant env vars when they are also declared # in Maya.env. It seems to ignore Maya,env in most of these cases, except for MAYA_SCRIPT_PATH # where it will add the content o Maya.env to the predefined var # for PATH, MAYA_PLUGIN_PATH and LD_LIBRARY_PATH on linux it seems to add his own stuff, disreguarding # Maya.env if the the variable was pre-existant. If you notice (or want) different behaviors you can # change it here newvalue = None action = 'Ignore' if symbols.has_key(var) : # For these variables ONLY, maya will append the value in maya.env to an exisiting environment variable # (Default is for already defined value to override value in maya.env) # (note the LACK of PYTHONPATH here... boo!) if var in ('MAYA_SCRIPT_PATH', 'MAYA_PLUG_IN_PATH', 'MAYA_MODULE_PATH', 'XBMLANGPATH'): newvalue = self.symbols[var]+sep action = 'Add' else : newvalue = '' action = 'Set' if newvalue is not None : # only display warning for a better feedback there, # as even if it makes no sense we can in all cases affect the value to the env var valueLex.symbols = symbols valueLex.lexer.input(value) valueLex.lexer.lineno = tok.lineno valueLex.warn = ValueLex.Warn() vtok = 'dummy' while vtok: vtok = valueLex.lexer.token() if vtok is not None : newvalue += vtok.value symbols[var] = newvalue newsymbols[var] = newvalue if action == 'Set' : print u"%s set to value %s" % (var, unicode(newvalue)) elif action == 'Add' : print u"%s was already set, appending value: %s" % (var, unicode(newvalue)) elif action == 'Ignore' : print u"%s was already set, ignoring line: %s" % (var, unicode(tok.value)) var = value = None elif tok.type=='CANCEL' : print "Line was ignored due to parsing errors: %s" % unicode(tok.value) var = value = None else : pass return newsymbols # parse the Maya.env file and set the environment variables and python path accordingly def parseMayaenv(envLocation=None, version=None) : """ parse the Maya.env file and set the environement variablas and python path accordingly. You can specify a location for the Maya.env file or the Maya version""" name = 'Maya.env' envPath = None if envLocation : envPath = envLocation if not os.path.isfile(envPath) : envPath = os.path.join(envPath, name) # no Maya.env specified, we look for it in MAYA_APP_DIR if not envPath or not envPath.isfile() : maya_app_dir = getMayaAppDir() if not maya_app_dir: _logger.warn("Neither HOME nor MAYA_APP_DIR is set, unable to find location of Maya.env") return False # try to find which version of Maya should be initialized if not version : # try to query version, will only work if reparsing env from a working Maya version = Version.installName() if version is None: # if run from Maya provided mayapy / python interpreter, can guess version _logger.debug("Unable to determine which verson of Maya should be initialized, trying for Maya.env in %s" % maya_app_dir) # look first for Maya.env in 'version' subdir of MAYA_APP_DIR, then directly in MAYA_APP_DIR if version and os.path.isfile(os.path.join(maya_app_dir, version, name)) : envPath = os.path.join(maya_app_dir, version, name) else : envPath = os.path.join(maya_app_dir, name) # finally if we have a possible Maya.env, parse it if os.path.isfile(envPath) : try : envFile = open(envPath) except : _logger.warn ("Unable to open Maya.env file %s" % envPath ) return False success = False try : envTxt = envFile.read() envVars = parse(envTxt) # update env vars for v in envVars : #_logger.debug("%s was set or modified" % v) os.environ[v] = envVars[v] # add to syspath if envVars.has_key('PYTHONPATH') : #_logger.debug("sys.path will be updated") plist = os.environ['PYTHONPATH'].split(os.pathsep) for p in plist : if not p in sys.path : sys.path.append(p) success = True finally : envFile.close() return success else : if version : print"Found no suitable Maya.env file for Maya version %s" % version else : print"Found no suitable Maya.env file" return False
The Fancy Pineapple Wall Sconce with Brass Pineapple Wall Sconce Wall Sconces House Decoration Home is one of pictures of furniture ideas for your home. The resolution of Fancy Pineapple Wall Sconce with Brass Pineapple Wall Sconce Wall Sconces House Decoration Home was 1013×1350 pixels. In addition to ideas about Interior Furniture. You can find the other picture or post related to Pineapple Wall Sconce just push the gallery or if you are interested in similar pictures of Fancy Pineapple Wall Sconce with Brass Pineapple Wall Sconce Wall Sconces House Decoration Home, you are free to browse through search feature that located on top this page. If you are pleased, please share this link to social media to make the people near you are also inspired. We hope some of the furniture design below can add to the beauty of your home. Perfect Pineapple Wall Sconce with A Mid 20th Century Set Of Four Pineapple Wall Lights Timothy Langston. Stunning Pineapple Wall Sconce with Acclaim Lighting Lanai Collection 2 Light Black Coral Outdoor Wall. Interesting Pineapple Wall Sconce with Vintage Brass Pineapple Wall Sconce Candle Holders Waccamaw. Stylish Pineapple Wall Sconce with Candle Holder Pineapple Candle Holder Pineapple Candle Holder Wall. Stunning Pineapple Wall Sconce with Monteaux Lighting Wall Mount 21 In Bronze Outdoor Pineapple Coach. Furniture could complete your area. Without it, your house will certainly simply end up being an empty space. Nevertheless, you should beware when selecting furniture for your Interior. Ascertain that it fits ideal. If you make use of furniture that do not match, expect your Interior to look disorderly. However how are you going to pick the ideal furniture? So, we will certainly offer you a couple of photos of pineapple wall sconce could be your ideal furniture design suggestions. Here’s some pictures of design ideas for your Interior furniture design related to Pineapple Wall Sconce. We collected the images from various sources to provide the best inspiration for you. The image that we serve is high quality and high resolution, with the hope that you can clearly see the detail of inspiration about Pineapple Wall Sconce. We hope some of the furniture design below can add to the beauty of your home. You can find related images related to Pineapple Wall Sconce on this website by discovering the category section, or the related posts below. So, take your time and find the Pineapple Wall Sconce pictures informed here that suitable with your needs. Thanks for visiting our site, If you found any images copyrighted to yours, please contact us, and we will remove it. We don’t intend to display any copyright protected images. We hope you can find what you need here. We always effort to show a picture with HD resolution or at least with perfect pictures. The Fancy Pineapple Wall Sconce with Brass Pineapple Wall Sconce Wall Sconces House Decoration Home can be the beneficial inspiration for those who seek an image according to distinct categories. Finally, all images we have been displayed on this site will inspire you all.
from flask import render_template from common import * import datetime from tables import users, restolikes, dishlikes from werkzeug import redirect page = Blueprint(__name__) @page.route('/user/<int:user_id>/') def main(user_id): if isValidUserId(user_id): return render_template('user.html', user_id = user_id) return abort(404) @page.route('/user/<int:user_id>/settings/') def settings(user_id): if isLogged(): return render_template('settings.html', user_id = user_id) return render_template('home.html') def updateProfile(userId, name = None, email = None, password = None): if (isLogged()): args = [name, email, password] settings = [] for i in range(3): if args[i]: if i is 2: settings.append(md5Password(args[i])) else: settings.append(args[i]) with db() as connection: with connection.cursor() as cursor: try: update(userId, settings); except dbapi2.Error: connection.rollback() else: connection.commit() return @page.route('/user/<int:user_id>/likeresto/<int:resto_id>/') def likeResto(user_id, resto_id): from tables import restolikes liked = restolikes.likeResto(user_id, resto_id) #if liked is 0: #return 'Returned 0' #elif liked is 1: #return 'Returned 1' #else: #return 2 return redirect(request.args.get('next') or request.referrer or url_for(default)) @page.route('/user/<int:user_id>/likedish/<int:dish_id>/') def likeDish(user_id, dish_id): from tables import dishlikes liked = dishlikes.likeDish(user_id, dish_id) #if liked is 0: #return 'Returned 0' #if liked is 1: #return 'Returned 1' #else: #return 'Returned 2' return redirect(request.args.get('next') or request.referrer or url_for(default)) def reset(): dishlikes.reset() restolikes.reset() users.reset() users.addUser('Yusuf Aksoy', 'yusuf@y.y', md5Password('12345')) # id=1 users.addUser('Moctar Sawadogo', 'moctar@m.m', md5Password('12345')) # id=2 users.addUser('Test User', 'test@test.com', md5Password('12345')) # id=3 return
Sarah's Crafts & Stuff: Pirate Tri-fold Birthday Card - Arrrrrggg! Pirate Tri-fold Birthday Card - Arrrrrggg! Hello me Mateys! 'Tis time for another challenge at My Sheri Crafts! Charlotte challenged us with the theme "Let's Take a Trip". Make a project that has something to do with traveling. It could be a mode of travel, a destination, reason to travel, etc. I decided to get out my Buccaneer cartridge and play with it. I had a lot of fun with this! I just love that ship! "Ahoy, 'tis a birthday boy" is done with the Storybook cartridge, arranged and welded with my Gypsy. To make the letters stand out better, I outlined them with a black marker. The papers are also from the Buccaneer cartridge and are all ink distressed. I like both papers, but that green looks like the deep green sea with big bubbles floating around! I pop dotted the layers on both the ship and the pirate. The card base is a file I created and saved on the Gypsy. It's a really fun card and I love how it turned out! Thanks for stopping by! Please go check out all the great inspiration from the other team members and then join in the fun! Arrrrgggg! Sarah, you nailed this theme with your fabulous 3D tri-fold birthday card -- sure to delight some little (or big) pirate on their special day!!! This is adorable, Sarah. I love that pirate ship, too. This is a fabulous card. Great card. You should put it into the pirate challenge over at Lollipop Ladies which finishes very soon. What a fantastic card! Great design and your attention to details are fab. You're right, this is a really fun card!! I don't think I would attempt something like this, but it looks fabulous! I love how the different flaps flip out and reveal even more surprises. Thanks for playing with the Lollipop Ladies. This is a great card. So fun love how the ship and pirate pop out. Thanks for sharing with us at the Lollipop Ladies. What a beautiful card you have created. I love the Buccaneer cartridge. I think it is one of my favorites for the imagine. Thank you for playing along with us at CCCB. It's a fabulous card and such fun! Is that a sunken pirate ship as the sea is behind it! LOL If so you have made the twist too - with water! The style of the tri-fold is also fun and would suit the current challenge at Crafting with Dragonflies (unusual fold). Thanks for entering this with us at the Lollipop Ladies! Fabulous 3D design!! thanks so much for sharing with us at Love to Create Challenges! This is such a FAB card. Just love love it. Great to see you here - thanks for joining in the fun at Crafting with Dragonflies! What a fun card! Thanks for playing at Hiding in my Craft Room! Sarah, this card is loads of fun! There are so many surprises to look at in every page! Oh, how cool is that!!!Thanks so much for participating in our LOVE TO CREATE challenge!! Hope to see you again for our next challenge!! Wonderful job on this! Love it!
from django.http import JsonResponse from data_2015_fall.models import * from neomodel import db class NetworkResponse(): def __init__(self, name): self.name = name self.children = [] def to_dict(self): return { "name": self.name, "children": [c.to_dict() for c in self.children], } def get_citations(name): query = "match (n1)-[:CITED]->(n2) where n1.title='%s' return n2" % name results, meta = db.cypher_query(query) return [a.title for a in [Article.inflate(row[0]) for row in results]] def get_citations_network(request, name): root = NetworkResponse(name) root.children = [NetworkResponse(title) for title in get_citations(name)] for c in root.children: c.children = [NetworkResponse(title) for title in get_citations(c.name)] for c2 in c.children: c2.children = [NetworkResponse(title) for title in get_citations(c2.name)] return JsonResponse(root.to_dict()) def get_authors(name): query = "match (n1)<-[:AUTHORED]-(n2) where n1.title={name} return n2" results, meta = db.cypher_query(query, {"name": name}) return [a.name for a in [Author.inflate(row[0]) for row in results]] def get_papers(name): query = "match (n1)-[:AUTHORED]->(n2) where n1.name={name} return n2" results, meta = db.cypher_query(query, {"name": name}) return [a.title for a in [Article.inflate(row[0]) for row in results]] def get_coauthors(request, name): query = "match (n1)-[:COAUTHORED]->(n2) where n1.name={name} return n2" results, meta = db.cypher_query(query, {"name": name}) return JsonResponse({"authors": [a.toDict() for a in [Author.inflate(row[0]) for row in results]]}) def get_paper_author_network(request, name): root = NetworkResponse(name) root.children = [NetworkResponse(author) for author in get_authors(name)] for author in root.children: author.children = [NetworkResponse(title) for title in get_papers(author.name)] for paper in author.children: paper.children = [NetworkResponse(author) for author in get_authors(paper.name)] return JsonResponse(root.to_dict())
Tickets are $7 for Adults, $6 for Students, Senior Citizens & Teachers. Family Tickets (2 adults, 2 students) are available for $24. A theatre family enlists kids from all over the country to follow the 42nd parallel to 42nd Street for their big shot at stardom. But when the couple’s only daughter leaves a roadside diner in search of a normal life, they must decide what’s more important: the Great White Way or family? Don’t worry, there’s a happy ending, of course, accompanied by great traditional and original songs!
#!/usr/env python class Flyable: def fly(self): pass class Quackable(object): def quack(self): pass class ReadHeadDuckFly(Flyable): def fly(self): print "I am a readheadduck, I can fly" class ReadHeadDuckQack(Quackable): def quack(self): print "I am a readheadduck,Dcuk duck duck..." class Duck(): def swim(self): print "I am a duck,I can swim..." class ReadHeadDuck(Duck): def __init__(self,flyable,quackable): self.f = flyable self.q = quackable def fly(self): return self.f.fly() def quack(self): return self.q.quack() class Mallardduckflyable(Flyable): def fly(self): print "I am a Mallardduck....,I can fly" class MallardduckQuackble(Quackable): def quack(self): print "I am a Mallardduck,Duck.duck..duck.." class Mallardduck(Duck): def __init__(self,flyable,quackable): self.f = flyable self.q = quackable def fly(self): return self.f.fly() def quack(self): return self.q.quack() if __name__ == "__main__": duck = Duck() duck.swim() rhduck = ReadHeadDuck(ReadHeadDuckFly(),ReadHeadDuckQack()) rhduck.fly() rhduck.swim() rhduck.quack() md = Mallardduck(Mallardduckflyable(),MallardduckQuackble()) md.fly() md.quack() md.swim()
Fran Drescher came by to talk to Kelly & Michael. Michael said she “always makes us laugh.” Michael said that they needed extra security for her. She didn’t know why, she’s the most accessible celebrity. She answers everyone’s tweets. She’s addicted to it. She got it two or three years ago. She talks to her fans a lot. Her fans are very creative. The Tonys are Sunday, June 8. Drescher’s presenting. She has the third fitting for her dress tomorrow. She tries not to eat, but she keeps eating anyway. It’s hard to eat healthily when you’re doing a show, but she’s very excited for her dress. It’s an original. They talked about Drescher’s Broadway debut in Cinderella. She pointed out that her role has the biggest speaking part, but the smallest singing part when they approached her and they said, “Yeah, we know.” She thought it was funny though. Kelly was surprised that this is her debut. She’s done off-Broadway though. They’re at the Broadway Theater, which is one of the biggest theaters, now. They also rewrote the story and modernized it. Cinderella is apparently a stronger character and the prince is the one who’s more lost. He’s the one who needs a woman to help him realize who he is. There’s also a revolutionary in the story now fighting for the underdogs in the community. That actually sounds awesome. Drescher said that men come because the women want to go, but everybody leaves saying how much they liked it. It’s also visually stunning. Kelly got to perform with the cast and got to wear one of the costumes a while ago. It was incredible, but it was cumbersome. They have physical therapists come for free every week just because of the costumes. Kelly & Michael: Fran Drescher Taking On Too Much? Drescher said that you have to make sure that you don’t do too much in theater. She missed a line and said it was because her throat was hurting and she thought she would start coughing, but she was really distracted by thinking about her travel plans.
# encoding: utf-8 import datetime from south.db import db from south.v2 import DataMigration from django.contrib.contenttypes import generic from django.db import models class Migration(DataMigration): def get_fields_mapping(self, orm): return {'old-mz-person-original': (orm.Person, 'original_id'), 'old-mz-organisation-original': (orm.Organisation, 'original_id'), 'old-mz-organisation-external': (orm.Organisation, 'external_id'), 'old-mz-place-original': (orm.Place, 'original_id'), 'old-mz-place-external': (orm.Place, 'external_id'), 'old-mz-position-external': (orm.Position, 'external_id'), 'old-mz-position-title-original': (orm.PositionTitle, 'original_id')} def forwards(self, orm): for scheme, model_and_field in self.get_fields_mapping(orm).items(): model, field = model_and_field for o in model.objects.all(): old_value = getattr(o, field) if old_value is None or old_value == '': continue content_type = orm['contenttypes.ContentType'].objects.get(app_label="core", model=model.__name__.lower()) orm.Identifier.objects.create(scheme=scheme, identifier=unicode(old_value), object_id=o.id, content_type=content_type) def backwards(self, orm): # We need the next two lines to make the content_object field # of Identifier work, as suggested here: # http://south.readthedocs.org/en/latest/generics.html gfk = generic.GenericForeignKey() gfk.contribute_to_class(orm.Identifier, 'content_object') fields_mapping = self.get_fields_mapping(orm) for identifier in orm.Identifier.objects.all(): id_to_put_back = identifier.identifier scheme = identifier.scheme if scheme not in fields_mapping: raise RuntimeError, "It's not possible to migrate identifiers of scheme '%s' backwards" % (scheme,) if id_to_put_back is None or id_to_put_back == '': continue model, field = fields_mapping[scheme] setattr(identifier.content_object, field, int(identifier.object_id)) models = { 'contenttypes.contenttype': { 'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'core.alternativepersonname': { 'Meta': {'unique_together': "(('person', 'alternative_name'),)", 'object_name': 'AlternativePersonName'}, 'alternative_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name_to_use': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alternative_names'", 'to': "orm['core.Person']"}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.contact': { 'Meta': {'object_name': 'Contact'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ContactKind']"}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'core.contactkind': { 'Meta': {'object_name': 'ContactKind'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.identifier': { 'Meta': {'unique_together': "(('scheme', 'identifier'),)", 'object_name': 'Identifier'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'scheme': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.informationsource': { 'Meta': {'object_name': 'InformationSource'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'entered': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.organisation': { 'Meta': {'object_name': 'Organisation'}, '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ended': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OrganisationKind']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'original_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'started': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}), 'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.organisationkind': { 'Meta': {'object_name': 'OrganisationKind'}, '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.parliamentarysession': { 'Meta': {'object_name': 'ParliamentarySession'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'house': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Organisation']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mapit_generation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.person': { 'Meta': {'object_name': 'Person'}, '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'can_be_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_of_birth': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}), 'date_of_death': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'legal_name': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'original_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.place': { 'Meta': {'object_name': 'Place'}, '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.PlaceKind']"}), 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}), 'mapit_area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mapit.Area']", 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'organisation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Organisation']", 'null': 'True', 'blank': 'True'}), 'original_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'parent_place': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_places'", 'null': 'True', 'to': "orm['core.Place']"}), 'parliamentary_session': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ParliamentarySession']", 'null': 'True'}), 'shape_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}), 'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.placekind': { 'Meta': {'object_name': 'PlaceKind'}, '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}), 'plural_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.position': { 'Meta': {'object_name': 'Position'}, 'category': ('django.db.models.fields.CharField', [], {'default': "'other'", 'max_length': '20'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'end_date': ('django_date_extensions.fields.ApproximateDateField', [], {'default': "'future'", 'max_length': '10', 'blank': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'note': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'}), 'organisation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Organisation']", 'null': 'True', 'blank': 'True'}), 'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Person']"}), 'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Place']", 'null': 'True', 'blank': 'True'}), 'sorting_end_date': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'}), 'sorting_end_date_high': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'}), 'sorting_start_date': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'}), 'sorting_start_date_high': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10'}), 'start_date': ('django_date_extensions.fields.ApproximateDateField', [], {'max_length': '10', 'blank': 'True'}), 'subtitle': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}), 'title': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.PositionTitle']", 'null': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.positiontitle': { 'Meta': {'object_name': 'PositionTitle'}, '_summary_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}), 'original_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'requires_place': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}), 'summary': ('markitup.fields.MarkupField', [], {'default': "''", 'no_rendered_field': 'True', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'core.slugredirect': { 'Meta': {'unique_together': "(('content_type', 'old_object_slug'),)", 'object_name': 'SlugRedirect'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'new_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'old_object_slug': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'images.image': { 'Meta': {'object_name': 'Image'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100'}), 'is_primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '400'}) }, 'mapit.area': { 'Meta': {'object_name': 'Area'}, 'country': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'areas'", 'null': 'True', 'to': "orm['mapit.Country']"}), 'generation_high': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'final_areas'", 'null': 'True', 'to': "orm['mapit.Generation']"}), 'generation_low': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_areas'", 'null': 'True', 'to': "orm['mapit.Generation']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}), 'parent_area': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['mapit.Area']"}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'areas'", 'to': "orm['mapit.Type']"}) }, 'mapit.country': { 'Meta': {'object_name': 'Country'}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}) }, 'mapit.generation': { 'Meta': {'object_name': 'Generation'}, 'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'mapit.type': { 'Meta': {'object_name': 'Type'}, 'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}), 'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'scorecards.category': { 'Meta': {'object_name': 'Category'}, '_description_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 18, 10, 59, 1, 621786)', 'auto_now_add': 'True', 'blank': 'True'}), 'description': ('markitup.fields.MarkupField', [], {'no_rendered_field': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}), 'synopsis': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 18, 10, 59, 1, 621832)', 'auto_now': 'True', 'blank': 'True'}) }, 'scorecards.entry': { 'Meta': {'unique_together': "(('content_type', 'object_id', 'category', 'date'),)", 'object_name': 'Entry'}, '_equivalent_remark_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), '_extended_remark_rendered': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['scorecards.Category']"}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 18, 10, 59, 1, 622393)', 'auto_now_add': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'disabled_comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'}), 'equivalent_remark': ('markitup.fields.MarkupField', [], {'max_length': '400', 'no_rendered_field': 'True', 'blank': 'True'}), 'extended_remark': ('markitup.fields.MarkupField', [], {'max_length': '1000', 'no_rendered_field': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'remark': ('django.db.models.fields.CharField', [], {'max_length': '150'}), 'score': ('django.db.models.fields.IntegerField', [], {}), 'source_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}), 'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 7, 18, 10, 59, 1, 622419)', 'auto_now': 'True', 'blank': 'True'}) } } complete_apps = ['contenttypes', 'core']
These Are The Days - Kyle Norbraten from The Coastal Crew on Vimeo. These are the days, the ones that really matter. Memorable laps with friends, summer time vibes and golden sunsets. Nothing beats time at home, it is paradise to say the least. From dusk to dawn this is how we choose to spend our days.
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # # Copyright 2012 Cisco Systems, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Sumit Naiksatam, Cisco Systems, Inc. # import logging from quantum.db import api as db from quantum.openstack.common import importutils from quantum.plugins.cisco.common import cisco_constants as const from quantum.plugins.cisco.common import cisco_credentials_v2 as cred from quantum.plugins.cisco.common import cisco_exceptions as cexc from quantum.plugins.cisco.common import cisco_utils as cutil from quantum.plugins.cisco.db import network_db_v2 as cdb from quantum.plugins.cisco.db import ucs_db_v2 as udb from quantum.plugins.cisco.l2device_plugin_base import L2DevicePluginBase from quantum.plugins.cisco.ucs import cisco_ucs_configuration as conf LOG = logging.getLogger(__name__) class UCSVICPlugin(L2DevicePluginBase): """UCS Device Plugin""" def __init__(self): self._driver = importutils.import_object(conf.UCSM_DRIVER) LOG.debug("Loaded driver %s\n" % conf.UCSM_DRIVER) # TODO (Sumit) Make the counter per UCSM self._port_profile_counter = 0 def get_all_networks(self, tenant_id, **kwargs): """ Returns a dictionary containing all <network_uuid, network_name> for the specified tenant. """ LOG.debug("UCSVICPlugin:get_all_networks() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) networks_list = db.network_list(tenant_id) new_networks_list = [] for network in networks_list: new_network_dict = cutil.make_net_dict(network[const.UUID], network[const.NETWORKNAME], []) new_networks_list.append(new_network_dict) return new_networks_list def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id, **kwargs): """ Creates a new Virtual Network, and assigns it a symbolic name. """ LOG.debug("UCSVICPlugin:create_network() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) self._driver.create_vlan(vlan_name, str(vlan_id), self._ucsm_ip, self._ucsm_username, self._ucsm_password) ports_on_net = [] new_network_dict = cutil.make_net_dict(net_id, net_name, ports_on_net) return new_network_dict def delete_network(self, tenant_id, net_id, **kwargs): """ Deletes the network with the specified network identifier belonging to the specified tenant. """ LOG.debug("UCSVICPlugin:delete_network() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) vlan_binding = cdb.get_vlan_binding(net_id) vlan_name = vlan_binding[const.VLANNAME] self._driver.delete_vlan(vlan_name, self._ucsm_ip, self._ucsm_username, self._ucsm_password) #Rohit:passing empty network name, might not need fixing net_dict = cutil.make_net_dict(net_id, "", []) return net_dict def get_network_details(self, tenant_id, net_id, **kwargs): """ Deletes the Virtual Network belonging to a the spec """ LOG.debug("UCSVICPlugin:get_network_details() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) network = db.network_get(net_id) ports_list = network[const.NETWORKPORTS] ports_on_net = [] for port in ports_list: new_port = cutil.make_port_dict(port[const.UUID], port[const.PORTSTATE], port[const.NETWORKID], port[const.INTERFACEID]) ports_on_net.append(new_port) new_network = cutil.make_net_dict(network[const.UUID], network[const.NETWORKNAME], ports_on_net) return new_network def update_network(self, tenant_id, net_id, **kwargs): """ Updates the symbolic name belonging to a particular Virtual Network. """ LOG.debug("UCSVICPlugin:update_network() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) network = db.network_get(net_id) net_dict = cutil.make_net_dict(network[const.UUID], network[const.NETWORKNAME], []) return net_dict def get_all_ports(self, tenant_id, net_id, **kwargs): """ Retrieves all port identifiers belonging to the specified Virtual Network. """ LOG.debug("UCSVICPlugin:get_all_ports() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) network = db.network_get(net_id) ports_list = network[const.NETWORKPORTS] ports_on_net = [] for port in ports_list: port_binding = udb.get_portbinding(port[const.UUID]) ports_on_net.append(port_binding) return ports_on_net def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs): """ Creates a port on the specified Virtual Network. """ LOG.debug("UCSVICPlugin:create_port() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) qos = None ucs_inventory = kwargs[const.UCS_INVENTORY] least_rsvd_blade_dict = kwargs[const.LEAST_RSVD_BLADE_DICT] chassis_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_CHASSIS] blade_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_ID] blade_data_dict = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_DATA] new_port_profile = self._create_port_profile(tenant_id, net_id, port_id, conf.DEFAULT_VLAN_NAME, conf.DEFAULT_VLAN_ID) profile_name = new_port_profile[const.PROFILE_NAME] rsvd_nic_dict = ucs_inventory.reserve_blade_interface( self._ucsm_ip, chassis_id, blade_id, blade_data_dict, tenant_id, port_id, profile_name) port_binding = udb.update_portbinding(port_id, portprofile_name=profile_name, vlan_name=conf.DEFAULT_VLAN_NAME, vlan_id=conf.DEFAULT_VLAN_ID, qos=qos) return port_binding def delete_port(self, tenant_id, net_id, port_id, **kwargs): """ Deletes a port on a specified Virtual Network, if the port contains a remote interface attachment, the remote interface should first be un-plugged and then the port can be deleted. """ LOG.debug("UCSVICPlugin:delete_port() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) ucs_inventory = kwargs[const.UCS_INVENTORY] chassis_id = kwargs[const.CHASSIS_ID] blade_id = kwargs[const.BLADE_ID] interface_dn = kwargs[const.BLADE_INTF_DN] port_binding = udb.get_portbinding(port_id) profile_name = port_binding[const.PORTPROFILENAME] self._delete_port_profile(port_id, profile_name) ucs_inventory.unreserve_blade_interface(self._ucsm_ip, chassis_id, blade_id, interface_dn) return udb.remove_portbinding(port_id) def update_port(self, tenant_id, net_id, port_id, **kwargs): """ Updates the state of a port on the specified Virtual Network. """ LOG.debug("UCSVICPlugin:update_port() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) pass def get_port_details(self, tenant_id, net_id, port_id, **kwargs): """ This method allows the user to retrieve a remote interface that is attached to this particular port. """ LOG.debug("UCSVICPlugin:get_port_details() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) port_binding = udb.get_portbinding(port_id) return port_binding def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id, **kwargs): """ Attaches a remote interface to the specified port on the specified Virtual Network. """ LOG.debug("UCSVICPlugin:plug_interface() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) port_binding = udb.get_portbinding(port_id) profile_name = port_binding[const.PORTPROFILENAME] old_vlan_name = port_binding[const.VLANNAME] new_vlan_name = self._get_vlan_name_for_network(tenant_id, net_id) new_vlan_id = self._get_vlan_id_for_network(tenant_id, net_id) self._driver.change_vlan_in_profile(profile_name, old_vlan_name, new_vlan_name, self._ucsm_ip, self._ucsm_username, self._ucsm_password) return udb.update_portbinding(port_id, vlan_name=new_vlan_name, vlan_id=new_vlan_id) def unplug_interface(self, tenant_id, net_id, port_id, **kwargs): """ Detaches a remote interface from the specified port on the specified Virtual Network. """ LOG.debug("UCSVICPlugin:unplug_interface() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) port_binding = udb.get_portbinding(port_id) profile_name = port_binding[const.PORTPROFILENAME] old_vlan_name = port_binding[const.VLANNAME] new_vlan_name = conf.DEFAULT_VLAN_NAME self._driver.change_vlan_in_profile(profile_name, old_vlan_name, new_vlan_name, self._ucsm_ip, self._ucsm_username, self._ucsm_password) return udb.update_portbinding(port_id, vlan_name=new_vlan_name, vlan_id=conf.DEFAULT_VLAN_ID) def create_multiport(self, tenant_id, net_id_list, ports_num, port_id_list, **kwargs): """ Creates a port on the specified Virtual Network. """ LOG.debug("UCSVICPlugin:create_multiport() called\n") self._set_ucsm(kwargs[const.DEVICE_IP]) qos = None ucs_inventory = kwargs[const.UCS_INVENTORY] least_rsvd_blade_dict = kwargs[const.LEAST_RSVD_BLADE_DICT] chassis_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_CHASSIS] blade_id = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_ID] blade_data_dict = least_rsvd_blade_dict[const.LEAST_RSVD_BLADE_DATA] port_binding_list = [] for port_id, net_id in zip(port_id_list, net_id_list): new_port_profile = self._create_port_profile( tenant_id, net_id, port_id, conf.DEFAULT_VLAN_NAME, conf.DEFAULT_VLAN_ID) profile_name = new_port_profile[const.PROFILE_NAME] rsvd_nic_dict = ucs_inventory.reserve_blade_interface( self._ucsm_ip, chassis_id, blade_id, blade_data_dict, tenant_id, port_id, profile_name) port_binding = udb.update_portbinding( port_id, portprofile_name=profile_name, vlan_name=conf.DEFAULT_VLAN_NAME, vlan_id=conf.DEFAULT_VLAN_ID, qos=qos) port_binding_list.append(port_binding) return port_binding_list def detach_port(self, tenant_id, instance_id, instance_desc, **kwargs): """ Remove the association of the VIF with the dynamic vnic """ LOG.debug("detach_port() called\n") port_id = kwargs[const.PORTID] kwargs.pop(const.PORTID) return self.unplug_interface(tenant_id, None, port_id, **kwargs) def _get_profile_name(self, port_id): """Returns the port profile name based on the port UUID""" profile_name = conf.PROFILE_NAME_PREFIX + cutil.get16ByteUUID(port_id) return profile_name def _get_vlan_name_for_network(self, tenant_id, network_id): """Return the VLAN name as set by the L2 network plugin""" vlan_binding = cdb.get_vlan_binding(network_id) return vlan_binding[const.VLANNAME] def _get_vlan_id_for_network(self, tenant_id, network_id): """Return the VLAN id as set by the L2 network plugin""" vlan_binding = cdb.get_vlan_binding(network_id) return vlan_binding[const.VLANID] def _create_port_profile(self, tenant_id, net_id, port_id, vlan_name, vlan_id): """Create port profile in UCSM""" if self._port_profile_counter >= int(conf.MAX_UCSM_PORT_PROFILES): raise cexc.UCSMPortProfileLimit(net_id=net_id, port_id=port_id) profile_name = self._get_profile_name(port_id) self._driver.create_profile(profile_name, vlan_name, self._ucsm_ip, self._ucsm_username, self._ucsm_password) self._port_profile_counter += 1 new_port_profile = {const.PROFILE_NAME: profile_name, const.PROFILE_VLAN_NAME: vlan_name, const.PROFILE_VLAN_ID: vlan_id} return new_port_profile def _delete_port_profile(self, port_id, profile_name): """Delete port profile in UCSM""" self._driver.delete_profile(profile_name, self._ucsm_ip, self._ucsm_username, self._ucsm_password) self._port_profile_counter -= 1 def _set_ucsm(self, ucsm_ip): """Set the UCSM IP, username, and password""" self._ucsm_ip = ucsm_ip self._ucsm_username = cred.Store.get_username(conf.UCSM_IP_ADDRESS) self._ucsm_password = cred.Store.get_password(conf.UCSM_IP_ADDRESS)
Cast Iron Sportsman's Grill. This rugged, charcoal hibachi-style grill is perfect for picnics, tailgating, camping, or patio. The Lodge Cast Iron Sportsman's Grill features a draft door that regulates heat. Coals are accessible behind a flip-down door. Grill has two adjustable heights. MAKE EVERY MEAL A MEMORY. Lodge knows that cooking is about more than just the food; it’s about the memories.
import numpy as np from PyQt4 import QtGui from mtpy.gui.SmartMT.gui.matplotlib_imabedding import MPLCanvas, Cursor from mtpy.gui.SmartMT.ui_asset.groupbox_frequency_period_single import Ui_groupBoxFrequency_pereiod_single class FrequencySingle(QtGui.QGroupBox): """ Frequency selection (single frequency) """ _unit_period = 'second' _unit_frequency = 'Hz' _title_period = 'Period' _title_frequency = 'Frequency' def __init__(self, parent, use_period=False): QtGui.QGroupBox.__init__(self, parent) self._mt_objs = None self.use_period = use_period self.ui = Ui_groupBoxFrequency_pereiod_single() self.ui.setupUi(self) self._histogram = FrequencySingle.FrequencyHistogram() self.set_use_period(self.use_period) # add matplotlib canvas self.ui.verticalLayoutFrequencyPeriod.addWidget(self._histogram) # connect components self.ui.comboBoxPeriod.currentIndexChanged.connect(self.update_histogram) self.ui.comboBoxPeriod.editTextChanged.connect(self.update_histogram) self._histogram.mpl_connect('button_release_event', self._mouse_pick) def toggle_time_scale(self, *args): self.use_period = not self.use_period self.set_use_period(self.use_period) def set_use_period(self, use_period=False): if use_period: self._histogram.set_unit(self._unit_period) self._histogram.set_title(self._title_period) title = '%s (%s)' % (self._title_period, self._unit_period) else: self._histogram.set_unit(self._unit_frequency) self._histogram.set_title(self._title_frequency) title = '%s (%s)' % (self._title_frequency, self._unit_frequency) self.setTitle(title) self._update_frequency() def _mouse_pick(self, event): if not event.inaxes: return x = event.xdata self.ui.comboBoxPeriod.setEditText("%.5f" % x) def get_frequency(self): return float(self.ui.comboBoxPeriod.currentText()) def update_histogram(self): value = float(self.ui.comboBoxPeriod.currentText()) self._histogram.set_current_frequency(value) def set_data(self, mt_objs): self._mt_objs = mt_objs self._update_frequency() def _update_frequency(self): if self._mt_objs is not None: all_freqs = [] for mt_obj in self._mt_objs: all_freqs.extend(list(mt_obj.Z.freq)) if self.use_period: all_periods = 1.0 / np.array(all_freqs) # self._histogram.set_data(all_periods) all_unique = sorted(list(set(all_periods))) else: # self._histogram.set_data(all_freqs) all_unique = sorted(list(set(all_freqs))) self._histogram.set_data(all_unique) self._histogram.update_figure() # sort all frequencies in ascending order for period in all_unique: self.ui.comboBoxPeriod.addItem("%.5f" % period) self.ui.comboBoxPeriod.setCurrentIndex(0) self.update_histogram() class FrequencyHistogram(MPLCanvas): def __init__(self, parent=None, width=5, height=2, dpi=100): self.artists = dict() self._frequency = None self._current_frequency = None self._title = None self._unit = None MPLCanvas.__init__(self, parent, width, height, dpi) self._lx = None self.cursor = None # self.mpl_connect('motion_notify_event', self.cursor) self.mpl_connect('button_release_event', self.mouse_pick) self.setMinimumSize(200, 150) self.resize(self.sizeHint()) # def mouse_move(self, event): # if not event.inaxes: # return # x = event.xdata # y = event.ydata # if self._cursor_x is None: # self._cursor_x = self._axes.axvline(linewidth=1, color="green") # if self._cursor_text is None: # self._cursor_text = self._axes.text(0.0, 0.0, '', fontsize=8) # self._cursor_x.set_xdata(x) # self._cursor_text.set_text('period=%.2f' % x) # self._cursor_text.set_position((x, y)) # self.draw() def set_title(self, title): self._title = title def set_unit(self, unit): if unit != self._unit: self._unit = unit self.cursor = Cursor(self._axes, track_y=False, text_format="%f " + self._unit, useblit=True) def mouse_pick(self, event): if not event.inaxes: return x = event.xdata self.set_current_frequency(x) def compute_initial_figure(self): if self._frequency is not None: self._axes.tick_params(axis='both', which='major', labelsize=6) self._axes.tick_params(axis='both', which='minor', labelsize=4) self._axes.hist(self._frequency) # , 50, normed=1) if self._title and self._unit: self._axes.set_xlabel("%s (%s)" % (self._title, self._unit), fontsize=8) self.figure.suptitle('%s Distribution in Selected Stations' % self._title, fontsize=8) self._fig.set_tight_layout(True) def set_data(self, frequency): self._frequency = frequency self._lx = None self._current_frequency = None def set_current_frequency(self, freq): self._current_frequency = freq if self._lx is None: self._lx = self._axes.axvline(linewidth=2, color="red") self._lx.set_xdata(self._current_frequency) # if self._fig.canvas.supports_blit: # self._axes.draw_artist(self._lx) # self._fig.canvas.blit(self._axes.bbox) # else: # self._fig.canvas.draw_idle() self._fig.canvas.draw_idle() def update_figure(self): # clear figure self._axes.cla() self.compute_initial_figure() self.draw()
“We’re seeing almost an explosion of digitally printed folding cartons. For a company like ours which offers stiff paperboard of the highest quality this is a terrific development,” comments Fredrik Lisinski, who is responsible for developing Iggesund Paperboard’s sales to the digital print market. Strong brands led by Coca Cola have shown that the intelligent personalisation or regionalisation of packaging and labels can drive sales. Presses designed for higher grammages as well as new finishing equipment are opening up new possibilities. At the same time, larger sheet formats are paving the way for better economies of scale for digital printing. Invercote, which is Iggesund’s flagship product, has paved the way for digital printing on thicker paper materials since the technology was introduced in 1993. Today’s Invercote G is certified for a variety of digital print technologies and is recognised as the market leader by digital printers when they need a thicker or stiffer material than usual. The rapid development of digital presses and increasingly widespread interest in using digital print on packaging led Iggesund to take the next step with Invercote.
#! /usr/bin/env python import genpy import rospy from std_msgs.msg import String import roslib import roslib.message def sender(msg,reciver, msg_type): #reciver -> String #msg -> obj #rospy.loginfo("sender " + reciver) msg_class = roslib.message.get_message_class(msg_type) pub = rospy.Publisher(reciver, msg_class , queue_size=0) #rospy.loginfo(msg) rate = rospy.Rate(10) x=1 while x<2 : pub.publish(msg) rate.sleep() x=x+1 #rospy.loginfo("sended ") #rospy.spin() def sender1(msg, reciver, msg_type): #reciver -> String #msg -> obj #rospy.init_node('sender', anonymous=True) #rospy.loginfo("sender " + reciver) msg_class = roslib.message.get_message_class(msg_type) pub = rospy.Publisher(reciver, msg_class , queue_size=0) #rospy.loginfo(msg) # type-case using YAML try: pub_args = [] for m in msg: pub_args.append(yaml.load(m)) except Exception as e: parser.error("Argument error: "+str(e)) rate = rospy.Rate(10) msg = msg_class() now = rospy.get_rostime() import std_msgs.msg keys = { 'now': now, 'auto': std_msgs.msg.Header(stamp=now) } genpy.message.fill_message_args(msg, pub_args, keys=keys) x=0 while x<2 : pub.publish(msg) rate.sleep() x=x+1 #rospy.loginfo("sended ") rospy.spin()
The Omnirial75™ by WirEng® is a fully enclosed, sealed, higher gain omnidirectional antenna suitable for a variety of applications. The Omnirial75™ features ultra-high efficiency response on all frequencies between 700MHz and 3800MHz, with the added enhancement of a built-in, large internal ground plane. The Omnirial75™ can be used with a variety of radio equipment systems, including boosters, amplifiers, and repeaters (as the external antenna), and with many other applications in the 700MHz to 3800MHz range. The Omnirial75™ is fully enclosed and all-weather resistant, and is also suitable for the marine environment. The connector type on the Omnirial75™ is F Female. The Omnirial75™ is made in the USA with domestic and imported components and includes a one-year warranty period. Patent pending. The Omnirial75™ is an innovative, omnidirectional, ultra-efficient antenna with built-in ground plane and flatten-toroid radiation pattern. The Omnirial75™ can also be used with other RF devices operating in the 700MHz to 3800MHz range and requiring a nominal impedance of 75 Ohm. The Omnirial75™ operating frequency range is 700-3800 MHz with a peak gain of (varies) dBi*, a horizontal aperture of 360° and a vertical aperture of 65°. The Omnirial75™ nominal impedance is 75 Ohm and the connector is F Female. This is the white version with 75 Ohm impedance (SKU OMR-75-OHM). The Omnirial75™ is mainly recommended for outdoors (pole) applications. *Gain in dBi values are averages increases obtained by comparing the standard/internal device antenna to a properly-installed Omnirial75™ located substantially higher than the device location. The Omnirial75™ measures 8 x 11.5 x 3 inches and weighs 1.7 lbs. Made in the USA with skilled craftsmanship. Patent Pending. One year manufacturer warranty included.
from functools import partial from itertools import izip from csvgb import ( isna, isnum, isexempt, ismissed, isransomed, sum0, drop0, inner0, mean0, K_RANSOMED, K_MISSED, K_EXAM_N, K_HOMEWORK_N, K_QUIZ_N, K_THQ_N, ) def sum_XX_YY(XX, YY, N, s_m_n_f='{}_{:02d}'): s_m = '{}_{:02d}'.format(XX, YY) s_m_o = '{}_override'.format(s_m) s_m_p = '{}_penalty'.format(s_m) s_m_r = '{}_ransom'.format(s_m) s_m_n = [s_m_n_f.format(s_m, n + 1) for n in xrange(N)] def f(row, ALL=[]): r = row.get(s_m_r) if not isna(r): return K_RANSOMED o = row.get(s_m_o) if not isna(o): return o v = sum0([row.get(k) for k in s_m_n]) if isna(v): if s_m in ALL: v = K_MISSED return v p = row.get(s_m_p) if isnum(p): v = max(v - p, 0) return v return s_m, f for m in xrange(K_EXAM_N): s_m, f = sum_XX_YY('exam', m + 1, 12) globals()[s_m] = f for m in xrange(K_QUIZ_N): s_m, f = sum_XX_YY('quiz', m + 1, 3) globals()[s_m] = f for m in xrange(K_THQ_N): s_m, f = sum_XX_YY('thq', m + 1, 3, s_m_n_f='{}_q{}') globals()[s_m] = f def X_all(row, ALL): return [row[k] for k in ALL if not isexempt(row[k])] def X_grade(row, ALL, DROPS, DENOM): m = mean0(drop0(X_all(row, ALL), DROPS)) if not isnum(m): return 'None' return m/DENOM*100.0 homework_grade = partial(X_grade, DENOM=100.0) quiz_grade = partial(X_grade, DENOM=30.0) thq_grade = partial(X_grade, DENOM=30.0) def X_misses_g(row, K): return (k for k in K if ismissed(row[k])) def X_misses_count(row, K): return sum(1 for x in X_misses_g(row, K)) def X_misses_percent(row, K, D): N = len(_all(row, k)) if N <= D: return None M = max(X_misses_count(row, K) - D, 0) return float(M)/N*100.0 def X_ransom_g(row, K): for k in K: s = '{}_ransom'.format(k) if isransomed(row.get(s)): yield k def X_ransom_count(row, K): return sum(1 for x in X_ransom_g(row, K)) LETTER = [ 'F', 'D', 'C', 'B', 'A', ] LETTER_CUTS = [ 60.00, 70.00, 80.00, 90.00, ] LETTER_PM = [ 'F', 'D-', 'D', 'D+', 'C-', 'C', 'C+', 'B-', 'B', 'B+', 'A-', 'A', 'A+', ] LETTER_CUTS_PM = [ 60.00, 63.33, 66.66, 70.00, 73.33, 76.66, 80.00, 83.33, 86.66, 90.00, 93.33, 96.66, ] def letterize(grade, cuts=LETTER_CUTS_PM): if type(cuts) != list: raise Exception("Bad cuts: " + str(cuts)) L = None if len(cuts) == len(LETTER) - 1: L = LETTER elif len(cuts) == len(LETTER_PM) - 1: L = LETTER_PM else: raise Exception("Bad cuts: " + str(cuts)) for c, l in izip(cuts, L): if grade < c: return l return L[-1] _gpa_d = { "A+": 4.000, "A" : 4.000, "A-": 3.670, "B+": 3.330, "B" : 3.000, "B-": 2.670, "C+": 2.330, "C" : 2.000, "C-": 1.670, "D+": 1.330, "D" : 1.000, "D-": 0.670, "F" : 0.000, "NF": 0.000, } def gpaize(grade): v = _gpa_d.get(grade) if v is None: raise Exception("Unknown grade: {}".format(grade)) return v def donothing(row): return None
Get a FREE customised report on Norton Malreward office space, including details about availability and prices. Get a free Norton Malreward office space report, including availability and prices. Register your details to stay up to date on new office spaces in Norton Malreward. Get regular updates and reports on office space in Norton Malreward, as well as contact details for an office expert in Norton Malreward.
#!/usr/bin/env python """ Module containing function related to serialization. """ import json from typing import Any, Dict, Union, cast import numpy as np Serializable = Union[np.ndarray, np.int32, np.int64, np.float32, np.float64, np.float128, set] # A type corresponding to the JSON representation of the object. For a lack of # a better option we use Any JsonRepresentation = Any class NumpyOrSetEncoder(json.JSONEncoder): """ JSON encoder for numpy arrays. Pass this class to json.dumps when converting a dictionary to json so that any field which with a numpy array as value will be properly converted. This encoder will also handle numpy scalars and the native python set types. When you need to convert the json representation back, use the `json_numpy_or_set_obj_hook` function. See Also -------- json_numpy_or_set_obj_hook """ def default(self, obj: Serializable) -> JsonRepresentation: """ If input object is an ndarray it will be converted into a dict holding data, dtype, _is_numpy_array and shape. Parameters ---------- obj : Serializable Returns ------- Serialized Data """ # Case for numpy arrays if isinstance(obj, np.ndarray): return { 'data': obj.tolist(), 'dtype': str(obj.dtype), '_is_numpy_array': True, 'shape': obj.shape } # Case for numpy scalars if isinstance(obj, (np.int32, np.int64)): return int(obj) if isinstance(obj, (np.float32, np.float64, np.float128)): return int(obj) # Case for built-in Python sets if isinstance(obj, set): return {'data': list(obj), '_is_set': True} # If it is not a numpy array we fall back to base class encoder return json.JSONEncoder(self, obj) # type: ignore def json_numpy_or_set_obj_hook( dct: Dict[str, JsonRepresentation]) -> Serializable: """ Decodes a previously encoded numpy array. Parameters ---------- dct : dict The JSON encoded numpy array. Returns ------- np.ndarray | set | dict, optional The decoded numpy array or None if the encoded json data was not an encoded numpy array. See Also -------- NumpyOrSetEncoder """ if isinstance(dct, dict) and '_is_numpy_array' in dct: if dct['_is_numpy_array'] is True: data = dct['data'] return np.array(data) raise ValueError( # pragma: no cover 'Json representation contains the "_is_numpy_array" key ' 'indicating that the object should be a numpy array, but it ' 'was set to False, which is not valid.') if isinstance(dct, dict) and '_is_set' in dct: if dct['_is_set'] is True: data = dct['data'] return set(data) raise ValueError( # pragma: no cover 'Json representation contains the "_is_set" key ' 'indicating that the object should be python set, but it ' 'was set to False, which is not valid.') return dct class JsonSerializable: """ Base class for classes you want to be JSON serializable (convert to/from JSON). You can call the methods `to_json` and `from_json` methods (the later is a staticmethod). Note that a subclass must implement the `_to_dict` and `_from_dict` methods. """ def _to_dict(self) -> Any: """ Convert the object to a dictionary representation. Returns ------- dict The dictionary representation of the object. """ raise NotImplementedError( "Implement in a subclass") # pragma: no cover def to_dict(self) -> Dict[str, Any]: """ Convert the object to a dictionary representation. Returns ------- dict The dictionary representation of the object. """ return cast(Dict[str, Any], self._to_dict()) @staticmethod def _from_dict(d: Any) -> Any: """ Convert from a dictionary to an object. Parameters ---------- d : dict The dictionary representing the object. Returns ------- Result The converted object. """ raise NotImplementedError( "Implement in a subclass") # pragma: no cover @classmethod def from_dict(cls, d: Dict[str, Any]) -> Any: """ Convert from a dictionary to an object. Parameters ---------- d : dict The dictionary representing the Result. Returns ------- Result The converted object. """ return cls._from_dict(d) def to_json(self) -> JsonRepresentation: """ Convert the object to JSON. Returns ------- str JSON representation of the object. """ return json.dumps(self._to_dict(), cls=NumpyOrSetEncoder) @classmethod def from_json(cls, data: JsonRepresentation) -> Any: """ Convert a JSON representation of the object to an actual object. Parameters ---------- data : str The JSON representation of the object. Returns ------- any The actual object """ d = json.loads(data, object_hook=json_numpy_or_set_obj_hook) return cls._from_dict(d) # # xxxxxxxxxx Test and Example Usage xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # if __name__ == '__main__': # expected = np.arange(100, dtype=np.float) # dumped = json.dumps(expected, cls=NumpyOrSetEncoder) # result = json.loads(dumped, object_hook=json_numpy_or_set_obj_hook) # print(type(result)) # print(result)
Basic physical chemistry for the life sciences / [by] Virginia R. Williams [and] Hulen B. Williams. Mathematical appendix by Bill B. Townsend. By: Williams, Virginia R . Contributor(s): Williams, Hulen B .
# # Collective Knowledge (individual environment - setup) # # See CK LICENSE.txt for licensing details # See CK COPYRIGHT.txt for copyright details # # Developer: Leo Gordon, leo@dividiti.com # ############################################################################## # setup environment setup def setup(i): """ Input: { cfg - meta of this soft entry self_cfg - meta of module soft ck_kernel - import CK kernel module (to reuse functions) host_os_uoa - host OS UOA host_os_uid - host OS UID host_os_dict - host OS meta target_os_uoa - target OS UOA target_os_uid - target OS UID target_os_dict - target OS meta target_device_id - target device ID (if via ADB) tags - list of tags used to search this entry env - updated environment vars from meta customize - updated customize vars from meta deps - resolved dependencies for this soft interactive - if 'yes', can ask questions, otherwise quiet } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 bat - prepared string for bat file } """ import os from math import sqrt ck_kernel = i.get('ck_kernel') cus = i.get('customize',{}) full_path = cus.get('full_path','') env_prefix = cus.get('env_prefix','') install_env = cus.get('install_env', {}) detection_mode = len(install_env) == 0 path_install = full_path if os.path.isdir(full_path) else os.path.dirname(full_path) env = i.get('env', {}) env[env_prefix + '_DIR'] = path_install ## Prepend the hidden variables with env_prefix # for varname in install_env.keys(): if varname.startswith('_'): env[env_prefix + varname] = install_env[varname] return {'return':0, 'bat':''}
Purgatory is a set of cartoons done with Mike Cucka for the the Johns Hopkins Newsletter in 1985. They were fairly esoteric and hence not well-liked by the newsletter editor at the time. That makes them more enjoyable to me. Because the cartoon series was fairly esoteric, we featured a series of thumbnail cartoons which were all pun variations on "rockfish". It was a certain sign that someone didn't understand the cartoons when they commented that they enjoyed the rockfish jokes. This cartoon depicts a Halloween scene. It is particularly treasured because it generated an editorial protest letter due to its religious themes. The primary intent was a commentary on hypocritical behavior. I often think of this one when people refer to the proverbial "carrot and stick". Flies are annoying - why not get some revenge by playing a few jokes on them? I like this one simply because it is absurd. This cartoon explors the question: What would it look like if Rumpelstiltskin had a statue? Why indeed would he have one? Simon crossing the road is an observation of squirrels' tendency toward running in front of cars, as well as human inability to learn from others' mistakes. We always wondered why it had to be that the Chuck Wagon always got away. It doesn't have to be that way. Interestingly, I saw Stephen Wright in concert about 5 years after this cartoon, and he made essentially the same joke (I draw no conclusion from this except that good ideas arise in many places). There's never a shortage of folks willing to decide what's good for you on your behalf. This next cartoon relates to a then-popular series of football advertisements in which a video replay is shown and the viewer is invited to "make the call" prior to hearing the official ruling.
class TypeUnion(dict): def _get(self, k): return super().__getitem__(k) def _set(self, k, v): super().__setitem__(k, v) def representative(self, k): while type(self._get(k)) is not int: k = self._get(k) return k def size(self, x): return self._get(self[x]) def compress(self, start, rep): while start is not rep: tmp = self._get(start) self._set(start, rep) start = tmp def __getitem__(self, key): if key not in self: return key rep = self.representative(key) self.compress(key, rep) return rep def join(self, a, b): for x in a, b: if x not in self: self._set(x, 1) ar, br = self.representative(a), self.representative(b) if ar is br: return av, bv = a.isTypeVariable(), b.isTypeVariable() az, bz = self._get(ar), self._get(br) if bz > az if av == bv else av: self._set(ar, br) self._set(br, az + bz) else: self._set(br, ar) self._set(ar, az + bz) def import_dict(self, d): for k, v in d.items(): self.join(k, v) def __setitem__(*a): raise Exception("Don't do that")
Tthe Mexican Caribbean is habitat to several species of snails. Historically the most abundant was Strombus gigas, commonly called the pink snail. For years, this snail has been harvested for its shells and meat, but is now threatened, and its capture is prohibited until 2018. To help its recovery, Anastazia Teresa Banaszak, a researcher at the Academic Unit of reef systems at Puerto Morelos of the National University of Mexico (UNAM), has studied the species since 2009. Shee found that a microalgae improves the survival rate of farmed pink snail larvae. This represents an alternative for production and conservation. Banaszak explained that they already knew of a microalgae in the stomach of the snail. "With that background, we wanted to find out when, exactly, the symbiosis occurs, and thus determines if it influences the survival and growth of the snail." The microalgae measures approximately 10 microns (0.01 millimeters) and performs photosynthesis; it genetically mutates the host products, providing a significant amount of food. Most importantly, the snail has a complex life cycle. The female lays an egg mass where the embryos develop and hatch as larvae within five days. At that time, the snail goes into planktonic phase and swims in the sea. "Our research involved taking samples of microalgae from adult snails and supplying it to larvae at different ages. We found that the best time to apply the algae is 48 hours after hatching, as this is when it can establish the symbiosis. We achieved up to 90 percent survival, as opposed to development without microalgae, which produced less than 1 percent survival rate," said the doctor in aquatic biology. The aim is to perfect the technique of cultivation, execute it at a large scale at low cost and with high efficiency, for industry to benefit from its production and sales, and thus preserve the species. "The most important thing is that we are working to preserve the species and find a more efficient alternative for pink snail farming as a source of income," concluded Dr. Anastazia Banaszak.
from django.db import models from django.utils import timezone import pytz import datetime def hash(n): n = int(n) return ((0x0000FFFF & n)<<16) + ((0xFFFF0000 & n)>>16) class EventInstance(object): def __init__(self, event, event_time, date): self.date = date.date() self.time = date.time() self.event= event self.event_time = event_time self.attending = Signup.objects.filter(event=event, date=self.date, status=Signup.ATTENDING) self.not_attending = Signup.objects.filter(event=event, date=self.date, status=Signup.NOT_ATTENDING) def get_date_id(self): return "%4d_%02d_%02d" % (self.date.year, self.date.month, self.date.day) class Event(models.Model): name = models.CharField(max_length=100) timezone = models.CharField(max_length=50, choices=[(x,x) for x in pytz.all_timezones ], default="US/Mountain") description = models.TextField() location_lat = models.FloatField() location_lon = models.FloatField() addr = models.CharField(max_length=200) city = models.CharField(max_length=100) state = models.CharField(max_length=5) zip = models.CharField(max_length=20) contact_emails = models.CharField(max_length=500, help_text='Comma separated list of email addresses') def __unicode__(self): return self.name def get_next(self): timezone.activate(pytz.timezone(self.timezone)) now = timezone.now().date() events = [ EventInstance(self, d, d.get_next(now)) for d in self.times.all() ] events.sort(key=lambda x:x.date) return events class EventTime(models.Model): DAY_CHOICES = ( (0, "Monday", ), (1, "Tuesday", ), (2, "Wednesday",), (3, "Thursday", ), (4, "Friday", ), (5, "Saturday", ), (6, "Sunday", ), ) event= models.ForeignKey(Event, related_name="times") day = models.IntegerField(choices=DAY_CHOICES) time = models.TimeField() def get_next(self, now): dow = now.weekday() td = datetime.timedelta(days=(self.day - dow) % 7) next_date = now + td return datetime.datetime.combine(next_date, self.time) class Signup(models.Model): ATTENDING = 0 NOT_ATTENDING = 1 status_choices = ( ( ATTENDING , "I'm In", ), ( NOT_ATTENDING, "I'm Out", ), ) event = models.ForeignKey(Event, related_name="signups") date = models.DateField() name = models.CharField(max_length=100) status= models.IntegerField(choices=status_choices, blank=False, default=ATTENDING) def hash(self): return hash(self.pk) class Comment(models.Model): class Meta: ordering = ["-timestamp"] event = models.ForeignKey(Event, related_name="comments") name = models.CharField(max_length=100) comment = models.TextField() timestamp = models.DateTimeField(auto_now_add=True)
The women of Wisconsin have apparently forgotten the story of the Glastonbury sisters [Julia & Abby Smith], two ladies with dauntless tongue the little tyrants and tax-gatherers of their fields withstood up in Connecticut some thirty years ago. You do not let us vote, therefore we will pay no taxes, they cried to the tax man; taxation without representation is unconstitutional and wicked. Get thee hence. The woman suffragists of Wisconsin announce their intention to play the game in that way. They have formed a league, they will take a census of the women taxpayers, the list of names will be published and used as a basis of a “protest to the Legislature against taxation without representation.” Later, when 10,000 names have been secured to a pledge, the women will refuse to pay taxes, and the questions involved will be taken to the courts. This course, it has been reported, has been approved by prominent women lawyers of Wisconsin interested in the women’s suffrage movement. This is about the worst thing that could be said about the women lawyers of Wisconsin. Instead of encouraging the rebellion of their sisters they should advise them that they have got the thing all wrong. They should tell them that the suffrage is not a right, but a privilege, which Legislatures may withhold or confer in their discretion; that the handy phrase, taxation without representation, refers to communities, colonies, or subject States, and has no bearing upon the case of an individual. If the women property owners of Wisconsin, after their property has been duly assessed and tax bills have been presented, defiantly and seditiously refuse to pay the same, the State will in the most matter-of-fact way levy upon their property and sell it for taxes, just as if the owner were a mere man and voter. The Glastonbury sisters up in Connecticut were baffled and routed in their attempt to withhold taxes in order to extort from an unwilling State the privilege of the vote. That is what will happen in Wisconsin, the women lawyers to the contrary notwithstanding. A suffrage referendum failed in Wisconsin in 1912 , but the state was the first in the union to ratify the 19th Amendment granting women the “privilege” to vote in 1920 . We, the tax paying women of Wisconsin, hereby agree to do what we can by protest and argument to emphasize the fact that taxation without representation is tyranny as much for American women today as it was for American colonists in 1778 . And we also pledge ourselves that when 5,000 or more women in Wisconsin shall have similarly enrolled we will simultaneously take action by whatever method may seem best in accordance with official advice from the Wisconsin Suffrage Association to the end that public attention may be thoroughly and effectively called to the injustice and injury done to women by taxing them without giving them any voice as to how their money should be employed. Though there was also talk of a 10,000-signer threshold and of tax resistance as the explicit pre-declared strategy, so perhaps there was a second pledge I haven’t located yet. Dr. Aleck Dodd, discharged Friday as head of pastoral relations of the Toledo Council of Churches because of his refusal to pay federal income taxes in full, will remain in the city and continue his counseling work, he announced today . He was dismissed after the Internal Revenue Bureau had filed a tax lien for $150.47 in unpaid income tax for 1948 against him. Dr. Dodd said he was refusing to pay that part of his income tax as a protest against the country’s military expenditures. In a statement to The Blade today , Dr. Dodd said he and his wife regretted the position in which his action had placed the Council of Churches. “In this matter, I acted as an individual and on the basis of my obedience to the will of God as I understand it,” he said. Dr. Dodd said that the transfer of his property at 727 Grove Place was made to his wife by him in order to “protect my family from the possible results of my action, and not to evade the collection of my tax by due process of law. While I could not voluntarily pay this amount, $150.47, I made no attempt to prevent it being taken from me. The Rev. J. Kenneth Cutler, president of the Toledo Council of Churches, commented on the Dodd case before his congregation in Rosewood Presbyterian Church yesterday. “The Dr. Dodd case is the climax of a long series of things which have caused the confidence of many ministers and laymen in the council to be shaken,” he said. “In the last four and a half years, six major employees of the council have resigned, some of them voluntarily but some of them were practically forced to resign and those within the inner circle of the council know this to be true. Such a turnover is not healthy to any organization. Furthermore, the control and red tape of the council has been stifling to the free and democratic spirit of the Protestant churches. “The by-laws of the council have supposedly been under study for the past two annual meetings but have never been reported back for adoption by the official body of the council. “Because of my deep personal concern, and the concern of many others, to have a stronger and more effective Council of Churches in Toledo whose voice will be listened to and respected, I am willing to risk my future as a pastor in this city to see that this is brought about or to fail in doing it. The Internal Revenue Bureau yesterday in County Recorder’s office filed a tax lien for $150.47, representing unpaid income tax for 1948 , against the Rev. Aleck D. Dodd, 727 Grove Place, head of the Toledo Council of Churches pastoral services. He is one of 43 pacifists who last March declared they would refuse to pay all or a part of their income taxes this year as a protest against the nation’s military expenditures. Mr. Dodd said today the $150.47 represents that part of his income tax which he believes would be used for war purposes. He added he did not know how the Government intends to collect as all of his property is now in his wife’s name. County Recorder’s records show that on Feb. 15 parts of Lots 208 and 209, Scottwood Addition, were transferred by Mr. Dodd to his wife, Ruth. Mr. Dodd has been associated with the church council for five years.
# -*- coding: utf-8 -*- from django.conf import settings # modify reversions to match our needs if required... def reversion_register(model_class, fields=None, follow=(), format="json", exclude_fields=None): """CMS interface to reversion api - helper function. Registers model for reversion only if reversion is available. Auto excludes publisher fields. """ if not 'reversion' in settings.INSTALLED_APPS: return if fields and exclude_fields: raise ValueError("Just one of fields, exclude_fields arguments can be passed.") opts = model_class._meta local_fields = opts.local_fields + opts.local_many_to_many if fields is None: fields = [field.name for field in local_fields] exclude_fields = exclude_fields or [] fields = filter(lambda name: not name in exclude_fields, fields) from cms.utils import reversion_hacks reversion_hacks.register_draft_only(model_class, fields, follow, format) def make_revision_with_plugins(obj, user=None, message=None): from cms.models.pluginmodel import CMSPlugin import reversion """ Only add to revision if it is a draft. """ revision_manager = reversion.revision cls = obj.__class__ if cls in revision_manager._registry: placeholder_relation = find_placeholder_relation(obj) if revision_manager.is_active(): # add toplevel object to the revision revision_manager.add(obj) # add plugins and subclasses to the revision filters = {'placeholder__%s' % placeholder_relation: obj} for plugin in CMSPlugin.objects.filter(**filters): plugin_instance, admin = plugin.get_plugin_instance() if plugin_instance: revision_manager.add(plugin_instance) revision_manager.add(plugin) def find_placeholder_relation(obj): return 'page'
idebate2 has not yet been in a debate. Post a comment to idebate2's profile. idebate2 does not have any Debate.org friends. idebate2 has not added any photo albums. If you are logged in, you will also see green or red bullets next to each issue, which indicate whether you agree or disagree with idebate2 on that particular issue. You can also click each issue to find other members that agree with idebate2's position on the issue.
#!/usr/bin/env python3 from gi.repository import GObject import dbus import dbus.service import dbus.glib class SampleObject(dbus.service.Object): def __init__(self, bus_name, object_path="/"): dbus.service.Object.__init__(self, bus_name, object_path) @dbus.service.method("net.lizenn.dbus.SampleInterface", in_signature='s', out_signature='as') def HelloWorld(self, hello_message): print (str(hello_message)) self.SampleSignal(42, 24) self.SampleSignal2() return ["Hello World", " from example-service.py"] @dbus.service.method("net.lizenn.dbus.SampleInterface", out_signature='as') def GetTuple(self): return ("Hello Tuple", " from example-service.py") @dbus.service.method("net.lizenn.dbus.SampleInterface") def GetDict(self): return {"first": "Hello Dict", "second": " from example-service.py"} @dbus.service.method("net.lizenn.dbus.SampleInterface", in_signature='u') def GetString(self, size): s = "" i = size while (i > 0): s += "x" i -= 1 return s @dbus.service.signal("net.lizenn.dbus.SampleInterface") def SampleSignal(self, x, y): print("SampleSignal") @dbus.service.signal("net.lizenn.dbus.SampleInterface") def SampleSignal2(self): print("SampleSignal2") session_bus = dbus.SessionBus() service = dbus.service.BusName("net.lizenn.dbus.SampleService", bus=session_bus) SampleObject(service, object_path="/root") SampleObject(service, object_path="/root/child1") SampleObject(service, object_path="/root/child2/little1") SampleObject(service, object_path="/root/child2/little2") mainloop = GObject.MainLoop() mainloop.run()
“PNRPU Bulletin. Urban development”- has been published since 2011 and the “PNRPU Bulletin, Applied ecology. Urban development” has been published since 2014, and is a periodical published scientific peer-reviewed journal. The Journal “PNRPU Bulletin, Applied ecology. Urban development” is registered by the federal service for supervision of communications, information technology, and mass media. Certificate No. FS 77-59186 of September 3, 2014. The Journal is published by Perm National Research Polytechnic University, Perm, Russian Federation is involved in the Russian Science Citation Index project. On the web http://elibrary.ru one can search the journal according to its full title- the bulletin of the perm national research polytechnic university. Applied ecology. Urban development. The journal covers environmental aspects of the urbanistics as well as the urban planning, and architectural engineering which are interrelated with the technosphere safety problems, urban city feel and social development processes. The journal is for the lecturers who are teaching such disciplines as civil engineering, environmental management, social and economics ones as well as for the young researchers and students. The journal is focused on the wide variety of specialists who are involved in such areas as planning the settlements, municipal facilities, urban planning environment and economics, social and economic activities.
import os import xsms.util as util from xsms.command import Command from xsms.config import conf root_dir = os.path.dirname(os.path.abspath(__file__)) def test_command_object(): command = Command(conf=conf) assert isinstance(command, Command) assert command.conf['xsms_config_root'] == os.path.expanduser('~/.xsms') def test_command_generate_engine_configs(): command = Command(conf=conf) command.generate_engine_configs() assert os.path.exists(conf['supervisor_conf']) def test_command_generate_server_configs(): command = Command(conf=conf) command.generate_server_configs() assert os.path.exists('{}/insta.cfg'.format(conf['xsms_generated_servers_root'])) def test_command_generate_custom_server_configs(): util.check_if_not_create('{}/insta.cfg.tpl'.format(conf['xsms_templates_servers_root']), '{}/data/servers/custom.cfg.tpl'.format(root_dir)) command = Command(conf=conf) command.generate_server_configs() assert os.path.exists('{}/insta.cfg'.format(conf['xsms_generated_servers_root']))
I have very fond memories of my first solo exhibition, held in the Galeria Chavez de la Rosa in Arequipa, southern Peru. Soon to be my friend and mentor, galerist Carlos Ticona, himself a highly accomplished watercolour painter, extended the invitation of featuring me as an international visiting artist. Hereafter I regularly exhibited at the gallery in numerous group exhibitions. Everything must be perfect, and with 30m of canvas to touch up I settled in for the afternoon. It's all going to plan. TV Panamericana returned for the vernissage to film and interview myself and the visitors. This is what 30m of canvas looks like! TV Panamericana came along during the hanging of the exhibit to film a small reportage piece on the exhibition. It took a bit of engineering and ingenuity to get the exhibit to hang straight but we managed! The beautiful white volcanic rock called 'sillar' gives all of Arequipa a very unique look. In this gallery setting its qualities were a particularly striking fit as a backdrop for the exhibit. I was particularly struck by this couple taking in the work with such enthusiasm! In fledgling Spanish I hope to have uttered intelligible thanks and acknowledgements. Off to view the exhibition!
def pytest_configure(): from django.conf import settings settings.configure( DEBUG_PROPAGATE_EXCEPTIONS=True, DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}}, SITE_ID=1, SECRET_KEY='not very secret in tests', USE_I18N=True, USE_L10N=True, STATIC_URL='/static/', ROOT_URLCONF='tests.urls', TEMPLATES=[ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # insert your TEMPLATE_DIRS here ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this # list if you haven't customized them: 'django.contrib.auth.context_processors.auth', 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.contrib.messages.context_processors.messages', ], }, } ], MIDDLEWARE=( 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ), INSTALLED_APPS=( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'wagtail.contrib.forms', 'wagtail.contrib.redirects', 'wagtail.embeds', 'wagtail.sites', 'wagtail.users', 'wagtail.snippets', 'wagtail.documents', 'wagtail.images', 'wagtail.search', 'wagtail.admin', 'wagtail.core', 'wagtailmodelchooser', 'modelcluster', 'taggit', 'tests', ), PASSWORD_HASHERS=( 'django.contrib.auth.hashers.SHA1PasswordHasher', 'django.contrib.auth.hashers.PBKDF2PasswordHasher', 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', 'django.contrib.auth.hashers.BCryptPasswordHasher', 'django.contrib.auth.hashers.MD5PasswordHasher', 'django.contrib.auth.hashers.CryptPasswordHasher', ), ) try: import django django.setup() except AttributeError: pass
Sign Up To Our Newsletter. Ibanez SR505 SR Series 5-String Electric Bass Guitar Brown w/Stand, Tuner Πck. This bundle includes: (1) Ibanez SR505 SR Series 5-String Electric Bass Guitar (Brown Mahogany) (1) Front Row TU-01 Clip-on Chromatic Guitar Tuner (1) Front Row GS-01 Folding Metal Guitar Stand (1) Front Row Pick Sampler. The SR series SR505 from Ibanez is a 5-string electric bass guitar featuring a slim-neck design, a mahogany body, and a brown mahogany finish. The deep cutaway body allows for unencumbered access to the upper regions of the fingerboard, ensuring solid playability. The SR5 5pc neck couples jatoba with. To create stability and strength in a thin design. The warm and clear Bartolini MK-1 split-coil pickups offer a tight bottom end and a wide frequency range with enhanced definition, while the 3-band active EQ provides further tone shaping with dedicated bass, treble, and mid controls. Additionally, a 3-way mid frequency switch (250 Hz, 450 Hz, or 700 Hz) further expands the mid-range shaping capabilities for a more dynamic and ideal tone. Front Row tuner is designed to be easily seen on stage. With the intelligent robot formative design and bright. LCD, this tuner will definitely be a hit with the modern musician. 360-degree rotatable clip can adjust at your suitable angle, and is easy to clip onto your instrument. Operation makes it very simple to use. Fantastic music accessory with great accuracy, precision. Front Row guitar stand combines durability and stability. This A Framed universal guitar stand folds for easy storage or travel and is made from a lightweight and durable metal. The stand features soft, foam padding along the yoke and on the back and foam rests to protect your guitar from scratches. The non-slip rubber feet ensure stability on any bare-floor surface, plus protect floors from getting scuffed when moving or adjusting the stand. Front Row custom picks are highly durable with. Memory and depending on which gauge, just the right amount of flexibility. Whether you're playing electric leads or acoustic chords our pick sampler will provide you with the perfect pick. Comes in three different sizes. For over 30 years, Front Row Electronics has been dedicated to getting you the absolute highest quality products, and doing so with our renowned friendly customer service. We are proud to be an authorized dealer of every brand name we carry including Bose, Pioneer, Yamaha, Fender, Shure, QSC, Roland, Zoom and many more. For the best seats in the house, shop with Front Row Electronics! FrontRow proudly ships to our Armed Forces APO/FPO customers. It must be sent insured and by a traceable means e. When repacking the product box, avoid using duct tape or other adhesives that can leave a residue. Sometimes things get damaged en route, and if that happens, we want you to be covered. Please let us know how we can make yours a five-star rated one too! Our goal is to make every customer 100% satisfied with their order. If you are not satisfied, please let us know before leaving feedback. This listing is currently undergoing maintenance, we apologise for any inconvenience caused. The item "Ibanez SR505 SR Series 5-String Electric Bass Guitar Brown withStand, Tuner Πck" is in sale since Tuesday, November 13, 2018. This item is in the category "Musical Instruments & Gear\Guitars & Basses\Bass Guitars". The seller is "frontrowelectronics" and is located in New York, New York.
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import unicode_literals from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Template' db.create_table('template_template', ( ('reusableplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')( to=orm['wiki.ReusablePlugin'], unique=True, primary_key=True)), ('current_revision', self.gf('django.db.models.fields.related.OneToOneField')( blank=True, related_name='current_set', unique=True, null=True, to=orm['template.TemplateRevision'])), ('template_title', self.gf('django.db.models.fields.SlugField') (unique=True, max_length=50)), ('extend_to_children', self.gf( 'django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('template', ['Template']) # Adding model 'TemplateRevision' db.create_table('template_templaterevision', ( ('id', self.gf('django.db.models.fields.AutoField') (primary_key=True)), ('revision_number', self.gf( 'django.db.models.fields.IntegerField')()), ('user_message', self.gf( 'django.db.models.fields.TextField')(blank=True)), ('automatic_log', self.gf( 'django.db.models.fields.TextField')(blank=True)), ('ip_address', self.gf('django.db.models.fields.IPAddressField') (max_length=15, null=True, blank=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')( to=orm['auth.User'], null=True, on_delete=models.SET_NULL, blank=True)), ('modified', self.gf('django.db.models.fields.DateTimeField') (auto_now=True, blank=True)), ('created', self.gf('django.db.models.fields.DateTimeField') (auto_now_add=True, blank=True)), ('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[ 'template.TemplateRevision'], null=True, on_delete=models.SET_NULL, blank=True)), ('deleted', self.gf('django.db.models.fields.BooleanField') (default=False)), ('locked', self.gf('django.db.models.fields.BooleanField') (default=False)), ('template', self.gf('django.db.models.fields.related.ForeignKey') (to=orm['template.Template'])), ('template_content', self.gf( 'django.db.models.fields.TextField')(blank=True)), ('description', self.gf( 'django.db.models.fields.TextField')(blank=True)), )) db.send_create_signal('template', ['TemplateRevision']) def backwards(self, orm): # Deleting model 'Template' db.delete_table('template_template') # Deleting model 'TemplateRevision' db.delete_table('template_templaterevision') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Group']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'user_set'", 'blank': 'True', 'to': "orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'template.template': { 'Meta': {'object_name': 'Template', '_ormbases': ['wiki.ReusablePlugin']}, 'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['template.TemplateRevision']"}), 'reusableplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ReusablePlugin']", 'unique': 'True', 'primary_key': 'True'}), 'template_title': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}), 'extend_to_children': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), }, 'template.templaterevision': { 'Meta': {'ordering': "('created',)", 'object_name': 'TemplateRevision'}, 'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['template.TemplateRevision']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'revision_number': ('django.db.models.fields.IntegerField', [], {}), 'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['template.Template']"}), 'template_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'wiki.article': { 'Meta': {'object_name': 'Article'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_articles'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}) }, 'wiki.articleplugin': { 'Meta': {'object_name': 'ArticlePlugin'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'wiki.articlerevision': { 'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}), 'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}), 'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'revision_number': ('django.db.models.fields.IntegerField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}) }, 'wiki.reusableplugin': { 'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']}, 'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}), 'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"}) } } complete_apps = ['template']
NEW DELHI: In one of the most dramatic developments in the recent past, the Tata Sons on Monday dumped its chairman Cyrus Mistry with immediate effect and made Ratan Tata the interim chairman. Tata will head the group for the next four months, till a search committee finds a new successor. Mistry's journey with the Tata group has been a short one, with both his entry and exit being a surprise move. When he was inducted as the chairman, the reactions were as surprising as they are today on his sudden exit. The reason being that Mistry himself was part of the panel that was tasked with finding a successor to the then chairman Ratan Tata. And now, his four-year tenure has been cut short abruptly. His tenure as the chairman of the Tata Sons has been the shortest at the conglomerate. Mistry, the only second non-Tata to take charge of the group after Nowroji Saklatvala in 1932, had to deal with various challenges in both Indian and global markets since he took charge. Speculations are rife about the reasons for his sudden exit. The reasons range from reported differences between the Tata and Mistry to the company's declining performance under him. While no reason was given for Mistry's sudden ouster, a group spokesperson said, "The Tata Sons board in its collective wisdom and on the recommendation of principal shareholder decided that it may be appropriate to consider a change for the long term interest of Tata Sons and the Tata group." Here's a look at the possible factors that may have led to Tata Sons board taking this surprise decision against Cyrus Mistry in 'long-term interest'. There were a few contrasts in the way Tata Sons operated under Mistry when compared to the tenure of his predecessor, Ratan Tata. For example, Mistry undertook a strategy of divesting assets in contrast to what Ratan Tata did during his 21 years of chairmanship. During Ratan Tata's chairmanship, the company was driven by his expansionist strategy that included overseas purchases like tea maker Tetley in 2000 and luxury car company Jaguar Land Rover (JLR) in 2008. Mistry, on the other hand, was more focussed at tackling mounting debt by raising cash, refinancing loans and selling assets after writing them down. Another stark contrast between Ratan Tata and Mistry's tenure is the conglomerate's market cap. During Ratan Tata's tenure, the group's market cap rose from Rs 8,000 crore in 1991 to over Rs 4.62 lakh crore in December 2012. This was a mighty 57-fold increase. On the other hand, Tata Group currently commands a listed market capitalisation of over $125 billion (close to Rs 8.5 lakh crore). This value was nearly Rs 4.6 lakh crore in December 2012 when Mistry took over from Ratan Tata. The means the market cap doubled while Mistry was in-charge. It is also believed that Tata Sons was unhappy with Mistry's approach of shedding non-profit businesses, including the conglomerate's steel business in Europe, and concentrating only on cash cows. Mistry's decision to sell Tata Steel UK in the wake of mounting losses, might also have been a sore spot within the organization. Earlier this year, the company had completed selling of the European long products business that three units the UK as well as a mill in France to Greybull Capital. There are also reports that over the past 6 months, serious differences had emerged between Ratan Tata and Cyrus Mistry.
import os import subprocess import ioutils def ebool(x): if x: return 'T' else: return 'F' class Commander: def __init__(self): self.use_valgrind = False self.timeout = -1 def run_cmd(self, args, read_stdout = True, read_stderr = True): run_dict = { 'args': args } if read_stderr: run_dict['stdout'] = subprocess.PIPE if read_stderr: run_dict['stderr'] = subprocess.PIPE return (res.returncode, res.stdout, res.stderr) ''' Returns tuple (valid, errs, res) valid: bool, true if no errors errs: string errors res: subprocess object ''' def run_test(self, cmd, cwd = None, code = None, env = None, has_stdout = None, stdout = None, stdout_file = None, has_stderr = None, stderr = None, stderr_file = None): if env != None: new_env = os.environ.copy() for key in env.keys(): new_env[key] = env[key] env = new_env res = subprocess.run(cmd, stdout = subprocess.PIPE, stderr = subprocess.PIPE, cwd = cwd, env = env) cmd_code = res.returncode cmd_stdout = res.stdout cmd_stderr = res.stderr errs = [] if code != None and code != cmd_code: errs.append(('CODE', cmd_code, code)) if has_stdout != None and (len(cmd_stdout) == 0) == has_stdout: errs.append(('HAS_STDOUT', ebool(len(cmd_stdout)), ebool(has_stdout))) if stdout != None and cmd_stdout.decode('ascii') != stdout: errs.append(('STDOUT', '.', '.')) if stdout_file != None and not ioutils.file_content_is(cmd_stdout, stdout_file): errs.append(('STDOUT_FILE', '.', '.')) if has_stderr != None and (len(cmd_stderr) == 0) == has_stderr: errs.append(('HAS_STDERR', ebool(len(cmd_stderr)), ebool(has_stderr))) if stderr != None and cmd_stderr.decode('ascii') != stderr: errs.append(('STDERR', '.', '.')) if stderr_file != None and not ioutils.file_content_is(cmd_stderr, stderr_file): errs.append(('STDERR_FILE', '.', '.')) return (len(errs) == 0, errs, res)
In developing a website, links are most commonly understood in their most basic function – they allow a user to get from one page to another. Consequently, website developers will include links to pages in the navigation menu, side widgets, footers, and occasionally within the text as well. But many don’t consider the effects internal linking may have on a page’s search ranking performance. In addition to enabling a user to travel from one page to another, links will also help establish a hierarchy amongst pages, as well as spread link equity – most commonly sourced from the home page – throughout the website. By prioritizing links to one page over a similar page, SEO strategists can help search engines understand which pages are more important, especially when two pages overlap in topic. In the summer of 2017, Today’s Business took on a new medical client based in New Jersey. One of the first issues which needed to be addressed was why a core service page was no longer ranking, while a similar page was ranking but rather poorly. The two pages were very similar; page 2 was the service of page one, but with an additional element. Content, therefore, was very similar, as were header tags and title tags. There was no discernible difference in the quality of the content, nor were there other sites specifically linking to either page. Finally, the url structure was exactly the same, with the exception of a single word. Further investigation of the site’s internal linking structure revealed the issue. Whether intentionally or not, page 2 was being linked to noticeably more. In the chart above, we can see that through the navigation menu and footer, each page was being linked to on every page once. But only page 2 was being linked to by other methods. If fact, page 2 was one of the most linked-to pages on the website. Because of this, page 2 was being raised in the website hierarchy. Note: The rankings above were calculated by averaging out multiple similar variations of the core query, weighting certain query permutations based on search volume. This was a big problem, since page 1’s service was one of the client’s most important. Today’s Business immediately went to work to amend the situation. We increased internal linking throughout the site, adding several thousand links. Pages that were linking to page 2 were replaced with links to page 1. We also highlighted eight core services within the side navigation widget. This linked to page 1, but not page 2. As you can see, while both pages gained links, the number of links to page 1 increased significantly. While this did dissolve some of the link equity for the site, it also increased the contextualization search engines could have for the pages, since the anchor text being utilized was unique for each page. By December, both pages were ranking on the first page of search results back-to-back for most locations in New Jersey. Consequently, we saw a projected click rate of 13.5%, rather than the normal ~ 7 percent often seen for a listing ranking at 6 or 7. The inclusion of both pages was a pleasant surprise, but not out of the realm of reality, since the pages were so similar. When it comes to having core service pages that experience significant overlap, our case study lends credence to the reality that your internal linking strategy matters. At Today’s Business, we consistently strive to let no detail go unnoticed. In this case, it paid off in spades. In part due to our internal linking strategy, our client saw its impressions finally crest the 100,000 impression mark. In a competitive market such as medicine, every facet of every page needs to be assessed and fine-tuned. It doesn’t happen as quickly as some may expect, but following proper SEO practices forms the foundation of a high performing website.
import json from django.core.urlresolvers import reverse from rest_framework.decorators import api_view from rest_framework.response import Response from rest_framework import status from bluebottle.test.utils import BluebottleTestCase from bluebottle.test.factory_models.slides import SlideFactory, \ DraftSlideFactory from bluebottle.test.factory_models.accounts import BlueBottleUserFactory class SlideTestCase(BluebottleTestCase): """ Base class for test cases for ``slide`` module. The testing classes for ``slide`` module related to the API must subclass this. """ def setUp(self): super(SlideTestCase, self).setUp() self.user = BlueBottleUserFactory.create() self.slide1 = SlideFactory.create(author=self.user, language='nl') self.slide2 = SlideFactory.create(author=self.user, language='en') self.slide3 = DraftSlideFactory.create(author=self.user, language='en', ) class SlideListTestCase(SlideTestCase): """ Test case for ``SlideList`` API view. Endpoint: /api/textwallposts/ """ def test_slides_list(self): """ Ensure we return list of published slides list. """ response = self.client.get(reverse('slide_list')) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 2) def test_api_slides_list_filtered(self): """ Ensure slides can be filtered by language """ response = self.client.get(reverse('slide_list'), {'language': 'en'}) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data['count'], 1) def test_slides_list_data(self): """ Ensure get request returns record with correct data. """ response = self.client.get(reverse('slide_list'), {'language': 'nl'}) slide = response.data['results'][0] self.assertEqual(slide['title'], self.slide1.title) self.assertEqual(slide['body'], self.slide1.body) self.assertEqual(slide['author'], self.slide1.author.id) self.assertEqual(slide['status'], self.slide1.status) self.assertEqual(slide['sequence'], self.slide1.sequence)
What makes Origin PC's Neuron gaming desktop so special is how customizable it is. The boutique manufacturer will outfit this baby with pretty much anything you want, and for $2,490 (starting at $1,505), you can get our Ryzen 7 2700X and RTX 2080 configuration that delivered powerful graphics and CPU performance. And if you want to customize it yourself in the future, the innards are easily accessible via a magnetic glass door. The Neuron doesn't include any peripherals, and the fans get pretty loud even when the machine isn't performing rigorous tasks. But overall, the Neuron is a solid RTX gaming desktop that you can truly make your own. The Neuron's pearly-white chassis is smooth, but it reminds me of a dentist's office. Thankfully, the PC's design is fully customizable at the point of purchase. The front panel has a glowing Origin logo at the top and a power button at the bottom accompanied by a few select ports. The left and right end of the panel stylishly curve inward to meet the rest of the chassis. The left side has a glass door showcasing the guts of the machine, while the right side features a flat white panel accented with a hexagonal pattern stripped along the side for ventilation. What makes Origin PC's Neuron gaming desktop so special is how customizable it is. The top and bottom panel are covered in the same hexagonal patterns. The bottom holds four bumper magnets, but you can easily move them to the right panel and lay the desktop flat. It was alarming to see the back plating for the ports because it looked so janky and uneven. At 16.5 x 15.3 x 6.8 inches, the Neuron is a decent size compared with the Corsair Vengeance 5180 Gaming PC (15.7 x 10.9 x 13.8 inches) and the MSI Trident X (15.6 x 15.1 x 5.1 inches). The Neuron is chock-full of ports. Its motherboard holds room for four USB 3.1 ports, four USB 2.0 ports, an HDMI port, a DVI port, an RJ45 port and audio ports for analog line level input, analog line level output and microphone input. The graphics port has three DisplayPorts, an HDMI port and one USB Type-C port. There are also two USB 3.1 ports and microphone and headphone inputs on the front panel. You can get into the Neuron through its magnetic glass door on the left-hand side, where you can easily upgrade most of the components, like the CPU, GPU and RAM. You have to take an additional panel off on the bottom (the PSU shroud) to access the drives and power supply (it's locked in with three Phillips head No. 2 screws). The Neuron ran Battlefield V at 85 frames per second at max settings as I blasted Nazis from miles away with my tank shells. Note that the right panel does not come off, so it's a little difficult to manage your cables if you want to customize them yourself. Armed with an Nvidia GeForce RTX 2080 Founder's Edition GPU with 8GB of VRAM, the Neuron ran Battlefield V at about 85 frames per second on Ultra at 1080p with ray-tracing on as I blasted Nazis from miles away with my tank shells. On the Rise of the Tomb Raider benchmark (Very High, 1080p), the Neuron nailed 89 fps, surpassing the 79-fps gaming desktop average. The RTX 2080-powered Vengeance and Trident both did better, at 91 fps each. At 4K, the Neuron pushed 28 fps, which is under the 37-fps category average as well as our minimum playability threshold of 30 fps. It matched the Vengeance (28 fps), but the Trident scraped across to the minimum 30 fps. The Neuron ran the Hitman benchmark (Ultra, 1080p) at 127 fps. And while that climbs over the 103-fps category average, the Vengeance (136 fps) and the Trident (143 fps) produced smoother frame rates. When running the benchmark in 4K, the Neuron hit 77 fps, which falls below the 81-fps category average. On the Grand Theft Auto V benchmark (Very High, 1080p), the Neuron scored 99 fps, defeating the category average (95 fps) and the Trident (89 fps), but coming up short of the Vengeance (110 fps). Bumping the game to 4K, the Neuron matched the Trident at 35 fps and beat the Vengeance's 33 fps, but all of them fell short of the 43-fps category average. The Neuron, along with the Vengeance and Trident, scored a perfect 11 on the SteamVR Performance Test, toppling the 9.5 category average. The Neuron had no problem blazing through 40 Google Chrome tabs and three 1080p YouTube videos while Battlefield V ran in the background, thanks to its AMD Ryzen 7 2700X processor with 16GB of RAM. However, the system's fans produced quite a bit of noise, and would constantly rev up and down while I was gaming and even when performing normal tasks, which was incredibly annoying. On the Geekbench 4.3 overall performance test, the Neuron scored 27,936, sailing past the 22,906 gaming desktop average. The Vengeance's Intel Core i7-8700 (24,600) fell short of the Neuron, while the Trident X’s Intel Core i9-9900K (31,581) left the competition in the dust. The Neuron matched 65,000 names and addresses in 28 seconds on our Excel test, which is a few seconds ahead of the category average (0:32). While the Vengeance (0:33) fell behind, the Trident (0:25) crawled ahead of the Neuron. The Neuron transcoded a 4K video to 1080p in just 5 minutes and 50 seconds, which surpasses the 6:54 category average but not the Trident's speedy 5:38. Origin's 512GB SSD copied 4.97GB of data in just 8 seconds, translating to 636 megabytes per second and destroying the 414 MBps category average. The Vengeance (480GB SSD) and Trident (512GB SSD) both have a transfer rate of 424 MBps. Origin PC offers this rig fully customizable, offering both AMD and Intel motherboard options with a variety of CPU choices for either manufacturer. The Origin PC Neuron that I tested cost $2,490 and comes with an AMD Ryzen 7 2700X processor, an Nvidia GeForce RTX 2080 Founder's Edition GPU with 8GB of VRAM, 16GB of RAM, a 512GB SSD and a 2TB 7,200-rpm HDD. However, for $2,490, I expected included peripherals, even cheap ones. The base model cost $1,505, and comes with an AMD Ryzen 5 2600 processor, a RTX 2060 with 6GB of VRAM, 16GB of RAM, 250GB SSD and a 2TB HDD (with free promotion). Meanwhile, the ultimate version will run you $11,621, which is outfitted with a AMD Ryzen Threadripper 2950X CPU, an RTX Titan GPU with 24GB of VRAM, 64GB of RAM and five 4TB SSDs. There are also options for the GTX 1660 TI, RTX 2060, RTX 2070, RTX 2080 Ti and even dual RTX 2080 or RTX 2080 Tis. The most powerful among them is the RTX Titan GPU, as well as either of the dual cards. The Neuron also offers tons of cosmetic add-ons. You can pay an additional $329 for a metallic print, which comes in six colors, including Atmos Blue, Sabot Grey, Cyborg Green, Eternal Pink, Epic Purple and Horizon Yellow. You also have the option of getting either the $199 HD UV glass print or a $249 HD UV textured print of Origin's stylistic neurons. With the glass print, you can get it customized, but it'll likely cost more. You can combo one of those with $41 RGB lighting. Origin PC includes a one-year limited warranty with the Neuron. The Origin PC Neuron ($2,490 as tested, $1,505 starting) is the pinnacle of gaming PC customizability. On top of that, it offers great overall performance and an easy-to-open panel for future upgrading. But for the price, it should come with some peripherals, and it doesn't help that the fan is loud. For a similar price ($2,499 as tested, $1,999 starting), you can get the MSI Trident X, which offers a slimmer design, included peripherals and performance on a par with the Neuron. But if you want a powerful PC that you can truly make your own with a ton of internal and external customization options, the Origin PC Neuron delivers. With the seemingly endless customization options, the Neuron is a solid choice for those who want full control of what goes in and out of their PC.
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # The MIT License # # Copyright (c) 2016 Grigory Chernyshev # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # ############################################################################### import json from yagocd.resources import Base, BaseManager from yagocd.resources.job import JobInstance from yagocd.util import since @since('15.2.0') class AgentManager(BaseManager): """ The agents API allows users with administrator role to manage agents. `Official documentation. <https://api.go.cd/current/#agents>`_ :versionadded: 15.2.0. :warning: Please note that this API requires using v4 of the API using `Accept: application/vnd.go.cd.v4+json` """ RESOURCE_PATH = '{base_api}/agents' ACCEPT_HEADER = 'application/vnd.go.cd.v4+json' VERSION_TO_ACCEPT_HEADER = { '16.1.0': 'application/vnd.go.cd.v1+json', '16.7.0': 'application/vnd.go.cd.v2+json', '16.9.0': 'application/vnd.go.cd.v3+json', } def __iter__(self): """ Method add iterator protocol for the manager. :return: an array of agents. :rtype: list of yagocd.resources.agent.AgentEntity """ return iter(self.list()) def __getitem__(self, uuid): """ Method add possibility to get agent by the uuid using dictionary like syntax. :param uuid: uuid of the agent :return: Agent entity. :rtype: yagocd.resources.agent.AgentEntity """ return self.get(uuid=uuid) def list(self): # noqa """ Lists all available agents, these are agents that are present in the <agents/> tag inside cruise-config.xml and also agents that are in Pending state awaiting registration. :versionadded: 15.2.0. :return: an array of agents. :rtype: list of yagocd.resources.agent.AgentEntity """ response = self._session.get( path=self.RESOURCE_PATH.format(base_api=self.base_api), headers={'Accept': self._accept_header()}, ) agents = list() # Depending on Go version, return value would be either list of dict. # Support both cases here. json_response = response.json() if isinstance(json_response, list): agents_json = json_response elif isinstance(json_response, dict): agents_json = json_response.get('_embedded', {}).get('agents', {}) else: raise ValueError("Expected response to be in [list, dict], but '{}' found!".format(json_response)) for data in agents_json: agents.append(AgentEntity(session=self._session, data=data)) return agents def dict(self): # noqa """ Wrapper for `list()` method, that transforms founded agents to dictionary by `uuid` key. :return: dictionary of agents with `uuid` as a key and agent as a value. :rtype: dict[str, yagocd.resources.agent.AgentEntity] """ agents = self.list() result = dict() for agent in agents: result[agent.data.uuid] = agent return result def get(self, uuid): """ Gets an agent by its unique identifier (uuid). :versionadded: 15.2.0. :param uuid: uuid of the agent :return: Agent entity. :rtype: yagocd.resources.agent.AgentEntity """ response = self._session.get( path=self._session.urljoin(self.RESOURCE_PATH, uuid).format( base_api=self.base_api ), headers={'Accept': self._accept_header()}, ) return AgentEntity(session=self._session, data=response.json()) def update(self, uuid, config): """ Update some attributes of an agent. :versionadded: 15.2.0. :param uuid: uuid of the agent :param config: dictionary of parameters for update :return: Agent entity. :rtype: yagocd.resources.agent.AgentEntity """ response = self._session.patch( path=self._session.urljoin(self.RESOURCE_PATH, uuid).format( base_api=self.base_api ), data=json.dumps(config), headers={ 'Accept': self._accept_header(), 'Content-Type': 'application/json' }, ) return AgentEntity(session=self._session, data=response.json()) def delete(self, uuid): """ Deletes an agent. :versionadded: 15.2.0. :param uuid: uuid of the agent. :return: a message confirmation if the agent was deleted. """ response = self._session.delete( path=self._session.urljoin(self.RESOURCE_PATH, uuid).format( base_api=self.base_api ), headers={'Accept': self._accept_header()}, ) return response.json().get('message') @since('14.3.0') def job_history(self, uuid, offset=0): """ Lists the jobs that have executed on an agent. :versionadded: 14.3.0. :param uuid: uuid of the agent. :param offset: number of jobs to be skipped. :return: an array of :class:`yagocd.resources.job.JobInstance` along with the job transitions. :rtype: list of yagocd.resources.job.JobInstance """ response = self._session.get( path=self._session.urljoin(self.RESOURCE_PATH, uuid, 'job_run_history', offset).format( base_api=self.base_api ), headers={'Accept': 'application/json'}, ) jobs = list() for data in response.json()['jobs']: jobs.append(JobInstance(session=self._session, data=data, stage=None)) return jobs class AgentEntity(Base): pass
The Austrian president Dr. Alexander van der Bellen is taking over the patronage of the 11th edition of this human world- International Human Rights Film Festival. Vienna’s first international human rights film festival is a platform for cinematic art and critical discourse, a space of encounter and dialogue, and a meeting point for young innovative filmmakers, both local and international. We consider cinema not only an artistic form wherein narrative experiences are per- formed or created, but also an opportunity for the audience to step out of their comfort zone and into dialogue with challenging new realities. We want to encourage ourselves and our audience not to look the other way, but to face the atrocities and injustices of this world and not to be afraid, as fear paralyses. Looking away does not make things disappear, but rather fuels their presence. We hope to inspire solidarity in everyday life as well as in a global context. Together we can break the silence. In this year's programme, once again we present a myriad of perspectives and approaches to socio-political and socially critical topics through different narrative forms and formats.