text
stringlengths 29
850k
|
|---|
"""
Example using brieflz
I have another example on a fork of BriefLz with test python scripts here: https://github.com/sysopfb/brieflz
"""
from ctypes import *
import binascii
import zlib
import struct
try:
brieflz = cdll.LoadLibrary('./blzpack_lib.so')
except OSError:
brieflz = cdll.LoadLibrary('./qakbot/blzpack_lib.so')
DEFAULT_BLOCK_SIZE = 1024 * 1024
#MAX_BLOCK_SIZE = (0xFFFFFFFFUL - 0xFFFFFFFFUL / 9UL - 64UL)
def compress_data(data, blocksize, level):
    """Compress `data` in `blocksize` chunks with the BrieflZ shared library.

    Each chunk is emitted as a 24-byte big-endian header followed by the
    packed bytes.  Header fields: magic 1651276314 (0x626C7A1A, "blz\\x1a"),
    compression level, packed size, CRC32 of the packed bytes, length of the
    input chunk buffer, and CRC32 of the input chunk.

    Returns the concatenated framed blocks, or None if blz_pack_level fails.

    NOTE(review): written for Python 2 -- `compressed_data` starts as a str
    and has packed bytes concatenated onto it; on Python 3 this would need
    b"" / bytes throughout.  Confirm intended runtime.
    """
    compressed_data = ""
    while len(data) > 0:
        # ctypes buffer of the next chunk; create_string_buffer appends a
        # trailing NUL, so len(buf) is chunk length + 1.
        buf = create_string_buffer(data[:blocksize])
        cb = c_int(len(buf))
        # Worst-case packed size for a block of `blocksize` bytes.
        cbOut = brieflz.blz_max_packed_size(blocksize)
        packed = create_string_buffer(cbOut)
        # Work memory is sized for level 1 regardless of `level` -- TODO
        # confirm this is adequate for higher compression levels.
        workmem = create_string_buffer(brieflz.blz_workmem_size_level(blocksize,1))
        cbOut = c_int(cbOut)
        # blz_pack_level returns the packed size, or a non-positive value on error.
        retval = brieflz.blz_pack_level(byref(buf), byref(packed), cb, byref(workmem), level)
        if retval > 0:
            temp = packed.raw[:retval]
            # Frame: magic, level, packed len, packed CRC, unpacked len, unpacked CRC.
            tempret = struct.pack(">IIIIII", 1651276314, level, len(temp), zlib.crc32(temp) % (1<<32), len(buf), zlib.crc32(data[:blocksize])%(1<<32)) + temp
            compressed_data += tempret
        else:
            print("Compression Error")
            return None
        data = data[blocksize:]
    return compressed_data
def decompress_data(data, blocksize=DEFAULT_BLOCK_SIZE, level=1):
    """Decompress a stream framed by compress_data.

    Walks the stream block by block: reads the 24-byte big-endian header,
    checks the magic (0x626C7A1A), depacks `packedsize` bytes and appends
    the result.  Returns the decompressed data, or None when a block's
    depacked size does not match its header value.

    NOTE(review): the `level` parameter is immediately shadowed by the
    header value, `max_packed_size` is computed but never used, and the
    header CRC fields are not verified here.

    NOTE(review): Python 2 semantics -- `decompressed_data` starts as a
    str and bytes are concatenated onto it.
    """
    decompressed_data = ""
    max_packed_size = brieflz.blz_max_packed_size(blocksize);
    # Header layout: magic, level, packed size, packed CRC, unpacked size, unpacked CRC.
    (magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
    data = data[24:]
    while magic == 0x626C7A1A and len(data) > 0:
        compressed_data = create_string_buffer(data[:packedsize])
        workdata = create_string_buffer(blocksize)
        # blz_depack returns the number of bytes written into workdata.
        depackedsize = brieflz.blz_depack(byref(compressed_data), byref(workdata), c_int(hdr_depackedsize))
        if depackedsize != hdr_depackedsize:
            print("Decompression error")
            print("DepackedSize: "+str(depackedsize) + "\nHdrVal: "+str(hdr_depackedsize))
            return None
        decompressed_data += workdata.raw[:depackedsize]
        data = data[packedsize:]
        if len(data) > 0:
            # Read the next block header.
            (magic,level,packedsize,crc,hdr_depackedsize,crc2) = struct.unpack_from('>IIIIII', data)
            data = data[24:]
        else:
            break
    return decompressed_data
def main():
    """Round-trip demo: compress a sample string, then decompress and verify."""
    block_len = 100  # deliberately small so several blocks are produced
    sample = "This is a test of brieflz compression" * 100
    packed = compress_data(sample, block_len, 1)
    if packed is not None:
        print("Compression SUCCESS!\nCompressed Data: ")
        print(binascii.hexlify(packed))
        unpacked = decompress_data(packed, block_len, 1)
        if unpacked is not None and unpacked == sample:
            print("Decompress SUCCESS!\nDecompress Data: ")
            print(unpacked)


if __name__ == "__main__":
    main()
|
Some MacBook Pro (and MacBook) owners have experienced problems with the keyboard, trackpad, speakers, USB-C, and more. Here’s what to do if you’ve been affected, and how to fix the problems if you have.
The MacBook Pro has been plagued with hardware issues since it was redesigned in 2016. On top of folk complaining about everything from USB-C limitations to a paltry 16GB memory ceiling, several more concerning hardware issues have come to light – in particular a flaw that could mean your MacBook keyboard stops working.
Below we take a look at the biggest, strangest and most awkward problems that have been reported with the MacBook Pro 2016, explaining how to tell if you’re affected, and what to do if you are.
The most common problem appears to be an issue where the keyboard stops working. In fact this issue is so widespread that a petition calling for Apple to replace the keyboard in “every MacBook Pro since late 2016” has been signed by more than 26,000 people and a class action lawsuit has been filed against the company over the keyboard failings.
The incidences of this have been widespread enough for a class action lawsuit to be filed against Apple on behalf of two people whose MacBook Pro keyboards stopped working.
The lawsuit alleges that Apple “promoted and sold laptops it knew were defective in that they contain a keyboard that is substantially certain to fail prematurely,” and demands that Apple recognises the flaw and covers the cost of remedying or replacing affected Mac laptops.
The affected laptops include MacBook and MacBook Pro models outlined below.
The lawsuit follows a petition of more than 26,000 signatures. The petition (which you can sign here) was started by Matthew Taylor on change.org.
“Every one of Apple’s current-gen MacBook Pro models, 13in and 15in, is sold with a keyboard that can become defective at any moment due to a design failure,” claims Taylor.
What’s causing the fault with MacBook and MacBook Pro keyboards?
The problem appears to be related to the ‘butterfly’ mechanism (used on the MacBook since 2015 and MacBook Pro since 2016). The ‘butterfly’ mechanism distributes the pressure on a key more evenly than the traditional ‘scissor’ mechanism. More crucially, at least in terms of design, the butterfly-style keys mean that the keyboard can be flatter, and the MacBook itself thinner.
The problem appears to be that if a speck of dust gets underneath a key it can stop the key from depressing all the way – and that can stop the key from registering.
Some users report that being a little heavy-handed when typing, or even hammering the faulty key many times with a finger, can cure the issue.
Apple itself recommends using compressed air to blow the dust from under the affected key (as per this support document).
However, the problem with the MacBook and MacBook Pro keyboards mentioned in the lawsuit above is that if hammering the keys, or using compressed air is unsuccessful, the dust cannot be removed without removing the entire keyboard. It is not possible to take individual keys out to clean the keyboard as it used to be with older models.
In fact, it’s not just the keyboard that must be removed but also the battery, frame and ports – a repair that can only be performed by an Apple service professional.
Wondering what a MacBook keyboard replacement can cost? Reports indicate that the repair could run to $700 (£521) or more, if the warranty has expired. Here’s how to book an appointment at an Apple Store if you need to.
It isn’t surprising that some of those affected by unresponsive MacBook keys are calling for Apple to recognise the issue (which appears to be related to the keyboard design) and recall the affected machines.
If your MacBook isn’t one of the ones affected by the butterfly keyboard mechanism, you may be able to fix it yourself, read: How to fix a Mac.
It is likely that you are being affected by the issue identified by the class action lawsuit if one or more keys have stopped working on your 2015 or later MacBook, or 2016 or later MacBook Pro.
However, there have been other issues with the MacBook Pro keyboard that seem to be unrelated to the issue being addressed by the class action lawsuit.
For example, some users have reported problems with keys making a high-pitched clicking noise when pressed, as seen in the video below. That problem seems to occur when the MacBooks get hot, but some users have reported having issues at any temperature.
The fact that the new 2016 MacBook Pro models feature only USB-C/Thunderbolt 3 ports, and therefore require adapters to connect just about any external hardware, is old news. However, there are reports that not all USB-C/Thunderbolt 3 adapters work correctly with the 2016 MacBook Pro models.
For example, unauthorised Mac repair guy and YouTube star Louis Rossmann noted in a hands-on review (warning: includes significant bad language!) that some USB-C adapters not only slowed down his 13in non-Touch Bar 2016 MacBook Pro but also appeared to slow or entirely kill the MacBook Pro’s Wi-Fi connection. The adapters worked perfectly with a Dell laptop.
Meanwhile, Mac developer Khaos Tian has not only discovered that some third-party Thunderbolt 3 docks don’t work with the new 2016 MacBook Pro but even got a response from somebody at Apple implying that non-Apple certified models are unlikely ever to be supported.
Buying only Apple’s own USB-C or Thunderbolt 3 adapters is the obvious solution. Buying third-party adaptors or docks will probably be cheaper but, as Apple says, only those certified by Apple are guaranteed to work – and there are still relatively few of those.
If you have a Thunderbolt 3 dock that’s incompatible then Khaos Tian has detailed a hack that might fix it but it’s very technical in nature and not for beginners.
Apple might provide a future update to macOS Sierra to include support for non-compatible USB-C/Thunderbolt hardware but knowing Apple like we do – and coupled to the fact they have their own range of adapters – we wouldn’t hold our breath waiting for a fix. To be honest, if you’re affected then we reckon it’s best to bite the bullet and get new Apple-approved adapters.
Shortly after the 2016 MacBook Pro got into users’ hands, reports of loud crackling and popping noises through the speakers started to appear on community forums. However, the noises only occur if the user boots into Microsoft Windows using Boot Camp. It does not happen if the user is booted into macOS Sierra.
It’s not clear if the crackles and pops are caused by a logic board issue or perhaps just poor audio drivers within Windows. The latter seems the most likely.
You’ll know if you’re affected by this issue because, obviously, you’ll hear the noises described above should you boot into Windows. Interestingly, if you access Windows via virtualisation software like VMware Fusion or Parallels then the issue does not arise.
You could also ensure that headphones are attached via the 3.5mm audio jack before using Boot Camp to boot into Windows, as this will avoid the MacBook Pro’s speakers being used. The crackling/pops will not be heard in the headphones. Notably, users affected by the issue report that simply turning the volume control down has no effect; the loud crackling and pops continue.
This is a report from one individual, so very far from conclusive, and it’s also something nobody else is likely to be foolish enough to try: YouTube star EverythingApplePro wondered what would happen if you attached more than one Apple USB-C charger to both a 13-in and 15-in 2016 MacBook Pro.
The spec sheet of the new MacBook Pro models says you can charge the computer from any of its USB-C ports, and while nobody is likely to deliberately attach four separate power adapters, as EverythingApplePro ended up doing, a user might attach their regular USB-C charger while also attaching something like a Thunderbolt 3 monitor containing a USB-C hub that’s designed to provide charge to attached devices.
What happened to EverythingApplePro’s computers? Both the MacBook Pros made the acknowledgement sound that accompanies a charger being attached, and the menu bar icon changed to indicate charging was happening, but the charging menu said the battery wasn’t being charged. Rather worryingly, this state of affairs persisted even after EverythingApplePro dropped back to just a single charger, and even after he then rebooted his 15-in model (he had identical results for both the 13 and 15in models, although the YouTube video doesn’t detail him attempting to reboot the 13in model; no, we’ve no idea how these YouTube stars can afford all this hardware only to then destroy it).
You’ll know if you’re affected by this issue because your MacBook Pro won’t charge if you have something attached to more than one USB-C/Thunderbolt 3 port.
With the scarcity of information provided by the video it’s hard to know what the charging issue actually is, and whether it persisted. Therefore we can only guess at a fix. If the issue happened with us we’d start by resetting the SMC. If the MacBook Pro still refuses to charge then a return to Apple is the only solution.
It’s possible this issue could be fixed by Apple in a future firmware upgrade for the 2016 MacBook Pro range. Until then, we advise you to unplug the USB-C charger if attaching any hardware likely to also provide a charge, such as a Thunderbolt 3 dock or a monitor with a built-in hub.
When Apple introduced multitouch trackpads they also introduced three-finger drag, which is a gesture whereby dragging three fingers across the trackpad had the effect of instantly clicking and then dragging whatever was under the mouse cursor. Drag with three fingers across text, for example, and it would be instantly highlighted.
In more recent releases of macOS/OS X Apple has moved this feature to the Accessibility section of System Preferences, but it still works in the same way – unless you have a new 2016 MacBook Pro, that is. In particular, users report that it only works in the centre of the trackpad, or that it works with some apps but not others.
You’ll know if you’re affected by this because, assuming you use three-finger drag, it simply won’t function reliably.
What’s causing the problem with three-finger drag?
macOS includes clever software to detect if the user’s palm accidentally touches the trackpad while typing and with the increased size of the trackpad in the new MacBook Pro range there’s been speculation the problem might be caused by this palm detection going awry.
As with many issues here this will probably be fixed by Apple with either a firmware update, or a future macOS update (or possibly both). Until then, all you can do is either live with the issue, or turn off three-finger drag.
|
# Dependency graph: each task maps to the tasks that must run after it.
graph_tasks = {
    "wash the dishes": ["have lunch"],
    "cook food": ["have lunch"],
    "have lunch": [],
    "wash laundry": ["dry laundry"],
    "dry laundry": ["fold laundry"],
    "fold laundry": [],
}
def dfs_topsort(graph):
    """Topologically sort `graph` using recursive depth-first search.

    `graph` maps each node to a list of its successors.  Returns a list of
    nodes such that every edge points forward in the list, or an empty
    list if the graph contains a cycle.
    """
    finished = []               # nodes in reverse topological order
    color = dict.fromkeys(graph, "white")
    found_cycle = [False]       # boxed flag so recursive calls can set it
    for node in graph:
        if color[node] != "white":
            continue
        dfs_visit(graph, node, color, finished, found_cycle)
        if found_cycle[0]:
            break
    if found_cycle[0]:
        return []
    finished.reverse()
    return finished


def dfs_visit(graph, u, color, L, found_cycle):
    """DFS helper: white = unseen, gray = on stack, black = done.

    Appends `u` to `L` once all of its descendants are finished; a gray
    successor means a back edge, i.e. a cycle.
    """
    if found_cycle[0]:
        return
    color[u] = "gray"
    for successor in graph[u]:
        if color[successor] == "gray":
            found_cycle[0] = True
            return
        if color[successor] == "white":
            dfs_visit(graph, successor, color, L, found_cycle)
    color[u] = "black"
    L.append(u)
# Print the schedule: one task per line, prerequisites before dependents.
order = dfs_topsort(graph_tasks)
for task in order:
    print(task)
|
I have been thinking a bit about play modes and how all of this will fit into the game. Right now there is match mode and server mode.
Server mode is for casual play. You can join a server and walk around, chat, and duel. It will be a good place to sit when waiting for matches. Servers will be community run.
Match mode is where the action is at. Either create a match or find one already waiting. You are placed into a small area map where you duel until death. Matches can be 1v1 or team v team.
I have been kicking around the idea of a champ-spawn type mode. I haven’t worked out all the kinks, but what I am thinking is you pay some sort of fee (in game gold or something) and your guild opens a champ spawn server. When the champ is slain, you get a reward. Clothing skins or something of the like. Similar to how CS:GO weapon cases work. Other guilds would have the option to challenge your champ spawn by paying a fee to join. Something of that nature.
|
from __future__ import unicode_literals
import boto.rds
import boto.vpc
from boto.exception import BotoServerError
import sure # noqa
from moto import mock_ec2, mock_rds
from tests.helpers import disable_on_py3
@disable_on_py3()
@mock_rds
def test_create_database():
    """create_dbinstance returns an 'available' instance with the requested attributes."""
    conn = boto.rds.connect_to_region("us-west-2")
    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
                                      security_groups=["my_sg"])
    database.status.should.equal('available')
    database.id.should.equal("db-master-1")
    database.allocated_storage.should.equal(10)
    database.instance_class.should.equal("db.m1.small")
    database.master_username.should.equal("root")
    database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
    database.security_groups[0].name.should.equal('my_sg')


@disable_on_py3()
@mock_rds
def test_get_databases():
    """get_all_dbinstances lists every instance, or filters by identifier."""
    conn = boto.rds.connect_to_region("us-west-2")
    list(conn.get_all_dbinstances()).should.have.length_of(0)
    conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2')
    list(conn.get_all_dbinstances()).should.have.length_of(2)
    databases = conn.get_all_dbinstances("db-master-1")
    list(databases).should.have.length_of(1)
    databases[0].id.should.equal("db-master-1")


@mock_rds
def test_describe_non_existant_database():
    """Describing an unknown DB instance raises BotoServerError."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError)


@disable_on_py3()
@mock_rds
def test_delete_database():
    """Deleting a DB instance removes it from the listing."""
    conn = boto.rds.connect_to_region("us-west-2")
    list(conn.get_all_dbinstances()).should.have.length_of(0)
    conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    list(conn.get_all_dbinstances()).should.have.length_of(1)
    conn.delete_dbinstance("db-master-1")
    list(conn.get_all_dbinstances()).should.have.length_of(0)


@mock_rds
def test_delete_non_existant_database():
    """Deleting an unknown DB instance raises BotoServerError."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError)
@mock_rds
def test_create_database_security_group():
    """A new DB security group carries its name/description and no IP ranges."""
    conn = boto.rds.connect_to_region("us-west-2")
    security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
    security_group.name.should.equal('db_sg')
    security_group.description.should.equal("DB Security Group")
    list(security_group.ip_ranges).should.equal([])


@mock_rds
def test_get_security_groups():
    """get_all_dbsecurity_groups lists every group, or filters by name."""
    conn = boto.rds.connect_to_region("us-west-2")
    list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)
    conn.create_dbsecurity_group('db_sg1', 'DB Security Group')
    conn.create_dbsecurity_group('db_sg2', 'DB Security Group')
    list(conn.get_all_dbsecurity_groups()).should.have.length_of(2)
    databases = conn.get_all_dbsecurity_groups("db_sg1")
    list(databases).should.have.length_of(1)
    databases[0].name.should.equal("db_sg1")


@mock_rds
def test_get_non_existant_security_group():
    """Describing an unknown security group raises BotoServerError."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw(BotoServerError)


@mock_rds
def test_delete_database_security_group():
    """Deleting a security group removes it from the listing."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(conn.get_all_dbsecurity_groups()).should.have.length_of(1)
    conn.delete_dbsecurity_group("db_sg")
    list(conn.get_all_dbsecurity_groups()).should.have.length_of(0)


@mock_rds
def test_delete_non_existant_security_group():
    """Deleting an unknown security group raises BotoServerError."""
    conn = boto.rds.connect_to_region("us-west-2")
    conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw(BotoServerError)


@disable_on_py3()
@mock_rds
def test_security_group_authorize():
    """authorize() adds a CIDR IP range to the security group."""
    conn = boto.rds.connect_to_region("us-west-2")
    security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
    list(security_group.ip_ranges).should.equal([])
    security_group.authorize(cidr_ip='10.3.2.45/32')
    # Re-fetch to check the change was persisted server-side.
    security_group = conn.get_all_dbsecurity_groups()[0]
    list(security_group.ip_ranges).should.have.length_of(1)
    security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32')


@disable_on_py3()
@mock_rds
def test_add_security_group_to_database():
    """modify() attaches an existing security group to a DB instance."""
    conn = boto.rds.connect_to_region("us-west-2")
    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group')
    database.modify(security_groups=[security_group])
    database = conn.get_all_dbinstances()[0]
    list(database.security_groups).should.have.length_of(1)
    database.security_groups[0].name.should.equal("db_sg")
@mock_ec2
@mock_rds
def test_add_database_subnet_group():
    """create_db_subnet_group stores the name, description and subnet ids."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24")
    subnet_ids = [subnet1.id, subnet2.id]
    conn = boto.rds.connect_to_region("us-west-2")
    subnet_group = conn.create_db_subnet_group("db_subnet", "my db subnet", subnet_ids)
    subnet_group.name.should.equal('db_subnet')
    subnet_group.description.should.equal("my db subnet")
    list(subnet_group.subnet_ids).should.equal(subnet_ids)


@mock_ec2
@mock_rds
def test_describe_database_subnet_group():
    """get_all_db_subnet_groups lists/filters groups; unknown names raise."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id])
    list(conn.get_all_db_subnet_groups()).should.have.length_of(2)
    list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1)
    conn.get_all_db_subnet_groups.when.called_with("not-a-subnet").should.throw(BotoServerError)


@mock_ec2
@mock_rds
def test_delete_database_subnet_group():
    """Deleting a subnet group removes it; deleting it again raises."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    list(conn.get_all_db_subnet_groups()).should.have.length_of(1)
    conn.delete_db_subnet_group("db_subnet1")
    list(conn.get_all_db_subnet_groups()).should.have.length_of(0)
    conn.delete_db_subnet_group.when.called_with("db_subnet1").should.throw(BotoServerError)


@disable_on_py3()
@mock_ec2
@mock_rds
def test_create_database_in_subnet_group():
    """A DB instance created with db_subnet_group_name reports that group."""
    vpc_conn = boto.vpc.connect_to_region("us-west-2")
    vpc = vpc_conn.create_vpc("10.0.0.0/16")
    subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24")
    conn = boto.rds.connect_to_region("us-west-2")
    conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id])
    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small',
                                      'root', 'hunter2', db_subnet_group_name="db_subnet1")
    database = conn.get_all_dbinstances("db-master-1")[0]
    database.subnet_group.name.should.equal("db_subnet1")
@disable_on_py3()
@mock_rds
def test_create_database_replica():
    """Read replicas are linked to their primary and unlinked when deleted."""
    conn = boto.rds.connect_to_region("us-west-2")
    primary = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    replica = conn.create_dbinstance_read_replica("replica", "db-master-1", "db.m1.small")
    replica.id.should.equal("replica")
    replica.instance_class.should.equal("db.m1.small")
    status_info = replica.status_infos[0]
    status_info.normal.should.equal(True)
    status_info.status_type.should.equal('read replication')
    status_info.status.should.equal('replicating')
    # Primary should now advertise the replica...
    primary = conn.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    # ...and drop it once the replica is deleted.
    conn.delete_dbinstance("replica")
    primary = conn.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)


@disable_on_py3()
@mock_rds
def test_create_cross_region_database_replica():
    """A replica can be created in another region from the primary's ARN."""
    west_1_conn = boto.rds.connect_to_region("us-west-1")
    west_2_conn = boto.rds.connect_to_region("us-west-2")
    primary = west_1_conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2')
    primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1"
    replica = west_2_conn.create_dbinstance_read_replica(
        "replica",
        primary_arn,
        "db.m1.small",
    )
    primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
    primary.read_replica_dbinstance_identifiers[0].should.equal("replica")
    replica = west_2_conn.get_all_dbinstances("replica")[0]
    replica.instance_class.should.equal("db.m1.small")
    # Deleting the replica in us-west-2 must unlink it from the us-west-1 primary.
    west_2_conn.delete_dbinstance("replica")
    primary = west_1_conn.get_all_dbinstances("db-master-1")[0]
    list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0)
@disable_on_py3()
@mock_rds
def test_connecting_to_us_east_1():
    """Regression test: us-east-1 uses a region-less RDS URL in boto.

    boto does not use us-east-1 in the URL for RDS,
    and that broke moto in the past:
    https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285
    """
    conn = boto.rds.connect_to_region("us-east-1")
    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
                                      security_groups=["my_sg"])
    database.status.should.equal('available')
    database.id.should.equal("db-master-1")
    database.allocated_storage.should.equal(10)
    database.instance_class.should.equal("db.m1.small")
    database.master_username.should.equal("root")
    database.endpoint.should.equal(('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306))
    database.security_groups[0].name.should.equal('my_sg')


@disable_on_py3()
@mock_rds
def test_create_database_with_iops():
    """Requesting provisioned IOPS yields io1 storage with that IOPS value."""
    conn = boto.rds.connect_to_region("us-west-2")
    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000)
    database.status.should.equal('available')
    database.iops.should.equal(6000)
    # boto>2.36.0 may change the following property name to `storage_type`
    database.StorageType.should.equal('io1')
|
Hello guys, this post is about Good Leather And Suede Sectional Sofa 96 For Your With Leather And Suede Sectional Sofa ( Leather And Suede Sofa #9). This photo is a image/jpeg and the resolution of this file is 1200 x 771. Its file size is just 114 KB. If you would like to save this photo to your computer, you can click here. You may also download more images by clicking the following photo or see more at here: Leather And Suede Sofa.
With the usage of showcases getting increasingly more popular, decorating tips are increasingly essential these days. The more mirrors to the wall, the higher the look and feel of the toilet that gives image of the bedroom that is tiny to a larger.
Several love their favorite animation characters to show on the bathroom surfaces. The use of the proper pastel hues and colors can also be in building the proper design, important. Ultimately, the mixture of light hues and the proper bathroom roof lights create an excellent thing to check out is walled by the restroom. No matter what your imaginative, the toilet wall can not alter the room kind. However, you can educate all of your creativity to bring color and some living while in the bath expertise.
of decorating a Good Leather And Suede Sectional Sofa 96 For Your With Leather And Suede Sectional Sofa ( Leather And Suede Sofa #9) the idea might be transformed routinely so that the toilet has always been an improved place. You're able to enhance your bathtub expertise with all the wall decoration that is right. Because the usage of water from hot water can actually damage this wall decoration the use of wall hangings shunned while in the bathroom. The kids's bathrooms also have wall arrangements that are individual.
Category: Sofa. Burlington Futon was published on June 1st, 2018 by and tagged as: Burlington Futon, Burlington, Futon.
Category: Sofa. Futons Wilmington Nc was published on April 1st, 2019 by and tagged as: Futons Wilmington Nc, Futons, Wilmington, Nc.
Category: Sofa. Foam Sofa Cushions Inserts was published on January 31st, 2018 by and tagged as: Foam Sofa Cushions Inserts, Foam, Sofa, Cushions, Inserts.
|
import operator
from functools import partial, wraps
from itertools import chain, count
from collections import Iterator
from toolz import merge, unique, curry
from .optimize import cull, fuse
from .utils import concrete
from . import base
from .compatibility import apply
from . import threaded
__all__ = ['compute', 'do', 'value', 'Value']
def flat_unique(ls):
    """Flatten ``ls``, de-duplicate by object identity, and return a list."""
    seen_ids = set()
    out = []
    for item in chain.from_iterable(ls):
        if id(item) not in seen_ids:
            seen_ids.add(id(item))
            out.append(item)
    return out
def unzip(ls, nout):
    """Unzip a list of tuples into ``nout`` output tuples.

    An empty input yields ``nout`` empty tuples rather than nothing.
    """
    columns = list(zip(*ls))
    return columns if columns else [()] * nout
def to_task_dasks(expr):
    """Normalize a python object and extract all sub-dasks.

    - Replace ``Values`` with their keys
    - Convert literals to things the schedulers can handle
    - Extract dasks from all enclosed values

    Parameters
    ----------
    expr : object
        The object to be normalized. This function knows how to handle
        ``Value``s, as well as most builtin python types.

    Returns
    -------
    task : normalized task to be run
    dasks : list of dasks that form the dag for this task

    Examples
    --------
    >>> a = value(1, 'a')
    >>> b = value(2, 'b')
    >>> task, dasks = to_task_dasks([a, b, 3])
    >>> task # doctest: +SKIP
    (list, ['a', 'b', 3])
    >>> dasks # doctest: +SKIP
    [{'a': 1}, {'b': 2}]
    >>> task, dasks = to_task_dasks({a: 1, b: 2})
    >>> task # doctest: +SKIP
    (dict, (list, [(list, ['a', 1]), (list, ['b', 2])]))
    >>> dasks # doctest: +SKIP
    [{'a': 1}, {'b': 2}]
    """
    if isinstance(expr, Value):
        # A Value is already a task: refer to it by its key.
        return expr.key, expr._dasks
    elif isinstance(expr, base.Base):
        # Another dask collection: optimize its graph and add one task
        # that finalizes its keys into a concrete result.
        name = tokenize(str(expr), True)
        keys = expr._keys()
        dsk = expr._optimize(expr.dask, keys)
        dsk[name] = (expr._finalize, expr, (concrete, keys))
        return name, [dsk]
    elif isinstance(expr, (Iterator, list, tuple, set)):
        # Recurse into the elements and merge their graphs.
        args, dasks = unzip(map(to_task_dasks, expr), 2)
        args = list(args)
        dasks = flat_unique(dasks)
        # Ensure output type matches input type
        if isinstance(expr, (list, tuple, set)):
            return (type(expr), args), dasks
        else:
            # Iterators come back as a plain list of normalized items.
            return args, dasks
    elif isinstance(expr, dict):
        # Encode the dict as (dict, [[k, v], ...]) so that keys and values
        # may themselves contain Values.
        args, dasks = to_task_dasks(list([k, v] for k, v in expr.items()))
        return (dict, args), dasks
    else:
        # Plain literal: schedulers handle it directly; no graphs involved.
        return expr, []
# Infinite supply of fresh identifiers: '_1', '_2', '_3', ...
tokens = map('_{0}'.format, count(1))


def tokenize(v, pure=False):
    """Mapping function from task -> consistent name.

    Parameters
    ----------
    v : object
        Any python object (or tuple of objects) that summarize the task.
    pure : boolean, optional
        If True, a consistent hash function is tried on the input. If this
        fails, then a unique identifier is used. If False (default), then a
        unique identifier is always used.
    """
    # TODO: May have hash collisions...
    if not pure:
        return next(tokens)
    try:
        return str(hash(v))
    except TypeError:
        # Unhashable input: fall back to a fresh unique identifier.
        return next(tokens)
def applyfunc(func, args, kwargs, pure=False):
    """Create a Value by applying a function to args.

    Given a function and arguments, return a Value that represents the result
    of that computation."""
    # Normalize arguments, replacing nested Values with their keys and
    # collecting every referenced graph (de-duplicated by identity).
    args, dasks = unzip(map(to_task_dasks, args), 2)
    dasks = flat_unique(dasks)
    # Key derives from func/args/kwargs; with pure=True identical calls can
    # share a key (see tokenize).
    name = tokenize((func, args, frozenset(kwargs.items())), pure)
    if kwargs:
        # Task tuples carry positional args only, so bake the kwargs in.
        func = partial(func, **kwargs)
    dasks.append({name: (func,) + args})
    return Value(name, dasks)
@curry
def do(func, pure=False):
    """Wraps a function so that it outputs a ``Value``.

    Examples
    --------
    Can be used as a decorator:

    >>> @do
    ... def add(a, b):
    ...     return a + b
    >>> res = add(1, 2)
    >>> type(res) == Value
    True
    >>> res.compute()
    3

    For other cases, it may be cleaner to call ``do`` on a function at call
    time:

    >>> res2 = do(sum)([res, 2, 3])
    >>> res2.compute()
    8

    ``do`` also accepts an optional keyword ``pure``. If False (default), then
    subsequent calls will always produce a different ``Value``. This is useful
    for non-pure functions (such as ``time`` or ``random``).

    >>> from random import random
    >>> out1 = do(random)()
    >>> out2 = do(random)()
    >>> out1.key == out2.key
    False

    If you know a function is pure (output only depends on the input, with no
    global state), then you can set ``pure=True``. This will attempt to apply a
    consistent name to the output, but will fallback on the same behavior of
    ``pure=False`` if this fails.

    >>> @do(pure=True)
    ... def add(a, b):
    ...     return a + b
    >>> out1 = add(1, 2)
    >>> out2 = add(1, 2)
    >>> out1.key == out2.key
    True
    """
    # @curry makes the ``do(pure=True)`` decorator-with-arguments form work.
    @wraps(func)
    def _dfunc(*args, **kwargs):
        # Defer the actual call: build a Value wrapping the task.
        return applyfunc(func, args, kwargs, pure=pure)
    return _dfunc
def optimize(dsk, keys):
    """Prune tasks unneeded for ``keys``, then fuse linear task chains."""
    return fuse(cull(dsk, keys))
def compute(*args, **kwargs):
    """Evaluate several ``Value``s at once.

    Note that the only difference between this function and
    ``dask.base.compute`` is that this implicitly converts python objects to
    ``Value``s, allowing for collections of dask objects to be computed.

    Examples
    --------
    >>> a = value(1)
    >>> b = a + 2
    >>> c = a + 3
    >>> compute(b, c)  # Compute both simultaneously
    (3, 4)
    >>> compute(a, [b, c])  # Works for lists of Values
    (1, [3, 4])
    """
    # ``value`` (module-level helper) wraps each argument in a Value, so
    # plain objects and containers of Values are accepted too.
    args = [value(a) for a in args]
    return base.compute(*args, **kwargs)
def right(method):
    """Build the reflected ('r'-prefixed) version of a binary operator.

    Given ``method(a, b)``, returns a function suitable for dunder slots like
    ``__radd__`` that calls ``method`` with its operands swapped.
    """
    def reflected(self, other):
        # Reflected operators receive (self, other) but must apply the
        # underlying operation as other <op> self.
        return method(other, self)
    return reflected
class Value(base.Base):
    """Represents a value to be computed by dask.
    Equivalent to the output from a single key in a dask graph.
    """
    # Instances carry only the graph key and the list of graph dicts.
    __slots__ = ('_key', '_dasks')
    # Hooks consumed by the base.Base compute machinery.
    _optimize = staticmethod(optimize)
    _finalize = staticmethod(lambda a, r: r[0])
    _default_get = staticmethod(threaded.get)

    def __init__(self, name, dasks):
        # object.__setattr__ bypasses the immutability guard in __setattr__.
        object.__setattr__(self, '_key', name)
        object.__setattr__(self, '_dasks', dasks)

    @property
    def dask(self):
        # Merge the per-component graph dicts into one task graph.
        return merge(*self._dasks)

    @property
    def key(self):
        return self._key

    def _keys(self):
        return [self.key]

    def __repr__(self):
        return "Value({0})".format(repr(self.key))

    def __hash__(self):
        return hash(self.key)

    def __dir__(self):
        # NOTE(review): with __slots__ defined, instances have no __dict__;
        # this attribute access funnels into __getattr__ below and raises
        # AttributeError — confirm whether dir() support is actually intended.
        return list(self.__dict__.keys())

    def __getattr__(self, attr):
        # Attribute access on the wrapped object becomes a lazy getattr task.
        # Underscore-prefixed names are reserved for Value internals.
        if not attr.startswith('_'):
            return do(getattr, True)(self, attr)
        else:
            raise AttributeError("Attribute {0} not found".format(attr))

    def __setattr__(self, attr, val):
        raise TypeError("Value objects are immutable")

    def __setitem__(self, index, val):
        raise TypeError("Value objects are immutable")

    def __iter__(self):
        raise TypeError("Value objects are not iterable")

    def __call__(self, *args, **kwargs):
        # Calling a Value defers the call until compute time.
        return do(apply)(self, args, kwargs)

    def __bool__(self):
        raise TypeError("Truth of Value objects is not supported")
    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    # Operator support: each dunder becomes a lazy, pure task-producing
    # wrapper, so expressions like `a + 1` build graph nodes.
    __abs__ = do(operator.abs, True)
    __add__ = do(operator.add, True)
    __and__ = do(operator.and_, True)
    # NOTE(review): Python 2 classic division (__div__/__rdiv__) is mapped to
    # floordiv here — confirm that is the intended semantics.
    __div__ = do(operator.floordiv, True)
    __eq__ = do(operator.eq, True)
    __floordiv__ = do(operator.floordiv, True)
    __ge__ = do(operator.ge, True)
    __getitem__ = do(operator.getitem, True)
    __gt__ = do(operator.gt, True)
    __index__ = do(operator.index, True)
    __invert__ = do(operator.invert, True)
    __le__ = do(operator.le, True)
    __lshift__ = do(operator.lshift, True)
    __lt__ = do(operator.lt, True)
    __mod__ = do(operator.mod, True)
    __mul__ = do(operator.mul, True)
    __ne__ = do(operator.ne, True)
    __neg__ = do(operator.neg, True)
    __or__ = do(operator.or_, True)
    __pos__ = do(operator.pos, True)
    __pow__ = do(operator.pow, True)
    __radd__ = do(right(operator.add), True)
    __rand__ = do(right(operator.and_), True)
    __rdiv__ = do(right(operator.floordiv), True)
    __rfloordiv__ = do(right(operator.floordiv), True)
    __rlshift__ = do(right(operator.lshift), True)
    __rmod__ = do(right(operator.mod), True)
    __rmul__ = do(right(operator.mul), True)
    __ror__ = do(right(operator.or_), True)
    __rpow__ = do(right(operator.pow), True)
    __rrshift__ = do(right(operator.rshift), True)
    __rshift__ = do(operator.rshift, True)
    __rsub__ = do(right(operator.sub), True)
    __rtruediv__ = do(right(operator.truediv), True)
    __rxor__ = do(right(operator.xor), True)
    __sub__ = do(operator.sub, True)
    __truediv__ = do(operator.truediv, True)
    __xor__ = do(operator.xor, True)
def value(val, name=None):
    """Create a ``Value`` from a python object.

    Parameters
    ----------
    val : object
        Object to be wrapped.
    name : string, optional
        Name to be used in the resulting dask.

    Examples
    --------
    >>> a = value([1, 2, 3])
    >>> a.compute()
    [1, 2, 3]

    Values proxy the underlying object — operators, indexing, and attribute
    or method access all build lazy tasks:
    >>> (a + [1, 2]).compute()
    [1, 2, 3, 1, 2]
    >>> a[1].compute()
    2
    >>> a.count(2).compute()
    1

    Note that a missing method raises only at compute time:
    >>> res = a.not_a_real_method()
    >>> res.compute()  # doctest: +SKIP
    AttributeError("'list' object has no attribute 'not_a_real_method'")
    """
    # Already wrapped — hand it straight back.
    if isinstance(val, Value):
        return val
    # Fall back to a content-derived token when no explicit name was given.
    key = name or tokenize(val, True)
    task, dasks = to_task_dasks(val)
    dasks.append({key: task})
    return Value(key, dasks)
|
Today is Ada Lovelace Day, and in celebration of the first computer programmer, Suw Charman-Anderson has encouraged us to write about women in technology whom we admire.
When choosing who to write about I was disinclined to pick someone famous or high profile as I always worry that women who have gained such heights have often had to do so by becoming like the men they were competing against and in the process compromised some of the different, but often greater, strengths of being a woman.
So with that by way of explanation most of you will, I guess, not have heard of my choice Marni Melrose. Marni, "The Mac Angel", does webinars and podcasts explaining how to get the best out of Daylite the wonderful CRM/time-management software that I now couldn't run my business without. I have bought four of Marni's videos and have watched each of them several times over. Her comprehensive knowledge of the programme, which has a steep learning curve and is highly customizable, means the videos are packed with information but they are also presented in a way that doesn't make the viewer feel stupid.
What impresses me is the depth of Marni's technical knowledge combined with a way of explaining things that makes them easy to understand. I thought that a woman making something very geeky more accessible through her approach and depth of knowledge was an appropriate role model to celebrate on this day.
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os # moduł udostępniający funkcję isfile()
import csv # moduł do obsługi formatu csv
slownik = {}  # global dictionary: foreign word -> list of meanings
sFile = "slownik.csv"  # CSV file holding the words and their translations
def otworz(plik):
    """Load dictionary entries from the CSV file *plik* into the global
    ``slownik`` and return the resulting number of entries.

    Bug fix: the original ignored its *plik* parameter and always read the
    module-level ``sFile``; callers pass ``sFile`` anyway, so honouring the
    parameter is backward-compatible.
    """
    if os.path.isfile(plik):  # does the dictionary file exist?
        with open(plik, newline='') as plikcsv:  # open for reading
            tresc = csv.reader(plikcsv)
            for linia in tresc:  # first column is the word, rest are meanings
                slownik[linia[0]] = linia[1:]
    return len(slownik)  # number of entries now in the dictionary
def zapisz(slownik):
    """Write the dictionary to ``sFile`` as CSV (word, meaning1, meaning2, ...).

    The existing file is overwritten. Bug fix: the original did
    ``lista.insert(0, wobcy)`` on the list stored in the dict, so saving
    corrupted the in-memory dictionary by prepending each key to its own
    meanings; building a fresh row avoids the mutation.
    """
    with open(sFile, "w", newline='') as plikcsv:
        tresc = csv.writer(plikcsv)
        for wobcy in slownik:
            tresc.writerow([wobcy] + slownik[wobcy])
def oczysc(tekst):
    """Normalize a meaning string: strip surrounding whitespace and lowercase.

    Fix: the original named its parameter ``str``, shadowing the builtin;
    callers in this file use it positionally, so the rename is safe.
    """
    return tekst.strip().lower()
def main(args):
    """Interactive loop: read entries of the form 'word: meaning1, meaning2',
    update the global dictionary, persist it on exit if anything changed,
    then print the whole dictionary as a table. Returns 0."""
    print("""Podaj dane w formacie:
wyraz obcy: znaczenie1, znaczenie2
Aby zakończyć wprowadzanie danych, podaj 0.
""")
    # Set once the user adds or replaces an entry, so we only rewrite the
    # file when something actually changed.
    nowy = False
    ileWyrazow = otworz(sFile)
    print("Wpisów w bazie:", ileWyrazow)
    # Main input loop.
    while True:
        dane = input("Podaj dane: ")
        t = dane.split(":")
        wobcy = oczysc(t[0])  # same normalization used for the meanings
        # Bug fix: the banner tells the user to enter 0 to finish, but the
        # original only accepted 'koniec' — accept both.
        if wobcy in ('0', 'koniec'):
            break
        elif dane.count(":") == 1:  # validate the input format
            op = ""
            if wobcy in slownik:
                print("Wyraz", wobcy, " i jego znaczenia są już w słowniku.")
                op = input("Zastąpić wpis (t/n)? ")
            # New word, or the user confirmed the replacement?
            if wobcy not in slownik or op == "t":
                znaczenia = t[1].split(",")  # meanings as a list
                znaczenia = list(map(oczysc, znaczenia))  # normalize each
                slownik[wobcy] = znaczenia
                nowy = True
        else:
            print("Błędny format!")
    if nowy:
        zapisz(slownik)
    # Print the final dictionary table.
    print("=" * 50)
    print("{0: <15}{1: <40}".format("Wyraz obcy", "Znaczenia"))
    print("=" * 50)
    for wobcy in slownik:
        print("{0: <15}{1: <40}".format(wobcy, ",".join(slownik[wobcy])))
    return 0
if __name__ == '__main__':
    import sys
    # Exit with main()'s return code (0 on success).
    sys.exit(main(sys.argv))
|
Grey Flexfit Delta hat with "B" logo on the front and "Breton Brewing" logo on back. This hat features an incredibly comfortable athletic material. Available in Small/Medium and Large/Extra Large.
|
# -*- encoding: utf-8 -*-
import re
import requests
from django import forms
from vendor.zebra.forms import StripePaymentForm
from django.utils.safestring import mark_safe
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from apps.profile.models import change_password, blank_authenticate
from apps.social.models import MSocialProfile
# Stripe subscription choices: (plan id, HTML label) pairs rendered as the
# radio options of StripePlusPaymentForm.plan below.
PLANS = [
    ("newsblur-premium-12", mark_safe("$12 / year <span class='NB-small'>($1/month)</span>")),
    ("newsblur-premium-24", mark_safe("$24 / year <span class='NB-small'>($2/month)</span>")),
    ("newsblur-premium-36", mark_safe("$36 / year <span class='NB-small'>($3/month)</span>")),
]
class HorizRadioRenderer(forms.RadioSelect.renderer):
    """Radio-select renderer that lays the buttons out horizontally
    instead of the default vertical list."""
    def render(self):
        """Join the individual radio widgets into one horizontal container."""
        rendered = '\n'.join('%s\n' % widget for widget in self)
        return mark_safe('<div class="NB-stripe-plan-choice">%s</div>' % rendered)
class StripePlusPaymentForm(StripePaymentForm):
    """Zebra's Stripe payment form extended with an email field and a
    subscription-plan radio selector."""
    def __init__(self, *args, **kwargs):
        # 'email' is required by the caller; 'plan' is optional.
        email = kwargs.pop('email')
        plan = kwargs.pop('plan', '')
        super(StripePlusPaymentForm, self).__init__(*args, **kwargs)
        # Pre-fill the fields with the caller-supplied defaults.
        self.fields['email'].initial = email
        if plan:
            self.fields['plan'].initial = plan

    email = forms.EmailField(widget=forms.TextInput(attrs=dict(maxlength=75)),
                             label='邮件地址',
                             required=False)
    plan = forms.ChoiceField(required=False, widget=forms.RadioSelect(renderer=HorizRadioRenderer),
                             choices=PLANS, label='Plan')
class DeleteAccountForm(forms.Form):
    """Account-deletion confirmation: asks for the user's password and a
    typed 'Delete' acknowledgement."""
    password = forms.CharField(widget=forms.PasswordInput(),
                               label="确认密码",
                               required=False)
    confirm = forms.CharField(label="请输入“Delete”以确认",
                              widget=forms.TextInput(),
                              required=False)

    def __init__(self, *args, **kwargs):
        # The form needs the acting user to verify credentials.
        self.user = kwargs.pop('user')
        super(DeleteAccountForm, self).__init__(*args, **kwargs)

    def clean_password(self):
        """Accept the account password, falling back to blank-password auth
        for accounts that have no password set."""
        entered = self.cleaned_data['password']
        authenticated = authenticate(username=self.user.username,
                                     password=entered)
        if not authenticated:
            authenticated = blank_authenticate(username=self.user.username)
        if not authenticated:
            raise forms.ValidationError('你的密码不匹配。')
        return entered

    def clean_confirm(self):
        """Require the literal word 'delete' (case-insensitive)."""
        if self.cleaned_data.get('confirm', "").lower() != "delete":
            raise forms.ValidationError('请输入“Delete”以确认删除。')
        return self.cleaned_data['confirm']
class ForgotPasswordForm(forms.Form):
    """First step of the password-reset flow: asks for the account email."""
    email = forms.CharField(widget=forms.TextInput(),
                            label="你的邮件地址",
                            required=False)

    def __init__(self, *args, **kwargs):
        super(ForgotPasswordForm, self).__init__(*args, **kwargs)

    def clean_email(self):
        """Require a non-empty address that belongs to at least one user."""
        address = self.cleaned_data['email']
        if not address:
            raise forms.ValidationError('请输入邮件地址。')
        try:
            User.objects.get(email__iexact=address)
        except User.MultipleObjectsReturned:
            # Several accounts share this address; any match is acceptable.
            pass
        except User.DoesNotExist:
            raise forms.ValidationError('没有用户使用此邮件地址。')
        return address
class ForgotPasswordReturnForm(forms.Form):
    """Second step of the password-reset flow: collects the new password."""
    password = forms.CharField(widget=forms.PasswordInput(),
                               label="你的新密码",
                               required=True)
class AccountSettingsForm(forms.Form):
    """Settings-dialog form: lets a user change username, email and password."""
    username = forms.RegexField(regex=r'^[a-zA-Z0-9]+$',
                                max_length=30,
                                widget=forms.TextInput(attrs={'class': 'NB-input'}),
                                label='用户名',
                                required=False,
                                error_messages={
                                    'invalid': "用户名只能包含字母或数字"
                                })
    email = forms.EmailField(widget=forms.TextInput(attrs={'maxlength': 75, 'class': 'NB-input'}),
                             label='邮件地址',
                             required=True,
                             error_messages={'required': '请输入邮件地址。'})
    new_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'NB-input'}),
                                   label='密码',
                                   required=False,
                                   error_messages={'required': '请输入密码。'})
    old_password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'NB-input'}),
                                   label='密码',
                                   required=False,
                                   error_messages={'required': '请输入密码。'})

    def __init__(self, user, *args, **kwargs):
        # The acting user, needed for uniqueness checks and password changes.
        self.user = user
        super(AccountSettingsForm, self).__init__(*args, **kwargs)

    def clean_username(self):
        username = self.cleaned_data['username']
        return username

    def clean_password(self):
        # NOTE(review): this form has no field named 'password' (only
        # new_password/old_password), so Django never invokes this hook —
        # looks like dead code; confirm before removing.
        if not self.cleaned_data['password']:
            return ""
        return self.cleaned_data['password']

    def clean_email(self):
        return self.cleaned_data['email']

    def clean(self):
        # Cross-field validation: username/email uniqueness and old-password check.
        username = self.cleaned_data.get('username', '')
        new_password = self.cleaned_data.get('new_password', '')
        old_password = self.cleaned_data.get('old_password', '')
        email = self.cleaned_data.get('email', None)
        if username and self.user.username != username:
            try:
                User.objects.get(username__iexact=username)
            except User.DoesNotExist:
                pass
            else:
                raise forms.ValidationError("此用户名已被使用,请尝试其他用户名。")
        if self.user.email != email:
            if email and User.objects.filter(email__iexact=email).count():
                raise forms.ValidationError("此邮件地址已被其他帐户使用,请尝试其他邮件地址。")
        if old_password or new_password:
            # only_check=True validates the old password without changing it.
            code = change_password(self.user, old_password, new_password, only_check=True)
            if code <= 0:
                raise forms.ValidationError("你的旧密码不正确。")
        return self.cleaned_data

    def save(self, profile_callback=None):
        username = self.cleaned_data['username']
        new_password = self.cleaned_data.get('new_password', None)
        old_password = self.cleaned_data.get('old_password', None)
        email = self.cleaned_data.get('email', None)
        if username and self.user.username != username:
            # NOTE(review): passes the *old username* where change_password
            # expects the old password — presumably for accounts whose
            # password equals their username; confirm intended.
            change_password(self.user, self.user.username, username)
            self.user.username = username
            self.user.save()
            # Keep the social profile's username in sync.
            social_profile = MSocialProfile.get_user(self.user.pk)
            social_profile.username = username
            social_profile.save()
        if self.user.email != email:
            self.user.email = email
            self.user.save()
        if old_password or new_password:
            change_password(self.user, old_password, new_password)
class RedeemCodeForm(forms.Form):
    """Validates a ThinkUp gift code against the remote bundle API."""
    gift_code = forms.CharField(widget=forms.TextInput(),
                                label="Gift code",
                                required=True)

    def clean_gift_code(self):
        """Normalize the code to 12 alphanumerics and verify it remotely."""
        raw = self.cleaned_data['gift_code']
        code = re.sub(r'[^a-zA-Z0-9]', '', raw).lower()
        if len(code) != 12:
            raise forms.ValidationError('Your gift code should be 12 characters long.')
        req = requests.get('https://www.thinkup.com/join/api/bundle/',
                           params={'code': code})
        response = req.json()
        is_valid = response.get('is_valid', None)
        if is_valid:
            return code
        if is_valid == False:
            raise forms.ValidationError('Your gift code is invalid. Check it for errors.')
        if response.get('error', None):
            raise forms.ValidationError('Your gift code is invalid, says the server: %s' % response['error'])
        # Server gave no verdict and no error: accept the code.
        return code
|
Over the last two decades, fully electric cars have had, let’s say, a slow start. In 1997, the Toyota Prius was released as the world’s first mass-produced hybrid. And while hybrid technology has taken off, drivers have been hesitant to accept fully battery-powered tech for a variety of reasons, including a lack of charging stations. But things are looking up for electric. Today you can get a full charge at one of the 16,000 stations across the US.
Plus, battery costs are rapidly decreasing and the public is warming up to the idea of electric cars. In fact, some experts predict electric vehicles (EVs) to account for 65 percent to 75 percent of sales in 2050—which is also good news for the environment.
Another hot topic in the auto world is autonomous rides. Though we’re still several years away (at least) from robots taking the wheel, some cities in America—Las Vegas, Phoenix and Pittsburgh, to name a few—have already seen self-driving vehicles in action. In the future, autonomous vehicles and battery-powered technology could work together to create cleaner, more efficient roads.
Electric Vehicles: Right Now. Maybe you’ve ridden in an EV. Maybe you own a Tesla Model X or Audi e-tron. For the most part, they look, sound and act like a traditional car, but there’s one major difference: their engines run on batteries and not on internal combustion powered by fossil fuels. The pros of going gasless include reduced maintenance costs, lower fuel expense and—most importantly—less pollution. However, electric vehicles are currently more expensive than traditional vehicles. Some experts believe battery costs will continue to decline over the next 10 years, which will further entice shoppers. In fact, from 2014 to 2016, battery prices fell by 50 percent, allowing EVs to gain some traction.
Driverless Vehicles: Right Now. No, they’re not straight out of a sci-fi movie. The term autonomous vehicle (AV) can refer to a car, a truck or even a drone. These vehicles operate on computer intelligence with varying levels of human assistance. Self-driving cars come decked out with intelligence you might not see: sensors, radars and cameras to virtually scan the road, with a ton of processing power under the hood to handle the data streams from all those input sensors. Currently, several laws and regulations stand in the way of a driverless reality, but automakers and tech giants continue to refine their robot rides because they know they’re the next big thing.
Should All AVs Be Battery-Powered? Consider this: At the root of driverless technology is convenience. Humans will no longer have to stress about driving. But the consequences could be harmful. For example, you arrive home from the grocery store and realize you forgot the milk. No problem — just send the car back to fetch it. That’s now two trips back and forth to the grocery store where there might have only been one. Can’t find a parking spot at a baseball game? Your self-driving vehicle can just drive around in circles for three hours. While it’s a much more convenient option, the environmental toll could be severe. Some experts predict that gas-powered, self-driving vehicles could wreak havoc on the environment, causing a 200 percent increase in emissions.
The solution? Make all AVs electric. A marriage of these two powerhouse technologies would completely change the auto industry, the environment and our lives. As Esurance found in a recent report, AVs could also be a cheaper option for consumers—potentially made cheaper still if those AVs are electric. There are still a few variables on the table, but it’s safe to say a clean, green electric AV would significantly reduce air pollution.
Fewer Traffic Jams, Less Pollution. Being gridlocked on the highway doesn’t just make you late for dinner, it also damages the environment. In 2012, a study by the Texas A&M Transportation Institute found that traffic congestion was responsible for 56 billion pounds of carbon dioxide pollution. But in the future, AVs will be able to talk to each other (vehicle-to-vehicle communication) or infrastructure (vehicle-to-infrastructure communication). This will lead to perfectly synchronized driving patterns. Just think—no braking for red lights, stop signs or traffic. And with orchestrated driving on battery power, emissions could be reduced drastically.
Ownership Could Look Different. Here’s a fact: The average car spends 95 percent of its life in park. In the future, sharing an electric vehicle might be standard. Imagine this – you hail a nearby ride to pick you up and take you to work. Rather than sitting (or driving) idle, the vehicle calls on its next passenger and continues to do so until it needs a full charge. This scenario could drive down transportation prices considerably while also reducing the need for consumers to own cars, eliminating environmental waste.
What’s Next for Electric Self-Driving Cars? For self-driving cars to accelerate EV adoption, it will be a collective effort. Policymakers need to establish regulation and demand cleaner vehicles. Automakers and tech giants must continue to raise the bar and push for battery power. And of course, it’ll take the confidence of consumers to trust the clean, self-driving machines and put them to use.
Haden Kirkpatrick is the head of marketing strategy and innovation at Esurance. Haden is an innovator and futurist who is constantly thinking about how IoT, self-driving cars and machine learning will impact the auto insurance industry. Learn more about Esurance by visiting Esurance.com.
Autonomy puts a significant power drain on the car.
And BEVs have none to spare in cold weather.
That is why Hyundai have gone for their Nexo FCEV as the lead vehicle in their autonomy program.
Not only can they cope better with the power drain in normal circumstances, but in the cold excess heat from the fuel cell keeps the battery at optimum operating temperature and the occupants warm. so that range is not significantly decreased.
So they are more able to cope with the power demands of autonomy in all conditions, and much better in cold weather.
"Parked 95% of the Time"
This is an under utilization of expensive resources.
In many places, up to 80% of the purchase and O&M cost of public (people) transportation systems are paid by various levels of government.
Free rides on new driverless e-trains, e-trams and buses would be possible without increasing government handouts, due to the major reduction in O&M.
More driverless e-trains, e-trams and e-buses could be in operation during peak periods, to meet demands, without extra O&M cost.
Free improved services would quickly attract many more users, reduce travel time and free city cores from traffic jams.
Your toothbrush spends a lot more of its time "parked", but having it ready for use in an instant is the entire purpose of owning one.
There's an insidious side of car-sharing. If you have to be part of a car-sharing network to have mobility, whoever controls the network can render you immobile any time they want. They also track all your movements.
It does not have to be car sharing, with good transit you may not need a car.
By removing drivers on e-buses, e-trams and e-trains you could reduce total operation cost by 50+% over a 10+ year period and transport people free 24/7, much safer and faster, because you would greatly reduce travel time, traffic jams, pollution and GHGs, and improve the health of people (passengers and bystanders).
This programme could be progressively introduced between 2025 and 2035.
Two- and three-car families would progressively move to a single extended-range electrified vehicle.
We produce 100 million vehicles worldwide each year, if many of those are shuttles rather than personal cars we go a long way towards reducing congestion, pollution and fuel usage.
Yes SJC, most major cities have created unacceptable traffic jams and delays and have created increasing costly travel time. Building more highways/roads, like L.A. has done, seems to promote the use of more private vehicles and create more delays, pollution and GHGs.
The use of (free rides) driverless e-buses/vehicles could effectively attract many more users, reduce the use of private vehicles and associated pollution, GHGs and costly delays.
Free driverless e-Buses (all sizes) would be better (for all users) than shared e-vehicles.
Many cities are currently using oversized articulated buses to reduce the cost of highly paid drivers per passenger. In most cities in our region, users barely pay enough to meet drivers full cost. All other cost are paid by Provincial/City governments.
Consequently, driverless e-buses could transport all passengers FREE without extra subsidies from governments.
FREE transportation and better services would attract many more passengers and greatly reduced the use of private cars, reduce traffic jams, reduce travel time, pollution, health hazards and GHGs.
The 4000+ AD e-buses required could take up to 8+ years to introduce. Since the region is short of various labours, current drivers (and many maintenance staff) would be progressively laid off and retrained, specially for/in education/medical fields.
|
# -*- coding: utf-8 -*-
#
# Python Lib Template documentation build configuration file, created by
# sphinx-quickstart on Mon Aug 6 19:31:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# NOTE(review): 'project' says "Django runtests" while the LaTeX/man/texinfo
# sections below still carry the "Python Lib Template" quickstart defaults —
# confirm which name is intended and make them consistent.
project = u'Django runtests'
copyright = u'2012, Raphaël Barrois'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Absolute path of the directory holding this conf.py; used below to locate
# the package's __init__.py one level up.
root_dir = os.path.abspath(os.path.dirname(__file__))
def get_version(package_name):
    """Read ``__version__`` from *package_name*'s __init__.py without
    importing the package; falls back to '0.1.0' if no marker is found."""
    import re
    pattern = re.compile(r"^__version__ = [\"']([\w_.-]+)[\"']$")
    parts = package_name.split('.') + ['__init__.py']
    init_path = os.path.join(root_dir, os.pardir, *parts)
    with open(init_path) as init_file:
        for line in init_file:
            # Strip the trailing newline so the anchored regex can match.
            match = pattern.match(line[:-1])
            if match:
                return match.group(1)
    return '0.1.0'
# The full version, including alpha/beta/rc tags, read from the package source.
release = get_version('django_runtests')
# The short X.Y version derived from it.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']  # skip Sphinx's own output directory
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'  # the highlighting style shipped with Sphinx
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'  # classic built-in Sphinx theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # project-local CSS/JS/images
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# NOTE(review): still the sphinx-quickstart placeholder; probably should
# match the 'Django runtests' project name.
htmlhelp_basename = 'PythonLibTemplatedoc'
# -- Options for LaTeX output --------------------------------------------------
# Overrides for the LaTeX builder; all defaults are kept (entries commented out).
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE(review): target name and title still use the "Python Lib Template"
# quickstart defaults.
latex_documents = [
    ('index', 'PythonLibTemplate.tex', u'Python Lib Template Documentation',
     u'Raphaël Barrois', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE(review): name and title still use the "Python Lib Template" defaults.
man_pages = [
    ('index', 'pythonlibtemplate', u'Python Lib Template Documentation',
     [u'Raphaël Barrois'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): entries still use the "Python Lib Template" quickstart defaults.
texinfo_documents = [
    ('index', 'PythonLibTemplate', u'Python Lib Template Documentation',
     u'Raphaël Barrois', 'PythonLibTemplate', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
An toil leibh ceòl ùr ann an Gàidhlig agus Gaeilge? Mas toil, ‘s dòcha gun còrd seo riubh!
A wee while ago, Daithí, Stiofán Ó Fearail (from the band Seo Linn) and Joy, Catherine and Edel were locked in Herbert Place Studios overnight to create 3 new songs in Gàidhlig/Gaeilge for a new series on Raidió na Gaeltachta (Oifigiúil) called ‘An Seisiún’. We had no idea how it was going to work but we managed to understand each other enough to pull 3 songs together and more importantly, had a real laugh doing so.
The video below is a teaser of our first song – if you’d like to hear more, you can listen to the whole programme through the link below.
Tha mi an dòchas gun còrd an ceòl ùr againn riubh!
|
# Copyright 2011 Max Z. Chao
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oauth2 as oauth
import re
from error import RestAPIError
class OAuthConn(object):
    """Holds OAuth consumer/token credentials and the oauth2 Client built
    from them; the client is rebuilt whenever the token changes."""

    def __init__(self, consumer_key, consumer_secret, token_key=None, token_secret=None):
        """Create a connection from consumer credentials and, optionally,
        an already-obtained token pair.

        @param consumer_key    application (consumer) key
        @param consumer_secret application (consumer) secret
        @param token_key       optional OAuth token key
        @param token_secret    optional OAuth token secret
        """
        self._consumer = oauth.Consumer(consumer_key, consumer_secret)
        # Always define the token attributes so the token_key/token_secret
        # properties below cannot raise AttributeError when no token was
        # supplied (previously they were only set inside the if-branch).
        self._token_key = token_key
        self._token_secret = token_secret
        if token_key and token_secret:
            token = oauth.Token(token_key, token_secret)
            self._client = oauth.Client(self._consumer, token)
        else:
            self._client = oauth.Client(self._consumer)

    def update(self, token_key, token_secret):
        """Replace the token pair, rebuild the signed client, and return
        self so calls can be chained."""
        self._token_key = token_key
        self._token_secret = token_secret
        token = oauth.Token(token_key, token_secret)
        self._client = oauth.Client(self._consumer, token)
        return self

    @property
    def token_key(self):
        # Current OAuth token key (None until a token is set).
        return self._token_key

    @property
    def token_secret(self):
        # Current OAuth token secret (None until a token is set).
        return self._token_secret

    def request(self, url, method):
        """Issue a signed HTTP request; returns (response, content) as
        produced by oauth2.Client.request."""
        return self._client.request(url, method)
class OAuthOOB(object):
    """Implements the OAuth 1.0a out-of-band (PIN based) authorization flow:
    fetch temporary credentials, send the user to the authorize URL, then
    trade the PIN for access credentials."""

    def __init__(self, request_token_url, authenticate_url, access_token_url):
        self._request_token_url = request_token_url
        self._authenticate_url = authenticate_url
        self._access_token_url = access_token_url

    def _parse_token(self, content):
        """Extract (oauth_token, oauth_token_secret) from a url-encoded
        response body.

        Unlike the previous single-regex version, this does not depend on
        the order in which the provider returns the two parameters, and it
        raises RestAPIError (instead of a bare IndexError) when either
        parameter is missing."""
        key_match = re.search(r'(?:^|[?&])oauth_token=([^&]+)', content)
        secret_match = re.search(r'(?:^|[?&])oauth_token_secret=([^&]+)', content)
        if key_match is None or secret_match is None:
            raise RestAPIError('Malformed token response: ' + content)
        return (key_match.group(1), secret_match.group(1))

    def get_temp_credentials(self, oauth_conn):
        """Fetch temporary (request) credentials and store the user-facing
        authorization URL; returns the updated oauth_conn."""
        resp, content = oauth_conn.request(self._request_token_url, method = 'GET')
        if resp.status != 200:
            raise RestAPIError('Failed to get Temp Credentials: ' + str(resp.status) + ' ' + resp.reason)
        self._temp_credentials_url = self._authenticate_url + '?' + content
        token_key, token_secret = self._parse_token(content)
        return oauth_conn.update(token_key, token_secret)

    @property
    def temp_credentials_url(self):
        # URL the user must visit to authorize; only valid after
        # get_temp_credentials() has been called.
        return self._temp_credentials_url

    def get_credentials(self, oauth_conn, pin_code):
        """Exchange the user-entered PIN for access credentials; returns
        the updated oauth_conn."""
        access_token_pin_code_url = self._access_token_url + '?oauth_verifier=' + pin_code
        resp, content = oauth_conn.request(access_token_pin_code_url, method = 'GET')
        if resp.status != 200:
            raise RestAPIError('Failed to get Credentials: ' + str(resp.status) + ' ' + resp.reason)
        token_key, token_secret = self._parse_token(content)
        return oauth_conn.update(token_key, token_secret)
|
www.bbcomel.com is an e-commerce website powered by woocommerce. They are selling households products imported from China. We have setup their store to sell in-stock items and also dropships from aliexpress.com. The client really loves the experience that they're having in managing sales through the website rather than on their facebook page.
|
import nltk as k, pickle
# Pre-trained maxent POS tagger for English (Penn Treebank tagset).
tagger=k.data.load('taggers/maxent_treebank_pos_tagger/english.pickle')
# took a long time, returned:
# tagger.evaluate(k.corpus.brown.tagged_sents())
# 0.5952331741865255
# why aren't the tags the same? (Brown's tagset differs from Treebank's)
# Brill recipe taken from nltk's own class documentation
from nltk.tbl.template import Template
from nltk.tag.brill import Pos, Word
from nltk.tag import RegexpTagger, BrillTaggerTrainer
from nltk.corpus import treebank
# Small train/baseline/gold split from the Penn Treebank sample corpus.
training_data = treebank.tagged_sents()[:100]
baseline_data = treebank.tagged_sents()[100:200]
gold_data = treebank.tagged_sents()[200:300]
#testing_data = [untag(s) for s in gold_data]
# strip the gold tags by hand (equivalent to untag)
testing_data = [[ss[0] for ss in s] for s in gold_data]
# Suffix/pattern-based fallback tagger used as the Brill baseline.
backoff = RegexpTagger([
    (r'^-?[0-9]+(.[0-9]+)?$', 'CD'),  # cardinal numbers
    (r'(The|the|A|a|An|an)$', 'AT'),  # articles
    (r'.*able$', 'JJ'),               # adjectives
    (r'.*ness$', 'NN'),               # nouns formed from adjectives
    (r'.*ly$', 'RB'),                 # adverbs
    (r'.*s$', 'NNS'),                 # plural nouns
    (r'.*ing$', 'VBG'),               # gerunds
    (r'.*ed$', 'VBD'),                # past tense verbs
    (r'.*', 'NN')                     # nouns (default)
])
baseline = backoff
baseline.evaluate(gold_data)
Template._cleartemplates() #clear any templates created in earlier tests
# Two transformation templates: previous tag, and previous tag + current word.
templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))]
tt = BrillTaggerTrainer(baseline, templates, trace=3)
tagger1 = tt.train(training_data, max_rules=10)
tagger1.rules()[1:3]
train_stats = tagger1.train_stats()
tagger1.print_template_statistics(printunused=False)
tagger1.evaluate(gold_data)
tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data)
# Retrain requiring at least 99% accuracy for every accepted rule.
tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99)
print(tagger2.evaluate(gold_data))  # doctest: +ELLIPSIS
tagger2.rules()[2:4]
#nn_cd_tagger = k.tag.RegexpTagger([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')])
nn_cd_tagger = baseline
#tagged_data = k.corpus.treebank.tagged_sents()
# Use the universal tagset so Treebank and Brown sentences can be combined.
tagged_data = k.corpus.treebank.tagged_sents(tagset="universal")
tagged_data2 = k.corpus.brown.tagged_sents(tagset="universal")
num_sents=len(tagged_data)
num_sents2=len(tagged_data2)
# 80/20 train/test split of each corpus, then concatenated.
train=0.8
cutoff = int(num_sents *train)
cutoff2 = int(num_sents2*train)
training_data = tagged_data[:cutoff]+tagged_data2[:cutoff2]
gold_data = tagged_data[cutoff:]+tagged_data2[cutoff2:]
testing_data = [[t[0] for t in sent] for sent in gold_data]
print("Done loading.")
# Backoff chain: regexp baseline -> unigram -> bigram.
unigram_tagger = k.tag.UnigramTagger(training_data,backoff=nn_cd_tagger)
bigram_tagger = k.tag.BigramTagger(training_data,
    backoff=unigram_tagger)
##templates = [
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,1)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (2,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,3)),
##
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,1)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (2,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,3)),
##
## k.tag.brill.ProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (-1, -1), (1,1)),
## k.tag.brill.ProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (-1, -1), (1,1)),
## ]
trace=5
# NOTE(review): reuses the two `templates` defined above for tagger1; the
# commented-out list was the old nltk-2 brill API. Trace level here is 0.
trainer = k.tag.BrillTaggerTrainer(bigram_tagger, templates, 0)
#trainer = k.tag.BrillTaggerTrainer(bigram_tagger, templates, 2)
#trainer = k.tag.brill.BrillTaggerTrainer(bigram_tagger, trace)
##trainer = brill.BrillTaggerTrainer(u, templates, trace)
max_rules=40000
min_score=2
#brill_tagger = trainer.train(training_data, max_rules, min_score)
# min_score of 1 keeps even rules that fix a single error.
brill_tagger = trainer.train(training_data, max_rules, 1)
# Persist the trained tagger with the highest pickle protocol (-1).
f=open("./pickledir/brill_tagger5", 'wb')
pickle.dump(brill_tagger,f,-1)
f.close()
# accuracy obtained: 0.9180
|
Today over 50 million people in the United States alone suffer from some kind of hearing loss. With our technological life style, that number continues to grow. Once hearing loss occurs, it cannot be restored. We can imitate it with hearing devices, but it will never be as clear or concise as it once was. After hearing is lost it becomes one of the most expensive parts of life to maintain. Some insurance companies will pay for testing or will give discounts on equipment, but many will not pay a dime. Our passion is to create a world where everyone who chooses, can hear us. And our passion will compel us to raise awareness of our cause, educate those willing to understand, and support those who need our support. Hearing health needs allies and advocates, educators and healers. Sertoma communities share the common belief that attention paid to hearing health adds quality to lives and communities.
HOW DO WE SUPPORT HEARING HEALTH?
We support hearing health through a variety of programs and mission activities, a portion of which are listed below. For more details or a full list of how Sertoma changes the way the world is heard, visit What We Do.
|
# Generated by Django 2.0.2 on 2018-02-23 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Project and School tables
    (auto-generated by Django 2.0.2 on 2018-02-23, annotated by hand)."""

    # First migration of the app, so there is nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('project_id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.TextField()),
                ('image', models.ImageField(upload_to='projects/')),
                ('description', models.TextField()),
                ('url', models.URLField(blank=True, null=True)),
                ('order', models.IntegerField(default=0)),
            ],
            options={
                # Highest `order` value listed first.
                'ordering': ['-order'],
            },
        ),
        migrations.CreateModel(
            name='School',
            fields=[
                ('school_id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.TextField()),
                ('started', models.DateField()),
                ('finished', models.DateField()),
                ('description', models.TextField()),
                ('major', models.TextField()),
            ],
            options={
                # Most recently started school listed first.
                'ordering': ['-started'],
            },
        ),
    ]
|
Know Newkirk Class of 2007 graduates that are NOT on this List? Help us Update the 2007 Class List by adding missing names.
More 2007 alumni from Newkirk HS have posted profiles on Classmates.com®. Click here to register for free at Classmates.com® and view other 2007 alumni.
Alumni from the Newkirk High School class of 2007 that have been added to this alumni directory are shown on this page. All of the people on this page graduated in '07 from Newkirk. You can register for free to add your name to the NHS alumni directory.
|
# This file is part of OpenHatch.
# Copyright (C) 2010 Jack Grigg
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
from django.core.management.base import BaseCommand
from mysite.search.models import Bug
class Command(BaseCommand):
    """Maintenance helpers for the Bug table: list or delete Bug rows by
    age (last_polled) or by their looks_closed flag (Python 2 codebase)."""
    help = "A bunch of tools for checking and cleaning the Bug database."
    def list_old_bugs(self, days, hours=0):
        # Print every Bug last polled more than `days` days + `hours` hours
        # ago, numbered, followed by a total count.
        count = 0
        x_days_ago = (datetime.datetime.now() -
            datetime.timedelta(days=days, hours=hours))
        for bug in Bug.all_bugs.filter(last_polled__lt=x_days_ago):
            count += 1
            print "%d - %s" % (count, str(bug))
        print "There are a total of %d Bug objects that are %d days %d hours old." % (count, days, hours)
    def list_closed_bugs(self):
        # Print every Bug whose looks_closed flag is set, plus a total count.
        count = 0
        for bug in Bug.all_bugs.filter(looks_closed=True):
            count += 1
            print "%d - %s" % (count, str(bug))
        print "There are a total of %d closed Bug objects." % count
    def delete_old_bugs(self, days, hours=0):
        # Delete every Bug last polled more than the given age ago.
        x_days_ago = (datetime.datetime.now() -
            datetime.timedelta(days=days, hours=hours))
        Bug.all_bugs.filter(last_polled__lt=x_days_ago).delete()
    def delete_closed_bugs(self):
        # Delete every Bug that looks closed.
        Bug.all_bugs.filter(looks_closed=True).delete()
    def delete_all_bugs(self):
        # Delete every Bug row, unconditionally.
        Bug.all_bugs.all().delete()
    def show_usage(self):
        # Usage text is printed (not raised) so it appears as normal
        # ./manage.py output.
        print """
usage: ./manage.py customs_debugger COMMAND
The following commands are available:
list_old_bugs List all Bug objects older than one day plus one hour.
list_very_old_bugs List all Bug objects older than two days.
list_closed_bugs List all Bug objects that look closed.
delete_old_bugs Delete all Bug objects older than one day plus one hour.
delete_very_old_bugs Delete all Bug objects older than two days.
delete_closed_bugs Delete all Bug objects that look closed.
delete_all_bugs Delete ALL Bug objects. Period. Useful if you want to
test a bug import from scratch. Not so useful on a
production server.
NOTE: These commands are executed immediately, so make sure you are
executing what you want, especially with the deleting commands."""
    def handle(self, *args, **options):
        # Dispatch on the single positional COMMAND argument; more than one
        # argument, or an unrecognized one, falls through to the usage text.
        if len(args) > 1:
            self.show_usage()
        elif 'list_old_bugs' in args:
            self.list_old_bugs(days=1, hours=1)
        elif 'list_very_old_bugs' in args:
            self.list_old_bugs(days=2)
        elif 'list_closed_bugs' in args:
            self.list_closed_bugs()
        elif 'delete_old_bugs' in args:
            self.delete_old_bugs(days=1, hours=1)
        elif 'delete_very_old_bugs' in args:
            self.delete_old_bugs(days=2)
        elif 'delete_closed_bugs' in args:
            self.delete_closed_bugs()
        elif 'delete_all_bugs' in args:
            self.delete_all_bugs()
        else:
            self.show_usage()
|
Jeremiah 2.13 My people have committed two evils: they have forsaken Me, the fountain of living waters, and hewed out cisterns for themselves, broken cisterns that can hold no water.
When God's people turn away from Him, they try to do things for themselves. Do you have water, or just a hole in the ground? Time to reconnect with the Tap!
|
'''
A Recurrent Neural Network (LSTM) implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits (http://yann.lecun.com/exdb/mnist/)
Long Short Term Memory paper: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import h5py
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Set to 1 to run against the small debugging dataset with few iterations.
SMALL_FLAG = 0
print('==> Experiment 4 RNN')
filepath = '/pylon2/ci560sp/haunter/exp3_taylorswift_d15_1s_C1C8.mat'
if SMALL_FLAG:
    filepath = '/pylon2/ci560sp/haunter/exp3_small.mat'
print('==> Loading data from {}...'.format(filepath))
# benchmark
t_start = time.time()
# ==============================================
# reading data
# ==============================================
# The .mat file (HDF5 layout) holds train/validation features and labels.
f = h5py.File(filepath)
X_train = np.array(f.get('trainingFeatures'))
y_train = np.array(f.get('trainingLabels'))
X_val = np.array(f.get('validationFeatures'))
y_val = np.array(f.get('validationLabels'))
t_end = time.time()
print('--Time elapsed for loading data: {t:.2f} \
seconds'.format(t = t_end - t_start))
del f
print('-- Number of training samples: {}'.format(X_train.shape[0]))
print('-- Number of validation samples: {}'.format(X_val.shape[0]))
# ==============================================
# RNN configs
# ==============================================
# Network Parameters
num_training_vec, total_features = X_train.shape
# Each sample is assumed to be a (num_frames x num_freq) spectrogram
# flattened into one row -- TODO confirm against the .mat producer.
num_freq = 169
num_frames = int(total_features / num_freq)
max_iter = 300
print_freq = 10
if SMALL_FLAG:
    max_iter = 10
    print_freq = 1
batch_size = 1000
learning_rate = 0.001
n_input = num_freq # number of sequences (rows)
n_steps = num_frames # size of each sequence (number of columns), timesteps
n_hidden = 512 # hidden layer num of features
# Labels are assumed to be 0-based integer class ids, so the class count
# is one more than the largest label seen in either split.
n_classes = int(max(y_train.max(), y_val.max()) + 1)
# ==============================================
# RNN architecture
# ==============================================
# Transform labels into one-hot encoding form
y_train_OHEnc = tf.one_hot(y_train.copy(), n_classes)
y_val_OHEnc = tf.one_hot(y_val.copy(), n_classes)
# tf Graph input
x = tf.placeholder("float", [None, n_steps, n_input])
y = tf.placeholder("float", [None, n_classes])
# Define weights
weights = {
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([n_classes]))
}
def RNN(x, weights, biases):
    """Unrolled single-layer LSTM: runs the n_steps time slices of `x`
    through a BasicLSTMCell and linearly projects the final timestep's
    output onto the class scores."""
    # static_rnn expects a Python list of n_steps tensors of shape
    # (batch_size, n_input), so split the (batch, n_steps, n_input)
    # input along the time axis.
    time_slices = tf.unstack(x, n_steps, 1)
    cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0)
    outputs, _ = rnn.static_rnn(cell, time_slices, dtype=tf.float32)
    # Logits come from the last timestep's output only.
    return tf.matmul(outputs[-1], weights['out']) + biases['out']
pred = RNN(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# evaluation metrics, appended to every print_freq epochs
train_acc_list = []
val_acc_list = []
train_err_list = []
val_err_list = []
# ==============================================
# RNN training
# ==============================================
# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Materialize the one-hot label tensors built above; [:, 0, :] drops
    # the extra singleton axis (labels are presumably column vectors --
    # TODO confirm shape of y_train/y_val in the .mat file).
    y_train = sess.run(y_train_OHEnc)[:, 0, :]
    y_val = sess.run(y_val_OHEnc)[:, 0, :]
    print('==> Training the full network...')
    t_start = time.time()
    # Keep training until reach max iterations
    for epoch in range(max_iter):
        # mini-batch pass over the training set
        for i in range(0, num_training_vec, batch_size):
            end_ind = min(i + batch_size, num_training_vec)
            batch_x = X_train[i : end_ind]
            batch_y = y_train[i : end_ind]
            # Reshape each flattened sample to (n_steps, n_input)
            batch_x = batch_x.reshape((-1, n_steps, n_input))
            # Run optimization op (backprop)
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
        # Record accuracy/error on both splits every print_freq epochs.
        if (epoch + 1) % print_freq == 0:
            train_acc = accuracy.eval(feed_dict={x: X_train.reshape((-1, n_steps, n_input)),\
                y: y_train})
            train_acc_list.append(train_acc)
            val_acc = accuracy.eval(feed_dict={x: X_val.reshape((-1, n_steps, n_input)),\
                y: y_val})
            val_acc_list.append(val_acc)
            train_err = cost.eval(feed_dict={x: X_train.reshape((-1, n_steps, n_input)),\
                y: y_train})
            train_err_list.append(train_err)
            val_err = cost.eval(feed_dict={x: X_val.reshape((-1, n_steps, n_input)),\
                y: y_val})
            val_err_list.append(val_err)
            print("-- epoch: %d, training error %g"%(epoch + 1, train_err))
    t_end = time.time()
    print('--Time elapsed for training: {t:.2f} \
seconds'.format(t = t_end - t_start))
# ==============================================
# RNN Evaluation
# ==============================================
# Reports
print('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))
print('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))
print('-- Training error: {:.4E}'.format(train_err_list[-1]))
print('-- Validation error: {:.4E}'.format(val_err_list[-1]))
print('==> Generating error plot...')
# x axis: the epoch numbers at which metrics were recorded
x_list = range(0, print_freq * len(train_acc_list), print_freq)
train_err_plot = plt.plot(x_list, train_err_list, 'b-', label='training')
val_err_plot = plt.plot(x_list, val_err_list, '-', color='orange', label='validation')
plt.xlabel('Number of epochs')
plt.ylabel('Cross-Entropy Error')
plt.title('Error vs Number of Epochs with {} Hidden Units'.format(n_hidden))
plt.legend(loc='best')
plt.savefig('rnn_{}.png'.format(n_hidden), format='png')
plt.close()
print('==> Finished!')
|
On the sides of this beautifully designed flask, we see the celtic knots weaving endlessly into one another. Their symbolism dates back thousands of years, representing the circle of life which has no ending and no beginning. While looking at it we might think of the timelessness of our spirits and while taking small sips we celebrate life in its full beauty. And as life is endless, so is love which binds two souls into eternity. A beautiful wedding gift and a wonderful container for special spirits.
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.hvac_templates import HvactemplateZoneUnitary
log = logging.getLogger(__name__)
class TestHvactemplateZoneUnitary(unittest.TestCase):
    """Round-trip test for HvactemplateZoneUnitary: populate every field,
    save the object to an IDF file, reload it, and verify each value."""

    # (attribute, value) pairs in field order. Float values are compared
    # with assertAlmostEqual after the round trip, everything else with
    # assertEqual.
    _FIELD_VALUES = [
        ("zone_name", "object-list|Zone Name"),
        ("template_unitary_system_name", "object-list|Template Unitary System Name"),
        ("template_thermostat_name", "object-list|Template Thermostat Name"),
        ("supply_air_maximum_flow_rate", 4.4),
        ("zone_heating_sizing_factor", 0.0),
        ("zone_cooling_sizing_factor", 0.0),
        ("outdoor_air_method", "Flow/Person"),
        ("outdoor_air_flow_rate_per_person", 8.8),
        ("outdoor_air_flow_rate_per_zone_floor_area", 9.9),
        ("outdoor_air_flow_rate_per_zone", 10.1),
        ("supply_plenum_name", "object-list|Supply Plenum Name"),
        ("return_plenum_name", "object-list|Return Plenum Name"),
        ("baseboard_heating_type", "HotWater"),
        ("baseboard_heating_availability_schedule_name", "object-list|Baseboard Heating Availability Schedule Name"),
        ("baseboard_heating_capacity", 15.15),
        ("zone_cooling_design_supply_air_temperature_input_method", "SupplyAirTemperature"),
        ("zone_cooling_design_supply_air_temperature", 17.17),
        ("zone_cooling_design_supply_air_temperature_difference", 18.18),
        ("zone_heating_design_supply_air_temperature_input_method", "SupplyAirTemperature"),
        ("zone_heating_design_supply_air_temperature", 20.2),
        ("zone_heating_design_supply_air_temperature_difference", 21.21),
        ("design_specification_outdoor_air_object_name", "object-list|Design Specification Outdoor Air Object Name"),
        ("design_specification_zone_air_distribution_object_name", "object-list|Design Specification Zone Air Distribution Object Name"),
    ]

    def setUp(self):
        # Temporary file used for the save/reload round trip.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_hvactemplatezoneunitary(self):
        pyidf.validation_level = ValidationLevel.error
        obj = HvactemplateZoneUnitary()
        # Assign all fields in declaration order.
        for attr, value in self._FIELD_VALUES:
            setattr(obj, attr, value)
        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())
        # Reload and compare every field against what was written.
        reloaded = IDF(self.path).hvactemplatezoneunitarys[0]
        for attr, value in self._FIELD_VALUES:
            if isinstance(value, float):
                self.assertAlmostEqual(getattr(reloaded, attr), value)
            else:
                self.assertEqual(getattr(reloaded, attr), value)
|
BYOB (Bring Your Own Boat!), everything else is here!! Join and be a part of the Englewood Gardens Club (Beach Club), too!
Waterfront home located on Gottfried Creek, 2 boating miles to Stump Pass, and 2 bicycle miles to Englewood Beach …..Everything remodeled and updated this year!! This 3BR/2BA home has tile floors throughout (including lanai), all new soft close cabinets throughout, all new quartz counter-tops throughout, all new stainless steel kitchen appliances, all new bath fixtures, tile showers, and curb-less shower in master bath. It has been freshly painted inside and out with new ceiling fans, 2 ½ inch faux wood blinds and efficient LED lighting throughout. There is a separate laundry room with washer and dryer, two water heaters, master bath and laundry room have tubular skylights, and there is a walk-in closet in the master with high-end organizer. The sliding doors to the lanai pocket to disappear and there is a pool with a newly screened cage. New Carrier heat/air unit in 2017 and new roof in 2012.
The garage has been freshly painted with a new automatic garage door, there is an extra concrete parking area for motor home or boat, a dock with new decking and access walkway, davits, a paver patio to enjoy the view of the creek and fresh landscaping with an irrigation system. What more could you want?? Come and enjoy fun in the sun, on the Florida Gulf coast!!
|
# coding: utf8
from rang import *
from couleur import *
import numpy as np
from hand import *
from joueur import *
class donne:
def __init__(self,partie):
"""fonction qui commence une nouvelle donne en distribuant les cartes et en choississant l'atout"""
carte_jouee = []
self.partie = partie
self.distribue()
self.atout = partie.joueur_atout.decide_atout()
self.points = [0,0]
self.joue_donne()
self.fin_donne()
def distribue(self):
"""fonction qui distribue les cartes de manières aléatoires aux quatre joueurs"""
permutations = (np.random.permutation(36) +1).reshape(4,9)
hands = [hand(perm = i) for i in permutations]
for i in range(4):
self.partie.get_joueur(i+1).hand = hands[i]
self.atout = self.partie.joueur_atout.decide_atout()
def compatibilise(self,equipe_gagnant,cartes):
""" fonction qui détermine le nombre de points gagnés dans une plie"""
score = sum([carte.valeur_point(self) for carte in cartes])
#equipe_gagnant.points+=score
self.points[equipe_gagnant.ide -1] += score
def joue_donne(self):
"""fonction qui définit comment on joue chaque donne"""
joueur_commence = self.partie.joueur_atout
for i in range (1,10):
(joueur_gagnant, cartes_jouees) = self.joue_plie(joueur_commence)
self.compatibilise(self.partie.get_equipe(joueur = joueur_gagnant), cartes_jouees)
joueur_commence = joueur_gagnant
self.points[self.partie.get_equipe(joueur = joueur_gagnant).ide -1] += 5
def joue_plie(self,joueur_commence):
"""fonction qui définit comment on joue chaque plie"""
cartes_jouees = []
for i in range(4):
cartes_jouees.append(self.partie.get_joueur(i + joueur_commence.ide).joue(self,cartes_jouees))
joueur_gagnant = self.determine_gagnant(cartes_jouees,joueur_commence)
print cartes_jouees
return (joueur_gagnant, cartes_jouees)
def determine_gagnant(self,cartes_jouees,joueur_commence):
"""fonction qui determine le combientième joueur a gagné la plie"""
couleur_plie = cartes_jouees[0].couleur
values = [carte.valeur_force(self.atout,couleur_plie) for carte in cartes_jouees]
print values
joueur_gagnant = values.index(max(values))
return self.partie.get_joueur(joueur_commence.ide + joueur_gagnant)
def fin_donne(self):
print "equipe 1: " + str(self.points[0])
print "equipe 2: " + str(self.points[1])
self.partie.get_equipe(1).points += self.points[0]
self.partie.get_equipe(1).points += self.points[0]
|
The Sugar Sweet Blues Band has appeared at JJ's Blues, Louisiana Roux, Gordon Biersch Breweries, Little Lou's Bar BQ, and Cafe Stritch in San Jose, Ca.
CD on the way! Please contact me for information and bookings.
|
#!/usr/bin/python
# -*- coding:UTF-8 -*-
################################################################################
#
# Copyright 2010-2014 Carlos Ramisch, Vitor De Araujo, Silvio Ricardo Cordeiro,
# Sandra Castellanos
#
# word.py is part of mwetoolkit
#
# mwetoolkit is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# mwetoolkit is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with mwetoolkit. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
"""
This module provides the `Word` class. This class represents an orthographic
word (as in mwetoolkit-corpus.dtd, mwetoolkit-patterns.dtd and
mwetoolkit-candidates.dtd) defined by a surface form, a lemma and a POS tag.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from xml.sax.saxutils import quoteattr
from .. import util
from .feature import FeatureSet
from .__common import WILDCARD, SEPARATOR
# List of valid word attributes. Must appear in the same order as the
# arguments for the Word class constructor: surface form, lemma, POS tag
# and syntactic dependency information.
WORD_ATTRIBUTES = ["surface", "lemma", "pos", "syn"]
################################################################################
class Word(object):
    """
    An orthographic word (in languages for which words are separated from
    each other by a space) is the simplest lexical unit recognisable by a
    native speaker, and it is characterized by its surface form, its lemma
    and its Part Of Speech tag.
    """

    def __init__(self, surface=WILDCARD, lemma=WILDCARD,
            pos=WILDCARD, syn=WILDCARD, freqs=None):
        """
        Instantiates a new `Word`. A Word might be: a corpus token, usually
        with at least a surface form (mwetoolkit-corpus.dtd); a part of a
        pattern, usually containing some `WILDCARD`s; a part of a reference
        or gold standard entry, with at least a lemma
        (mwetoolkit-patterns.dtd); or a part of an n-gram in a candidates
        list, with most parts defined (mwetoolkit-candidates.dtd).

        @param surface Surface form, i.e. the form in which the word occurs
        in the corpus, possibly carrying morphological inflection (e.g.
        "went", "going", "gone" are surface forms of the lemma "(to) go").
        @param lemma Lemma, i.e. the normalized non-inflected form of the
        word as found in a dictionary (infinitive for verbs, singular for
        nouns). A lemma is a well-formed word, unlike a root or a stem.
        @param pos Part Of Speech tag, a morphosyntactic class like "N",
        "A" or "V". The tag set is your choice; tag your corpus with a POS
        tagger before using mwetoolkit.
        @param syn Syntax information encoded as a string. With a dependency
        parser this is typically "rel:>index", where "rel" is the relation
        type and "index" refers to the word this one depends on (see the
        corpus DTD for an example).
        @param freqs A `FeatureSet` of `corpus_name` -> `Frequency` counts of
        this SINGLE word in each corpus. Joint `Ngram` frequencies are
        attached to the `Ngram` object containing this `Word`, if any.
        """
        self.surface = surface
        self.lemma = lemma
        self.pos = pos
        self.syn = syn
        assert freqs is None or isinstance(freqs, FeatureSet), freqs
        # Default: an empty frequency set whose values add up when merged.
        self.freqs = freqs or FeatureSet("freq", lambda x, y: x + y)

    def copy(self):
        r"""Return a copy of this Word (frequencies are copied too)."""
        return Word(self.surface, self.lemma, self.pos, self.syn,
                    self.freqs.copy())

    def lemma_or_surface(self):
        r"""Return lemma if it is defined; otherwise, return surface.
        Return None if both are `WILDCARD`."""
        if self.lemma != WILDCARD:
            return self.lemma
        if self.surface != WILDCARD:
            return self.surface
        return None

    def add_frequency(self, freq):
        """
        Add a `Frequency` to the frequencies of the word.
        @param freq `Frequency` corresponding to a count of this word in
        a corpus. No check is made for repeated frequency names; merging
        is delegated to the underlying `FeatureSet`.
        """
        self.freqs.add(freq.name, freq.value)

    def to_string(self):
        """
        Convert this word to an internal string representation where each
        part (surface, lemma, pos) is separated by `SEPARATOR`. Only used
        internally by the scripts; deconversion is done by `from_string`.
        @return A string with the internal representation of the word.
        """
        return SEPARATOR.join((self.surface, self.lemma, self.pos))

    def from_string(self, s):
        """
        Fill in the current word from an internal string representation
        where each part is separated by `SEPARATOR`, as generated by
        `to_string`.
        @param s A string produced by `to_string`.
        """
        self.surface, self.lemma, self.pos = s.split(SEPARATOR)

    def to_html(self, wid):
        """
        Render this word as an HTML <a> element with nested <span>s for
        the word id and for each attribute in `WORD_ATTRIBUTES`.
        @param wid Integer word identifier, rendered in the "wid" span.
        @return An HTML string representing this word.
        """
        # TODO: properly escape this stuff
        wtempl = "<a href=\"#\" class=\"word\">%(surface)s" \
                 "<span class=\"wid\">%(wid)d</span>" \
                 "<span class=\"lps\">%(lemma)s%(pos)s%(syn)s</span></a>"
        # Build the substitution dict explicitly: the previous
        # `map(...) + [...]` form breaks under Python 3, where map()
        # returns an iterator and cannot be added to a list.
        attrs = dict((attr,
                      "<span class=\"%s\">%s</span>" % (attr, getattr(self, attr)))
                     for attr in WORD_ATTRIBUTES)
        attrs["wid"] = wid
        return wtempl % attrs

    def to_xml(self, **kwargs):
        """
        Provides an XML string representation of the current object.
        The printed attributes of the word depend on boolean keyword
        parameters forwarded to `_to_xml_into`:
        @param print_surface Include `surface` in the <w> element
        (default True).
        @param print_lemma Include `lemma` (default True).
        @param print_pos Include `pos` (default True).
        @param print_freqs Include the word frequencies as children of the
        <w> element (default True).
        @return A string containing the XML element <w>, according to
        mwetoolkit-candidates.dtd, mwetoolkit-patterns.dtd and
        mwetoolkit-corpus.dtd, depending on the flags.
        """
        ret = []
        self._to_xml_into(ret, **kwargs)
        return "".join(ret)

    def _to_xml_into(self, output, print_surface=True, print_lemma=True,
            print_pos=True, print_syn=True, print_freqs=True):
        """Append the XML representation of this word, piece by piece,
        to the list `output` (avoids quadratic string concatenation)."""
        output.append("<w")
        if self.surface != WILDCARD and print_surface:
            output.append(" surface=")
            output.append(quoteattr(self.surface))
        if self.lemma != WILDCARD and print_lemma:
            output.append(" lemma=")
            output.append(quoteattr(self.lemma))
        if self.pos != WILDCARD and print_pos:
            output.append(" pos=")
            output.append(quoteattr(self.pos))
        if self.syn != WILDCARD and print_syn:
            output.append(" syn=")
            output.append(quoteattr(self.syn))
        if not self.freqs or not print_freqs:
            output.append(" />")
        else:
            output.append(" >")
            self.freqs._to_xml_into(output)
            output.append("</w>")

    def __eq__(self, a_word):
        """
        Equivalent to match( w ).
        """
        return self.match(a_word)

    def __len__(self):
        """
        Returns the number of characters in a word. Chooses upon available
        information, in priority order surface > lemma > pos.
        @return The number of characters in this word; zero if all fields
        are wildcards.
        """
        if self.surface != WILDCARD:
            return len(self.surface)
        elif self.lemma != WILDCARD:
            return len(self.lemma)
        elif self.pos != WILDCARD:
            return len(self.pos)
        else:
            return 0

    def compare(self, s1, s2, ignore_case):
        """
        Compare two strings for equality, case sensitively or not.
        @param s1 A string to compare.
        @param s2 Another string to compare.
        @param ignore_case True for case-insensitive comparison, False for
        case-sensitive.
        @return True if the strings are identical, False otherwise.
        """
        if ignore_case:
            return s1.lower() == s2.lower()
        return s1 == s2

    def match(self, w, ignore_case=False, lemma_or_surface=False):
        """
        Return True if the defined parts of the current word match the
        corresponding parts of `w`. A part of the current word matches when
        it equals the corresponding part of `w` or when it is undefined
        (i.e. equals `WILDCARD`). If ANY defined part does not match, the
        result is False.
        @param w A `Word` to compare against. In general the current word
        contains the `WILDCARD`s while `w` has all parts defined.
        @param ignore_case True for case-insensitive part comparison.
        @param lemma_or_surface If True, accept any cross-match between
        this word's lemma/surface and `w`'s lemma/surface (POS is still
        checked first).
        @return True if ALL the defined parts match, False otherwise.
        """
        if self.pos != WILDCARD and not self.compare(self.pos, w.pos, ignore_case):
            return False
        if lemma_or_surface:
            return (self.compare(self.lemma, w.lemma, ignore_case)
                    or self.compare(self.lemma, w.surface, ignore_case)
                    or self.compare(self.surface, w.lemma, ignore_case)
                    or self.compare(self.surface, w.surface, ignore_case))
        return ((self.surface == WILDCARD
                 or self.compare(self.surface, w.surface, ignore_case))
                and (self.lemma == WILDCARD
                     or self.compare(self.lemma, w.lemma, ignore_case)))

    def get_case_class(self, s_or_l="surface"):
        """
        For a given word form, assigns a case class that can be:
        * lowercase - All characters are lowercase
        * UPPERCASE - All characters are uppercase
        * Firstupper - All characters are lowercase except for the first
        * MiXeD - Mixed lowercase and uppercase characters
        * ? - Contains non-alphabetic characters (or form is a wildcard)
        @param s_or_l "surface" (default) or "lemma": which form to
        classify.
        @return A string describing the case class as listed above.
        """
        form = getattr(self, s_or_l)
        token_list = list(form) if form != WILDCARD else []
        case_class = "?"
        for letter_i, letter in enumerate(token_list):
            if letter.isupper():
                if letter_i > 0:
                    if case_class == "lowercase" or case_class == "Firstupper":
                        case_class = "MiXeD"
                    elif case_class == "?":
                        case_class = "UPPERCASE"
                else:
                    case_class = "UPPERCASE"
            elif letter.islower():
                if letter_i > 0:
                    if case_class == "UPPERCASE":
                        # "Ab" so far is Firstupper; "ABc" is MiXeD.
                        if letter_i == 1:
                            case_class = "Firstupper"
                        else:
                            case_class = "MiXeD"
                    elif case_class == "?":
                        case_class = "lowercase"
                else:
                    case_class = "lowercase"
        return case_class

    def get_freq_value(self, freq_name):
        """
        Return the value of the `Frequency` named `freq_name` in the
        frequencies list. If two frequencies share the name, only the first
        value found is returned.
        @param freq_name Name identifying the `Frequency` to look up.
        @return Value of the searched frequency, or 0 if there is no
        frequency with this name.
        """
        for freq in self.freqs:
            if freq.name == freq_name:
                return freq.value
        return 0

    def syn_iter(self):
        r"""Yield pairs (synrel, index) based on `self.syn`.
        Malformed entries are skipped with a warning; indexes are converted
        from the 1-based external form to 0-based."""
        if self.syn != WILDCARD and self.syn != "":
            for syn_pair in self.syn.split(";"):
                try:
                    a, b = syn_pair.split(":")
                except ValueError:
                    util.warn("Bad colon-separated syn pair: {pair!r}", pair=syn_pair)
                else:
                    try:
                        b = int(b) - 1
                    except ValueError:
                        util.warn("Bad syn index reference: {index!r}", index=b)
                    else:
                        yield (a, b)

    @staticmethod
    def syn_encode(syn_pairs):
        r"""Return a string representation of the list of (synrel, index)
        pairs `syn_pairs` (0-based indexes are stored 1-based).
        The result can be assigned to a Word's `syn` attribute.
        """
        return ";".join("{}:{}".format(rel, index + 1)
                        for (rel, index) in syn_pairs)
|
How do I change my Flybe flight?
Most airlines will allow you to amend your flight ticket. Amendment fees vary and are charged per person, per flight, for date and/or time changes (plus any difference in fare).
To make an amendment, go to the Flybe website, select 'Manage Booking' and log in using your reference number and surname. Once logged in you can select the 'modify flight booking' option.
For any further assistance, please contact us and one of our team will be happy to help you.
Can I cancel my booking and get my money back?
How do I change a name on my Flybe booking?
|
from django.core.urlresolvers import reverse
from taiga.base.utils import json
from taiga.projects import choices as project_choices
from taiga.projects import serializers
from taiga.users.serializers import RoleSerializer
from taiga.permissions.permissions import MEMBERS_PERMISSIONS
from tests import factories as f
from tests.utils import helper_test_http_method
import pytest
# Run every test in this module with access to the Django test database.
pytestmark = pytest.mark.django_db
@pytest.fixture
def data():
    """Build the users, projects, memberships and per-project model
    instances (points, statuses, types, priorities, severities) shared by
    the permission tests in this module."""
    m = type("Models", (object,), {})

    # Users covering every permission level exercised by the tests.
    m.registered_user = f.UserFactory.create()
    m.project_member_with_perms = f.UserFactory.create()
    m.project_member_without_perms = f.UserFactory.create()
    m.project_owner = f.UserFactory.create()
    m.other_user = f.UserFactory.create()
    m.superuser = f.UserFactory.create(is_superuser=True)

    # One public project, two private projects (with and without anonymous
    # visibility) and one staff-blocked project, all owned by project_owner.
    m.public_project = f.ProjectFactory(is_private=False,
                                        anon_permissions=['view_project'],
                                        public_permissions=['view_project'],
                                        owner=m.project_owner)
    m.private_project1 = f.ProjectFactory(is_private=True,
                                          anon_permissions=['view_project'],
                                          public_permissions=['view_project'],
                                          owner=m.project_owner)
    m.private_project2 = f.ProjectFactory(is_private=True,
                                          anon_permissions=[],
                                          public_permissions=[],
                                          owner=m.project_owner)
    m.blocked_project = f.ProjectFactory(is_private=True,
                                         anon_permissions=[],
                                         public_permissions=[],
                                         owner=m.project_owner,
                                         blocked_code=project_choices.BLOCKED_BY_STAFF)

    # Full-permission membership for project_member_with_perms on every
    # project, plus a zero-permission membership for
    # project_member_without_perms; the blocked-project memberships carry
    # no email, matching the original fixture.
    m.public_membership = f.MembershipFactory(
        project=m.public_project,
        user=m.project_member_with_perms,
        email=m.project_member_with_perms.email,
        role__project=m.public_project,
        role__permissions=[p[0] for p in MEMBERS_PERMISSIONS])
    m.private_membership1 = f.MembershipFactory(
        project=m.private_project1,
        user=m.project_member_with_perms,
        email=m.project_member_with_perms.email,
        role__project=m.private_project1,
        role__permissions=[p[0] for p in MEMBERS_PERMISSIONS])
    f.MembershipFactory(project=m.private_project1,
                        user=m.project_member_without_perms,
                        email=m.project_member_without_perms.email,
                        role__project=m.private_project1,
                        role__permissions=[])
    m.private_membership2 = f.MembershipFactory(
        project=m.private_project2,
        user=m.project_member_with_perms,
        email=m.project_member_with_perms.email,
        role__project=m.private_project2,
        role__permissions=[p[0] for p in MEMBERS_PERMISSIONS])
    f.MembershipFactory(project=m.private_project2,
                        user=m.project_member_without_perms,
                        email=m.project_member_without_perms.email,
                        role__project=m.private_project2,
                        role__permissions=[])
    m.blocked_membership = f.MembershipFactory(
        project=m.blocked_project,
        user=m.project_member_with_perms,
        role__project=m.blocked_project,
        role__permissions=[p[0] for p in MEMBERS_PERMISSIONS])
    f.MembershipFactory(project=m.blocked_project,
                        user=m.project_member_without_perms,
                        role__project=m.blocked_project,
                        role__permissions=[])

    # The owner is an admin member of every project.
    for project in (m.public_project, m.private_project1,
                    m.private_project2, m.blocked_project):
        f.MembershipFactory(project=project, user=m.project_owner,
                            is_admin=True)

    # One instance of each per-project model for each of the four projects,
    # exposed as m.public_<name>, m.private_<name>1, m.private_<name>2 and
    # m.blocked_<name>. Creation order (grouped by model, then project)
    # mirrors the original fixture.
    per_project_factories = [
        ("points", f.PointsFactory),
        ("user_story_status", f.UserStoryStatusFactory),
        ("task_status", f.TaskStatusFactory),
        ("issue_status", f.IssueStatusFactory),
        ("issue_type", f.IssueTypeFactory),
        ("priority", f.PriorityFactory),
        ("severity", f.SeverityFactory),
    ]
    for name, factory in per_project_factories:
        setattr(m, "public_%s" % name, factory(project=m.public_project))
        setattr(m, "private_%s1" % name, factory(project=m.private_project1))
        setattr(m, "private_%s2" % name, factory(project=m.private_project2))
        setattr(m, "blocked_%s" % name, factory(project=m.blocked_project))

    m.project_template = m.public_project.creation_template
    return m
def test_roles_retrieve(client, data):
    """GET on a role detail: projects with public visibility answer 200 for
    everybody; the fully private and blocked projects answer 401/403 except
    for the permitted member and the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [200, 200, 200, 200, 200]),
        (data.private_project1, [200, 200, 200, 200, 200]),
        (data.private_project2, [401, 403, 403, 200, 200]),
        (data.blocked_project, [401, 403, 403, 200, 200]),
    ]
    for project, expected in cases:
        url = reverse('roles-detail', kwargs={"pk": project.roles.all()[0].pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_roles_update(client, data):
    """PUT on a role detail succeeds only for the owner; the blocked
    project answers 451 even for the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [401, 403, 403, 403, 200]),
        (data.private_project1, [401, 403, 403, 403, 200]),
        (data.private_project2, [401, 403, 403, 403, 200]),
        (data.blocked_project, [401, 403, 403, 403, 451]),
    ]
    for project, expected in cases:
        role = project.roles.all()[0]
        url = reverse('roles-detail', kwargs={"pk": role.pk})
        payload = RoleSerializer(role).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url,
                                          json.dumps(payload), users)
        assert results == expected
def test_roles_delete(client, data):
    """DELETE on a role detail succeeds only for the owner (204); the
    blocked project answers 451 even for the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [401, 403, 403, 403, 204]),
        (data.private_project1, [401, 403, 403, 403, 204]),
        (data.private_project2, [401, 403, 403, 403, 204]),
        (data.blocked_project, [401, 403, 403, 403, 451]),
    ]
    for project, expected in cases:
        url = reverse('roles-detail', kwargs={"pk": project.roles.all()[0].pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == expected
def test_roles_list(client, data):
    """The role list shows 3 roles to anonymous/unprivileged users (the
    publicly visible projects) and 7 to the permitted member and owner."""
    url = reverse('roles-list')
    visibility = [
        (None, 3),  # anonymous: no login performed
        (data.registered_user, 3),
        (data.project_member_without_perms, 3),
        (data.project_member_with_perms, 7),
        (data.project_owner, 7),
    ]
    for user, expected_count in visibility:
        if user is not None:
            client.login(user)
        response = client.get(url)
        roles_data = json.loads(response.content.decode('utf-8'))
        assert len(roles_data) == expected_count
        assert response.status_code == 200
def test_roles_patch(client, data):
    """PATCH on a role detail succeeds only for the owner; the blocked
    project answers 451 even for the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [401, 403, 403, 403, 200]),
        (data.private_project1, [401, 403, 403, 403, 200]),
        (data.private_project2, [401, 403, 403, 403, 200]),
        (data.blocked_project, [401, 403, 403, 403, 451]),
    ]
    for project, expected in cases:
        url = reverse('roles-detail', kwargs={"pk": project.roles.all()[0].pk})
        results = helper_test_http_method(client, 'patch', url,
                                          '{"name": "Test"}', users)
        assert results == expected
def test_points_retrieve(client, data):
    """GET on a points detail: publicly visible projects answer 200 for
    everybody; private2/blocked only for the permitted member and owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_points, [200, 200, 200, 200, 200]),
        (data.private_points1, [200, 200, 200, 200, 200]),
        (data.private_points2, [401, 403, 403, 200, 200]),
        (data.blocked_points, [401, 403, 403, 200, 200]),
    ]
    for points, expected in cases:
        url = reverse('points-detail', kwargs={"pk": points.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_points_update(client, data):
    """PUT on a points detail succeeds only for the owner; the blocked
    project answers 451 even for the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_points, [401, 403, 403, 403, 200]),
        (data.private_points1, [401, 403, 403, 403, 200]),
        (data.private_points2, [401, 403, 403, 403, 200]),
        (data.blocked_points, [401, 403, 403, 403, 451]),
    ]
    for points, expected in cases:
        url = reverse('points-detail', kwargs={"pk": points.pk})
        payload = serializers.PointsSerializer(points).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url,
                                          json.dumps(payload), users)
        assert results == expected
def test_points_delete(client, data):
    """DELETE on a points detail succeeds only for the owner (204); the
    blocked project answers 451 even for the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_points, [401, 403, 403, 403, 204]),
        (data.private_points1, [401, 403, 403, 403, 204]),
        (data.private_points2, [401, 403, 403, 403, 204]),
        (data.blocked_points, [401, 403, 403, 403, 451]),
    ]
    for points, expected in cases:
        url = reverse('points-detail', kwargs={"pk": points.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == expected
def test_points_list(client, data):
    """The points list shows 2 entries to anonymous/unprivileged users
    (the publicly visible projects) and 4 to the permitted member/owner."""
    url = reverse('points-list')
    visibility = [
        (None, 2),  # anonymous: no login performed
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, expected_count in visibility:
        if user is not None:
            client.login(user)
        response = client.get(url)
        points_data = json.loads(response.content.decode('utf-8'))
        assert len(points_data) == expected_count
        assert response.status_code == 200
def test_points_patch(client, data):
    """PATCH on a points detail succeeds only for the owner; the blocked
    project answers 451 even for the owner."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_points, [401, 403, 403, 403, 200]),
        (data.private_points1, [401, 403, 403, 403, 200]),
        (data.private_points2, [401, 403, 403, 403, 200]),
        (data.blocked_points, [401, 403, 403, 403, 451]),
    ]
    for points, expected in cases:
        url = reverse('points-detail', kwargs={"pk": points.pk})
        results = helper_test_http_method(client, 'patch', url,
                                          '{"name": "Test"}', users)
        assert results == expected
def test_points_action_bulk_update_order(client, data):
    """POST to the bulk-update-order action succeeds only for the owner
    (204); the blocked project answers 451 even for the owner."""
    url = reverse('points-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [401, 403, 403, 403, 204]),
        (data.private_project1, [401, 403, 403, 403, 204]),
        (data.private_project2, [401, 403, 403, 403, 204]),
        (data.blocked_project, [401, 403, 403, 403, 451]),
    ]
    for project, expected in cases:
        post_data = json.dumps({
            "bulk_points": [(1, 2)],
            "project": project.pk,
        })
        results = helper_test_http_method(client, 'post', url, post_data, users)
        assert results == expected
def test_user_story_status_retrieve(client, data):
    """Check GET visibility of user story statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, expected HTTP codes in the same order as `users`).
    cases = [
        (data.public_user_story_status, [200, 200, 200, 200, 200]),
        (data.private_user_story_status1, [200, 200, 200, 200, 200]),
        (data.private_user_story_status2, [401, 403, 403, 200, 200]),
        (data.blocked_user_story_status, [401, 403, 403, 200, 200]),
    ]
    for status, expected in cases:
        url = reverse('userstory-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_user_story_status_update(client, data):
    """Check PUT permissions on user story statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_user_story_status, 200),
        (data.private_user_story_status1, 200),
        (data.private_user_story_status2, 200),
        (data.blocked_user_story_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('userstory-statuses-detail', kwargs={"pk": status.pk})
        payload = serializers.UserStoryStatusSerializer(status).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
        assert results == [401, 403, 403, 403, owner_code]
def test_user_story_status_delete(client, data):
    """Check DELETE permissions on user story statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_user_story_status, 204),
        (data.private_user_story_status1, 204),
        (data.private_user_story_status2, 204),
        (data.blocked_user_story_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('userstory-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_user_story_status_list(client, data):
    """Check how many user story statuses each user can see in the list endpoint."""
    url = reverse('userstory-statuses-list')
    # (user to log in as, number of statuses visible); None means anonymous.
    visibility = [
        (None, 2),
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, visible_count in visibility:
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == visible_count
        assert response.status_code == 200
def test_user_story_status_patch(client, data):
    """Check PATCH permissions on user story statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_user_story_status, 200),
        (data.private_user_story_status1, 200),
        (data.private_user_story_status2, 200),
        (data.blocked_user_story_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('userstory-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users)
        assert results == [401, 403, 403, 403, owner_code]
def test_user_story_status_action_bulk_update_order(client, data):
    """Check who may bulk-reorder user story statuses per project visibility."""
    url = reverse('userstory-statuses-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (project, status expected for the project owner); all other users are rejected.
    cases = [
        (data.public_project, 204),
        (data.private_project1, 204),
        (data.private_project2, 204),
        (data.blocked_project, 451),
    ]
    for project, owner_code in cases:
        post_data = json.dumps({
            "bulk_userstory_statuses": [(1, 2)],
            "project": project.pk
        })
        results = helper_test_http_method(client, 'post', url, post_data, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_task_status_retrieve(client, data):
    """Check GET visibility of task statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, expected HTTP codes in the same order as `users`).
    cases = [
        (data.public_task_status, [200, 200, 200, 200, 200]),
        (data.private_task_status1, [200, 200, 200, 200, 200]),
        (data.private_task_status2, [401, 403, 403, 200, 200]),
        (data.blocked_task_status, [401, 403, 403, 200, 200]),
    ]
    for status, expected in cases:
        url = reverse('task-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_task_status_update(client, data):
    """Check PUT permissions on task statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_task_status, 200),
        (data.private_task_status1, 200),
        (data.private_task_status2, 200),
        (data.blocked_task_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('task-statuses-detail', kwargs={"pk": status.pk})
        payload = serializers.TaskStatusSerializer(status).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
        assert results == [401, 403, 403, 403, owner_code]
def test_task_status_delete(client, data):
    """Check DELETE permissions on task statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_task_status, 204),
        (data.private_task_status1, 204),
        (data.private_task_status2, 204),
        (data.blocked_task_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('task-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_task_status_list(client, data):
    """Check how many task statuses each user can see in the list endpoint."""
    url = reverse('task-statuses-list')
    # (user to log in as, number of statuses visible); None means anonymous.
    visibility = [
        (None, 2),
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, visible_count in visibility:
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == visible_count
        assert response.status_code == 200
def test_task_status_patch(client, data):
    """Check PATCH permissions on task statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_task_status, 200),
        (data.private_task_status1, 200),
        (data.private_task_status2, 200),
        (data.blocked_task_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('task-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users)
        assert results == [401, 403, 403, 403, owner_code]
def test_task_status_action_bulk_update_order(client, data):
    """Check who may bulk-reorder task statuses per project visibility."""
    url = reverse('task-statuses-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (project, status expected for the project owner); all other users are rejected.
    cases = [
        (data.public_project, 204),
        (data.private_project1, 204),
        (data.private_project2, 204),
        (data.blocked_project, 451),
    ]
    for project, owner_code in cases:
        post_data = json.dumps({
            "bulk_task_statuses": [(1, 2)],
            "project": project.pk
        })
        results = helper_test_http_method(client, 'post', url, post_data, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_status_retrieve(client, data):
    """Check GET visibility of issue statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, expected HTTP codes in the same order as `users`).
    cases = [
        (data.public_issue_status, [200, 200, 200, 200, 200]),
        (data.private_issue_status1, [200, 200, 200, 200, 200]),
        (data.private_issue_status2, [401, 403, 403, 200, 200]),
        (data.blocked_issue_status, [401, 403, 403, 200, 200]),
    ]
    for status, expected in cases:
        url = reverse('issue-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_issue_status_update(client, data):
    """Check PUT permissions on issue statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_issue_status, 200),
        (data.private_issue_status1, 200),
        (data.private_issue_status2, 200),
        (data.blocked_issue_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('issue-statuses-detail', kwargs={"pk": status.pk})
        payload = serializers.IssueStatusSerializer(status).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_status_delete(client, data):
    """Check DELETE permissions on issue statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_issue_status, 204),
        (data.private_issue_status1, 204),
        (data.private_issue_status2, 204),
        (data.blocked_issue_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('issue-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_status_list(client, data):
    """Check how many issue statuses each user can see in the list endpoint."""
    url = reverse('issue-statuses-list')
    # (user to log in as, number of statuses visible); None means anonymous.
    visibility = [
        (None, 2),
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, visible_count in visibility:
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == visible_count
        assert response.status_code == 200
def test_issue_status_patch(client, data):
    """Check PATCH permissions on issue statuses for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (status object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_issue_status, 200),
        (data.private_issue_status1, 200),
        (data.private_issue_status2, 200),
        (data.blocked_issue_status, 451),
    ]
    for status, owner_code in cases:
        url = reverse('issue-statuses-detail', kwargs={"pk": status.pk})
        results = helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_status_action_bulk_update_order(client, data):
    """Check who may bulk-reorder issue statuses per project visibility."""
    url = reverse('issue-statuses-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (project, status expected for the project owner); all other users are rejected.
    cases = [
        (data.public_project, 204),
        (data.private_project1, 204),
        (data.private_project2, 204),
        (data.blocked_project, 451),
    ]
    for project, owner_code in cases:
        post_data = json.dumps({
            "bulk_issue_statuses": [(1, 2)],
            "project": project.pk
        })
        results = helper_test_http_method(client, 'post', url, post_data, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_type_retrieve(client, data):
    """Check GET visibility of issue types for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (issue type object, expected HTTP codes in the same order as `users`).
    cases = [
        (data.public_issue_type, [200, 200, 200, 200, 200]),
        (data.private_issue_type1, [200, 200, 200, 200, 200]),
        (data.private_issue_type2, [401, 403, 403, 200, 200]),
        (data.blocked_issue_type, [401, 403, 403, 200, 200]),
    ]
    for issue_type, expected in cases:
        url = reverse('issue-types-detail', kwargs={"pk": issue_type.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_issue_type_update(client, data):
    """Check PUT permissions on issue types for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (issue type object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_issue_type, 200),
        (data.private_issue_type1, 200),
        (data.private_issue_type2, 200),
        (data.blocked_issue_type, 451),
    ]
    for issue_type, owner_code in cases:
        url = reverse('issue-types-detail', kwargs={"pk": issue_type.pk})
        payload = serializers.IssueTypeSerializer(issue_type).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_type_delete(client, data):
    """Check DELETE permissions on issue types for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (issue type object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_issue_type, 204),
        (data.private_issue_type1, 204),
        (data.private_issue_type2, 204),
        (data.blocked_issue_type, 451),
    ]
    for issue_type, owner_code in cases:
        url = reverse('issue-types-detail', kwargs={"pk": issue_type.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_type_list(client, data):
    """Check how many issue types each user can see in the list endpoint."""
    url = reverse('issue-types-list')
    # (user to log in as, number of issue types visible); None means anonymous.
    visibility = [
        (None, 2),
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, visible_count in visibility:
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == visible_count
        assert response.status_code == 200
def test_issue_type_patch(client, data):
    """Check PATCH permissions on issue types for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (issue type object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_issue_type, 200),
        (data.private_issue_type1, 200),
        (data.private_issue_type2, 200),
        (data.blocked_issue_type, 451),
    ]
    for issue_type, owner_code in cases:
        url = reverse('issue-types-detail', kwargs={"pk": issue_type.pk})
        results = helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users)
        assert results == [401, 403, 403, 403, owner_code]
def test_issue_type_action_bulk_update_order(client, data):
    """Check who may bulk-reorder issue types per project visibility."""
    url = reverse('issue-types-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (project, status expected for the project owner); all other users are rejected.
    cases = [
        (data.public_project, 204),
        (data.private_project1, 204),
        (data.private_project2, 204),
        (data.blocked_project, 451),
    ]
    for project, owner_code in cases:
        post_data = json.dumps({
            "bulk_issue_types": [(1, 2)],
            "project": project.pk
        })
        results = helper_test_http_method(client, 'post', url, post_data, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_priority_retrieve(client, data):
    """Check GET visibility of priorities for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (priority object, expected HTTP codes in the same order as `users`).
    cases = [
        (data.public_priority, [200, 200, 200, 200, 200]),
        (data.private_priority1, [200, 200, 200, 200, 200]),
        (data.private_priority2, [401, 403, 403, 200, 200]),
        (data.blocked_priority, [401, 403, 403, 200, 200]),
    ]
    for priority, expected in cases:
        url = reverse('priorities-detail', kwargs={"pk": priority.pk})
        results = helper_test_http_method(client, 'get', url, None, users)
        assert results == expected
def test_priority_update(client, data):
    """Check PUT permissions on priorities for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (priority object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_priority, 200),
        (data.private_priority1, 200),
        (data.private_priority2, 200),
        (data.blocked_priority, 451),
    ]
    for priority, owner_code in cases:
        url = reverse('priorities-detail', kwargs={"pk": priority.pk})
        payload = serializers.PrioritySerializer(priority).data
        payload["name"] = "test"
        results = helper_test_http_method(client, 'put', url, json.dumps(payload), users)
        assert results == [401, 403, 403, 403, owner_code]
def test_priority_delete(client, data):
    """Check DELETE permissions on priorities for each user role."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner
    ]
    # (priority object, status expected for the project owner); others are rejected.
    cases = [
        (data.public_priority, 204),
        (data.private_priority1, 204),
        (data.private_priority2, 204),
        (data.blocked_priority, 451),
    ]
    for priority, owner_code in cases:
        url = reverse('priorities-detail', kwargs={"pk": priority.pk})
        results = helper_test_http_method(client, 'delete', url, None, users)
        assert results == [401, 403, 403, 403, owner_code]
def test_priority_list(client, data):
    """Anonymous/unprivileged users list 2 priorities; members with perms and owners list 4."""
    url = reverse('priorities-list')
    cases = [
        (None, 2),
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, expected_count in cases:
        # First iteration stays anonymous, exactly as the original sequence did.
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == expected_count
        assert response.status_code == 200
def test_priority_patch(client, data):
    """PATCHing a priority is owner-only; priorities of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_priority, [401, 403, 403, 403, 200]),
        (data.private_priority1, [401, 403, 403, 403, 200]),
        (data.private_priority2, [401, 403, 403, 403, 200]),
        (data.blocked_priority, [401, 403, 403, 403, 451]),
    ]
    for priority, expected in cases:
        url = reverse('priorities-detail', kwargs={"pk": priority.pk})
        assert helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users) == expected
def test_priority_action_bulk_update_order(client, data):
    """Bulk reordering priorities is owner-only; blocked projects answer 451."""
    url = reverse('priorities-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [401, 403, 403, 403, 204]),
        (data.private_project1, [401, 403, 403, 403, 204]),
        (data.private_project2, [401, 403, 403, 403, 204]),
        (data.blocked_project, [401, 403, 403, 403, 451]),
    ]
    for project, expected in cases:
        post_data = json.dumps({
            "bulk_priorities": [(1, 2)],
            "project": project.pk,
        })
        assert helper_test_http_method(client, 'post', url, post_data, users) == expected
def test_severity_retrieve(client, data):
    """Public/private1 severities are world-readable; private2/blocked require membership."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_severity, [200, 200, 200, 200, 200]),
        (data.private_severity1, [200, 200, 200, 200, 200]),
        (data.private_severity2, [401, 403, 403, 200, 200]),
        (data.blocked_severity, [401, 403, 403, 200, 200]),
    ]
    for severity, expected in cases:
        url = reverse('severities-detail', kwargs={"pk": severity.pk})
        assert helper_test_http_method(client, 'get', url, None, users) == expected
def test_severity_update(client, data):
    """PUTting a severity is owner-only; severities of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_severity, [401, 403, 403, 403, 200]),
        (data.private_severity1, [401, 403, 403, 403, 200]),
        (data.private_severity2, [401, 403, 403, 403, 200]),
        (data.blocked_severity, [401, 403, 403, 403, 451]),
    ]
    for severity, expected in cases:
        url = reverse('severities-detail', kwargs={"pk": severity.pk})
        payload = serializers.SeveritySerializer(severity).data
        payload["name"] = "test"
        assert helper_test_http_method(client, 'put', url, json.dumps(payload), users) == expected
def test_severity_delete(client, data):
    """Deleting a severity is owner-only; severities of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_severity, [401, 403, 403, 403, 204]),
        (data.private_severity1, [401, 403, 403, 403, 204]),
        (data.private_severity2, [401, 403, 403, 403, 204]),
        (data.blocked_severity, [401, 403, 403, 403, 451]),
    ]
    for severity, expected in cases:
        url = reverse('severities-detail', kwargs={"pk": severity.pk})
        assert helper_test_http_method(client, 'delete', url, None, users) == expected
def test_severity_list(client, data):
    """Anonymous/unprivileged users list 2 severities; members with perms and owners list 4."""
    url = reverse('severities-list')
    cases = [
        (None, 2),
        (data.registered_user, 2),
        (data.project_member_without_perms, 2),
        (data.project_member_with_perms, 4),
        (data.project_owner, 4),
    ]
    for user, expected_count in cases:
        # First iteration stays anonymous, exactly as the original sequence did.
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == expected_count
        assert response.status_code == 200
def test_severity_patch(client, data):
    """PATCHing a severity is owner-only; severities of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_severity, [401, 403, 403, 403, 200]),
        (data.private_severity1, [401, 403, 403, 403, 200]),
        (data.private_severity2, [401, 403, 403, 403, 200]),
        (data.blocked_severity, [401, 403, 403, 403, 451]),
    ]
    for severity, expected in cases:
        url = reverse('severities-detail', kwargs={"pk": severity.pk})
        assert helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users) == expected
def test_severity_action_bulk_update_order(client, data):
    """Bulk reordering severities is owner-only; blocked projects answer 451."""
    url = reverse('severities-bulk-update-order')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_project, [401, 403, 403, 403, 204]),
        (data.private_project1, [401, 403, 403, 403, 204]),
        (data.private_project2, [401, 403, 403, 403, 204]),
        (data.blocked_project, [401, 403, 403, 403, 451]),
    ]
    for project, expected in cases:
        post_data = json.dumps({
            "bulk_severities": [(1, 2)],
            "project": project.pk,
        })
        assert helper_test_http_method(client, 'post', url, post_data, users) == expected
def test_membership_retrieve(client, data):
    """Public/private1 memberships are world-readable; private2/blocked require membership."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_membership, [200, 200, 200, 200, 200]),
        (data.private_membership1, [200, 200, 200, 200, 200]),
        (data.private_membership2, [401, 403, 403, 200, 200]),
        (data.blocked_membership, [401, 403, 403, 200, 200]),
    ]
    for membership, expected in cases:
        url = reverse('memberships-detail', kwargs={"pk": membership.pk})
        assert helper_test_http_method(client, 'get', url, None, users) == expected
def test_membership_update(client, data):
    """PUTting a membership is owner-only; memberships of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_membership, [401, 403, 403, 403, 200]),
        (data.private_membership1, [401, 403, 403, 403, 200]),
        (data.private_membership2, [401, 403, 403, 403, 200]),
        (data.blocked_membership, [401, 403, 403, 403, 451]),
    ]
    for membership, expected in cases:
        url = reverse('memberships-detail', kwargs={"pk": membership.pk})
        payload = serializers.MembershipSerializer(membership).data
        payload["token"] = "test"
        assert helper_test_http_method(client, 'put', url, json.dumps(payload), users) == expected
def test_membership_delete(client, data):
    """Deleting a membership is owner-only; memberships of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_membership, [401, 403, 403, 403, 204]),
        (data.private_membership1, [401, 403, 403, 403, 204]),
        (data.private_membership2, [401, 403, 403, 403, 204]),
        (data.blocked_membership, [401, 403, 403, 403, 451]),
    ]
    for membership, expected in cases:
        url = reverse('memberships-detail', kwargs={"pk": membership.pk})
        assert helper_test_http_method(client, 'delete', url, None, users) == expected
def test_membership_list(client, data):
    """Anonymous/unprivileged users list 5 memberships; members with perms and owners list 11."""
    url = reverse('memberships-list')
    cases = [
        (None, 5),
        (data.registered_user, 5),
        (data.project_member_without_perms, 5),
        (data.project_member_with_perms, 11),
        (data.project_owner, 11),
    ]
    for user, expected_count in cases:
        # First iteration stays anonymous, exactly as the original sequence did.
        if user is not None:
            client.login(user)
        response = client.get(url)
        listed = json.loads(response.content.decode('utf-8'))
        assert len(listed) == expected_count
        assert response.status_code == 200
def test_membership_patch(client, data):
    """PATCHing a membership is owner-only; memberships of blocked projects answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_membership, [401, 403, 403, 403, 200]),
        (data.private_membership1, [401, 403, 403, 403, 200]),
        (data.private_membership2, [401, 403, 403, 403, 200]),
        (data.blocked_membership, [401, 403, 403, 403, 451]),
    ]
    for membership, expected in cases:
        url = reverse('memberships-detail', kwargs={"pk": membership.pk})
        assert helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users) == expected
def test_membership_create(client, data):
    """Creating a membership is owner-only; blocked projects answer 451."""
    url = reverse('memberships-list')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (data.public_membership, "test1@test.com", [401, 403, 403, 403, 201]),
        (data.private_membership1, "test2@test.com", [401, 403, 403, 403, 201]),
        (data.private_membership2, "test3@test.com", [401, 403, 403, 403, 201]),
        (data.blocked_membership, "test4@test.com", [401, 403, 403, 403, 451]),
    ]
    for membership, email, expected in cases:
        # Clone an existing membership, clear the pk and use a fresh email so
        # the POST creates a new row.
        payload = serializers.MembershipSerializer(membership).data
        payload["id"] = None
        payload["email"] = email
        assert helper_test_http_method(client, 'post', url, json.dumps(payload), users) == expected
def test_membership_action_bulk_create(client, data):
    """Bulk membership creation is owner-only; blocked projects answer 451."""
    url = reverse('memberships-bulk-create')
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    # NOTE: the blocked case reuses private_membership2's role, mirroring the
    # original fixture wiring.
    cases = [
        (data.public_project, data.public_membership.role, [401, 403, 403, 403, 200]),
        (data.private_project1, data.private_membership1.role, [401, 403, 403, 403, 200]),
        (data.private_project2, data.private_membership2.role, [401, 403, 403, 403, 200]),
        (data.blocked_project, data.private_membership2.role, [401, 403, 403, 403, 451]),
    ]
    for project, role, expected in cases:
        bulk_data = json.dumps({
            "project_id": project.id,
            "bulk_memberships": [
                {"role_id": role.pk, "email": "test1@test.com"},
                {"role_id": role.pk, "email": "test2@test.com"},
            ],
        })
        assert helper_test_http_method(client, 'post', url, bulk_data, users) == expected
def test_membership_action_resend_invitation(client, data):
    """Resending invitations is owner-only; invisible projects 404, blocked answer 451."""
    users = [
        None,
        data.registered_user,
        data.project_member_without_perms,
        data.project_member_with_perms,
        data.project_owner,
    ]
    cases = [
        (f.InvitationFactory(project=data.public_project, role__project=data.public_project),
         [401, 403, 403, 403, 204]),
        (f.InvitationFactory(project=data.private_project1, role__project=data.private_project1),
         [401, 403, 403, 403, 204]),
        (f.InvitationFactory(project=data.private_project2, role__project=data.private_project2),
         [404, 404, 404, 403, 204]),
        (f.InvitationFactory(project=data.blocked_project, role__project=data.blocked_project),
         [404, 404, 404, 403, 451]),
    ]
    for invitation, expected in cases:
        url = reverse('memberships-resend-invitation', kwargs={"pk": invitation.pk})
        assert helper_test_http_method(client, 'post', url, None, users) == expected
def test_project_template_retrieve(client, data):
    """Project templates are readable by anonymous, registered and superuser alike."""
    url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
    users = [None, data.registered_user, data.superuser]
    assert helper_test_http_method(client, 'get', url, None, users) == [200, 200, 200]
def test_project_template_update(client, data):
    """Only superusers may PUT a project template."""
    url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
    users = [None, data.registered_user, data.superuser]
    payload = serializers.ProjectTemplateSerializer(data.project_template).data
    payload["default_owner_role"] = "test"
    assert helper_test_http_method(client, 'put', url, json.dumps(payload), users) == [401, 403, 200]
def test_project_template_delete(client, data):
    """Only superusers may delete a project template."""
    url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
    users = [None, data.registered_user, data.superuser]
    assert helper_test_http_method(client, 'delete', url, None, users) == [401, 403, 204]
def test_project_template_list(client, data):
    """Every user class sees the single project template."""
    url = reverse('project-templates-list')
    for user in (None, data.registered_user, data.superuser):
        # First iteration stays anonymous, exactly as the original sequence did.
        if user is not None:
            client.login(user)
        response = client.get(url)
        templates = json.loads(response.content.decode('utf-8'))
        assert len(templates) == 1
        assert response.status_code == 200
def test_project_template_patch(client, data):
    """Only superusers may PATCH a project template."""
    url = reverse('project-templates-detail', kwargs={"pk": data.project_template.pk})
    users = [None, data.registered_user, data.superuser]
    assert helper_test_http_method(client, 'patch', url, '{"name": "Test"}', users) == [401, 403, 200]
|
4 tsp of Terrific Toffee tea (brewed for 2 minutes) in 300mls of freshly boiled water.
Separately brew 4 tsp of Just Ginger tea in 200mls freshly boiled water for 7 minutes.
Pour both teas into a saucepan.
Add 300mls of vanilla soy milk and 1 heaped Tbs of honey.
|
import imex
from imex.metadata import Tag, ImageMetadata
class MetadataEditor(object):
    """
    Applies a set of tag-rewriting rules to an image's metadata.

    A rule set maps a matching tag/value pair to one or more new tag
    assignments; see process_image() for the rule structure and the
    matching/removal semantics.
    """
    def __init__(self, rules, keep_timestamps = True, **kwargs):
        """
        :param rules: the rule set to apply (also passed per-call to
            process_image(); assumed to be the same object — TODO confirm)
        :param keep_timestamps: forwarded to ImageMetadata.write() when saving

        Supported keyword arguments:
        * debug
        * dry_run
        """
        self._keep_timestamps = keep_timestamps
        self._debug = kwargs.pop('debug', False)
        self._dry_run = kwargs.pop('dry_run', False)
        self._rules = rules
    def apply_rule(self, image_metadata, rule):
        """
        Apply one rule — a mapping of new_tag_name -> new_tag_value — to
        *image_metadata*, creating any tags that are missing.

        Returns True if any tag was added or modified.
        """
        log = imex.log
        changed = False
        for new_tag_name in rule:
            new_tag_value = rule[new_tag_name]
            # Create the tag if the image does not carry it yet; its value is
            # assigned below.
            if not new_tag_name in image_metadata:
                changed = True
                image_metadata[new_tag_name] = Tag(new_tag_name)
            new_tag = image_metadata[new_tag_name] # Just a convenience alias.
            if new_tag.repeatable:
                # Separate the values to be added from the values to be deleted
                add_list, del_list = self._rules.parse_repeatable_tag_values(new_tag_value)
                # # -------------------------------------------------------------------------
                # # Deferred deletion of value new_tag_value for new_tag_name:
                # # If the new tag is the same as the matching tag and its matching value was
                # # set for deletion, add this value to the list of values to delete.
                # # -------------------------------------------------------------------------
                # if new_tag_name == search_tag_name and rules.must_remove(search_tag_name, search_tag_value):
                #     del_list.append(search_tag_value)
                #     log.qdebug(' Deferred removal of value \'{0}\' for tag {1}'.format(search_tag_value, search_tag_name))
                if add_list:
                    log.qdebug(' Adding values \'{0}\' to tag {1}'.format(', '.join(add_list), new_tag_name))
                if del_list:
                    log.qdebug(' Deleting values \'{0}\' from tag {1}'.format(', '.join(del_list), new_tag_name))
                # Add and delete (in this order) the new values from the current rule
                if new_tag.combine_raw_values(add_list, del_list):
                    changed = True
                    log.dump()
                else:
                    log.clear()
            else:
                # For non-repeatable tags, simply set the new value (this will take care of
                # deferred removal, too).
                new_adjusted_tag_value = [new_tag_value] if new_tag.is_iptc() else new_tag_value
                if new_tag.raw_value != new_adjusted_tag_value:
                    log.dump()
                    log.debug(' Setting new value \'{0}\' for tag {1}'.format(new_tag_value, new_tag_name))
                    new_tag.raw_value = new_adjusted_tag_value
                    changed = True
                # Discard any queued messages that were not flushed above.
                log.clear()
        return changed
    def process_image(self, image_filename, rules):
        """
        Find all matching tags in an image's metadata and apply changes according
        to the given set of rules.

        This is the structure of a rule:

            search_tag_name : search_tag_value : (new_tag_name : new_tag_value)

        And it is read as: if the *search_tag_name* tag is found on the image with
        a value of *search_tag_value*, then set the value of each *new_tag_name* to
        its corresponding *new_tag_value*

        A search_tag_value can be set for removal once it has been found.
        """
        log = imex.log
        log.info('Processing {0}'.format(image_filename))
        imd = ImageMetadata(image_filename)
        imd.read()
        log.qdebug(' Applying default assignment')
        # The default rule is applied unconditionally to every image.
        need_write = self.apply_rule(imd, rules.default_rule)
        # Tags that are present in the current image and have an associated rule
        matching_tags = rules.get_matching_tags(imd)
        for search_tag_name in matching_tags:
            for search_tag_value in rules.get_search_tag_values(search_tag_name):
                # --------------------------------------------------------------------------------
                # Skip this search_tag_value if it is not one of the values of the search_tag_name
                # tag in the current image
                # --------------------------------------------------------------------------------
                if not imd[search_tag_name].has_raw_value(search_tag_value):
                    continue
                log.debug(' Found match: value \'{0}\' for tag {1}'.format(search_tag_value, search_tag_name))
                # --------------------------------------------------------------------------------
                # The current search_tag_value can be marked for removal in the rules.
                #
                # We will normally delete the value right away, but if the same search_tag_name is
                # going to be modified as part of this rule, defer this deletion.
                #
                # In the case of a non-repeatable tag, the value will simply be replaced with the
                # new one. If it is a repeatable tag, we'll simply add search_tag_value to the
                # list of values to delete
                # --------------------------------------------------------------------------------
                if rules.must_remove(search_tag_name, search_tag_value):
                    # Remove now if we are not touching this search_tag_name in
                    # the current rule
                    if search_tag_name not in rules.get_new_tag_names(search_tag_name, search_tag_value):
                        if imd[search_tag_name].repeatable:
                            # If the list is empty, the tag will be deleted when
                            # the metadata is written
                            imd[search_tag_name].combine_raw_values([], [search_tag_value])
                        else:
                            del imd[search_tag_name]
                        log.debug(' Removed value \'{0}\' for tag {1}'.format(search_tag_value, search_tag_name))
                # ------------------------------------------------------------------------------
                # The current image has a search_tag_name tag and its value is search_tag_value,
                # now set all new_tag_names to their corresponding new_tag_values
                # ------------------------------------------------------------------------------
                for new_tag_name in rules.get_new_tag_names(search_tag_name, search_tag_value):
                    # Track any changes, only then we will need to run the rules again
                    changed = False
                    new_tag_value = rules.get_new_tag_value(search_tag_name, search_tag_value, new_tag_name)
                    # Add the new tag if it is not already present in the image. We will set it's
                    # value later.
                    if not new_tag_name in imd:
                        changed = True
                        imd[new_tag_name] = Tag(new_tag_name)
                    new_tag = imd[new_tag_name] # Just a convenience alias.
                    if new_tag.repeatable:
                        # Separate the values to be added from the values to be deleted
                        add_list, del_list = rules.parse_repeatable_tag_values(new_tag_value)
                        # -------------------------------------------------------------------------
                        # Deferred deletion of value new_tag_value for new_tag_name:
                        # If the new tag is the same as the matching tag and its matching value was
                        # set for deletion, add this value to the list of values to delete.
                        # -------------------------------------------------------------------------
                        if new_tag_name == search_tag_name and rules.must_remove(search_tag_name, search_tag_value):
                            del_list.append(search_tag_value)
                            log.qdebug(' Deferred removal of value \'{0}\' for tag {1}'.format(search_tag_value,
                                                                                              search_tag_name))
                        if add_list:
                            log.qdebug(' Adding values \'{0}\' to tag {1}'.format(', '.join(add_list),
                                                                                  new_tag_name))
                        if del_list:
                            log.qdebug(' Deleting values \'{0}\' from tag {1}'.format(', '.join(del_list),
                                                                                      new_tag_name))
                        # Add and delete (in this order) the new values from the current rule
                        if new_tag.combine_raw_values(add_list, del_list):
                            changed = True
                            log.dump()
                        else:
                            log.clear()
                    else:
                        # For non-repeatable tags, simply set the new value (this will take care of
                        # deferred removal, too).
                        if new_tag.raw_value != new_tag_value:
                            log.debug(' Setting new value \'{0}\' for tag {1}'.format(new_tag_value, new_tag_name))
                            new_tag.raw_value = [new_tag_value] if new_tag.is_iptc() else new_tag_value
                            changed = True
                    if changed:
                        need_write = True
                        # ------------------------------------------------------------------------
                        # The current tag has changed, if there are any rules that have the
                        # current new_tag_name as their search_tag_name, then we need to apply the
                        # rules for that tag again, since some of their search_tag_value could
                        # match the new values.
                        # ------------------------------------------------------------------------
                        if new_tag_name in rules:
                            matching_tags.append(new_tag_name) # Extend the outermost for loop
                            log.debug(' **A matching tag has been modified. Revisiting all rules**')
                # for new_tag_name
            # for search_tag_value
        # for search_tag_name
        if need_write:
            if self._dry_run:
                log.debug(' Changes detected. File not saved (dry-run)')
            else:
                imd.write(self._keep_timestamps)
                log.debug(' Changes saved')
        else:
            log.debug(' No changes detected')
        log.debug('')
|
From time to time one of us will take a little breather from the self-induced chaos that is our day-to-day life and realise that time is in fact ticking on, and we are off on really rather a big and involved trip in the not-too-distant (in fact frighteningly all-too-soon) future. I’m writing this in just such a gulp of air that involves an evening off with a stubbornly uncreative brain, a handful of peppermint tea and an internet dongle that has far too much autonomy.
My task for the evening: inform you all of a wondrous event that we are heralding in October. Bass, Dan, Guy and I have been lucky enough to live together in Perranporth for the last year, which in itself has been an amazing experience. One of the best things about our year has been the warm friendship and membership of the local Cornish choir, The Perraners. We’ve shared many a great evening of laughter and song in Perranporth with these great people.
We’ve organised this evening as an excuse to get old friends back together, sing our favourite songs, eat some good food, and raise as much money for charity as we can! Having set our departure date for the next day, we thought it would be an ideal send-off bash. Luckily we don’t have too far to go on Day 1 – a bleary-eyed drive to Somerset and Bass’ family home!
– October 7th, probably 7pm!
|
from flask import request, current_app
from flask.ext.restful import Resource
from flask.ext.consulate import ConsulService
from urlparse import urljoin
import requests
import json
class ProxyView(Resource):
    """Proxies a request to a remote webservice.

    Incoming requests are re-issued either against a static
    ``service_uri`` or, for ``consul://`` URIs, against a service
    resolved through consul DNS. The remote response body, status code
    and whitelisted headers are returned to the original caller.
    """

    def __init__(self, endpoint, service_uri, deploy_path):
        """
        :param endpoint: name of this endpoint (kept for reference)
        :param service_uri: base URI of the remote service; may be a
            ``consul://`` URI for DNS-based service discovery
        :param deploy_path: path prefix under which this app is deployed;
            stripped from the incoming request path before proxying
        """
        self.endpoint = endpoint
        self.service_uri = service_uri
        self.deploy_path = deploy_path
        self.cs = None
        if service_uri.startswith('consul://'):
            # ConsulService resolves the target host per request and
            # exposes a requests.Session-compatible interface.
            self.cs = ConsulService(
                service_uri,
                nameservers=[current_app.config.get("CONSUL_DNS", "172.17.42.1")]
            )
            self.session = self.cs
        else:
            self.session = requests.Session()

    @staticmethod
    def get_body_data(request):
        """
        Returns the correct payload data coming from the flask.Request object
        """
        payload = request.get_json(silent=True)
        if payload:
            return json.dumps(payload)
        return request.form or request.data

    @staticmethod
    def _ensure_string_data(request):
        """Serialize ``request.data`` to a JSON string if it is not one
        already; the mutation is visible to get_body_data() afterwards."""
        if not isinstance(request.data, basestring):
            request.data = json.dumps(request.data)

    def dispatcher(self, **kwargs):
        """
        Having a dispatch based on request.method solves being able to set up
        ProxyViews on the same resource for different routes. However, it
        limits the ability to scope a resource on a per-method basis
        """
        # Strip the deploy prefix so the remote service sees the bare path.
        path = request.full_path.replace(self.deploy_path, '', 1)
        path = path[1:] if path.startswith('/') else path
        if self.cs is None:
            ep = urljoin(self.service_uri, path)
        else:
            # ConsulService prepends the resolved base URL itself.
            ep = path
        resp = getattr(self, request.method.lower())(ep, request)
        # Forward only the explicitly whitelisted response headers.
        headers = {}
        if resp.headers:
            for key in current_app.config['REMOTE_PROXY_ALLOWED_HEADERS']:
                if key in resp.headers:
                    headers[key] = resp.headers[key]
        if headers:
            return resp.text, resp.status_code, headers
        else:
            return resp.text, resp.status_code

    def get(self, ep, request):
        """
        Proxy to remote GET endpoint, should be invoked via self.dispatcher()
        """
        return self.session.get(ep, headers=request.headers)

    def post(self, ep, request):
        """
        Proxy to remote POST endpoint, should be invoked via self.dispatcher()
        """
        self._ensure_string_data(request)
        return self.session.post(
            ep, data=ProxyView.get_body_data(request), headers=request.headers
        )

    def put(self, ep, request):
        """
        Proxy to remote PUT endpoint, should be invoked via self.dispatcher()
        """
        self._ensure_string_data(request)
        return self.session.put(
            ep, data=ProxyView.get_body_data(request), headers=request.headers
        )

    def delete(self, ep, request):
        """
        Proxy to remote DELETE endpoint, should be invoked via self.dispatcher()
        """
        self._ensure_string_data(request)
        return self.session.delete(
            ep, data=ProxyView.get_body_data(request), headers=request.headers
        )
|
« It just makes cense. Or does it?
The day came, when my spoils from the recent New Amsterdam Market would be fully realized. Tonight, it was time for the Bo Bo young chicken to meet its fate. Well, I guess it had already met its fate, but it was time to meet a new, tastier fate. The silky chicken is still in the freezer, waiting to say hello to the oven. Question, though– how am I going to know if the silky chicken is cooked? I mean, the meat is black, so what color are the juices?
Anyway, it was time for some roast chicken. I pulled it out of the fridge, knowing that some preparations would need to be made. The “buddhist style” chicken comes complete with head and feet, as I had already mentioned in a previous post. Since I don’t have a cleaver, I knew that this could be tough to negotiate.
Then I attacked him with a knife. It was easier than I expected, but I wrangled his feet off and hacked off his head like a cold-hearted criminal. It actually wasn’t that bad. But bone sure is tough to cut through. As a reward for his patience, I rubbed him down with some frozen pesto that I had made a few months back. I threw some basil leaves and a few cloves of fresh garlic in his “cavity” (which, as I found out later, had the kidneys wedged in there, which I think actually gave the meat some mineral-y flavor). I sprinkled some kosher salt and freshly cracked black pepper on him, gave him a little glug of olive oil, and popped him into a pre-heated 425 degree oven.
About 30 minutes into the process, he was getting a deep amber tan. He looked like one of those lovely ladies from the jersey shore.
His little hotbox was browning him nicely. I was really happy with how the skin was getting nice and crispy, and the fat running out of him was pure and clear. I felt good about this little chicken, even if he had listened to belle and sebastian during his final hours. About a half-hour later, he was ready to get out of the tanning bed and into my belly.
BEEEEA-UTIFUL. Let’s have a moment of silence for Herbert. He was a simple chicken, and he led a simple life. He got a simple roasting. And he was simply delicious.
BG made a little Israeli couscous with eggplant and basil to accompany Herbert – they make a lovely pair, don’t you think?
8.63 out of 7 cows.
Tags: bo bo chicken, cooking, new amsterdam market.
This is not a silky chicken? The skin should be much much darker. and the silky chickens have an extra toe on each foot.
You are correct, this is not a silkie chicken. It is a regular young chicken from Bo Bo chicken. The silky chicken is still in burgerboy’s freezer, waiting to be cooked.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import calendar
import xlsxwriter
from django.utils import timezone
from django.conf import settings
# Base URL for the absolute links written into generated spreadsheets
# (e.g. the "Add ISRC" edit links). getattr without a default raises
# AttributeError at import time if SITE_URL is missing from settings.
SITE_URL = getattr(settings, "SITE_URL")
# Warning banner shown on every generated sheet.
ISRC_HINT_TEXT = """Please be aware that collecting societies only will distribute the earnings properly if an ISRC code is present."""
log = logging.getLogger(__name__)
def label_statistics_as_xls(label, years, title=None, output=None):
    """Write airplay statistics for *label* into an XLSX workbook.

    One worksheet is generated per entry in *years*. Each entry is a
    dict with ``start``/``end`` datetimes and ``objects`` — items that
    expose ``name``, ``artist``, ``release``, ``isrc``, ``num_events``,
    ``get_edit_url()`` and a monthly ``time_series``.

    :param label: label instance; ``label.name`` is used in the headers.
    :param years: list of dicts as described above.
    :param title: optional workbook/worksheet title.
    :param output: filename or file-like target; defaults to a name
        derived from the label.
    """
    # Column letters for building A1-style SUM() formulas. Monthly value
    # columns start at index 4 ("E"), so 26 letters cover the 4 fixed
    # columns plus 12 month columns.
    # bugfix: string previously ended "...UVWXZY" (Z/Y transposed), which
    # would produce wrong column letters past "X".
    ROW_LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    title = title or "Airplay Statistics: open broadcast radio"
    output = output or "Airplay statistics - {}.xlsx".format(label.name)
    log.info("output to: {}".format(output))
    ###################################################################
    # workbook preparation
    ###################################################################
    workbook = xlsxwriter.Workbook(output, {"in_memory": True})
    workbook.set_properties(
        {
            "title": title,
            "subject": title,
            "author": "digris AG",
            "company": "digris AG",
            "created": timezone.now(),
        }
    )
    ###################################################################
    # workbook style definitions
    ###################################################################
    bold = workbook.add_format({"bold": True})
    border_top = workbook.add_format({"bold": 1, "top": 1})
    border_bottom = workbook.add_format({"bold": 1, "bottom": 1})
    small = workbook.add_format({"font_size": 9, "italic": 1})
    isrc_hint = workbook.add_format({"color": "red"})
    ###################################################################
    # add statistics as sheet per year
    ###################################################################
    for year in years:
        start = year.get("start")
        end = year.get("end")
        objects = year.get("objects")
        # 1-based row of the table header; data rows follow directly.
        first_row = 7
        last_row = len(objects) - 1 + first_row
        total_events = sum(i.num_events for i in objects)
        worksheet = workbook.add_worksheet("{}".format(start.year))
        # Widen the first columns
        worksheet.set_column("A:C", 32)
        worksheet.set_column("D:D", 18)
        # Give the title row extra height. set_row() is 0-indexed and
        # expects an integer row index (bugfix: was set_row("1:1", 200),
        # which passes an A1 range string where an int is required).
        worksheet.set_row(0, 200)
        worksheet.merge_range(
            "A1:C1", "{} - {:%Y-%m-%d} - {:%Y-%m-%d}".format(title, start, end), bold
        )
        worksheet.merge_range("A2:C2", "Label: {}".format(label.name), bold)
        worksheet.merge_range("A3:C3", "Total: {}".format(total_events), bold)
        worksheet.merge_range("A4:C4", "{}".format(ISRC_HINT_TEXT), isrc_hint)
        worksheet.merge_range("A5:C5", "File created: {}".format(timezone.now()), small)
        worksheet.write("A{}".format(first_row), "Title", border_bottom)
        worksheet.write("B{}".format(first_row), "Artist", border_bottom)
        worksheet.write("C{}".format(first_row), "Release", border_bottom)
        worksheet.write("D{}".format(first_row), "ISRC", border_bottom)
        # Month names for the value columns, taken from the first
        # object's time series; an empty sheet gets no month columns.
        try:
            header = [
                calendar.month_name[dt.month]
                for dt in [i[0] for i in objects[0].time_series]
            ]
        except IndexError:
            header = []
        # write date (month) headers
        for index, item in enumerate(header, start=4):
            worksheet.write(first_row - 1, index, item, border_bottom)
            # set column width
            worksheet.set_column(index, index, 14)
        # write entries
        for index, item in enumerate(objects, start=first_row):
            worksheet.write(index, 0, item.name)
            worksheet.write(index, 1, item.artist.name)
            worksheet.write(index, 2, item.release.name)
            if item.isrc:
                worksheet.write(index, 3, item.isrc)
            else:
                # Missing ISRC: link back to the edit page instead.
                worksheet.write_url(
                    index,
                    3,
                    "{}{}".format(SITE_URL, item.get_edit_url()),
                    string="Add ISRC",
                )
            # add monthly numbers
            for ts_index, ts_item in enumerate(
                [ts[1] for ts in item.time_series], start=4
            ):
                worksheet.write(index, ts_index, ts_item)
        # add sums / formula
        worksheet.merge_range(
            "A{}:D{}".format(last_row + 2, last_row + 2), "Total", border_top
        )
        for index, item in enumerate(header, start=4):
            letter = ROW_LETTERS[index]
            # A1 rows are 1-based: data occupies rows
            # first_row + 1 .. last_row + 1 inclusive.
            formula = "=SUM({}{}:{}{})".format(
                letter, first_row + 1, letter, last_row + 1
            )
            worksheet.write_formula(last_row + 1, index, formula, border_top)
        # worksheet.merge_range('A{}:C{}'.format(last_row + 4, last_row + 4), '{}'.format(timezone.now()), small)
    workbook.close()
|
We have thousands of tradesmen covering the Emerson's Green (BS16) area. Whether you need a local boiler engineer or a handyman in Emerson's Green, simply pick a trade and get quotes ASAP.
Simply post your job for tradesmen in Emerson's Green. All relevant tradesmen nearby receive an alert, generating the best offer for the lowest price for your job. Based on reviews, you can pick your pro, allow them to contact you and get your job done for the best price for you. It's simple, easy and allows you to get on with the bigger things in life whilst we look for you. All you have to do is pick the best quote!
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, 2013 Samsung SDS Co., LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
import pycassa
from datetime import datetime, timedelta
from pycassa import (types, create_index_clause, create_index_expression, EQ,
GT, GTE, LT, LTE)
import struct
import json
import pickle
from collections import OrderedDict
from synaps import flags
from synaps import log as logging
from synaps import utils
from synaps import exception
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class Cassandra(object):
    """Persistence layer for Synaps monitoring data, backed by Cassandra.

    Wraps the pycassa column families used by Synaps (Metric, StatArchive,
    MetricAlarm, AlarmHistory, AlarmCounter, NotificationGroup) and exposes
    query/CRUD helpers for metrics, alarms, alarm history and statistics.

    NOTE(review): this is Python 2 code (``map``/``reduce`` returning
    lists, ``iteritems``, ``has_key``, implicit ``_`` gettext).
    """
    # Statistic names archived per metric; each is a super-column in the
    # StatArchive super CF holding timestamp -> value entries.
    STATISTICS = ["Sum", "SampleCount", "Average", "Minimum", "Maximum"]
    def __init__(self, keyspace=None):
        """Open a connection pool and bind all column families.

        Falls back to the ``cassandra_keyspace`` flag (default
        ``synaps_test``) when *keyspace* is not given.
        """
        # TTL (seconds) applied to archived statistics / alarm history.
        self.statistics_ttl = FLAGS.get('statistics_ttl')
        # Archive resolutions: configured in minutes, kept here in seconds.
        self.ARCHIVE = map(lambda x: int(x) * 60,
                           FLAGS.get('statistics_archives'))
        if not keyspace:
            keyspace = FLAGS.get("cassandra_keyspace", "synaps_test")
        serverlist = FLAGS.get("cassandra_server_list")
        cassandra_timeout = FLAGS.get("cassandra_timeout")
        self.pool = pycassa.ConnectionPool(keyspace, server_list=serverlist,
                                           timeout=cassandra_timeout)
        self.cf_metric = pycassa.ColumnFamily(self.pool, 'Metric')
        self.scf_stat_archive = pycassa.ColumnFamily(self.pool, 'StatArchive')
        self.cf_metric_alarm = pycassa.ColumnFamily(self.pool, 'MetricAlarm')
        self.cf_alarm_history = pycassa.ColumnFamily(self.pool,
                                                     'AlarmHistory')
        self.cf_alarm_counter = pycassa.ColumnFamily(self.pool,
                                                     'AlarmCounter')
        self.cf_notification_group = pycassa.ColumnFamily(self.pool,
                                                          'NotificationGroup')
    def delete_metric_alarm(self, alarm_key, project_id=None):
        """Remove an alarm row and decrement the project's alarm counter.

        The alarm row is fetched first to discover its project when
        *project_id* is not supplied; a missing alarm is only logged.
        """
        try:
            if not project_id:
                alarm = self.cf_metric_alarm.get(alarm_key)
                project_id = alarm.get('project_id')
            self.cf_metric_alarm.remove(alarm_key)
            self.cf_alarm_counter.add(project_id, 'alarm_counter', -1)
        except pycassa.NotFoundException:
            LOG.info(_("alarm key %s is not deleted" % alarm_key))
    def _describe_alarms_by_names(self, project_id, alarm_names):
        """Yield (key, columns) for every alarm whose name is in *alarm_names*."""
        for alarm_name in alarm_names:
            expr_list = [
                pycassa.create_index_expression("project_id", project_id),
                pycassa.create_index_expression("alarm_name", alarm_name)
            ]
            index_clause = pycassa.create_index_clause(expr_list)
            items = self.cf_metric_alarm.get_indexed_slices(index_clause)
            for k, v in items:
                yield k, v
    def get_alarm_by_name(self, project_id, alarm_name):
        """Return the first (key, columns) pair for *alarm_name*, or None."""
        alarms = list(self._describe_alarms_by_names(project_id, [alarm_name]))
        if alarms:
            return alarms[0]
        else:
            return None
    def describe_alarms(self, project_id, action_prefix=None,
                        alarm_name_prefix=None, alarm_names=None,
                        max_records=100, next_token=None, state_value=None):
        """
        params:
            project_id: string
            action_prefix: TODO: not implemented yet.
            alarm_name_prefix: string
            alarm_names: string list
            max_records: integer
            next_token: string (uuid type)
            state_value: string (OK | ALARM | INSUFFICIENT_DATA)
        """
        # Explicit names take precedence over all other filters.
        if alarm_names:
            return self._describe_alarms_by_names(project_id, alarm_names)
        next_token = uuid.UUID(next_token) if next_token else ''
        expr_list = []
        prj_expr = create_index_expression("project_id", project_id)
        expr_list.append(prj_expr)
        if alarm_name_prefix:
            # Prefix match expressed as a [prefix, prefix_end) range.
            expr_s = create_index_expression("alarm_name", alarm_name_prefix,
                                             GTE)
            expr_e = create_index_expression("alarm_name",
                                             utils.prefix_end(alarm_name_prefix),
                                             LT)
            expr_list.append(expr_s)
            expr_list.append(expr_e)
        if state_value:
            expr = create_index_expression("state_value", state_value)
            expr_list.append(expr)
        LOG.info("expr %s" % expr_list)
        index_clause = create_index_clause(expr_list=expr_list,
                                           start_key=next_token,
                                           count=max_records)
        items = self.cf_metric_alarm.get_indexed_slices(index_clause)
        return items
    def describe_alarms_for_metric(self, project_id, namespace, metric_name,
                                   dimensions=None, period=None,
                                   statistic=None, unit=None):
        """Return the indexed alarm rows attached to one metric.

        Raises InvalidParameterValue when the metric does not exist.
        """
        metric_key = self.get_metric_key(project_id, namespace, metric_name,
                                         dimensions)
        if not metric_key:
            raise exception.InvalidParameterValue("no metric")
        expr_list = [create_index_expression("metric_key", metric_key)]
        if period:
            expr = create_index_expression("period", int(period))
            expr_list.append(expr)
        if statistic:
            expr = create_index_expression("statistic", statistic)
            expr_list.append(expr)
        if unit:
            expr = create_index_expression("unit", unit)
            expr_list.append(expr)
        LOG.info("expr %s" % expr_list)
        index_clause = pycassa.create_index_clause(expr_list)
        items = self.cf_metric_alarm.get_indexed_slices(index_clause)
        return items
    def get_alarms_per_metric_count(self, project_id, namespace, metric_name,
                                    dimensions=None):
        """Count the alarms attached to one metric."""
        alarms = self.describe_alarms_for_metric(project_id, namespace,
                                                 metric_name, dimensions)
        return sum(1 for a in alarms)
    def describe_alarm_history(self, project_id, alarm_name=None,
                               end_date=None, history_item_type=None,
                               max_records=100, next_token=None,
                               start_date=None):
        """
        params:
            project_id: string
            alarm_name: string
            end_date: datetime
            history_item_type: string (ConfigurationUpdate | StateUpdate |
                               Action)
            max_records: integer
            next_token: string (uuid type)
            start_date: datetime
        """
        next_token = uuid.UUID(next_token) if next_token else ''
        expr_list = [
            pycassa.create_index_expression("project_id", project_id),
        ]
        if alarm_name:
            expr = create_index_expression("alarm_name", alarm_name)
            expr_list.append(expr)
        if end_date:
            expr = create_index_expression("timestamp", end_date, LTE)
            expr_list.append(expr)
        if start_date:
            expr = create_index_expression("timestamp", start_date, GTE)
            expr_list.append(expr)
        if history_item_type:
            expr = create_index_expression("history_item_type",
                                           history_item_type)
            expr_list.append(expr)
        index_clause = pycassa.create_index_clause(expr_list=expr_list,
                                                   start_key=next_token,
                                                   count=max_records)
        items = self.cf_alarm_history.get_indexed_slices(index_clause)
        return items
    def get_metric_alarm_key(self, project_id, alarm_name):
        """Return the row key of the alarm named *alarm_name*, or None."""
        expr_list = [
            pycassa.create_index_expression("project_id", project_id),
            pycassa.create_index_expression("alarm_name", alarm_name)
        ]
        index_clause = pycassa.create_index_clause(expr_list)
        items = self.cf_metric_alarm.get_indexed_slices(index_clause)
        # Only the first match is of interest.
        for k, v in items:
            return k
        return None
    def get_metric_alarm(self, alarm_key):
        """Return the alarm columns for *alarm_key*, or None if absent."""
        ret = None
        try:
            ret = self.cf_metric_alarm.get(alarm_key)
        except pycassa.NotFoundException:
            pass
        return ret
    def delete_metric(self, key):
        """Delete a metric, its archived statistics and all attached alarms."""
        try:
            expr_list = [create_index_expression("metric_key", key)]
            index_clause = pycassa.create_index_clause(expr_list)
            items = self.cf_metric_alarm.get_indexed_slices(index_clause)
            for k, v in items:
                project_id = v.get('project_id')
                self.delete_metric_alarm(k, project_id)
            self.scf_stat_archive.remove(key)
            self.cf_metric.remove(key)
            LOG.debug("metric is deleted(%s)" % str(key))
        except pycassa.NotFoundException:
            LOG.error("failed to delete metric(%s)" % str(key))
    def get_metric_key(self, project_id, namespace, metric_name, dimensions):
        """Return the row key of the metric matching all four attributes,
        or None when no indexed row matches."""
        dimensions = utils.pack_dimensions(dimensions)
        expr_list = [
            pycassa.create_index_expression("project_id", project_id),
            pycassa.create_index_expression("name", metric_name),
            pycassa.create_index_expression("namespace", namespace),
            pycassa.create_index_expression("dimensions", dimensions)
        ]
        index_clause = pycassa.create_index_clause(expr_list)
        items = self.cf_metric.get_indexed_slices(index_clause)
        # for/else: the else branch only runs when the result set is
        # empty (the return inside the loop exits on the first match).
        for k, v in items:
            return k
        else:
            return None
    def get_metric_key_or_create(self, project_id, namespace, metric_name,
                                 dimensions, unit='None'):
        """Return the metric's key, creating the Metric row if needed."""
        # get metric key
        key = None
        try:
            key = self.get_metric_key(project_id, namespace, metric_name,
                                      dimensions)
        except Exception as e:
            LOG.exception(e)
        # or create metric
        if not key:
            json_dim = utils.pack_dimensions(dimensions)
            key = utils.generate_metric_key(project_id, namespace, metric_name,
                                            dimensions)
            columns = {'project_id': project_id, 'namespace': namespace,
                       'name': metric_name, 'dimensions': json_dim,
                       'unit': unit or 'None',
                       'updated_timestamp': datetime.utcnow(),
                       'created_timestamp': datetime.utcnow()}
            self.cf_metric.insert(key=key, columns=columns)
            LOG.info("New metric is created (%s, %s)" % (key, columns))
        return key
    def get_metric_statistics(self, project_id, namespace, metric_name,
                              start_time, end_time, period, statistics,
                              dimensions=None):
        """Fetch archived datapoints per requested statistic.

        Returns a list (one dict of timestamp -> value per statistic),
        or {} when the metric does not exist.
        NOTE(review): *period* is accepted but never used here — confirm
        whether aggregation is expected to happen in the caller.
        """
        def get_stat(key, super_column, column_start, column_end):
            # One-minute resolution is assumed when sizing the slice.
            stat = {}
            # NOTE(review): count is a float (total_seconds() / 60);
            # pycassa documents column_count as an int — confirm.
            count = (column_end - column_start).total_seconds() / 60
            try:
                stat = self.scf_stat_archive.get(key,
                                                 super_column=super_column,
                                                 column_start=column_start,
                                                 column_finish=column_end,
                                                 column_count=count)
            except pycassa.NotFoundException:
                LOG.debug("data not found - %s %s %s %s" % (key, super_column,
                                                            column_start,
                                                            column_end))
            return stat
        # get metric key
        key = self.get_metric_key(project_id, namespace, metric_name,
                                  dimensions)
        # or return {}
        if not key:
            return {}
        statistics = map(utils.to_ascii, statistics)
        stats = map(lambda x: get_stat(key, x, start_time, end_time),
                    statistics)
        return stats
    def get_metric_statistics_for_key(self, key, time_idx):
        """Fetch all archived statistics of one metric at a single
        timestamp (column_start == column_finish == time_idx)."""
        def get_stat(key, super_column, column_start, column_end):
            stat = {}
            try:
                stat = self.scf_stat_archive.get(key,
                                                 super_column=super_column,
                                                 column_start=column_start,
                                                 column_finish=column_end,
                                                 column_count=1440)
            except pycassa.NotFoundException:
                LOG.info("not found data - %s %s %s %s" % (key, super_column,
                                                           column_start,
                                                           column_end))
            return stat
        if not key:
            return {}
        stats = map(lambda x: get_stat(key, x, time_idx, time_idx),
                    self.STATISTICS)
        return stats
    def get_metric_unit(self, metric_key):
        """Return the metric's unit, or "None" when unset or missing."""
        try:
            metric = self.cf_metric.get(key=metric_key)
        except pycassa.NotFoundException:
            return "None"
        return metric.get('unit', "None")
    def insert_stat(self, metric_key, stat, ttl=None):
        """Insert archived statistics; TTL defaults to statistics_ttl."""
        LOG.debug("scf_stat_archive.insert (%s, %s)" % (metric_key, stat))
        ttl = ttl if ttl else self.statistics_ttl
        self.scf_stat_archive.insert(metric_key, stat, ttl=ttl)
    def insert_alarm_history(self, key, column, ttl=None):
        """Insert an alarm-history row; TTL defaults to statistics_ttl."""
        LOG.debug("cf_alarm_history.insert (%s, %s)" % (key, column))
        ttl = ttl or self.statistics_ttl
        self.cf_alarm_history.insert(key, column, ttl=ttl)
    def update_alarm_state(self, alarmkey, state, reason, reason_data,
                           timestamp):
        """Overwrite the state columns of an existing alarm row."""
        state_info = {'state_value': state, 'state_reason': reason,
                      'state_reason_data': reason_data,
                      'state_updated_timestamp':timestamp}
        self.cf_metric_alarm.insert(alarmkey, state_info)
        LOG.debug("cf_metric_alarm.insert (%s, %s)" % (str(alarmkey),
                                                       str(state_info)))
    def list_metrics(self, project_id, namespace=None, metric_name=None,
                     dimensions=None, next_token=""):
        """Paginated metric listing (pages of 500 rows).

        Repeatedly pulls indexed slices via _list_metrics() until a full
        page is collected or the index is exhausted. Returns
        (metrics, next_token) where next_token is None on the last page.
        """
        def parse_filter(filter_dict):
            # Split the dimension filter into three kinds:
            #   full_filter:  (name, value) pairs that must all be present
            #   name_filter:  names that must match the key set exactly
            #   value_filter: values matched as substrings of any value
            if not filter_dict:
                return None
            full_filter, name_filter, value_filter = [], [], []
            for k, v in filter_dict.iteritems():
                k, v = utils.utf8(k), utils.utf8(v)
                if k and v:
                    full_filter.append((k, v))
                elif k and not v:
                    name_filter.append(k)
                elif not k and v:
                    value_filter.append(v)
                else:
                    msg = "Invalid dimension filter - both name and value "\
                          "can not be empty."
                    raise exception.InvalidRequest(msg)
            return full_filter, name_filter, value_filter
        filters = parse_filter(dimensions)
        LOG.info("parse filter: %s", filters)
        ret = []
        skip_first = False
        while True:
            metrics, new_next_token, next_skip_first = self._list_metrics(
                project_id, namespace, metric_name, filters, next_token)
            # The start key of a page is the last key of the previous one,
            # so it may have to be dropped to avoid a duplicate.
            if skip_first and metrics:
                ret = ret + metrics[1:]
            else:
                ret = ret + metrics
            skip_first = next_skip_first
            if len(ret) > 500:
                # More than a page collected: the 501st key becomes the
                # next page's start token.
                last_key, last_value = ret[500]
                next_token = str(last_key) if last_key else None
                break
            elif new_next_token == next_token:
                # No progress — index exhausted.
                next_token = None
                break
            else:
                next_token = new_next_token
        LOG.info("next token: %s", next_token)
        return ret[:500], next_token
    def _list_metrics(self, project_id, namespace=None, metric_name=None,
                      filters=None, next_token=""):
        """Fetch one indexed page (501 rows) of metrics and apply the
        parsed dimension *filters* client-side.

        Returns (metrics, new_next_token, skip_first) where skip_first
        tells the caller whether the next page starts with a row that is
        already included here.
        """
        def to_dict(v):
            return {'project_id': v['project_id'],
                    'dimensions': json.loads(v['dimensions']),
                    'name': v['name'],
                    'namespace': v['namespace']}
        def apply_filter(metric, filters):
            if not filters:
                return True
            dimensions = metric.get('dimensions')
            dimensions = json.loads(dimensions) if dimensions else {}
            full_filter, name_filter, value_filter = filters
            if full_filter:
                if not set(full_filter).issubset(set(dimensions.items())):
                    return False
            if name_filter:
                if set(dimensions.keys()) != set(name_filter):
                    return False
            if value_filter:
                # Substring match against any dimension value.
                for v_in_dim in dimensions.values():
                    for v in value_filter:
                        if v in utils.utf8(v_in_dim):
                            return True
                return False
            return True
        next_token = uuid.UUID(next_token) if next_token else ''
        new_next_token = None
        expr_list = [pycassa.create_index_expression("project_id",
                                                     project_id), ]
        if namespace:
            expr = pycassa.create_index_expression("namespace", namespace)
            expr_list.append(expr)
        if metric_name:
            expr = pycassa.create_index_expression("name", metric_name)
            expr_list.append(expr)
        # 501 = page size (500) + 1 row to detect a following page.
        index_clause = pycassa.create_index_clause(expr_list, count=501,
                                                   start_key=next_token)
        items = self.cf_metric.get_indexed_slices(index_clause,
                                                  column_count=100)
        last_token = None
        metrics = []
        for key, value in items:
            new_next_token = key
            if value and apply_filter(value, filters):
                last_token = key
                metrics.append((key, to_dict(value)))
        # True when the last row returned is also the last row kept —
        # the next page will repeat it as its first row.
        skip_first = last_token and last_token == new_next_token
        LOG.info("%s %s %s", next_token, new_next_token, last_token)
        new_next_token = str(new_next_token) if new_next_token \
            else new_next_token
        return metrics, new_next_token, skip_first
    def get_all_metrics(self):
        """Iterate over every row of the Metric CF."""
        return self.cf_metric.get_range()
    def get_all_alarms(self):
        """Iterate over every row of the MetricAlarm CF."""
        return self.cf_metric_alarm.get_range()
    def get_metric(self, metric_key):
        """Return the metric's columns, or {} when the row is missing."""
        try:
            data = self.cf_metric.get(metric_key)
        except pycassa.NotFoundException:
            data = {}
        return data
    def update_metric(self, metric_key, columns):
        """Merge *columns* into an existing metric row; a missing row is
        only logged, not created."""
        try:
            data = self.cf_metric.get(metric_key)
        except pycassa.NotFoundException:
            LOG.debug("Metric Not Found %s" % str(metric_key))
        else:
            data.update(columns)
            self.cf_metric.insert(key=metric_key, columns=data)
    def load_metric_data(self, metric_key):
        """Load raw metric data for *metric_key*, or {} when missing.

        NOTE(review): ``self.cf_metric_archive`` is never created in
        __init__ — calling this raises AttributeError as written.
        Presumably a 'MetricArchive' CF (or scf_stat_archive) was
        intended; confirm before use.
        """
        try:
            data = self.cf_metric_archive.get(metric_key, column_count=1440)
        except pycassa.NotFoundException:
            data = {}
        return data
    def load_statistics(self, metric_key, start, finish):
        """Load all archived statistics of a metric for [start, finish],
        as {statistic_name: {timestamp: value}}; {} when missing."""
        def get_stat(statistic):
            datapoints = self.scf_stat_archive.get(metric_key,
                                                   super_column=statistic,
                                                   column_start=start,
                                                   column_finish=finish)
            return statistic, datapoints
        try:
            stat = dict([get_stat(statistic)
                         for statistic in self.STATISTICS])
        except pycassa.NotFoundException:
            stat = {}
        return stat
    def load_alarms(self, metric_key):
        """Return the indexed alarm rows bound to *metric_key*."""
        expr_list = [
            pycassa.create_index_expression("metric_key", metric_key),
        ]
        index_clause = pycassa.create_index_clause(expr_list)
        try:
            items = self.cf_metric_alarm.get_indexed_slices(index_clause)
        except pycassa.NotFoundException:
            LOG.debug("no alarm found")
            items = {}
        return items
    def put_metric_alarm(self, alarm_key, metricalarm):
        """
        update MetricAlarm CF
        """
        # NOTE(review): the counter is incremented unconditionally, even
        # when the insert overwrites an existing alarm — confirm intent.
        LOG.debug("cf_metric_alarm.insert (%s, %s)" % (alarm_key, metricalarm))
        project_id = metricalarm.get('project_id')
        self.cf_metric_alarm.insert(key=alarm_key, columns=metricalarm)
        self.cf_alarm_counter.add(project_id, 'alarm_counter', 1)
        return alarm_key
    def restructed_stats(self, stat):
        """Pivot {statistic: {timestamp: value}} into a list of
        (timestamp, {statistic: value}) tuples.

        NOTE(review): the reduce() collapses to None unless every
        statistic has an identical timestamp key list; iterating None
        would then raise — confirm callers guarantee aligned series.
        (Name "restructed" looks like a typo for "restructured"; kept
        for interface compatibility.)
        """
        def get_stat(timestamp):
            ret = {}
            for key in stat.keys():
                ret[key] = stat[key][timestamp]
            return ret
        ret = []
        timestamps = reduce(lambda x, y: x if x == y else None,
                            map(lambda x: x.keys(), stat.values()))
        for timestamp in timestamps:
            ret.append((timestamp, get_stat(timestamp)))
        return ret
    def reset_alarm_counter(self):
        """Recount alarms per project from MetricAlarm and rewrite the
        AlarmCounter CF from scratch."""
        counter = {}
        for k, v in self.cf_metric_alarm.get_range():
            project_id = v.get('project_id')
            if counter.has_key(project_id):
                counter[project_id] += 1
            else:
                counter[project_id] = 1
        # reset counter
        for k in counter:
            self.cf_alarm_counter.remove_counter(k, 'alarm_counter')
        rows = {k: {'alarm_counter': v} for k, v in counter.iteritems()}
        self.cf_alarm_counter.batch_insert(rows)
    def get_alarm_count(self, project_id):
        """Return the project's alarm counter; 0 on any lookup failure.

        NOTE(review): the bare except also hides non-lookup errors
        (e.g. connection failures) — consider narrowing.
        """
        try:
            counter = self.cf_alarm_counter.get(project_id)
        except:
            return 0
        return counter.get('alarm_counter', 0)
    def get_notification_group(self, name):
        """Return the member column names of a notification group; []
        on any lookup failure (bare except — see note above)."""
        try:
            values = self.cf_notification_group.get(name)
        except:
            return []
        return values.keys()
    @staticmethod
    def syncdb(keyspace=None):
        """
        Create Cassandra keyspace, CF, SCF
        """
        if not keyspace:
            keyspace = FLAGS.get("cassandra_keyspace", "synaps_test")
        serverlist = FLAGS.get("cassandra_server_list")
        replication_factor = FLAGS.get("cassandra_replication_factor")
        manager = pycassa.SystemManager(server=serverlist[0])
        strategy_options = {'replication_factor':replication_factor}
        # create keyspace
        LOG.info(_("cassandra syncdb is started for keyspace(%s)" % keyspace))
        if keyspace not in manager.list_keyspaces():
            LOG.info(_("cassandra keyspace %s does not exist.") % keyspace)
            manager.create_keyspace(keyspace, strategy_options=strategy_options)
            LOG.info(_("cassandra keyspace %s is created.") % keyspace)
        else:
            # `property` shadows the builtin here; kept as-is.
            property = manager.get_keyspace_properties(keyspace)
            # check strategy_option
            if not (strategy_options == property.get('strategy_options')):
                manager.alter_keyspace(keyspace,
                                       strategy_options=strategy_options)
                LOG.info(_("cassandra keyspace strategy options is updated - %s"
                           % str(strategy_options)))
        # create CF, SCF
        # Each CF is created with its secondary indexes only when absent,
        # so syncdb is safe to run repeatedly.
        column_families = manager.get_keyspace_column_families(keyspace)
        if 'Metric' not in column_families.keys():
            manager.create_column_family(
                keyspace=keyspace,
                name='Metric',
                comparator_type=pycassa.ASCII_TYPE,
                key_validation_class=pycassa.LEXICAL_UUID_TYPE,
                column_validation_classes={
                    'project_id': pycassa.UTF8_TYPE,
                    'name': pycassa.UTF8_TYPE,
                    'namespace': pycassa.UTF8_TYPE,
                    'unit': pycassa.UTF8_TYPE,
                    'dimensions': pycassa.UTF8_TYPE,
                    'updated_timestamp': pycassa.DATE_TYPE,
                    'created_timestamp': pycassa.DATE_TYPE
                }
            )
            manager.create_index(keyspace=keyspace, column_family='Metric',
                                 column='project_id',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace, column_family='Metric',
                                 column='name',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace, column_family='Metric',
                                 column='namespace',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace, column_family='Metric',
                                 column='dimensions',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace, column_family='Metric',
                                 column='updated_timestamp',
                                 value_type=types.DateType())
            manager.create_index(keyspace=keyspace, column_family='Metric',
                                 column='created_timestamp',
                                 value_type=types.DateType())
        if 'StatArchive' not in column_families.keys():
            # Super CF: statistic name -> (timestamp -> double value).
            manager.create_column_family(
                keyspace=keyspace,
                name='StatArchive', super=True,
                key_validation_class=pycassa.LEXICAL_UUID_TYPE,
                comparator_type=pycassa.ASCII_TYPE,
                subcomparator_type=pycassa.DATE_TYPE,
                default_validation_class=pycassa.DOUBLE_TYPE
            )
        if 'MetricAlarm' not in column_families.keys():
            manager.create_column_family(
                keyspace=keyspace,
                name='MetricAlarm',
                key_validation_class=pycassa.LEXICAL_UUID_TYPE,
                comparator_type=pycassa.ASCII_TYPE,
                column_validation_classes={
                    'metric_key': pycassa.LEXICAL_UUID_TYPE,
                    'project_id': pycassa.UTF8_TYPE,
                    'actions_enabled': pycassa.BOOLEAN_TYPE,
                    'alarm_actions': pycassa.UTF8_TYPE,
                    'alarm_arn': pycassa.UTF8_TYPE,
                    'alarm_configuration_updated_timestamp': pycassa.DATE_TYPE,
                    'alarm_description': pycassa.UTF8_TYPE,
                    'alarm_name': pycassa.UTF8_TYPE,
                    'comparison_operator': pycassa.UTF8_TYPE,
                    'dimensions':pycassa.UTF8_TYPE,
                    'evaluation_periods':pycassa.INT_TYPE,
                    'insufficient_data_actions': pycassa.UTF8_TYPE,
                    'metric_name':pycassa.UTF8_TYPE,
                    'namespace':pycassa.UTF8_TYPE,
                    'ok_actions':pycassa.UTF8_TYPE,
                    'period':pycassa.INT_TYPE,
                    'state_reason':pycassa.UTF8_TYPE,
                    'state_reason_data':pycassa.UTF8_TYPE,
                    'state_updated_timestamp':pycassa.DATE_TYPE,
                    'state_value':pycassa.UTF8_TYPE,
                    'statistic':pycassa.UTF8_TYPE,
                    'threshold':pycassa.DOUBLE_TYPE,
                    'unit':pycassa.UTF8_TYPE
                }
            )
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='project_id',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='metric_key',
                                 value_type=types.LexicalUUIDType())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='alarm_name',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='state_updated_timestamp',
                                 value_type=types.DateType())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='alarm_configuration_updated_timestamp',
                                 value_type=types.DateType())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='state_value',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='period',
                                 value_type=types.IntegerType())
            manager.create_index(keyspace=keyspace,
                                 column_family='MetricAlarm',
                                 column='statistic',
                                 value_type=types.UTF8Type())
        if 'AlarmHistory' not in column_families.keys():
            manager.create_column_family(
                keyspace=keyspace,
                name='AlarmHistory',
                key_validation_class=pycassa.LEXICAL_UUID_TYPE,
                comparator_type=pycassa.ASCII_TYPE,
                column_validation_classes={
                    'project_id': pycassa.UTF8_TYPE,
                    'alarm_key': pycassa.LEXICAL_UUID_TYPE,
                    'alarm_name': pycassa.UTF8_TYPE,
                    'history_data': pycassa.UTF8_TYPE,
                    'history_item_type': pycassa.UTF8_TYPE,
                    'history_summary': pycassa.UTF8_TYPE,
                    'timestamp': pycassa.DATE_TYPE,
                }
            )
            manager.create_index(keyspace=keyspace,
                                 column_family='AlarmHistory',
                                 column='project_id',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace,
                                 column_family='AlarmHistory',
                                 column='alarm_key',
                                 value_type=types.LexicalUUIDType())
            manager.create_index(keyspace=keyspace,
                                 column_family='AlarmHistory',
                                 column='alarm_name',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace,
                                 column_family='AlarmHistory',
                                 column='history_item_type',
                                 value_type=types.UTF8Type())
            manager.create_index(keyspace=keyspace,
                                 column_family='AlarmHistory',
                                 column='timestamp',
                                 value_type=types.DateType())
        if 'AlarmCounter' not in column_families.keys():
            manager.create_column_family(keyspace=keyspace,
                    name='AlarmCounter',
                    default_validation_class=pycassa.COUNTER_COLUMN_TYPE,
                    key_validation_class=pycassa.UTF8_TYPE)
        if 'NotificationGroup' not in column_families.keys():
            manager.create_column_family(keyspace=keyspace,
                    name='NotificationGroup',
                    key_validation_class=pycassa.UTF8_TYPE,
                    comparator_type=pycassa.UTF8_TYPE,
                    default_validation_class=pycassa.UTF8_TYPE)
        LOG.info(_("cassandra syncdb has finished"))
|
Global Impact represents our faith community’s commitment to impacting the world around us in order to draw people into relationship with Jesus. Our unique vision looks to partner with “missionaries” bringing that message to people living in an area known as the 10/40 Window. The 10/40 Window represents an area of the world with the highest concentration of people groups that have never heard about Jesus. However, we also partner with “missionaries” impacting areas all around the world.
Currently, we support over 60 missionary families around the world. But, we don’t just give money to missions or missionaries. Every missionary and missionary family we support becomes part of our church family. We commit to pray, encourage, and support them longterm. While we have many new missions families and enjoy adding new ones, several have been with us for over 30 years.
When asked about the greatest commandments Jesus replied, “Love the Lord your God with all your heart, soul, mind, and strength. This is the first and greatest commandment. And the second is like the first: ‘Love your neighbor as yourself.’” One way we strive to love our neighbor, globally, comes by participating in Operation Christmas Child. OCC is a ministry of Samaritan’s Purse that provides Christmas gifts to millions of children around the world who may otherwise receive nothing. We also partner with Convoy of Hope to help in relief efforts world-wide. We have assisted in building churches, Bible schools, orphanages, and senior citizen homes. Our children and youth give to help provide equipment, books, and vehicles for our missionaries.
|
#/Club/Players/?TeamID=818875
import sys
if sys.version > '3':
import html.parser as HTMLParser
else:
import HTMLParser
import re
# Parses chpp holders
# CatzHoek
class CHPPHolderParser(HTMLParser.HTMLParser):
    """Parse the Hattrick CHPP-holder page into a list of users.

    Each collected user is a dict with keys 'id' (int), 'name' (str) and
    'appNames' (list of approved application names credited to that user).
    """

    # Matches a manager link and captures the numeric user id.
    # Raw string: the original non-raw "\/...\d" escapes trigger
    # invalid-escape warnings (SyntaxWarning on Python 3.12+).
    USER_URL_PATTERN = re.compile(r"/Club/Manager/\?userId=(\d+)")

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.users = []
        # user currently being assembled while inside a creator paragraph
        self.currentUser = {}
        self.currentUser['appNames'] = []
        self.currentAppname = ""
        # parser state: which relevant page areas are we currently inside?
        self.in_creator_paragraph = False
        self.in_approvedApplications = False
        self.in_approvedApplicationsSubDivCount = 0

    def getUserIdFromUrl(self, url):
        """Extract the numeric user id from a /Club/Manager/ URL, or None."""
        match = self.USER_URL_PATTERN.match(url)
        if match and match.group(1):
            return int(match.group(1))

    def handle_starttag(self, tag, attrs):
        if tag == 'p':
            # <p id="creator"> opens the block describing one CHPP holder
            for name, value in attrs:
                if name == 'id' and value == 'creator':
                    self.in_creator_paragraph = True
        if tag == 'a' and self.in_creator_paragraph:
            # the anchor carries the holder's display name and manager URL
            for key, value in attrs:
                if key == 'title':
                    self.currentUser["name"] = value
                if key == 'href':
                    try:
                        # renamed from `id` to avoid shadowing the builtin
                        userId = self.getUserIdFromUrl(value)
                        self.currentUser["id"] = userId
                    except Exception:
                        pass
        if tag == 'div':
            if self.in_approvedApplications:
                self.in_approvedApplicationsSubDivCount += 1
                if self.in_approvedApplicationsSubDivCount == 1:
                    # the first nested div carries the application name
                    for name, value in attrs:
                        if name == "title":
                            self.currentAppname = value
                return
            for name, value in attrs:
                if name == 'id' and value == 'approvedApplications':
                    self.in_approvedApplications = True

    def handle_endtag(self, tag):
        if tag == 'div' and self.in_approvedApplications:
            if self.in_approvedApplicationsSubDivCount == 0:
                # closing the approvedApplications container itself
                self.in_approvedApplications = False
            else:
                self.in_approvedApplicationsSubDivCount -= 1
        if tag == 'p':
            if self.in_creator_paragraph:
                # merge the finished user into self.users, de-duplicating
                # by id (a holder may appear once per application)
                found = False
                for u in self.users:
                    if u['id'] == self.currentUser['id']:
                        found = True
                if not found:
                    self.currentUser["appNames"].append(self.currentAppname)
                    self.users.append(self.currentUser)
                else:
                    for u in self.users:
                        if u['id'] == self.currentUser['id']:
                            u['appNames'].append(self.currentAppname)
                # reset state for the next creator paragraph
                self.currentUser = {}
                self.currentUser['appNames'] = []
                self.in_creator_paragraph = False  # no nested divs in playerinfo, this is okay

    def get(self):
        """Return the collected users, each with a sorted list of app names."""
        for u in self.users:
            u['appNames'] = sorted(u['appNames'])
        return self.users
|
to have been part of it all.
To have found so much pleasure in slow living.
in pages filled with lines and colour.
I feel fortunate for the new things I have learnt.
I'd like to share a small part of those small pleasures with you.
The Winner is INES FONSECA!
Thank you all for entering and taking the time to reflect about your year and sharing a little of it with me.
Tell Me: What Has Made This A Beautiful Year for you?
2 - LIKE (pangaweka) on Facebook .
A Winner will be chosen randomly, by hand on the 13th of December.
be VALID, head over to Facebook and like (Pangaweka)'s Facebook page.
(that's me)/ 1 Handmade Fragrant Hand Pillow/ 3 Handmade Ornaments.
Illustration Copyright Stella Pereira GOOD LUCK!!
This entry was posted on December 12, 2013 by pangaweka. It was filed under Art, DIY, Handmade and was tagged with christmas ornaments, free, giveaway, handmade, illustrated notebooks, illustration, limited, limited edition, notebook, original, pangaweka, stella pereira.
Hi Angela, thank you for participating and best of Luck!! To Completed Projects!
What has made this such a beautiful year is not so much a what as a who (s): Nikki and “Hannah”!
This year I was able to find love and quietness without having to rush.
You’re right Stella! That’s also one of the things I like about your illustrations… there is a simplicity about them that makes them really beautiful.
Hi Stella…it’s been awhile since I visited…seems I’m always trying to catch-up with commenting on my community’s blogs. Love your ‘Santa’ idea…I’m not eligible because I have chosen to leave FB, however, still want to participate: My year has been special because I am still alive and whole and able to create, because I have loved and have been loved, because my sons are safe, sane and healthy and because I am privileged to have a roof over my head, food in my belly and the ability to help those that don’t.
Hi John. I’m always pleased when you visit… I like that you’re happy to be alive. That alone makes my everyday special. Thank you for your wonderful comment and helping me reflect on the simple things that make life so special.
I brought my kids to visit my granny & papa for the summer, it had been too long since we had seen them! We made such new sweet family memories. Unexpectedly my granny passed away in September. Luckily I had an uneasy feeling about leaving so we were still here. While I stayed by her bedside and despite the pain and loss, it was such a privilege and honor to spend such a beautiful time with her. It will always be a year etched in my heart.
This year I have worked on being grounded in the moment and enjoying the process of making things.
My daughter was married September 15, 2013. I decided that instead of trying to take on too many projects I would take on a few and put love into everything I did for the wedding. I made a surprise wedding video. I made a quilt. I made the wedding cake. I did each with purpose. I filled each project with love. I enjoyed the process of making each. It was so much fun and the results were amazing. The video was a hit! My daughter and her husband love the quilt, they can even feel the love that I filled it with! And the cake was delicious! I’ve had many requests for the recipe.
I’m not saying any of this to brag about what a great job I did, it isn’t about that at all. It was the intention of love and care that went into each project that made them special.
I learned that it is important to enjoy the process of creating a few meaningful things instead of taking on too much and it being work and NOT fun. The results are amazing when you craft with intention.
– not here to participate, but just to say that I am lucky to have met a beautiful person in Camb. Thanks for being in my life friend!
|
import webuntis
import mock
from webuntis.utils.third_party import json
from .. import WebUntisTestCase, BytesIO
class BasicUsage(WebUntisTestCase):
    """Unit tests for the low-level JSON-RPC result/error parsing helpers."""

    def test_parse_result(self):
        parse = webuntis.utils.remote._parse_result

        # mismatched request/response ids must be rejected
        request = {'id': 2}
        response = {'id': 3}
        self.assertRaisesRegex(webuntis.errors.RemoteError,
                               'Request ID', parse, request, response)

        # matching ids but no result and no error payload
        request = response = {'id': 2}
        self.assertRaisesRegex(webuntis.errors.RemoteError,
                               'no information', parse, request, response)

        # happy path: the raw result is passed through unchanged
        request = {'id': 2}
        response = {'id': 2, 'result': 'YESSIR'}
        assert parse(request, response) == 'YESSIR'

    def test_parse_error_code(self):
        parse = webuntis.utils.remote._parse_error_code

        # completely empty response carries no diagnostic information
        request = response = {}
        self.assertRaisesRegex(webuntis.errors.RemoteError,
                               'no information', parse, request, response)

        # unknown error code falls back to a generic RemoteError with
        # the server-supplied message
        response = {'error': {'code': 0, 'message': 'hello world'}}
        self.assertRaisesRegex(webuntis.errors.RemoteError,
                               'hello world', parse, request, response)

        # every registered error code maps to its dedicated exception type
        for code, exc in webuntis.utils.remote._errorcodes.items():
            self.assertRaises(exc, parse, request, {
                'error': {'code': code, 'message': 'hello'}
            })
|
We are sceptical that Egypt’s plans for a new electric railway linking Cairo with the planned New Administrative Capital (NAC) will materialise over our long-term forecast period (to 2028).
Although the funding for the railway has reportedly been agreed with a USD1.2bn loan from Exim Bank of China, the project faces numerous obstacles including its questionable feasibility, high costs and complexity, which make lengthy delays likely.
More broadly, recent announcements of negotiation breakdowns and financing difficulties related to the development of the NAC support our view that the new city will not be fully completed over the next decade.
We hold a cautious view on the prospects for Egypt’s planned electric railway linking Cairo with the NAC. In January 2019, an agreement was signed between the Egyptian government and Exim Bank of China for a USD1.2bn loan to finance the construction of the new electric railway, bringing it a step closer to realisation. This would represent one of the highest-value rail projects undertaken in Egypt, reflecting the ambitious nature of the government’s plans for the NAC and its supporting infrastructure. Nevertheless, we believe that there are serious obstacles standing in the way of the railway’s completion over the next decade.
The most prominent of these is the unclear economic viability of the project. While Egypt’s rapidly growing population and the development of new urban areas will drive demand for investment in public transport, the business case for expensive new railway developments is not strong in the medium term. The government has claimed that the project will serve 350,000 people, but given the lack of permanent residents in the NAC at present, the demand case remains weak. Indeed, previous attempts to establish new cities in Egypt outside Cairo have failed to attract anywhere close to the numbers of new residents expected. We believe that this will also be the case for the NAC, with an initial lack of services and facilities as well as high housing costs likely to deter a large influx of new residents, resulting in limited demand for new public transport.
The unclear feasibility of the new railway will likely deter potential private sector construction firms and operators. As a result, while no contractor has yet been announced for the construction of the railway, we expect a Chinese firm to be awarded the project in due course. Chinese firms do not currently have a significant presence in the Egyptian rail sector, but a global pattern of Chinese investment suggests that the provision of loans for infrastructure projects by Chinese banks is usually followed by the selection of a Chinese construction company to carry out the work. Nevertheless, while state-backed Chinese companies have the ability to absorb cost overruns and operating losses better than private firms, China’s previous experiences of running railways in Africa should act as a warning. In both Kenya and Ethiopia, Chinese-backed railway projects have struggled with profitability, leading to concerns over the ability to repay loans for the projects.
We do not expect similar issues with loan repayments to affect Egypt’s electric railway, as the terms offered on the loan are generous and Egypt’s debt to GDP ratio, while high, is manageable. Nevertheless, the weak business case may make it difficult to attract an operator for the railway, as even state-owned Chinese firms will be reluctant to take on a loss-making project, potentially leading to delays to construction. Further delays are likely due to the complexity of the project, as this will be Egypt’s first electric-powered railway. While we believe that the country has the necessary power capacity to support an electric railway, it will likely require a dedicated power supply to ensure uninterrupted operations, necessitating further investment and heightening risks of delays to the project.
The likely obstacles to the development of the electric railway are mirrored by the difficulties facing the construction of the NAC itself. As noted above, Egypt has a history of grand new city projects failing to meet potential or attract the number of residents required to make them viable. The NAC is slightly different, as the government intends to move all its ministries and official functions to the city, which will include the relocation of thousands of civil servants, creating a more solid demand base. Nevertheless, the ambitious nature of the plans for the new city, the lack of a clear budget and the high costs involved lead us to adopt a cautious view towards its full realisation over the next decade. This has been reinforced by recent announcements of negotiation breakdowns with potential contractors and partners, including China Fortune Land Development, which planned to invest USD20bn in the city, and UAE-based Emaar. Further negotiation and financing difficulties are likely, and ultimately we do not believe that the NAC will be completed per the government’s vision over the next decade.
|
#-------------------------------------------------------------------------------
# Name: Settings
# Purpose: Contains the settings for the application and threads
#
# TODO: Move the reading of config xml into this file
# Move some setting into external xml file
# Move the config files to ~/.flannelfox
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# System Includes
import datetime, json, math, time, os
from flannelfox import logging
def getConfigFiles(directory):
	'''
	Collect and parse the JSON config files in a directory.

	directory: path to scan for *.json files

	Returns a list of (configFilePath, parsedJson) tuples. Files that cannot
	be read or parsed are skipped with a warning.
	'''
	logger = logging.getLogger(__name__)

	configFiles = []

	if os.path.isdir(directory):
		for configFile in os.listdir(directory):
			configFilePath = os.path.join(directory,configFile)
			try:
				if configFile.endswith('.json'):
					configFileJson = __readConfigFile(configFilePath)

					# Bug fix: the original tested `configFile` (the filename,
					# always truthy) instead of the parsed JSON result.
					if configFileJson is not None:
						configFiles.append((configFilePath, configFileJson))
			except Exception as e:
				logger.warning('There was a problem reading a config file\n{}\n{}'.format(
					configFilePath,
					e
				))
				continue

	return configFiles
def __readConfigFile(file):
	'''Parse a single JSON config file; return [] when it cannot be read.'''
	logger = logging.getLogger(__name__)

	try:
		logger.debug('Reading RSS config file: {0}'.format(file))
		handle = open(file)
		try:
			return json.load(handle)
		finally:
			handle.close()
	except Exception as e:
		logger.error('There was a problem reading the rss config file\n{0}'.format(e))
		return []
def __getModificationDate(filename):
	'''
	Checks the modification time of the file it is given

	filename: The full path of the file to return the timestamp of.

	Returns the timestamp in seconds since epoch, or -1 on error
	'''
	logger = logging.getLogger(__name__)

	try:
		# os.path.getmtime already returns seconds since the epoch; the old
		# datetime.fromtimestamp(...).strftime('%s') round-trip was
		# platform-dependent ('%s' is unsupported on Windows) and redundant.
		return int(os.path.getmtime(filename))
	except Exception:
		logger.error('There was a problem getting the timestamp for:\n{0}'.format(filename))
		return -1
def isCacheStillValid(force=False, cacheFileName=None, frequency=360):
	'''
	Used to determine if a cachefile needs to be updated

	force: force an update (report the cache as stale)
	cacheFileName: The full path of the file to check
	frequency: how often the file should be updated in minutes

	Returns Boolean (True when the cache is still fresh)
	'''
	logger = logging.getLogger(__name__)

	# Bug fix: `force` was accepted but never used; honour it by reporting
	# the cache as stale so the caller performs an update.
	if force:
		return False

	try:
		# Make sure the cache directory exists so later writes succeed
		if not os.path.exists(os.path.dirname(cacheFileName)):
			try:
				os.makedirs(os.path.dirname(cacheFileName))
			except OSError: # Guard against race condition
				pass

		lastModified = __getModificationDate(cacheFileName)

		if lastModified == -1:
			return False

		logger.debug('Checking cache: {0} {1}:{2}'.format(cacheFileName, frequency, math.ceil((time.time()/60 - lastModified/60))))

		# Age of the cache file in minutes, rounded up
		difference = math.ceil((time.time()/60 - lastModified/60))

		if difference >= frequency:
			logger.debug('Cache update needed')
			return False
		else:
			logger.debug('Cache update not needed')
			return True

	except Exception:
		logger.error('Cache validity for {0} could not be determined'.format(cacheFileName))
		return False
def readCacheFile(cacheFileName):
	'''Return the parsed JSON payload of a cache file, or [] when it cannot be read.'''
	logger = logging.getLogger(__name__)

	try:
		logger.debug('Reading cache file for [{0}]'.format(cacheFileName))
		handle = open(cacheFileName)
		try:
			return json.load(handle)
		finally:
			handle.close()
	except Exception as e:
		logger.error('There was a problem reading a lastfm list cache file: {0}'.format(e))
		return []
def updateCacheFile(force=False, cacheFileName=None, data=None):
	'''
	Used to update cache files for api calls. This is needed so we do not keep
	asking the api servers for the same information on a frequent basis.

	force: currently unused; kept for backward compatibility of the interface
	cacheFileName: the full path of the cache file to write
	data: the JSON-serialisable object to store
	'''
	logger = logging.getLogger(__name__)

	directory = os.path.dirname(cacheFileName)
	if not os.path.exists(directory):
		try:
			os.makedirs(directory)
		except OSError: # Guard against race condition (consistent with isCacheStillValid)
			pass

	try:
		logger.debug('Cache update for {0} needed'.format(cacheFileName))
		with open(cacheFileName, 'w') as cache:
			cache.write(json.dumps(data))
	except Exception as e:
		logger.error('There was a problem writing a cache file {0}: {1}'.format(cacheFileName, e))
|
Posted November 14, 2014 by createadmin & filed under Uncategorized.
We are working towards a broad definition of CACD.
We don’t want to be prescriptive but recognise the need for the sector to ably describe and explain the important work it does to communities and other potential supporters.
What are the principles that CACD work upholds?
Along with a broad definition of CACD we have written a draft document expressing the principles that guide the work of the CACD sector.
|
# Copyright © 2020 Red Hat Inc., and others.
#
# This file is part of Bodhi.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Defines schemas related to GraphQL objects."""
from graphene import relay, Field, String
from graphene_sqlalchemy import SQLAlchemyObjectType
from bodhi.server.models import (
Release as ReleaseModel,
Update as UpdateModel,
BuildrootOverride as BuildrootOverrideModel
)
class Release(SQLAlchemyObjectType):
    """Type object representing a distribution release from bodhi.server.models like Fedora 27."""

    class Meta:
        """Allow to set different options to the class."""

        model = ReleaseModel
        interfaces = (relay.Node, )

    # NOTE(review): these columns are re-declared as plain String fields,
    # presumably because the underlying model columns are enum/composite
    # types that graphene-sqlalchemy does not map cleanly -- confirm
    # against the SQLAlchemy model.
    state = Field(String)
    package_manager = Field(String)
class Update(SQLAlchemyObjectType):
    """Type object representing an update from bodhi.server.models."""

    class Meta:
        """Allow to set different options to the class."""

        model = UpdateModel
        interfaces = (relay.Node, )

    # NOTE(review): overridden as plain String fields, presumably to expose
    # enum-typed model columns as strings in the GraphQL schema -- confirm
    # against the SQLAlchemy model.
    status = Field(String)
    request = Field(String)
    date_approved = Field(String)
class BuildrootOverride(SQLAlchemyObjectType):
    """Type object representing a buildroot override from bodhi.server.models."""

    class Meta:
        """Allow to set different options to the class."""

        model = BuildrootOverrideModel
        interfaces = (relay.Node, )

    # NOTE(review): overridden as a plain String field, presumably to expose
    # the submitter relation/column as a string -- confirm against the model.
    submitter = Field(String)
|
The data was added on , 24 May 2018 read 398 times.
A Syrian-Russian reconciliation delegation was forwarded to Dara’a province to offer the militants to lay down their arms and join the peace agreement with the Syrian Army, military sources in Damascus reported on Wednesday.
Sources said that a Russian-Syrian reconciliation delegation was sent to the towns of Nahij and Mohaja to call on the militants to end battle with the army and endorse the peace agreement.
The sources further said that if the militants reject the peace offer the army will launch military operation against them.
Also, the sources said that the army has sent a long convoy of military equipment and thousands of fresh forces to Azra base in Dara’a.
The sources said that the army plans to reopen the strategic Nasib Passageway at the border with Jordan as its first priority of operation in Dara’a, adding that the passageway was occupied by the terrorists four years ago.
The Syrian Army troops engaged in fierce clashes with ISIL in a desert-like region between Western Deir Ezzur and Eastern Homs and fended off terrorists’ heavy offensive, killing or wounding a large number of the gunmen on Wednesday.
The army men exchanged fire with ISIL after the terrorists tried to break through the government forces’ positions in Badiyeh (desert) near the town of al-Mayadeen and another Badiyeh near the villages in the Northern countryside of the town of Albu Kamal in Southeastern Deir Ezzur near the border with Iraq, and managed to repel the terrorists’ heavy offensives.
Terrorists suffered tens of casualties in the failed attacks.
In the meantime, other units of the army clashed with the ISIL near an army positions near Oweiraz Dam Southeast of T3 Station in Southeastern Badiyeh of Homs, repelling the terrorists’ attack after killing and injuring a large number of them.
The ISIL further retreated from the battlefield to evade more losses.
The Syrian Army’s artillery and missile units launched a heavy shelling attack on the positions of Tahrir al-Sham Hay’at (the Levant Liberation Board or the Al-Nusra Front) in Northern Hama on Wednesday, damaging their strongholds and inflicting a number of casualties on the militants.
The artillery and missile units opened heavy fire at Tahrir al-Sham’s positions near the village of al-Zakah and the town of al-Latamina, destroying several heavy vehicles and killing or wounding a number of terrorists.
Other artillery and missile units pounded more strongholds of the terrorists in Kafr Zita region in Northern Hama, destroying a position and a volume of military equipment and killing several gunmen.
In the meantime, a unit of the army opened fire at a group of Tahrir al-Sham Hay’at in the village of al-Ankawi in Western al-Ghaab Plain and managed to repel their attack.
A number of Tahrir al-Sham’s fighters were killed or wounded and the remaining pockets of them retreated towards Southwestern Idlib.
Also in the past 24 hours, the Turkish Army dispatched a new large convoy of military equipment to terrorist-held regions in Southeastern Idlib and Northern Hama to reinvigorate its forces in the war-hit country.
A long convoy of the Turkish military, including forty vehicles and heavy equipment, entered Northwestern Syria via Kafr Lusin passageway, heading toward three points in Morek, Sarman and Jabal Shahshabu regions.
In the meantime, the Arabic-language al-Watan daily pointed to widening insecurity in militant-held regions in Idlib province, adding that the regions where the Turkish troops have deployed to set up truce-monitoring points are experiencing a deteriorating security situation, mainly in Northern Lattakia and Southern Idlib.
Russia, Iran and Turkey have completed the establishment of observation points in the Idlib de-escalation zone, Head of the Main Operations Department at Russia’s General Staff Colonel General Sergei Rudskoi said on Wednesday.
“The establishment of observation points along the disengagement line has been completed,” Colonel General Sergei Rudskoi said, adding that military servicemen from the guarantor countries of the Astana agreements – Russia, Iran and Turkey – had been deployed there, TASS reported.
“Russia set up a total of ten observation points, another twelve were established by Turkey and seven by Iran. These observation points are being used to monitor the ceasefire between government troops and armed opposition units,” Rudskoi added.
Also on Wednesday, a fresh round of insecurity has covered Idlib province as assassination operations against militant commanders are on rise, field sources reported.
The sources said that a number of Faylaq al-Sham militants were killed or wounded after unknown raiders opened fire at a group of militants at several checkpoints near the village of Tal Adah.
They added that two bombs went off in front of Idlib Free Police’s base in Tamanin settlement in Northern Idlib, killing a number of terrorists.
In the meantime, a bomb went off at the entrance of the town of Ma’arat Mesrin in Northern Idlib, but, there is no report on the possible casualties, the sources said.
Sources in Idlib city said that Tahrir al-Sham Hay’at has embarked on vast arrest across the city, storming civilians’ houses under the pretext of arresting ISIL-affiliated people behind recent blasts.
Mahmoud Othman, one of the commanders of the Turkey-backed al-Ra’ei Police, was killed in a fresh round of clashes among the Ankara-backed militants in al-Bab region in Northern Aleppo on Wednesday.
Gunmen of al-Montaser Bellah Brigade, affiliated to the Ankara-backed Free Syrian Army (FSA), engaged in heavy fighting with members of the Turkey-backed Free Police in the main bazaar in the town of al-Ra’ei in al-Bab region in Northern Aleppo, leaving Mahmoud Othman dead and eight more militants injured.
Also on Wednesday, the Syrian Army dispatched more troops and equipment to the Northwestern countryside of Aleppo city, preparing for an imminent anti-terrorism operation in the region, field sources reported.
The sources said that the army has sent a large number of soldiers and a large volume of military hardware from Lattakia to Kafr Hamra and al-Lairamoun regions in the Northwestern countryside of Aleppo city to take part in a possible operation against terrorists.
They went on to say that the terrorists of Tahrir al-Sham Hay’at have recently increased their attacks on army positions and residential areas in the region.
|
# -*- coding: UTF-8 -*-
from pyramid.response import Response
from pyramid.view import view_config
import httplib2
from urllib.parse import urlencode
from urllib.parse import urlparse
from crdppf.lib.wfsparsing import is_get_feature, limit_featurecollection
@view_config(route_name='ogcproxy', renderer='json')
def ogcproxy(request):
    """Proxy an OGC (WMS/WFS) request to the configured crdppf_wms backend.

    Forwards the query string (minus any JSONP ``callback`` parameter),
    method, headers and body to the backend and returns its response.
    WFS GetFeature POST responses are truncated to at most 4 features.
    """
    params = dict(request.params)

    # Drop the JSONP callback parameter so it is not forwarded upstream
    params_encoded = {}
    for k, v in params.items():
        if k == 'callback':
            continue
        params_encoded[k] = v

    query_string = urlencode(params_encoded)

    if len(params_encoded) > 0:
        _url = '?' + query_string
    else:
        _url = ''

    method = request.method
    url = request.registry.settings['crdppf_wms']

    h = dict(request.headers)
    if urlparse(url).hostname != 'localhost':
        # Bug fix: the original passed the dict itself as the pop() default
        # (`h.pop("Host", h)`); use None so a missing Host header is simply
        # ignored instead of relying on a confusing no-op default.
        h.pop("Host", None)

    body = None
    if method in ("POST", "PUT"):
        body = request.body

    url += _url

    http = httplib2.Http()
    resp, content = http.request(url, method=method, body=body, headers=h)

    # Keep WFS GetFeature payloads small for the client
    if method == "POST" and is_get_feature(body):
        content = limit_featurecollection(content, limit=4)

    headers = {"Content-Type": resp["content-type"]}

    return Response(content, status=resp.status, headers=headers)
|
Studies have shown that women who get regular vigorous exercise decrease their chances of ovulatory infertility. With every hour you work out, you could be reducing your chances of infertility by 7 percent. So if you are looking to conceive anytime soon, you’ll want to hit the gym.
When it comes to caffeine and alcohol — every woman metabolizes it differently. Even your menstrual cycle can play a part in how you react to caffeine or alcohol. Since there is no definitive answer to how it affects your fertility, it’s best to cut down on your daily caffeine intake and avoid alcohol.
You read that right; you’ll want to get your daily dose of whole milk — so why not do it with ice cream! Now, this isn’t an open ticket to eat a tub a night, but if you replace one low-fat milk item a day with a full-fat ice cream treat, while still keeping your daily calorie intake in mind — you could be increasing your chances of fertility.
Stress is bad for your body mentally and physically. Meditation, yoga, and other relaxation techniques can help double your chances of conceiving. So if you are feeling overwhelmed, take 10-20 minutes twice a day to relax.
Women ovulate on an average of 14 days before their next period. The most fertile time is usually five days before ovulation or on day one of ovulation. Looking to be precise; many local drug stores sell over-the-counter ovulation kits that measure the surge of hormones you receive just before ovulation.
NuWave Medical, PLLC offers hormone testing and treatment options for females living in New York City, Nassau County, and Suffolk County, Long Island. Visit our website to learn more about female hormone replacement therapy, or give us a call at (631) 343-7144.
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of ResponseGraphUCB run on a 2x2 game."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import matplotlib.pyplot as plt
import numpy as np
from open_spiel.python.algorithms import response_graph_ucb
from open_spiel.python.algorithms import response_graph_ucb_utils
def get_example_2x2_payoffs():
  """Returns the deterministic mean payoff tensor of the example 2x2 game."""
  # The original seeded this with np.random.uniform and then overwrote every
  # entry, so the random draw was dead work; build the tensor directly.
  mean_payoffs = np.zeros((2, 2, 2))
  mean_payoffs[0, :, :] = np.asarray([[0.5, 0.85], [0.15, 0.5]])
  # Zero-sum-in-expectation: player 1's means are the complement of player 0's
  mean_payoffs[1, :, :] = 1 - mean_payoffs[0, :, :]
  return mean_payoffs
def main(argv):
  """Runs ResponseGraphUCB on the example 2x2 game and plots the results."""
  if len(argv) > 1:
    raise app.UsageError('Too many command-line arguments.')

  payoffs = get_example_2x2_payoffs()
  game = response_graph_ucb_utils.BernoulliGameSampler(
      [2, 2], payoffs, payoff_bounds=[-1., 1.])
  game.p_max = payoffs
  game.means = payoffs
  print('Game means:\n', game.means)

  # Sample the game until the response graph is resolved with confidence
  sampler = response_graph_ucb.ResponseGraphUCB(
      game,
      exploration_strategy='uniform-exhaustive',
      confidence_method='ucb-standard',
      delta=0.1)
  results = sampler.run()

  # Plotting
  print('Number of total samples: {}'.format(np.sum(sampler.count[0])))
  sampler.visualise_2x2x2(real_values=game.means, graph=results['graph'])
  sampler.visualise_count_history(figsize=(5, 3))
  plt.gca().xaxis.label.set_fontsize(15)
  plt.gca().yaxis.label.set_fontsize(15)

  # Compare to ground truth graph
  true_graph = sampler.construct_real_graph()
  sampler.plot_graph(true_graph)
  plt.show()
# Let absl parse flags before invoking main().
if __name__ == '__main__':
  app.run(main)
|
Update: The Savage Principle is Next!
Author J.A. Remerski THE EDGE OF NEVER Giveaway!
Tamara Rose Rambles Happy New Year Vlog!
UNREQUITED DEATH "Live" on Barnes & Noble!
|
import sys
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
import PyQt4.Qwt5.anynumpy as np
# Thermocouple application: channel 0 has the thermocouple
# connected to and channel 1 receives the temperture of the cold junction
# check out http://www.linux-usb-daq.co.uk/howto2/thermocouple/
class DAQThermo(Qt.QWidget):
    """Vertical thermometer widget with a 'Temperature' caption below it."""

    def __init__(self, *args):
        Qt.QWidget.__init__(self, *args)

        # the actual thermometer display
        self.thermo = Qwt.QwtThermo(self)
        self.thermo.setOrientation(Qt.Qt.Vertical,Qwt.QwtThermo.LeftScale)
        self.thermo.setFillColor(Qt.Qt.green)

        label = Qt.QLabel("Temperature", self)
        label.setAlignment(Qt.Qt.AlignCenter)

        # thermometer on top, caption underneath
        layout = Qt.QVBoxLayout(self)
        layout.setMargin(0)
        layout.addWidget(self.thermo)
        layout.addWidget(label)

        # keep the widget narrow: three times the label's preferred width
        self.setFixedWidth(3*label.sizeHint().width())

    # __init__()

    def setValue(self, value):
        # Show the given temperature on the thermometer
        self.thermo.setValue(value)

    # setValue()

    def setRange(self,mi,ma):
        # Set the lower (mi) and upper (ma) bounds of the thermometer scale
        self.thermo.setRange(mi,ma)
# this is taken from the QWT demos and slightly modified
# to get this scrolling plot
class ScrollingPlot(Qwt.QwtPlot):
    """Strip-chart plot that scrolls new temperature samples in from the left."""

    def __init__(self, *args):
        Qwt.QwtPlot.__init__(self, *args)

    def initPlotwindow(self,y,samplingrate):
        # y: initial value used to fill the whole curve
        # samplingrate: acquisition rate in Hz (scales the time axis)
        self.samplingrate = samplingrate;

        # set axis titles
        self.setAxisTitle(Qwt.QwtPlot.xBottom, 't/sec -->')
        self.setAxisTitle(Qwt.QwtPlot.yLeft, 'temperature/C -->')

        # insert a few curves
        self.cData = Qwt.QwtPlotCurve('y = temperature')
        self.cData.setPen(Qt.QPen(Qt.Qt.red))
        self.cData.attach(self)

        # make a Numeric array for the horizontal data (500 samples)
        self.x = np.arange(0.0, 500, 1)
        self.x = self.x / samplingrate;

        # sneaky way of creating an array of just zeroes
        self.y = self.x * 0 + y

        # initialize the data
        self.cData.setData(self.x,self.y)

        # insert a horizontal marker at y = 0
        mY = Qwt.QwtPlotMarker()
        mY.setLineStyle(Qwt.QwtPlotMarker.HLine)
        mY.setYValue(0.0)
        mY.attach(self)

        # replot
        self.replot()

    # initPlotwindow()

    def new_data(self,d):
        # shift the data to create a scrolling dataplot:
        # newest sample becomes element 0, the oldest is dropped
        self.y = np.concatenate( ([d], self.y[0:-1] ) )
        self.cData.setData(self.x,self.y)
        self.replot()

# class ScrollingPlot
# calculate the temperature
def calcTemperature(voltageTheormocouple, temperatureLM35):
    """Convert the amplified thermocouple voltage to a temperature in C.

    The thermocouple signal passes through an INA126 instrumentation
    amplifier: remove the amplifier's zero offset and gain, divide the raw
    thermocouple voltage by its sensitivity (39 uV/C), then add the
    cold-junction temperature measured by the LM35.
    """
    # Gain of the instrumentation amplifier INA126.
    gain = 5 + 80 / 0.456
    # Zero offset of the instrumentation amplifier.
    offset = -0.05365
    raw_volts = (voltageTheormocouple - offset) / gain
    return raw_volts / 39E-6 + temperatureLM35
def makePlot(samplingrate):
    """Create, size and show the scrolling temperature plot."""
    plot = ScrollingPlot()
    plot.initPlotwindow(0, samplingrate)
    plot.resize(500, 300)
    plot.show()
    return plot
def makeThermo():
    """Create, size and show the thermometer widget (range -20..300 C)."""
    widget = DAQThermo()
    widget.resize(100, 400)
    widget.setRange(-20, 300)
    widget.show()
    return widget
#########################################################
# functions called by comedi2py
# called once with the samplingrate in Hz
def comedistart(samplingrate, minValue, maxValue):
    """comedi2py entry point: called once with the sampling rate in Hz."""
    global scrollplot
    global thermo
    scrollplot = makePlot(samplingrate)
    thermo = makeThermo()
# called every sample
def comedidata(a):
    """comedi2py entry point: called for every acquired sample.

    Channel 0 carries the amplified thermocouple voltage; channel 1 the
    LM35 cold-junction sensor, scaled here by its 10 mV/C sensitivity.
    """
    global scrollplot
    global thermo
    thermo_voltage = a[0]
    cold_junction_temp = a[1] / 10E-3
    temperature = calcTemperature(thermo_voltage, cold_junction_temp)
    scrollplot.new_data(temperature)
    thermo.setValue(temperature)
# called at the end
def comedistop():
    # comedi2py entry point, called once when acquisition stops; prints a
    # blank line (print adds its own newline) to leave the terminal tidy.
    print "\n"
|
March 2, 2018 — Mississippi state Sen. Chris McDaniel (R-Ellisville) announced at a rally this week that he will challenge Sen. Roger Wicker (R) in the June 5 Republican primary. McDaniel’s declaration, which had been speculated upon for months, came just before the state’s candidate filing deadline, which was yesterday.
In 2014, McDaniel came within an eyelash of denying Sen. Thad Cochran (R) re-nomination, as the incumbent was saved ironically through a reported deal made with African American leaders to deliver black votes for the senator in the Republican run-off.
In his original primary against Sen. Cochran, McDaniel actually placed first, but was denied winning the party nomination because he finished 1,719 votes away from attracting majority support. This forced the secondary run-off election. The presence of a third candidate in that primary race, the little-known Thomas Carey, who received 4,854 votes, created the dynamic for the run-off. Had Carey not been a candidate, McDaniel would have successfully won the GOP nomination, and would very likely be serving in the Senate today.
But a race against Sen. Wicker will be much different. Though McDaniel did very well in his challenge to Sen. Cochran, he still failed to win. Therefore, some of the luster his grassroots supporters had for him as a candidate may have faded at least to a degree.
Oct. 12, 2017 — Missouri Attorney General Josh Hawley (R) officially announced his long-awaited challenge to Sen. Claire McCaskill (D) on Tuesday this week. The move had been expected since even before he formed a senatorial exploratory committee at the beginning of August. Hawley then found himself encouraged to run for the Senate literally from the first few days after his election as the state’s AG in November.
Saying Sen. McCaskill has turned her back on many key Show Me State constituencies and industries, that she has been in Washington “forever”, and simply “doesn’t represent Missouri” anymore, Hawley declared his new US Senate candidacy via a campaign video featuring him, his wife, and their two young sons (above).
Already, a McCaskill-Hawley general election race is being viewed as the Republicans’ top conversion opportunity. Though Hawley must get past several lesser GOP primary candidates, including state Rep. Paul Curtman (R-Pacific/Franklin County), he is the prohibitive favorite to become the party nominee next August.
|
# -*- encoding:utf-8 -*-
from datetime import datetime
import socket
import re
from twython import Twython
import os
import json
# Twitter API credentials are kept outside the source tree in
# ~/.ashioto/twitter.json: a JSON object holding the four keys below.
home = os.path.expanduser("~")
twitter_conf_file = os.path.join(home, '.ashioto', 'twitter.json')
# NOTE(review): the handle returned by open() is never closed; acceptable
# for a one-shot load at import time.
tc = json.load(open(twitter_conf_file))
CONSUMER_KEY = tc["CONSUMER_KEY"]
CONSUMER_SECRET = tc["CONSUMER_SECRET"]
ACCESS_TOKEN = tc["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = tc["ACCESS_TOKEN_SECRET"]
class NameServer(object):
    # Minimal single-client server: an audio source connects over TCP, the
    # raw stream is scanned for Vorbis ARTIST/TITLE tags, and each song that
    # stays current long enough (see SongCue) is tweeted.

    def __init__(self, host="localhost", port=8000, buffer_size=8192, timeout=1):
        self.host = host
        self.port = port
        self.buffer_size = buffer_size
        self.timeout = timeout
        self.conn = None          # accepted client socket, set in run()
        self.connected = False    # True once the 200 OK has been sent
        self.twitter = Twython(app_key=CONSUMER_KEY,
                               app_secret=CONSUMER_SECRET,
                               oauth_token=ACCESS_TOKEN,
                               oauth_token_secret=ACCESS_TOKEN_SECRET)

    def response_ok(self):
        # Minimal HTTP response so the connecting source starts streaming.
        self.conn.send('HTTP/1.0 200 OK\r\n\r\n')

    def tweet(self, name, title):
        # Post the current song; used as the SongCue callback.
        songinfo = '♪ "{}" ({})'.format(title, name)
        print songinfo
        self.twitter.update_status(status=songinfo)

    def run(self):
        # Accept exactly one client and scan its stream until it disconnects.
        cue = SongCue(callback=self.tweet)
        # ARTIST=...TITLE=... pair inside a Vorbis comment block.
        artist_title_re = re.compile("ARTIST=(.*)TITLE=(.*)vorbis")
        print "NameServer start at {}:{}".format(self.host, self.port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind((self.host, self.port))
        s.listen(1)
        conn, addr = s.accept()
        print 'Connected by', addr
        self.conn = conn
        while 1:
            # NOTE(review): uses a literal 8192 rather than self.buffer_size.
            data = conn.recv(8192)
            if not data: break
            if not self.connected:
                self.response_ok()
                self.connected = True
            at = artist_title_re.search(data)
            if at:
                name = at.group(1)
                title = at.group(2)
                cue.add(name, title)
            # Give the cue a chance to fire the pending song once it has
            # been current long enough.
            cue.noop()
        self.conn.close()
        print "NameServer stop"
class SongCue(object):
    """Debounces song changes: a song is only reported (via *callback*)
    after it has remained the current song for more than *bytime* seconds.

    Call add() whenever a song announcement arrives and noop() periodically;
    noop() fires the callback once the pending song is old enough, then
    clears the pending state so each song is reported at most once.
    """

    def __init__(self, bytime=60, callback=None):
        self.bytime = bytime      # minimum age in seconds before firing
        self.callback = callback  # invoked as callback(name, title)
        self.new = {}             # pending song: {"name", "title", "time"}

    def add(self, name, title):
        """Record *name*/*title* as the pending song, timestamped now."""
        self.new["title"] = title
        self.new["name"] = name
        self.new["time"] = datetime.now()

    def noop(self):
        """Fire the callback if the pending song is older than *bytime*."""
        if "time" in self.new:
            dur = datetime.now() - self.new["time"]
            # total_seconds() instead of .seconds: .seconds ignores the days
            # component, so the old check silently wrapped after 24 hours.
            if dur.total_seconds() > self.bytime:
                self.fire()

    def fire(self):
        """Invoke the callback with the pending song and reset the state."""
        self.callback(self.new["name"], self.new["title"])
        self.new = {}
if __name__ == '__main__':
    # Run the server in the foreground with the default localhost:8000 bind.
    NameServer().run()
|
Why do tourists book private transfers from Novo Mesto?
You can pre-order an inexpensive taxi transfer to the city center or to the airport, a comfortable car transfer for a family with children, an economy minivan for a tourist group, or a luxury transfer. The transfer cost from Novo Mesto is fixed at the time of pre-order and never rises: neither when the driver is waiting for the passenger, nor when a stop is necessary, nor because of traffic jams.
As Novo Mesto — is an important transport hub in Slovenia, there are taxis, shuttles, buses and trains.
Minimum 10 minutes faster, because the Kiwitaxi driver knows your arrival time in advance and will be waiting for you at Novo Mesto, at the appointed place.
Minimum 10 minutes faster: there are several official taxi services at Novo Mesto, but there are often queues to get into a taxi, as the airport staff is distributing the passengers into cars or the tourists are choosing a car too long. Outside the airport you can find owner drivers, catching passengers, but it’s crucial to find the most trustworthy one — and this takes time.
1.5–2.5 times faster. Given public transport waiting times and changes between lines, it takes on average 1.75 times longer to get from Novo Mesto to a hotel within the city boundaries, or to a transport hub, by public transport.
Peaceful and comfortable. Kiwitaxi driver will be waiting for you at the given address in Novo Mesto and will help you with the luggage. During the trip you can always ask to pull up, stop by a cafe or a shop. If necessary, the driver can help you check into a hotel, acting as an interpreter at the reception - just ask him for it.
Full service You will not get lost in directions in Novo Mesto, as the driver will meet you and lead you to the car. He will bring you to the designated place, so you won’t have to get there on foot from the public transport stop.
|
# Copyright 2013 IBM Corp.
import json
import socket
import urllib2
import urlparse
def is_ipv4_address(ip_or_host):
    """Determines if a netloc is an IPv4 address.

    :param ip_or_host: the host/ip to check
    :returns: True if *ip_or_host* parses as a dotted IPv4 address
    """
    try:
        socket.inet_aton(ip_or_host)
        return True
    except (socket.error, TypeError):
        # Narrowed from a bare 'except:' which also swallowed signals like
        # KeyboardInterrupt. inet_aton raises socket.error for hostnames /
        # IPv6 literals and TypeError for non-string input.
        return False
def hostname_url(url):
    """Converts the URL into its FQHN form.

    This requires DNS to be setup on the OS or the hosts table
    to be updated.

    :param url: the url to convert to FQHN form
    """
    frags = urlparse.urlsplit(url)
    # An IPv4 literal is returned unchanged; no reverse lookup needed.
    if is_ipv4_address(frags.hostname) is True:
        return url
    try:
        fqhn, alist, ip = socket.gethostbyaddr(frags.hostname)
    except socket.error:
        # Narrowed from a bare 'except:': gethostbyaddr failures (herror /
        # gaierror) are subclasses of socket.error. Likely no DNS is
        # configured, so return the initial url.
        return url
    port_str = ''
    if frags.port is not None:
        port_str = ':' + str(frags.port)
    return frags.scheme + '://' + fqhn + port_str + frags.path
def extract_url_segment(url, needles):
    """Search the '/'-separated segments of *url*, from the end, for the
    first segment contained in *needles*.

    :param url: the url or uri to search
    :param needles: the keys to search for
    :returns: the matching segment closest to the end, or None
    """
    segments = reversed(url.split('/'))
    return next((segment for segment in segments if segment in needles), None)
class JSONRESTClient(object):
    """a simple json rest client

    Sends and receives JSON over HTTP, optionally authenticating every
    request with an 'X-Auth-Token' header.
    """

    def __init__(self, token):
        # Auth token; if falsy, no X-Auth-Token header is sent.
        self.token = token

    def get(self, url):
        """perform a http GET on the url

        :param url: the url to GET
        """
        return self._rest_call(url)

    def post(self, url, json_body):
        """perform a http POST on the url

        :param url: the url to POST
        :param json_body: the body to POST
        """
        return self._rest_call(url, 'POST', json_body)

    def put(self, url, json_body):
        """perform a http PUT on the url

        :param url: the url to PUT
        :param json_body: the body to PUT
        """
        return self._rest_call(url, 'PUT', json_body)

    def delete(self, url):
        """perform an http DELETE on the url

        :param url: the url to DELETE
        """
        return self._rest_call(url, 'DELETE')

    def _rest_call(self, url, method='GET', json_body=None):
        """Issue the request and return the decoded JSON response.

        A 300 'Multiple Choices' response is treated as data (its body is
        returned decoded); any other HTTPError propagates to the caller.
        """
        request = urllib2.Request(url)
        request.add_header('Content-Type', 'application/json;charset=utf8')
        request.add_header('Accept', 'application/json')
        request.add_header('User-Agent', 'python-client')
        if self.token:
            request.add_header('X-Auth-Token', self.token)
        if json_body:
            request.add_data(json.dumps(json_body))
        # urllib2 picks the verb from get_method(); override it so PUT and
        # DELETE are possible.
        request.get_method = lambda: method
        try:
            response = urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            if e.code == 300:
                return json.loads(e.read())
            # Bare raise keeps the original traceback; 'raise e' would
            # reset it under Python 2.
            raise
        return json.loads(response.read())
|
Introduce yourself – who you are, where you sit in the business and what does the business do?
Simon Evans, Partner at Lewis Evans. We are a chartered accountancy firm based in West Kirby on the Wirral with many fantastic clients across the Liverpool City Region.
Where do you see Lewis Evans in 5 years time?
I’d like to see us continue to grow and expand organically with the same ethos we have now. A lot of our growth has been through word of mouth based on our levels of service in accountancy, tax, audit and other areas and we are extremely grateful for that and aim to continue to produce for our clients in a friendly and pro-active way. We expect to have opened a second office by then as well. One of our strengths is our people, with their knowledge, experience and approach.
How and why did the business start?
My wife, Rosie, and I always had an urge to do our own thing and to take our larger firm experience into something smaller and more client focused.
How long have you got?! I suppose to relax and enjoy it more and perhaps also to get a decent haircut!
I’ve never been one for ego driven big business leaders but I am often amazed by our clients and their entrepreneurial spirit or commitment in the face of adversity. We work with charities locally and nationally as well and there are plenty of role models to be found in that sector.
Because it’s the best place to live and work in the UK! I worked in London for many years and we have clients there which means I can visit and keep in touch which is great but I’m always more than happy to get the train home.
Sometimes our sector can be slow to adapt to change and as things are moving so quickly it is a challenging time for some accountants I think. The industry has always had a reputation for being boring so it would be great to see people’s perceptions change as well but I think that is probably unlikely!
|
import numpy as np
from optparse import OptionParser
import scipy.linalg as la
import scipy.linalg.blas as blas
import csv
import time
import fastlmm.util.VertexCut as vc
from pysnptools.snpreader.bed import Bed
import pysnptools.util as pstutil
import pysnptools.util.pheno as phenoUtils
np.set_printoptions(precision=3, linewidth=200)
def loadData(bfile, extractSim, phenoFile, missingPhenotype='-9', loadSNPs=False, standardize=True):
    """Load a plink bed file, optionally restricting SNPs and attaching a phenotype.

    bfile            -- plink file prefix
    extractSim       -- optional csv file whose first column lists SNP ids to keep
    phenoFile        -- optional plink phenotype file
    missingPhenotype -- phenotype value marking a missing entry
    loadSNPs         -- if True, read the genotypes into memory now
    standardize      -- if True (and loadSNPs), standardize the genotypes
    Returns (bed, phe); phe is None when no phenotype file was given.
    """
    bed = Bed(bfile)

    if (extractSim is not None):
        # 'with' guarantees the csv file is closed even if parsing fails
        # (the original leaked the handle on error).
        with open(extractSim) as f:
            extractSnpsSet = set(l[0] for l in csv.reader(f))
        keepSnpsInds = [i for i in xrange(bed.sid.shape[0]) if bed.sid[i] in extractSnpsSet]
        bed = bed[:, keepSnpsInds]

    phe = None
    if (phenoFile is not None): bed, phe = loadPheno(bed, phenoFile, missingPhenotype)

    if (loadSNPs):
        bed = bed.read()
        if (standardize): bed = bed.standardize()

    return bed, phe
def loadPheno(bed, phenoFile, missingPhenotype='-9', keepDict=False):
    """Read one phenotype file and align it with *bed* by individual id.

    Returns (bed, pheno); pheno is the raw values array unless *keepDict*,
    in which case the full phenotype dict is returned.
    """
    pheno = phenoUtils.loadOnePhen(phenoFile, missing=missingPhenotype, vectorize=True)
    checkIntersection(bed, pheno, 'phenotypes')
    bed, pheno = pstutil.intersect_apply([bed, pheno])
    return (bed, pheno) if keepDict else (bed, pheno['vals'])
def checkIntersection(bed, fileDict, fileStr, checkSuperSet=False):
    # Compare the individual ids ((fid, iid) pairs) of the plink file with
    # those in an auxiliary file; raise if the auxiliary file must cover all
    # individuals (checkSuperSet) but doesn't, otherwise just report overlap.
    bedSet = set((b[0], b[1]) for b in bed.iid)
    fileSet = set((b[0], b[1]) for b in fileDict['iid'])

    if checkSuperSet:
        if (not fileSet.issuperset(bedSet)): raise Exception(fileStr + " file does not include all individuals in the bfile")

    intersectSet = bedSet.intersection(fileSet)
    if (len(intersectSet) != len (bedSet)):
        # Only warn: downstream intersect_apply drops the missing ids.
        print len(intersectSet), 'individuals appear in both the plink file and the', fileStr, 'file'
def symmetrize(a):
    """Mirror one triangle of *a* onto the other: a + a.T - diag(a)."""
    diagonal_part = np.diag(a.diagonal())
    return a + a.T - diagonal_part
def loadRelatedFile(bed, relFile):
    # Read a per-individual relatedness indicator file (aligned with bed)
    # and return a boolean keep-mask: True for individuals to keep.
    relatedDict = phenoUtils.loadOnePhen(relFile, vectorize=True)
    checkIntersection(bed, relatedDict, 'relatedness', checkSuperSet=True)
    _, relatedDict = pstutil.intersect_apply([bed, relatedDict])
    related = relatedDict['vals']
    # Values >= 0.5 mark an individual as too related; drop those.
    keepArr = (related < 0.5)
    print np.sum(~keepArr), 'individuals will be removed due to high relatedness'
    return keepArr
def findRelated(bed, cutoff):
    # Build the empirical kinship matrix X*X'/m and mark one individual of
    # every too-related pair (via a vertex-cut heuristic) for removal.
    # Returns a boolean keep-mask over bed.iid.
    print 'Computing kinship matrix...'
    t0 = time.time()
    # dsyrk fills only the lower triangle; symmetrize completes the matrix.
    XXT = symmetrize(blas.dsyrk(1.0, bed.val, lower=1) / bed.val.shape[1])
    print 'Done in %0.2f'%(time.time()-t0), 'seconds'

    #Find related individuals
    removeSet = set(np.sort(vc.VertexCut().work(XXT, cutoff))) #These are the indexes of the IIDs to remove
    print 'Marking', len(removeSet), 'individuals to be removed due to high relatedness'

    #keepArr = np.array([(1 if iid in keepSet else 0) for iid in bed.iid], dtype=bool)
    keepArr = np.ones(bed.iid.shape[0], dtype=bool)
    for i in removeSet: keepArr[i] = False
    return keepArr
def eigenDecompose(XXT):
    """Eigendecompose a PSD kinship matrix, dropping numerically-zero modes.

    Returns (s, U): eigenvalues s in ascending order with s > 1e-12, and U
    the matching eigenvector columns. Raises if XXT is clearly not PSD.
    """
    t0 = time.time()
    print('Computing eigendecomposition...')
    s, U = la.eigh(XXT)
    if (np.min(s) < -1e-4): raise Exception('Negative eigenvalues found')
    # Clamp tiny negative round-off to zero.
    s[s < 0] = 0
    ind = np.argsort(s)
    # Filter by the *sorted* values: the original 'ind[s > 1e-12]' masked in
    # unsorted order and was only correct because eigh already returns s
    # ascending; 's[ind] > 1e-12' is correct for any ordering.
    ind = ind[s[ind] > 1e-12]
    U = U[:, ind]
    s = s[ind]
    print('Done in %0.2f seconds' % (time.time() - t0))
    return s, U
def loadCovars(bed, covarFile):
    """Load covariates aligned with *bed* and z-score each column."""
    covarsDict = phenoUtils.loadOnePhen(covarFile, vectorize=False)
    checkIntersection(bed, covarsDict, 'covariates', checkSuperSet=True)
    _, covarsDict = pstutil.intersect_apply([bed, covarsDict])
    covar = covarsDict['vals']
    # Standardize each column: zero mean, unit variance.
    covar = (covar - np.mean(covar, axis=0)) / np.std(covar, axis=0)
    return covar
def getSNPCovarsMatrix(bed, resfile, pthresh, mindist):
    # Select significant SNPs (p < pthresh, read from a p-value-sorted
    # results file) to use as fixed-effect covariates, optionally enforcing
    # a minimum base-pair distance between selected SNPs on one chromosome.
    # Returns the genotype columns of the selected SNPs.
    snpNameToNumDict = dict([])
    for i,s in enumerate(bed.sid): snpNameToNumDict[s] = i

    f = open(resfile)
    csvReader = csv.reader(f, delimiter="\t")
    csvReader.next()  # skip the header row (Python 2 iterator API)
    significantSNPs = []
    significantSNPNames = []
    lastPval = 0
    featuresPosList = []
    for l in csvReader:
        snpName, pVal = l[0], float(l[4])
        # The file must be sorted by ascending p-value.
        # NOTE(review): the message says 'descending' but the check below
        # actually enforces ascending order.
        if (pVal < lastPval): raise Exception('P-values are not sorted in descending order: ' + str(pVal) + ">" + str(lastPval))
        lastPval = pVal
        if (pVal > pthresh): break
        if (snpName not in snpNameToNumDict): continue
        significantSNPNames.append(snpName)
        if (mindist == 0):
            significantSNPs.append(snpNameToNumDict[snpName])
            print 'Using SNP', snpName, 'with p<%0.2e'%pVal, 'as a fixed effect'
        else:
            # Keep the SNP only if no previously selected SNP on the same
            # chromosome lies within mindist base pairs.
            posArr = bed.pos[snpNameToNumDict[snpName]]
            chrom, pos = posArr[0], int(posArr[2])
            addSNP = True
            for (c,p) in featuresPosList:
                if (chrom == c and abs(pos-p) < mindist):
                    addSNP = False
                    break
            if addSNP:
                significantSNPs.append(snpNameToNumDict[snpName])
                featuresPosList.append((chrom, pos))
                print 'Using SNP', snpName, '('+str(int(chrom))+':'+str(pos)+') with p<%0.2e'%pVal, 'as a fixed effect'
    f.close()

    snpCovarsMat = bed.val[:, significantSNPs]
    return snpCovarsMat
def getExcludedChromosome(bfile, chrom):
    """Load *bfile* with chromosome *chrom* removed, read and standardized."""
    bed = Bed(bfile)
    keep_mask = (bed.pos[:, 0] != chrom)
    return bed[:, keep_mask].read().standardize()
def getChromosome(bfile, chrom):
    """Load only chromosome *chrom* of *bfile*, read and standardized."""
    bed = Bed(bfile)
    keep_mask = (bed.pos[:, 0] == chrom)
    return bed[:, keep_mask].read().standardize()
def _fixupBedAndPheno(bed, pheno, missingPhenotype='-9'):
    """Normalize (bed, pheno) arguments that may be file-name strings."""
    loaded_bed = _fixupBed(bed)
    return _fixup_pheno(pheno, loaded_bed, missingPhenotype)
def _fixupBed(bed):
    """If *bed* is a file-name string, load, read and standardize it."""
    if not isinstance(bed, str):
        return bed
    return Bed(bed).read().standardize()
def _fixup_pheno(pheno, bed=None, missingPhenotype='-9'):
    """Normalize a phenotype argument that may be a file-name string.

    With a bed, returns (bed, pheno) aligned by individual; without one,
    returns just the loaded phenotype (a dict when read from file).
    """
    if not isinstance(pheno, str):
        # Already loaded; pass it straight through.
        return (bed, pheno) if bed is not None else pheno
    if bed is not None:
        return loadPheno(bed, pheno, missingPhenotype, keepDict=True)
    return phenoUtils.loadOnePhen(pheno, missing=missingPhenotype, vectorize=True)
|
, Get off the couch and get running with the OFFICIAL Couch to 5K® training app from Active.com! This oft-imitated program has helped thousands of new runners move from the couch to the finish line. Spend just 20 to 30 minutes, three times a week, for nine weeks, and you’ll be ready to finish your first 5K (3.1-mile) race!brbrWINNER of the 2012 Appy Award for best Healthcare & Fitness App!brbrFeaturesbr Training plan designed by Active.com trainersbr Choose from 4 different motivating virtual coaches Johnny Dead, Constance, Billie or Sergeant Blockbr Hear human audio cues to guide you through each workoutbr Listen to your favorite playlists with in-app music playerbr Calculate your distance & pace and map your routes with FREE GPS support*br Log your workouts and share your progress on Facebook br Get support from the largest running community on Active.com Trainerbr Repeat workouts and track your best performancebr Treadmill support allows manual entry of workoutsbr Track your progress with total distance and average pacebr Graphs for workouts to compare distance and pacebrbrFinished the Couch to 5K program and ready to take your running program to the next level Check out our 5K to 10K app to prep for your first 10K race here. https://itunes.apple.com/us/app/5k-to-10k/id526458735!brbrPraisebr"The popular Couch to 5K app helps new runners avoid injury from doing too much, too soon."br Runners World, June 2012brbr"It's ridiculously easy to use (it's hard not to, as you just do what you're told) and it's fantastic that you have an encouraging voice talking you through things."br Engadget, June 2012brbr"Active.com’s Couch to 5K is one of my favorite… apps. 
With its customizable features, interactivity, and well-rounded interface, I may actually stay off the couch this time."br 148Apps, October 2011brbr"I knew as soon as I decided to start the Couch to 5K program that I would need something to keep me accountable not just to showing up for my training, but to actually doing it properly. Of course there’s an app for that."br CalorieLab, April 2012brbr"If you have been struggling to get your buns off the couch and somewhat in shape before the summer hits, training for a 5K is not a bad way to start. If you are looking for a program to help get you there, then the “Couch to 5K” running plan by CoolRunning is probably your best bet if you haven’t done an ounce of athletic activity for a number of years."br Droid Life, April 2012brbrGet more information about the Couch to 5K app here: http://www.active.com/mobile/couch-to-5k-appbrbrFor support please contact us at: MobileSupport@activenetwork.com. We would love to hear from you.brbrNotesbr Once you purchase the app, it is yours to keep. It does not expire after 9 weeks.brbrTHIS APP AND ANY INFORMATION GIVEN BY IT OR THE ACTIVE NETWORK, LLC, ARE FOR INFORMATIONAL PURPOSES ONLY. THEY ARE NOT INTENDED NOR IMPLIED TO BE A SUBSTITUTE FOR PROFESSIONAL MEDICAL ADVICE. YOU SHOULD ALWAYS CONSULT YOUR HEALTHCARE PROVIDER BEFORE BEGINNING ANY FITNESS PROGRAM. IN PARTICULAR, THE ACTIVITIES AND EXERCISES DESCRIBED IN TRAINING PROGRAMS AND ARTICLES ON ACTIVE CAN BE DANGEROUS AND MAY RESULT IN INJURY OR DEATH. YOU MUST CONSULT WITH A LICENSED PHYSICIAN BEFORE PARTICIPATING IN ANY OF THE ACTIVITIES DESCRIBED IN THE APPLICATION.
Get off the couch and get running with the OFFICIAL Couch to 5K® training app from Active.com! This oft-imitated program has helped thousands of new runners move from the couch to the finish line. Spend just 20 to 30 minutes, three times a week, for nine weeks, and you’ll be ready to finish your first 5K (3.1-mile) race!
WINNER of the 2012 Appy Award for best Healthcare & Fitness App!
Finished the Couch to 5K program and ready to take your running to the next level? Check out our 5K to 10K app to prep for your first 10K race here: https://itunes.apple.com/us/app/5k-to-10k/id526458735!
"The popular Couch to 5K app helps new runners avoid injury from doing too much, too soon."
"It's ridiculously easy to use (it's hard not to, as you just do what you're told) and it's fantastic that you have an encouraging voice talking you through things."
"Active.com’s Couch to 5K is one of my favorite… apps. With its customizable features, interactivity, and well-rounded interface, I may actually stay off the couch this time."
"I knew as soon as I decided to start the Couch to 5K program that I would need something to keep me accountable not just to showing up for my training, but to actually doing it properly. Of course there’s an app for that."
"If you have been struggling to get your buns off the couch and somewhat in shape before the summer hits, training for a 5K is not a bad way to start. If you are looking for a program to help get you there, then the “Couch to 5K” running plan by CoolRunning is probably your best bet if you haven’t done an ounce of athletic activity for a number of years."
For support please contact us at: MobileSupport@activenetwork.com. We would love to hear from you.
Once you purchase the app, it is yours to keep. It does not expire after 9 weeks.
THIS APP AND ANY INFORMATION GIVEN BY IT OR THE ACTIVE NETWORK, LLC, ARE FOR INFORMATIONAL PURPOSES ONLY. THEY ARE NOT INTENDED NOR IMPLIED TO BE A SUBSTITUTE FOR PROFESSIONAL MEDICAL ADVICE. YOU SHOULD ALWAYS CONSULT YOUR HEALTHCARE PROVIDER BEFORE BEGINNING ANY FITNESS PROGRAM. IN PARTICULAR, THE ACTIVITIES AND EXERCISES DESCRIBED IN TRAINING PROGRAMS AND ARTICLES ON ACTIVE CAN BE DANGEROUS AND MAY RESULT IN INJURY OR DEATH. YOU MUST CONSULT WITH A LICENSED PHYSICIAN BEFORE PARTICIPATING IN ANY OF THE ACTIVITIES DESCRIBED IN THE APPLICATION.
|
from bespin.errors import BadDeployment, BadStack, BadOption
from bespin import helpers as hp
from input_algorithms.spec_base import NotSpecified
from input_algorithms.dictobj import dictobj
import requests
import fnmatch
import logging
log = logging.getLogger("bespin.option_spec.deployment")
class UrlChecker(dictobj):
    # Polls an http endpoint until its body matches the expected value,
    # confirming that a deployment has rolled out.
    fields = {
          "expect": "The value we expect for a successful deployment"
        , "endpoint": "The domain of the url to hit"
        , "check_url": "The path of the url to hit"
        , "timeout_after": "Stop waiting after this many seconds"
        }

    def wait(self, environment):
        # Build the url as '<endpoint>/<check_url>' with exactly one slash.
        endpoint = self.endpoint().resolve()
        while endpoint.endswith("/"):
            endpoint = endpoint[:-1]
        while endpoint.endswith("."):
            endpoint = endpoint[:-1]
        # NOTE(review): this mutates self.check_url; harmless on repeated
        # calls (already stripped) but a local variable would be cleaner.
        while self.check_url.startswith("/"):
            self.check_url = self.check_url[1:]
        url = endpoint + '/' + self.check_url

        # The expected body may contain {placeholders} filled from the
        # deployment environment.
        expected = self.expect.format(**environment)
        log.info("Asking server for version till we match %s", expected)
        for _ in hp.until(self.timeout_after, step=15):
            log.info("Asking %s", url)
            try:
                res = requests.get(url)
                result = res.text
                status = res.status_code
            except requests.exceptions.ConnectionError as error:
                # Transient failure: log and retry on the next step.
                log.warning("Failed to ask server\terror=%s", error)
            else:
                log.info("\tgot back (%s) '%s'", status, result)
                # fnmatch lets the expected value be a glob pattern.
                if fnmatch.fnmatch(result, expected):
                    log.info("Deployment successful!")
                    return
        raise BadStack("Timedout waiting for the app to give back the correct version")
class SNSConfirmation(dictobj):
    # Waits on an sqs queue for per-instance deployment result messages and
    # decides success/failure for the whole instance set.
    fields = {
          "version_message": "The expected version that indicates successful deployment"
        , "deployment_queue": "The sqs queue to check for messages"
        , ("timeout", 300): "Stop waiting after this amount of time"
        }

    def wait(self, instances, environment, sqs):
        # Both settings may contain {placeholders} from the environment.
        version_message = self.version_message.format(**environment)
        deployment_queue = self.deployment_queue.format(**environment)

        failed = []
        success = []
        attempt = 0

        log.info("Checking sqs for %s", version_message)
        log.info("Checking for message for instances [%s]", ",".join(instances))
        for _ in hp.until(timeout=self.timeout, step=5, action="Checking for valid deployment actions"):
            messages = sqs.get_all_deployment_messages(deployment_queue)

            # Look for success and failure in the messages
            for message in messages:
                log.info("Message received for instance %s with content [%s]", message.instance_id, message.output)
                # Ignore the messages for instances outside this deployment
                if message.instance_id in instances:
                    # The expected message may be a glob pattern (fnmatch).
                    if fnmatch.fnmatch(message.output, version_message):
                        log.info("Deployed instance %s", message.instance_id)
                        success.append(message.instance_id)
                    else:
                        log.info("Failed to deploy instance %s", message.instance_id)
                        log.info("Failure Message: %s", message.output)
                        failed.append(message.instance_id)

            # Stop trying if we have all the instances
            if set(failed + success) == set(instances):
                break

            # Record the iteration of checking for a valid deployment
            attempt += 1
            log.info("Completed attempt %s of checking for a valid deployment state", attempt)

        if success:
            log.info("Succeeded to deploy %s", success)
        if failed:
            log.error("Failed to deploy %s", failed)
            raise BadDeployment(failed=failed)
        if not success and not failed:
            log.error("Failed to receive any messages")
            raise BadDeployment("Failed to receive any messages")

        log.info("All instances have been confirmed to be deployed with version_message [%s]!", version_message)
class ConfirmDeployment(dictobj):
    """Runs the configured post-deployment confirmation checks for a stack:
    sqs/sns messages from the instances, a version url check, and expected
    s3 artifacts.
    """
    fields = {
          "deploys_s3_path": "A list of s3 paths that we expect to be created as part of the deployment"
        , "zero_instances_is_ok": "Don't do deployment confirmation if the scaling group has no instances"
        , "auto_scaling_group_name": "The name of the auto scaling group that has the instances to be checked"
        , "url_checker": "Check an endpoint on our instances for a particular version message"
        , "sns_confirmation": "Check an sqs queue for messages our Running instances produced"
        }

    def instances(self, stack):
        """Return ids of the auto scaling group's InService instances."""
        auto_scaling_group_name = self.auto_scaling_group_name
        asg_physical_id = stack.cloudformation.map_logical_to_physical_resource_id(auto_scaling_group_name)
        return stack.ec2.get_instances_in_asg_by_lifecycle_state(asg_physical_id, lifecycle_state="InService")

    def confirm(self, stack, environment, start=None):
        """Run every configured checker; raise BadDeployment on failure."""
        instances = []
        if self.auto_scaling_group_name is not NotSpecified:
            instances = self.instances(stack)
            # '== 0', not 'is 0': identity comparison against an int literal
            # only ever worked via CPython's small-int caching accident.
            if len(instances) == 0:
                if self.zero_instances_is_ok:
                    log.info("No instances to check, but config says that's ok!")
                    return
                else:
                    raise BadDeployment("No instances are InService in the auto scaling group!", stack=stack.name, auto_scaling_group_name=self.auto_scaling_group_name)
        else:
            # The instance-based checkers make no sense without an asg.
            if any(item is not NotSpecified for item in (self.sns_confirmation, self.url_checker)):
                raise BadOption("Auto_scaling_group_name must be specified if sns_confirmation or url_checker are specified")

        for checker in (self.check_sns, self.check_url, self.check_deployed_s3_paths):
            checker(stack, instances, environment, start)

    def check_sns(self, stack, instances, environment, start=None):
        """Wait for sqs confirmation messages, if configured."""
        if self.sns_confirmation is not NotSpecified:
            self.sns_confirmation.wait(instances, environment, stack.sqs)

    def check_url(self, stack, instances, environment, start=None):
        """Wait for the version url check, if configured."""
        if self.url_checker is not NotSpecified:
            self.url_checker.wait(environment)

    def check_deployed_s3_paths(self, stack, instances, environment, start=None):
        """Wait for each expected s3 artifact to appear, if configured."""
        if self.deploys_s3_path is not NotSpecified:
            for path in self.deploys_s3_path:
                stack.s3.wait_for(path.bucket.format(**environment), path.key.format(**environment), path.timeout, start=start)
|
Learn To Generate Online - Even A Jock Are Able To Do It.
If you're starting up a home based business there's every chance that you will need to require a website designed in so doing will should try to buy affiliate marketing website. After all, in today's world, any sort of business incorporates website to be a means of reaching to be able to their respective markets.
Deliveries / Driver: Businesses of a wide range (in bigger cities especially) need stuff delivered just about every. Print up some flyers, charge less, and away you go buy heroin online !
I must warm you though the net is packed with scams and take rich plots. For every legitimate opportunity usually are at least ten fraudulent scams. This very in order to fall on account of get rich quick schemes because we all love band is supposed to of not doing anything and getting large sums of money but wake way up! This is reality and actual no such thing for a free bike ride. Other people buy hydrocodone online will promise they can make you wealthy and the truth is nobody can provide you with rich or succeed any kind of area of life. You alone can accomplish this and want is some time, commitment and a little bit of patience.
I often have clients who are very disappointed by their particular insurance corporations. People usually hire me to assist them with their claims against other people's insurance makers. Dealing with a first-party concern is often secondary to cause case - but i am not saying they aren't in for a fight.
Now, most buy oxycodone online stores have new fashion clothing and accessories. Soon therefore be slipping on boots to help you buy mdma powder online warm, as well as cozy sweaters, jackets and shorts. Consider the colors of your clothes positive you can opt the most suitable moccasin boot pair. Black colored boots, for brown and black can be placed well the majority of of your dull winter clothes. The same as need to wear fashionable shoes, pay enough attention to his or her quality and ability to warm ft. Since the grounds will soon be slippery and brimming with snowflakes, concentrate on the boot's sole. It ought to offer good traction and light inches around your waist.
Car shows. Attend a classic car show like "Hot August Nights" in Reno, Nevada. You will thousands of car enthusiasts at the show, you'll be able to see variety of classic cars, and fantastic come towards show buyer and sell vehicles. Contend with the car of your dreams presented at the show. Many car shows like this include car auctions, too, so include even really a possibility to find the best car you r.
The most important step is pick one solid MLM opportunity, focus and work hard, and stick with it. Do your homework carefully and select a solid company and upline team that will support as well as help acquire started successfully. Build a strong and serious business that will stand the test of evening.
|
'''
@author: Frank
'''
from zstacklib.utils import plugin
from zstacklib.utils import log
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import shell
from zstacklib.utils import daemon
from zstacklib.utils import iptables
import os.path
import traceback
import pprint
import functools
class VRAgent(plugin.Plugin):
    # Marker base class for virtual-router agent plugins; all behavior
    # comes from plugin.Plugin.
    pass
class VirtualRouterError(Exception):
    '''Virtual router error.'''
# Module-level logger named after this module.
logger = log.get_logger(__name__)
class AgentResponse(object):
    """Base response for agent HTTP handlers: success flag plus error text."""

    def __init__(self, success=True, error=None):
        self.success = success
        # Normalize a missing/None error to an empty string.
        self.error = error or ''
class AgentCommand(object):
    # Base class for commands sent to the agent; no shared state yet.
    def __init__(self):
        pass
class InitRsp(AgentResponse):
    # Response for /init; carries only the base success/error fields.
    def __init__(self):
        super(InitRsp, self).__init__()
class PingRsp(AgentResponse):
    # Response for the /ping call: echoes the agent uuid recorded by /init
    # (None if /init has not been called yet).
    def __init__(self):
        super(PingRsp, self).__init__()
        self.uuid = None
def replyerror(func):
    """Decorator for agent HTTP handlers: convert any exception into a
    JSON-serialized failure ``AgentResponse`` instead of letting it
    propagate to the HTTP layer.

    The full traceback and the call arguments are logged; only the
    exception message is returned to the caller.
    """
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            content = traceback.format_exc()
            err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
            rsp = AgentResponse()
            rsp.success = False
            rsp.error = str(e)
            # logger.warn() is a deprecated alias of warning()
            logger.warning(err)
            return jsonobject.dumps(rsp)
    return wrap
class VirtualRouter(object):
    """HTTP agent running inside the virtual router appliance.

    Exposes /init and /ping endpoints on port 7272 and hosts plugins
    discovered under the configured plugin directory.
    """

    # Class-level: one embedded HTTP server shared per process.
    http_server = http.HttpServer(port=7272)
    http_server.logfile_path = log.get_logfile_path()

    PLUGIN_PATH = "plugin_path"
    INIT_PATH = "/init"
    PING_PATH = "/ping"

    def __init__(self, config=None):
        # NOTE: the previous signature used a mutable default ({}), which is
        # shared across all calls; None plus a fallback avoids that pitfall
        # while remaining backward compatible for callers.
        self.config = config if config is not None else {}
        plugin_path = self.config.get(self.PLUGIN_PATH, None)
        if not plugin_path:
            # Default to a "plugins" directory next to this file.
            plugin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugins')
        self.plugin_path = plugin_path
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
        self.init_command = None
        self.uuid = None

    @replyerror
    def init(self, req):
        """Handle /init: remember the init command and this agent's uuid."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.init_command = cmd
        self.uuid = cmd.uuid
        return jsonobject.dumps(InitRsp())

    @replyerror
    def ping(self, req):
        """Handle /ping: report the uuid received at init time."""
        rsp = PingRsp()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Start plugins, register the URI handlers and start the server.

        :param in_thread: when True the HTTP server runs in a background
            thread; when False this call blocks serving requests.
        """
        self.plugin_rgty.configure_plugins(self)
        self.plugin_rgty.start_plugins()
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop plugins first, then shut down the HTTP server."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
class VirutalRouterDaemon(daemon.Daemon):
    # Daemon wrapper that runs the VirtualRouter agent inside the
    # daemonized process. (NOTE: class name misspells "Virtual", but it is
    # part of the public interface and kept as-is.)
    def __init__(self, pidfile):
        super(VirutalRouterDaemon, self).__init__(pidfile)
    def run(self):
        # start(False) blocks serving HTTP requests in this process.
        self.agent = VirtualRouter()
        self.agent.start(False)
|
Montrose Decorative Kitchen & Bath Showroom is proud to present this polished brass pvd finished shower only faucet with head, by Sigma. The 1.007542T.40 is made from premium materials, this Shower Only Faucet With Head offers great function and value for your home. This fixture is part of Sigma's decorative Collection, so make sure to check out other styles of fixtures to accessorize your room.
|
# MajorMajor - Collaborative Document Editing Library
# Copyright (C) 2013 Ritchie Wilson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Some expanded deletion ranges overlapping other deletion ranges
"""
from majormajor.document import Document
from tests.test_utils import build_changesets_from_tuples
class TestExpandDeletionRange:
    """Regression tests for expanded deletion ranges that overlap other
    deletion ranges (see module docstring)."""

    def test_expand_deletion_range(self):
        """Apply changesets in stages and check the document snapshot after
        each stage. The trailing comment on each tuple shows the expected
        document content (or deletion) produced by that changeset."""
        doc = Document(snapshot='HjpRFtZXW5')
        doc.HAS_EVENT_LOOP = False
        # Each tuple: (op, offset, value/length, parent ids, short id).
        css_data = [
            ('si', 7, 'OeI', ['root'], 'c3c'), # HjpRFtZ OeI XW5
            ('sd', 2, 5, ['c3c'], '950'), # delete pRFtZ
            ('si', 2, 'Qx', ['950'], 'bf0'), # Hj Qx OeIXW5
            ('sd', 2, 4, ['bf0'], '4c5'), # delete QxOe
            ('si', 6, 'U6', ['4c5'], '61a'), # HjIXW5 U6
            ('si', 3, 'AG', ['61a'], '1f0'), # HjI AG XW5U6
            ('si', 3, 'qwEg', ['1f0'], '393'), # HjI qwEg AGXW5U6
            ('si', 9, 'vsY', ['393'], '18d'), # HjIqwEgAG vsY XW5U6
            ('si', 0, 'MiNV', ['18d'], '688'), # MiNV HjIqwEgAGvsYXW5U6
            ('si', 20, 'L4n', ['688'], '796'), # MiNVHjIqwEgAGvsYXW5U L4n 6
            ('si', 5, '9l', ['796'], 'b29'), # MiNVH 9l jIqwEgAGvsYXW5UL4n6
            ('si', 1, 'k0Jf', ['b29'], 'e1a'),
            # M k0Jf iNVH9ljIqwEgAGvsYXW5UL4n6
            ('si', 8, 'd', ['e1a'], 'a23'),
            # Mk0JfiNV d H9ljIqwEgAGvsYXW5UL4n6
            ('sd', 3, 1, ['1f0'], '47a'), # delete A
            ('sd', 0, 3, ['47a'], 'cc0'), # delete HjI
            ('si', 4, 'K1DT', ['cc0'], 'd32'), # GXW5 K1DT U6
            ('si', 5, 'b3oS', ['d32'], '175'), # GXW5K b3oS 1DTU6
            ('si', 3, 'hm8z', ['175'], 'd28'), # GXW hm8z 5Kb3oS1DTU6
            ('sd', 0, 5, ['1f0'], '997'), # delete HjIAG
            ('si', 0, 'rBya', ['997'], '17a'), # rBya XW5U6
            ('sd', 7, 1, ['17a'], '592'), # delete U
            ('si', 8, 'cPu', ['592'], '893'), # rByaXW56 cPu
            ('si', 1, 'C72', ['d28', '893'], 'b20'),
            # r C72 ByaXWhm8z5Kb3oS1DT6cPu
            ('sd', 37, 3, ['a23', 'b20'], '9e0'), # delete 6cP
        ]
        self.css = build_changesets_from_tuples(css_data, doc)
        get_cs = self.get_cs
        # First 13 changesets form a linear history.
        for i in self.css[:13]:
            doc.receive_changeset(i)
        assert doc.get_snapshot() == 'Mk0JfiNVdH9ljIqwEgAGvsYXW5UL4n6'
        # Branch based on '1f0': deletions plus inserts get transformed in.
        for i in self.css[13:18]:
            doc.receive_changeset(i)
        assert doc.get_snapshot() == 'Mk0JfiNVdqwEgGvsYXWhm8z5Kb3oS1DTUL4n6'
        # '997' deletes a range that overlaps previously deleted text.
        cs = get_cs('997')
        doc.receive_changeset(cs)
        assert doc.get_snapshot() == 'Mk0JfiNVdvsYXWhm8z5Kb3oS1DTUL4n6'
        cs = get_cs('17a')
        doc.receive_changeset(cs)
        assert doc.get_snapshot() == 'Mk0JfiNVdvsYrByaXWhm8z5Kb3oS1DTUL4n6'
        cs = get_cs('592')
        doc.receive_changeset(cs)
        assert doc.get_snapshot() == 'Mk0JfiNVdvsYrByaXWhm8z5Kb3oS1DTL4n6'
        cs = get_cs('893')
        doc.receive_changeset(cs)
        assert doc.get_snapshot() == 'Mk0JfiNVdvsYrByaXWhm8z5Kb3oS1DTL4n6cPu'
        cs = get_cs('b20')
        doc.receive_changeset(cs)
        assert doc.get_snapshot() == \
            'Mk0JfiNVdvsYrC72ByaXWhm8z5Kb3oS1DTL4n6cPu'
        cs = get_cs('9e0')
        doc.receive_changeset(cs)
        assert doc.get_snapshot() == 'Mk0JfiNVdvsYrC72ByaXWhm8z5Kb3oS1DTL4nu'
    def get_cs(self, _id):
        """Look up a changeset built by the test via its short id."""
        for cs in self.css:
            if cs.get_short_id() == _id:
                return cs
        raise Exception("wrong id, jerk", _id)
|
she lost the one of the top this time! Happy Sunday!
And this my friends is why we are so busy... BASEBALL!!!!!
My Little Devil Ray! ;) UP TO BAT!!!
Hope you are having a great week... and guess what????
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
import argparse
import os
from diceware_cli import subcommands
from diceware_cli.persistence import word_states
from os import path
from sys import argv
# Run from the script's own directory so relative paths used by the
# subcommands resolve consistently.
diceware_dir = path.dirname(path.abspath(__file__))
os.chdir(diceware_dir)
def get_args(cli_args):
    """Parse the diceware-cli command line.

    :param cli_args: argument list without the program name (e.g. argv[1:])
    :return: the parsed argparse.Namespace; its ``func`` attribute holds
        the subcommand implementation to invoke.
    """
    parser = argparse.ArgumentParser(prog=path.basename(__file__),
                                     allow_abbrev=False,
                                     )
    subparsers = parser.add_subparsers()
    # A subcommand is mandatory (argparse makes it optional by default).
    subparsers.required = True
    subparsers.dest = 'command'
    # Registration order determines the order shown in --help.
    _add_load_db_parser(subparsers)
    _add_clean_parser(subparsers)
    _add_finalize_parser(subparsers)
    _add_select_words_parser(subparsers)
    _add_dump_db_parser(subparsers)
    _add_db_state_parser(subparsers)
    return parser.parse_args(cli_args)


def _add_load_db_parser(subparsers):
    """Register the 'load-db' subcommand: load wordlist files into the db."""
    load_db_subparser = subparsers.add_parser('load-db',
                                              help='Load words into the database',
                                              allow_abbrev=False,
                                              )
    load_db_subparser.add_argument('-l',
                                   '--language',
                                   help='The language of the wordlist',
                                   type=str,
                                   required=True,
                                   )
    load_db_subparser.add_argument('-f',
                                   '--file',
                                   help='A file to load into the db. Use \'-\' for stdin.'
                                        'Repeat this argument for multiple files.',
                                   action='append',
                                   dest='files',
                                   type=argparse.FileType('r'),
                                   required=True,
                                   )
    load_db_subparser.add_argument('-s',
                                   '--state',
                                   help='The initial state for the loaded words',
                                   type=str,
                                   default='pending',
                                   choices=word_states,
                                   )
    load_db_subparser.add_argument('--allow-updates',
                                   help='Allow words in the DB to have their state updated.'
                                        'Default behavior is insert only.',
                                   dest='allow_updates',
                                   action='store_true',
                                   )
    load_db_subparser.set_defaults(func=subcommands.load_db)


def _add_clean_parser(subparsers):
    """Register the 'clean' subcommand."""
    clean_subparser = subparsers.add_parser('clean',
                                            help='Clean the project',
                                            allow_abbrev=False,
                                            )
    clean_subparser.set_defaults(func=subcommands.clean_project)


def _add_finalize_parser(subparsers):
    """Register the 'finalize' subcommand."""
    finalize_subparser = subparsers.add_parser('finalize',
                                               help='Run checks and generate enumerated wordlists',
                                               allow_abbrev=False,
                                               )
    finalize_subparser.set_defaults(func=subcommands.finalize)


def _add_select_words_parser(subparsers):
    """Register the 'select-words' subcommand: interactive word triage."""
    select_words_subparser = subparsers.add_parser('select-words',
                                                   help='Iterate through the DB and select or reject words',
                                                   allow_abbrev=False,
                                                   )
    select_words_subparser.add_argument('-l',
                                        '--language',
                                        help='The language of the wordlist',
                                        type=str,
                                        required=True,
                                        )
    select_words_subparser.add_argument('--include-skipped',
                                        help='Re-evaluated words that were previously skipped',
                                        dest='include_skipped',
                                        action='store_true',
                                        )
    select_words_subparser.set_defaults(func=subcommands.select_words)


def _add_dump_db_parser(subparsers):
    """Register the 'dump-db' subcommand."""
    dump_db_subparser = subparsers.add_parser('dump-db',
                                              help='Dump the contents of the sqlite db to disk',
                                              allow_abbrev=False,
                                              )
    dump_db_subparser.set_defaults(func=subcommands.dump_sqlite)


def _add_db_state_parser(subparsers):
    """Register the 'db-state' subcommand."""
    db_state_subparser = subparsers.add_parser('db-state',
                                               help='Get the state of the db',
                                               allow_abbrev=False,
                                               )
    db_state_subparser.set_defaults(func=subcommands.db_state)
if __name__ == '__main__':
    try:
        args = get_args(argv[1:])
        args.func(args)  # dispatch to the selected subcommand
    except KeyboardInterrupt:
        print('')  # for a pretty newline
        exit(1)
|
Welcome to our home on the web! Let me introduce myself. My name is Meg Fox and I am happy to fill you in a little on our exciting venture here. The Fox and The Finch is a collaborative effort between myself and Abby Ogden. We are working hard to bring you a selection of vintage goods that you can rent for your special events. If words like Hoosier cabinet, card catalog, victorian fireside chair, or steamer trunk make you weak in the knees...then you've come to the right place. We have been weak in the knees for years and are happy to include you in our passion for all things vintage, antique and bygone.
Currently we are preparing to upload photos of our inventory to our website. Many thanks to our dear friends at Birds of a Feather for allowing us to use a photo from an amazing winter wedding they shot last year as a temporary photo for our site header. We know the Bride and Groom personally and let me tell you, their wedding was gorgeous. They have impeccable taste, because of course, they love vintage!
As we toil away at our website, we are also preparing to photograph our beautiful inventory in action. Next week we will be shooting a wedding and a birthday scene in an historic movie theater. I will be thrilled to share some of those photos with you when they are available. I am no cinemaphile, but there is something so dramatic and romantic about old movie theaters. Think of all the passionate kisses that have graced that screen! Or is it the heavy, red velvet curtain that makes us swoon? Either way, get ready to swoon over our photos, because the brilliant Alison Conklin will be taking them, and they will include the floral designs by Elysian Fields Specialty Florist.
|
import os
from sys import getsizeof
import models
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, BooleanField, FileField
from wtforms.validators import ValidationError, DataRequired, regexp, Email, EqualTo, Length
from flask_bcrypt import check_password_hash
# Special-password hash used by auth_matches(). On Heroku it comes from the
# environment; elsewhere a hard-coded fallback is used.
# NOTE(review): hard-coded secret fallback -- presumably only for local
# development; confirm it can never reach production.
if 'HEROKU' in os.environ:
    AUTH_PASS = os.environ['auth_pass']
else:
    AUTH_PASS = 'gjdfskghl'
def username_exists(form, field):
    """WTForms validator: reject the username if it is already taken.

    Uses peewee's ``**`` operator (case-insensitive match) against the
    User table; passing means no existing user matched.
    """
    # (debug print(form) removed)
    try:
        models.User.get(models.User.username ** field.data)
    except models.DoesNotExist:
        pass
    else:
        raise ValidationError('User with that username already exists')
def email_exists(form, field):
    """WTForms validator: reject the email if a user already has it.

    Uses peewee's ``**`` operator (case-insensitive match) against the
    User table; passing means no existing user matched.
    """
    # (debug print(form) removed)
    try:
        models.User.get(models.User.email ** field.data)
    except models.DoesNotExist:
        pass
    else:
        raise ValidationError('User with that email already exists')
def auth_matches(form, field):
    """WTForms validator: check the 'special password' against AUTH_PASS.

    NOTE(review): the check only runs when deployed on Heroku; outside
    Heroku any value passes. Presumably a development bypass -- confirm.
    """
    # (debug print(form) removed)
    if 'HEROKU' in os.environ:
        if not check_password_hash(AUTH_PASS, field.data):
            raise ValidationError('Special Password Incorrect')
def valid_image(form, field):
    """WTForms validator for an optional avatar upload.

    Accepts only image file extensions and uploads of at most 3 MB;
    no-op when the field is empty (the upload is optional).
    """
    # (debug print(form) removed)
    if not field.data:
        return
    ext = os.path.splitext(field.data.filename)[1].strip(".")
    # Extension compared case-insensitively so '.JPG' etc. are accepted too.
    if ext.lower() not in ['jpeg', 'jpg', 'png', 'psd', 'gif', 'bmp', 'exif', 'tif', 'tiff']:
        raise ValidationError('Avatar is not an image.')
    # BUG FIX: the previous code used sys.getsizeof(field.data), which
    # returns the in-memory size of the FileStorage wrapper object (a few
    # dozen bytes), so the 3 MB limit was never enforced. Measure the
    # actual stream length via seek/tell instead.
    file_u = field.data
    file_u.seek(0, os.SEEK_END)
    size = file_u.tell()
    file_u.seek(0)  # rewind so later consumers can still read the upload
    if size > 3000000:
        raise ValidationError('Avatar is bigger than 3 mb.')
class SignUpForm(Form):
    """Registration form: username, email, names, password (with confirm)
    and the site-wide 'special password' gate."""
    username = StringField(
        'Username',
        validators=[
            DataRequired(),
            username_exists,
            regexp(r'^[a-z0-9]{3,10}$',
                   message='Username can only be lowercase letters & numbers, '
                   'and length can only be 3-10 characters long')
        ]
    )
    email = StringField(
        'Email',
        validators=[
            DataRequired(),
            email_exists,
            Email()
        ]
    )
    first_name = StringField(
        'First Name',
        validators=[
            DataRequired(),
            regexp(r'[A-Z][a-z]+', message='Name can only be uppercase first letter and lowercase proceeding letters')
        ]
    )
    last_name = StringField(
        'Last Name',
        validators=[
            DataRequired(),
            regexp(r'[A-Z][a-z]+', message='Name can only be uppercase first letter and lowercase proceeding letters')
        ]
    )
    # EqualTo lives on the first field so the error shows next to it.
    password = PasswordField(
        'Password',
        validators=[
            DataRequired(),
            EqualTo('password2', message='Passwords must match'),
        ]
    )
    password2 = PasswordField(
        'Confirm Password',
        validators=[DataRequired()]
    )
    # Site-wide gate: checked against AUTH_PASS by auth_matches().
    auth = PasswordField(
        'Special Password',
        validators=[
            DataRequired(),
            auth_matches
        ]
    )
class SignInForm(Form):
    """Login form: accepts either username or email plus password."""
    name_email = StringField('Username or Email', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember = BooleanField('Remember Me')
class PostForm(Form):
    """New-post form: text content (1-255 chars) plus an optional image."""
    content = TextAreaField('What do you have to say?', validators=[Length(1, 255)],
                            render_kw={'class': 'materialize-textarea',
                                       'data-length': '255'})
    image = FileField('Optional Image (Up to 3 MB)', validators=[valid_image])
|
Diamond Falls Estates is a gated community perfectly located in the Cartoogechaye area of Franklin. Under 20 minutes from Main Street, grocery shopping, restaurants, entertainment venues, and the Winding Stair Gap access to the Appalachian Trail, Lot 110 offers 1.67 +/- acres not far from the clubhouse and pool on paved roads. Enjoy and entertain in your relaxing mountain setting with amenities including: a clubhouse w/ a great room, kitchen, billiard room, his & her saunas, workout room, a large outdoor pool, walking paths, and gorgeous common area w/ creek frontage. Underground utilities at the property, community water may be available. Walk this lot & envision your ideal primary residence or retreat. Call agent for information on other lots in this community that may also be available for individual sale.
|
# GLONASS P code construction
#
# Copyright 2014 Peter Monta
import numpy as np
# GLONASS P code: 5.11 Mchips/s; the sequence is truncated to a one-second
# period, so the code length equals the chip rate.
chip_rate = 5110000
code_length = 5110000
def glonass_p_shift(x):
    """Advance the 25-stage shift register *x* by one step.

    The feedback bit (taps at stages 25 and 3, i.e. x[24] ^ x[2]) is
    pushed in front and the last stage falls off.
    """
    feedback = x[24] ^ x[2]
    return [feedback] + x[:24]
def make_glonass_p():
    """Generate one full period of the GLONASS P code.

    Starts from an all-ones 25-stage register and records the output tap
    (stage 10, index 9) for each of the ``code_length`` chips.
    """
    register = [1] * 25
    out = np.zeros(code_length)
    for i in range(code_length):
        out[i] = register[9]
        register = glonass_p_shift(register)
    return out
c = make_glonass_p()
def p_code():
    """Return the precomputed GLONASS P-code chip sequence."""
    return c
def code(chips, frac, incr, n):
    """Sample n chips of the code starting at phase chips+frac, stepping
    by incr chips per sample; returns values mapped to +/-1 (0 -> +1)."""
    phase = (chips % code_length) + frac + incr * np.arange(n)
    indices = np.mod(np.floor(phase).astype('int'), code_length)
    return 1.0 - 2.0 * c[indices]
try:
    from numba import jit
except ImportError:
    # numba is optional: fall back to a no-op decorator so the module
    # still imports (correlate() then runs as plain Python).
    # NOTE: was a bare `except:`, which would also swallow unrelated
    # errors (e.g. KeyboardInterrupt) -- narrowed to ImportError.
    def jit(**kwargs):
        return lambda x: x
@jit(nopython=True)
def correlate(x,chips,frac,incr,c):
    # Correlate complex samples x against code c, starting at code phase
    # chips+frac and advancing by incr chips per sample. Kept as a plain
    # loop with manual phase wrap-around so it stays numba-nopython
    # compatible (no allocations, no fancy indexing).
    n = len(x)
    p = 0.0j
    cp = (chips+frac)%code_length
    for i in range(n):
        p += x[i]*(1.0-2.0*c[int(cp)])
        cp += incr
        if cp>=code_length:
            cp -= code_length
    return p
#
# testing: print out a small sample of the code
#
if __name__=='__main__':
    # Smoke test: show the first 100 chips of the generated code.
    print(c[0:100])
|
We are looking for a skilled and motivated Team Manager to manage the new Adult Social Care Learning Disabilities and Autism team. The role will be 36 hours a week based in 1 of the following locations; Guildford, Woking, Leatherhead, Reigate. Job location can be discussed further at interview.
In this role as a Team Manager you will ensure that services are high quality and customer focused while taking responsibility for leading the team and managing resources. As the team manager for this specialist service you will ensure consistent practice. We are looking for someone who is enthusiastic and creative and with a can-do approach. A social work, occupational therapy or other relevant professional qualification together with significant knowledge and/or experience of health and social care are essential.
Your remit will include building effective relationships with individuals, their carers and families, as well as a range of care providers, district and borough councils, health partners and the voluntary sector. You will work alongside specialist commissioners. You will put measures in place for risk management and business continuity and contribute to service improvement initiatives.
You will have a sound understanding of and commitment to the personalisation agenda. You will bring substantial experience of managing the delivery and improvement of social care and will have applied knowledge of adult social care legislation. You will have a commitment to supporting carers and empowering people through direct payments. You will have strong experience and knowledge of safeguarding adults.
You will bring experience of the successful management of staff, in accordance with the Councils Equality and Diversity policy. You will manage performance and development while successfully addressing conduct, attendance and performance capability issues. You will be skilled and experienced in managing budgets.
|
#!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp Widgets
#
# Copyright (c) 2010 Pavel Shramov <shramov@mexmat.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# 2014 Steffen Noack
# add property 'mouse_btn_mode'
# 0 = default: left rotate, middle move, right zoom
# 1 = left zoom, middle move, right rotate
# 2 = left move, middle rotate, right zoom
# 3 = left zoom, middle rotate, right move
# 4 = left move, middle zoom, right rotate
# 5 = left rotate, middle zoom, right move
import os
import gtk, gobject
import linuxcnc
import gremlin
import rs274.glcanon
import gcode
from hal_actions import _EMC_ActionBase
from hal_glib import GStat
class HAL_Gremlin(gremlin.Gremlin, _EMC_ActionBase):
    """GladeVcp wrapper around the LinuxCNC 'gremlin' G-code preview widget.

    Adds GObject properties for view/DRO configuration, emits
    'line-clicked' when a program line is selected and 'gcode_error' on
    load failures. Python 2 / PyGTK code.
    """
    __gtype_name__ = "HAL_Gremlin"
    __gsignals__ = {
        'line-clicked': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_INT,)),
        'gcode_error': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (gobject.TYPE_STRING,)),
    }
    # Each entry: (type, nick, blurb, [min, max,] default, flags).
    __gproperties__ = {
        'view' : ( gobject.TYPE_STRING, 'View type', 'Default view: p, x, y, y2, z, z2',
                    'p', gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'enable_dro' : ( gobject.TYPE_BOOLEAN, 'Enable DRO', 'Show DRO on graphics',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'metric_units' : ( gobject.TYPE_BOOLEAN, 'Use Metric Units', 'Show DRO in metric or imperial units',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'use_relative' : ( gobject.TYPE_BOOLEAN, 'Show Relative', 'Show DRO relative to active system or machine origin',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'use_commanded' : ( gobject.TYPE_BOOLEAN, 'Show Commanded', 'Show commanded or actual position',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_extents_option' : ( gobject.TYPE_BOOLEAN, 'Show Extents', 'Show machine extents',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_limits' : ( gobject.TYPE_BOOLEAN, 'Show limits', 'Show machine limits',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_live_plot' : ( gobject.TYPE_BOOLEAN, 'Show live plot', 'Show machine plot',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_velocity' : ( gobject.TYPE_BOOLEAN, 'Show tool speed', 'Show tool velocity',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_program' : ( gobject.TYPE_BOOLEAN, 'Show program', 'Show program',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_rapids' : ( gobject.TYPE_BOOLEAN, 'Show rapids', 'Show rapid moves',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_tool' : ( gobject.TYPE_BOOLEAN, 'Show tool', 'Show tool',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_dtg' : ( gobject.TYPE_BOOLEAN, 'Show DTG', 'Show Distance To Go',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'show_lathe_radius' : ( gobject.TYPE_BOOLEAN, 'Show Lathe Radius', 'Show X axis in Radius',
                    False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'grid_size' : ( gobject.TYPE_FLOAT, 'Grid Size', 'Grid Size',
                    0, 100, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'use_joints_mode' : ( gobject.TYPE_BOOLEAN, 'Use joints mode', 'Use joints mode',
                    False, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'use_default_controls' : ( gobject.TYPE_BOOLEAN, 'Use Default Mouse Controls', 'Use Default Mouse Controls',
                    True, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
        'mouse_btn_mode' : ( gobject.TYPE_INT, 'Mouse Button Mode',
                                               ('Mousebutton assignment, l means left, m middle, r right \n'
                                                '0 = default: l-rotate, m-move, r-zoom \n'
                                                '1 = l-zoom, m-move, r-rotate\n'
                                                '2 = l-move, m-rotate, r-zoom\n'
                                                '3 = l-zoom, m-rotate, r-move\n'
                                                '4 = l-move, m-zoom, r-rotate\n'
                                                '5 = l-rotate, m-zoom, r-move\n'
                                                '6 = l-move, m-zoom, r-zoom'),
                    0, 6, 0, gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT),
    }
    __gproperties = __gproperties__
    def __init__(self, *a, **kw):
        gobject.GObject.__init__(self)
        # INI_FILE_NAME is set by the LinuxCNC environment; /dev/null gives
        # an empty configuration when run standalone.
        inifile = os.environ.get('INI_FILE_NAME', '/dev/null')
        inifile = linuxcnc.ini(inifile)
        gremlin.Gremlin.__init__(self, inifile)
        self._reload_filename = None
        self.gstat = GStat()
        self.gstat.connect('file-loaded', self.fileloaded)
        self.gstat.connect('reload-display', self.reloadfile)
        self.show()
    def reloadfile(self,w):
        # Re-load the last file; best-effort (no file loaded yet is fine).
        try:
            self.fileloaded(None,self._reload_filename)
        except:
            pass
    def fileloaded(self,w,f):
        # Remember the filename for reloadfile() and display the program.
        self._reload_filename=f
        try:
            self._load(f)
        except AttributeError,detail:
            #AttributeError: 'NoneType' object has no attribute 'gl_end'
            print 'hal_gremlin: continuing after',detail
    def do_get_property(self, property):
        # GObject property getter; 'view' is backed by current_view.
        name = property.name.replace('-', '_')
        if name == 'view':
            return self.current_view
        elif name in self.__gproperties.keys():
            return getattr(self, name)
        else:
            raise AttributeError('unknown property %s' % property.name)
    def do_set_property(self, property, value):
        # GObject property setter; validates 'view' against the machine
        # type (lathe vs mill) and queues a redraw after any change.
        name = property.name.replace('-', '_')
        if name == 'view':
            view = value.lower()
            if self.lathe_option:
                if view not in ['p','y','y2']:
                    return False
            elif view not in ['p', 'x', 'y', 'z', 'z2']:
                return False
            self.current_view = view
            if self.initialised:
                self.set_current_view()
        elif name == 'enable_dro':
            self.enable_dro = value
        elif name == 'metric_units':
            self.metric_units = value
        elif name in self.__gproperties.keys():
            setattr(self, name, value)
        else:
            raise AttributeError('unknown property %s' % property.name)
        self.queue_draw()
        return True
    # This overrides glcannon.py method so we can change the DRO
    def dro_format(self,s,spd,dtg,limit,homed,positions,axisdtg,g5x_offset,g92_offset,tlo_offset):
        # Build the on-screen DRO text: returns (limit, homed, posstrs,
        # droposstrs). Uses 3 decimals for metric, 4 for imperial.
        if not self.enable_dro:
            return limit, homed, [''], ['']
        if self.metric_units:
            format = "% 6s:% 9.3f"
            if self.show_dtg:
                droformat = " " + format + " DTG %1s:% 9.3f"
            else:
                droformat = " " + format
            offsetformat = "% 5s %1s:% 9.3f G92 %1s:% 9.3f"
            rotformat = "% 5s %1s:% 9.3f"
        else:
            format = "% 6s:% 9.4f"
            if self.show_dtg:
                droformat = " " + format + " DTG %1s:% 9.4f"
            else:
                droformat = " " + format
            offsetformat = "% 5s %1s:% 9.4f G92 %1s:% 9.4f"
            rotformat = "% 5s %1s:% 9.4f"
        diaformat = " " + format
        posstrs = []
        droposstrs = []
        # One line per axis present in the machine's axis_mask.
        for i in range(9):
            a = "XYZABCUVW"[i]
            if s.axis_mask & (1<<i):
                posstrs.append(format % (a, positions[i]))
                if self.show_dtg:
                    droposstrs.append(droformat % (a, positions[i], a, axisdtg[i]))
                else:
                    droposstrs.append(droformat % (a, positions[i]))
        droposstrs.append("")
        # Work-offset section: G54..G59.3 label derived from g5x_index.
        for i in range(9):
            index = s.g5x_index
            if index<7:
                label = "G5%d" % (index+3)
            else:
                label = "G59.%d" % (index-6)
            a = "XYZABCUVW"[i]
            if s.axis_mask & (1<<i):
                droposstrs.append(offsetformat % (label, a, g5x_offset[i], a, g92_offset[i]))
        droposstrs.append(rotformat % (label, 'R', s.rotation_xy))
        droposstrs.append("")
        # Tool-length-offset section.
        for i in range(9):
            a = "XYZABCUVW"[i]
            if s.axis_mask & (1<<i):
                droposstrs.append(rotformat % ("TLO", a, tlo_offset[i]))
        # if its a lathe only show radius or diameter as per property
        # we have to adjust the homing icon to line up:
        if self.is_lathe():
            if homed[0]:
                homed.pop(0)
                homed.pop(0)
                homed.insert(0,1)
                homed.insert(0,0)
            posstrs[0] = ""
            if self.show_lathe_radius:
                posstrs.insert(1, format % ("Rad", positions[0]))
            else:
                posstrs.insert(1, format % ("Dia", positions[0]*2.0))
            droposstrs[0] = ""
            if self.show_dtg:
                if self.show_lathe_radius:
                    droposstrs.insert(1, droformat % ("Rad", positions[0], "R", axisdtg[0]))
                else:
                    droposstrs.insert(1, droformat % ("Dia", positions[0]*2.0, "D", axisdtg[0]*2.0))
            else:
                if self.show_lathe_radius:
                    droposstrs.insert(1, droformat % ("Rad", positions[0]))
                else:
                    droposstrs.insert(1, diaformat % ("Dia", positions[0]*2.0))
        if self.show_velocity:
            posstrs.append(format % ("Vel", spd))
            pos=0
            for i in range(9):
                if s.axis_mask & (1<<i): pos +=1
            # NOTE(review): `self.is_lathe` is not called here (unlike
            # `self.is_lathe()` above), so this is always truthy -- looks
            # like a missing call; confirm before changing.
            if self.is_lathe:
                pos +=1
            droposstrs.insert(pos, " " + format % ("Vel", spd))
        if self.show_dtg:
            posstrs.append(format % ("DTG", dtg))
        return limit, homed, posstrs, droposstrs
    # Override gremlin's / glcannon.py function so we can emit a GObject signal
    def update_highlight_variable(self,line):
        self.highlight_line = line
        if line == None:
            line = -1
        self.emit('line-clicked', line)
    def realize(self, widget):
        gremlin.Gremlin.realize(self, widget)
    @rs274.glcanon.with_context
    def _load(self, filename):
        # Load a program with a current OpenGL context bound.
        return self.load(filename)
    def report_gcode_error(self, result, seq, filename):
        # Translate the numeric gcode result into text and broadcast it.
        error_str = gcode.strerror(result)
        errortext = "G-Code error in " + os.path.basename(filename) + "\n" + "Near line " \
                    + str(seq) + " of\n" + filename + "\n" + error_str + "\n"
        print(errortext)
        self.emit("gcode-error", errortext)
|
In addition to my dehydrator, this lucky girl is also a proud owner of a juicer. I’m not yet ready to fully endorse the product. However, on its first run it made a pretty damn good juice that I usually pay $5 for.
The juicer is the Breville Multi-speed Juice Fountain and it’s a honker. It takes up a lot of counter space and it’s loud. But the pros outweigh the cons right now. It’s rather simple to clean-up (I put a veggie bag in the pulp catch), it’s easy to use and it made a great pulp free juice.
Toss in at 5 and drink.
My composting worms are going to be very happy too! Look at all this fresh mash.
I’m a lucky girl. For Christmas this year, H got me a dehydrator. I finally busted it out yesterday since I had a bunch of kale. You see, there’s this kale chip that I love but it’s $8 a bag! So, I took a look at the ingredients and made my own recipe. It is almost exactly the same taste as the $8 bag and mine cost $3 to make.
If you don’t have a dehydrator, you can definitely use your oven.
Mix everything except the kale in the blender (this is where I highly endorse the vitamix blender).
In a large bowl, add your kale and a couple tablespoons of the mixture. The mixture is quite thick so work in batches so you don’t have too much on one piece of kale and not enough on another.
Pop the coated chips into your dehydrator (125 degrees F) or on a cookie sheet and into the oven (lowest setting). Make sure it’s in a single layer.
A few hours later (6 in the dehydrator in my neck of the woods or 2 in the oven), you have a deliciously nutritious snack!!!
So go out, get some ground chuck and make this little loaf the feature on tomorrow’s dinner table.
Mix all the above ingredients except the extra catsup. Don’t mix too much though – just enough to incorporate all the ingredients.
Form the meat in a loaf pan and pat down the top a bit. Make an indentation lengthwise in the middle of the loaf. Squeeze catsup into the indentation and smear it along the top as well.
Place it in the oven covered with foil for about 45 mins. Uncover it and broil for a couple mins to crisp up the top a bit.
Ever since munchkin started eating solids, rice has been the starch option for the majority of our dinners. I make it at night for our dinner and then use the leftovers for her favorite breakfast: steamed rice and eggs (often with spinach or other greens in the eggs). (pro tip: steamed rice freezes perfectly. Put it in individual ziplocks and just pop it out of the bag when ready and microwave for 30 seconds.) Munchkin adores her rice and eggs for breakfast. But what she loves even more is breakfast fried rice. Who wouldn’t with all that soy sauce and bacon?
And now since NY Nana and Papa have arrived, we’ve eaten it at least 5 times.
The next time you have guests over for breakfast, skip the pancakes and throw on a pan of breakfast fried rice. They’ll love it!
Heat the oven to 375. Line a cookie sheet with parchment paper. (do not use the totally flat cookie sheet or all your bacon drippings will be in the bottom of your oven. You need a cookie sheet with at least a 1/2″ lip around it). Place the bacon on the parchment paper and bake until crispy (around 15 mins).
Carefully remove the bacon from the oven. That oil is going to be hottttt! And put the bacon on some paper towels. Then chop up about 4-5 slices into 1/4″ strips. Set aside. (the remaining bacon is for the cook to share).
Scramble the eggs and cook them in a bit of veggie oil (or bacon drippings if you’d like) until done in a large nonstick frying pan. Set aside.
Using the same pan, add the rest of the oil or 1 TBS of bacon drippings and fry the rice for 2 mins. Add in the soy sauce and stir until the rice is no longer white. You may want to add more. Add in the eggs, bacon, and scallions. Top with a bit of pepper. Be sure to taste. It should be salty goodness.
|
"""
================
XDAWN Denoising
================
XDAWN filters are trained from epochs, signal is projected in the sources
space and then projected back in the sensor space using only the first two
XDAWN components. The process is similar to an ICA, but is
supervised in order to maximize the signal to signal + noise ratio of the
evoked response.
WARNING: As this denoising method exploits the known events to
maximize SNR of the contrast between conditions it can lead to overfit.
To avoid a statistical analysis problem you should split epochs used
in fit with the ones used in apply method.
References
----------
[1] Rivet, B., Souloumiac, A., Attina, V., & Gibert, G. (2009). xDAWN
algorithm to enhance evoked potentials: application to brain-computer
interface. Biomedical Engineering, IEEE Transactions on, 56(8), 2035-2043.
[2] Rivet, B., Cecotti, H., Souloumiac, A., Maby, E., & Mattout, J. (2011,
August). Theoretical analysis of xDAWN algorithm: application to an
efficient sensor selection in a P300 BCI. In Signal Processing Conference,
2011 19th European (pp. 1382-1386). IEEE.
"""
# Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
from mne import (io, compute_raw_covariance, read_events, pick_types,
Epochs)
from mne.datasets import sample
from mne.preprocessing import Xdawn
from mne.viz import plot_epochs_image
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3  # epoch window around each event, in seconds
event_id = dict(vis_r=4)  # right-visual-field condition only
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(1, 20, method='iir')  # replace baselining with high-pass
events = read_events(event_fname)
raw.info['bads'] = ['MEG 2443']  # set bad channels
picks = pick_types(raw.info, meg=True, eeg=False, stim=False, eog=False,
                   exclude='bads')
# Epoching
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
                picks=picks, baseline=None, preload=True,
                add_eeg_ref=False, verbose=False)
# Plot image epoch before xdawn
plot_epochs_image(epochs['vis_r'], picks=[230], vmin=-500, vmax=500)
# Estimates signal covariance
signal_cov = compute_raw_covariance(raw, picks=picks)
# Xdawn instance: keep only the first two spatial components when
# projecting back to sensor space.
xd = Xdawn(n_components=2, signal_cov=signal_cov)
# Fit xdawn
xd.fit(epochs)
# Denoise epochs
# NOTE(review): fitting and applying Xdawn on the same epochs can overfit
# (see the module docstring) -- acceptable for a demo, not for analysis.
epochs_denoised = xd.apply(epochs)
# Plot image epoch after xdawn
plot_epochs_image(epochs_denoised['vis_r'], picks=[230], vmin=-500, vmax=500)
|
If you are finding it difficult to narrow down your design choice on our website then don't worry - it's really easy to choose more than one design at Coolaz. You just have to let us know which ones you like the best and we'll do the rest. Gemma and Edward selected three of our designs for their bomboniere for their wedding just past this weekend (congratulations guys!) and asked us to match them all with their wedding colours.
|
# Copyright (C) 2013 Jolla Ltd.
# Contact: Islam Amer <islam.amer@jollamobile.com>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to
# the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import re
from django.conf import settings
from django.contrib.auth.models import Group, User
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import timezone
from webhook_launcher.app.boss import launch_notify, launch_build
from webhook_launcher.app.misc import get_or_none, giturlparse
# FIXME: All null=True + blank=True text fields
# Unless it is intentional that a text field can be set to either NULL or ''
# (empty string), it is recommended not to use null=True, to avoid the
# situation where the field has two possible values for "empty". That can be
# problematic, for example in lookups, where NULL and '' behave differently
class BuildService(models.Model):
    """An OBS (Open Build Service) instance that webhook mappings target."""
    # Short alias for the OBS instance; also used in BOSS process messages.
    namespace = models.CharField(
        max_length=50,
        unique=True,
        help_text="This is also used to identify the OBS alias "
        "in BOSS processes",
    )
    # Base URL of the OBS API endpoint.
    apiurl = models.CharField(
        max_length=250,
        unique=True,
    )
    # Base URL of the OBS web UI; used as the human readable identity below.
    weburl = models.CharField(
        max_length=250,
        unique=True,
    )
    def __unicode__(self):
        # Display the web URL as this object's name in admin listings.
        return self.weburl
class VCSService(models.Model):
    """A VCS hosting service (e.g. a GitHub or GitLab instance)."""
    name = models.CharField(
        max_length=50,
        unique=True,
        help_text="Friendly name of this VCS hosting service",
    )
    # Matched against the netloc parsed from incoming webhook payload URLs.
    netloc = models.CharField(
        max_length=200,
        unique=True,
        help_text="Network location from payload "
        "(for example: git@git.merproject.org:1234)",
    )
    ips = models.TextField(
        blank=True,
        null=True,
        help_text="Known IP adresses of this service (optional)",
    )
    def __unicode__(self):
        # Identify the service by its network location.
        return self.netloc
class VCSNameSpace(models.Model):
    """A namespace (organization/group path) on a VCS hosting service."""
    service = models.ForeignKey(
        VCSService,
        help_text="VCS service where this namespace is hosted",
    )
    path = models.CharField(
        max_length=200,
        help_text="the network path "
        "(gitlab group or github organization eg. /mer-core)",
    )
    default_project = models.ForeignKey(
        "Project",
        blank=True,
        null=True,
        help_text="Default project for webhook placeholder creation",
    )
    def __unicode__(self):
        return "%s%s" % (self.service, self.path)
    @staticmethod
    def find(repourl):
        """Return the VCSNameSpace matching a repo URL, or None.

        Matches the URL's network location against the service and the
        directory part of the URL path (the group/organization part)
        against the namespace path.
        """
        url = giturlparse(repourl)
        return get_or_none(
            VCSNameSpace,
            service__netloc=url.netloc,
            path=os.path.dirname(url.path)
        )
class Project(models.Model):
    """An OBS project together with webhook access/trigger policy."""
    name = models.CharField(
        max_length=250,
        help_text="The OBS project name. eg nemo:mw",
    )
    obs = models.ForeignKey(
        BuildService,
    )
    official = models.BooleanField(
        default=True,
        help_text="If set then only valid namespaces can be used for the "
        "git repo",
    )
    allowed = models.BooleanField(
        default=True,
        help_text="If not set then webhooks are not allowed for this project. "
        "This is useful for projects which should only have "
        "specific versions of packages promoted to them.",
    )
    gated = models.BooleanField(
        default=False,
        help_text="If set then webhooks pointing at this project will be "
        "triggered to a side project instead and then "
        "an autopromotion attempted. This is useful for projects "
        "which apply formal entry checks and/or QA.",
    )
    groups = models.ManyToManyField(
        Group,
        blank=True,
    )
    vcsnamespaces = models.ManyToManyField(
        VCSNameSpace,
        blank=True,
    )
    match = models.CharField(
        max_length=250,
        blank=True,
        null=True,
        help_text="If set then used as well as name to re.match() "
        "project names",
    )
    class Meta:
        unique_together = (("name", "obs"),)
    def __unicode__(self):
        return "%s on %s" % (self.name, self.obs)
    def is_repourl_allowed(self, repourl):
        """Return truthy if repourl is allowed by this project's namespaces.

        With no namespaces configured, every repo URL is allowed.
        """
        repourl = giturlparse(repourl)
        netloc = repourl.netloc
        # NOTE(review): rsplit("/", 1)[1] takes the LAST path component,
        # while VCSNameSpace.find() matches on os.path.dirname(path)
        # (the group/organization part) -- confirm which is intended.
        path = repourl.path.rsplit("/", 1)[1]
        if self.vcsnamespaces.count():
            # Truthy count of matching namespaces acts as the boolean.
            return self.vcsnamespaces.filter(
                path=path,
                service__netloc=netloc,
            ).count()
        else:
            return True
    def is_user_allowed(self, user):
        """Return True if user shares at least one group with the project."""
        user_groups = set(user.groups.all())
        groups = set(self.groups.all())
        if groups and (user_groups & groups):
            return True
        else:
            return False
    def matches(self, proj_name):
        """Return True if proj_name equals name or matches the regex."""
        # TODO Update if/when
        # https://pypi.python.org/pypi/django-regex-field/0.1.4 is used
        if proj_name == self.name:
            return True
        if self.match:
            # this is optimised to a cache in regex-field
            reg = re.compile(self.match)
            if reg.match(proj_name):
                return True
        return False
class WebHookMapping(models.Model):
    """Maps a git repository/branch to an OBS project/package.

    Incoming VCS webhook payloads are matched against these mappings to
    decide whether to send notifications and/or trigger OBS builds.
    """
    # If any fields are added/removed then ensure they are handled
    # correctly in to_fields and the webhook_diff.py
    repourl = models.CharField(
        max_length=200,
        help_text="url of git repo to clone from. Should be a remote http[s]",
    )
    branch = models.CharField(
        max_length=100,
        default="master",
        help_text="name of branch to use. If not specified default branch "
        "(or currently checked out one) will be used",
    )
    project = models.CharField(
        max_length=250,
        default=settings.DEFAULT_PROJECT,
        help_text="name of an existing project under which to create "
        "or update the package",
    )
    package = models.CharField(
        max_length=250,
        help_text="name of the package to create or update in OBS",
    )
    token = models.CharField(
        max_length=100,
        default="",
        null=True,
        blank=True,
        help_text="a token that should exist in tag names and "
        "changelog entry headers to enable handling them",
    )
    debian = models.CharField(
        max_length=2,
        default="",
        null=True,
        blank=True,
        choices=(
            ('N', 'N'),
            ('Y', 'Y'),
        ),
        help_text="Choose Y to turn on debian packaging support",
    )
    dumb = models.CharField(
        max_length=2,
        default="",
        null=True,
        blank=True,
        choices=(
            ('N', 'N'),
            ('Y', 'Y'),
        ),
        help_text="Choose Y to take content of revision as-is without "
        "automatic processing (example: tarballs in git)",
    )
    notify = models.BooleanField(
        default=True,
        help_text="Enable IRC notifications of events",
    )
    build = models.BooleanField(
        default=True,
        help_text="Enable OBS build triggering",
    )
    comment = models.TextField(
        blank=True,
        null=True,
        default="",
    )
    # The owner of the mapping; set programmatically, not in the admin form.
    user = models.ForeignKey(
        User,
        editable=False,
    )
    obs = models.ForeignKey(
        BuildService,
    )
    class Meta:
        unique_together = (("project", "package", "obs"),)
    def __unicode__(self):
        return "%s/%s -> %s/%s" % (
            self.repourl, self.branch, self.project, self.package
        )
    @property
    def tag(self):
        # Tag of the last seen revision record, or None when unsaved.
        lsr = self.lsr
        if lsr:
            return lsr.tag
    @property
    def revision(self):
        # Revision of the last seen revision record, or None when unsaved.
        lsr = self.lsr
        if lsr:
            return lsr.revision
    @property
    def lsr(self):
        # TODO: refactor the WebHookMapping and LastSeenRevision relation
        # Lazily fetch-or-create the LastSeenRevision for this mapping and
        # cache it on the instance. Unsaved mappings (no pk) return None.
        if not hasattr(self, '_lsr'):
            if self.pk:
                self._lsr, _ = LastSeenRevision.objects.get_or_create(
                    mapping=self
                )
            else:
                return None
        return self._lsr
    @property
    def mapped(self):
        # Truthy when both target project and package are set.
        return self.project and self.package
    @property
    def rev_or_head(self):
        # Prefer the concrete last seen revision; fall back to the branch.
        return self.revision or self.branch
    @property
    def project_disabled(self):
        """Return True if the target project currently forbids webhooks."""
        # Just search all Projects for a match
        for project in Project.objects.all():
            if project.matches(self.project):
                # NOTE: Python 2 print statement (file-wide style).
                print "Project disable check: %s matches rules in %s" % (
                    self.project, project.name
                )
                if project and not project.allowed:
                    # Disabled if Project is marked not-allowed
                    return True
                if project and project.official:
                    # Disabled if Project is official and namespace is not
                    # valid
                    repourl = giturlparse(self.repourl)
                    service = get_or_none(
                        VCSService,
                        netloc=repourl.netloc,
                    )
                    if not service:
                        return True
                    namespace = get_or_none(
                        VCSNameSpace,
                        service=service,
                        path=os.path.dirname(repourl.path),
                    )
                    if not namespace:
                        return True
        return False
    def clean(self, exclude=None):
        """Normalize fields and enforce mapping/namespace/user policy.

        Raises:
            ValidationError: duplicate mapping, disallowed service,
            project policy violation, or non-superuser mapping outside
            the user's home project.
        """
        self.repourl = self.repourl.strip()
        self.branch = self.branch.strip()
        self.project = self.project.strip()
        self.package = self.package.strip()
        # Uniqueness check mirroring Meta.unique_together, excluding self.
        if WebHookMapping.objects.exclude(pk=self.pk).filter(
            project=self.project,
            package=self.package,
            obs=self.obs
        ).count():
            raise ValidationError(
                'A mapping object with the same parameters already exists'
            )
        repourl = giturlparse(self.repourl)
        service = get_or_none(VCSService, netloc=repourl.netloc)
        if settings.SERVICE_WHITELIST and service is None:
            raise ValidationError(
                '%s is not an allowed service' % repourl.netloc
            )
        project = get_or_none(Project, name=self.project)
        if project and not project.allowed:
            raise ValidationError(
                'Project %s does not allow mappings' % project
            )
        if project and project.official:
            namespace = get_or_none(
                VCSNameSpace,
                service=service,
                path=os.path.dirname(repourl.path),
            )
            if not service or not namespace:
                raise ValidationError(
                    'Official project %s allows mapping from known service '
                    'namespaces only' % project
                )
        if settings.STRICT_MAPPINGS:
            if project and not project.is_repourl_allowed(self.repourl):
                raise ValidationError(
                    "Webhook mapping repourl is not allowed by %s's "
                    "strict rules" % project
                )
            if project and not project.is_user_allowed(self.user):
                raise ValidationError(
                    "Webhook mapping to %s not allowed for %s" %
                    (project, self.user)
                )
            # Non-superusers may only map into their own home project.
            if (
                not self.project.startswith("home:%s" % self.user.username) and
                not self.user.is_superuser
            ):
                raise ValidationError(
                    "Webhook mapping to %s not allowed for %s" %
                    (project, self.user)
                )
    def trigger_build(self, user=None, tag=None, force=False):
        """Notify and, unless delayed/disabled/handled, trigger a build.

        Returns the notification message, or None when the project has
        builds disabled.
        """
        if not self.pk:
            raise RuntimeError(
                "trigger_build() on unsaved WebHookMapping"
            )
        # Only fire for projects which allow webhooks. We can't just
        # rely on validation since a Project may forbid hooks after
        # the hook was created
        if self.project_disabled:
            print "Project has build disabled"
            return
        # Already-handled same-tag events are skipped unless forced.
        handled = self.lsr.handled and self.lsr.tag == tag and not force
        if handled:
            print "build already handled, skipping"
        build = self.build and self.mapped and not handled
        qp = None
        if user is None:
            user = self.user.username
        if build:
            if tag:
                self.lsr.tag = tag
            # Find possible queue period objects
            qps = QueuePeriod.objects.filter(
                projects__name=self.project,
                projects__obs=self.obs,
            )
            for qp in qps:
                # NOTE(review): override() is called with keyword
                # ``webuser`` here while QueuePeriod.override's parameter
                # has been named ``user`` -- confirm signatures agree.
                if qp.delay() and not qp.override(webuser=user):
                    print "Build trigger for %s delayed by %s" % (self, qp)
                    print qp.comment
                    build = False
                    break
            else:
                # No queue period blocked the build.
                qp = None
        message = self._get_build_message(user, force, handled, qp)
        fields = self.to_fields()
        fields['msg'] = message
        if self.notify:
            launch_notify(fields)
        if build:
            fields = self.to_fields()
            launch_build(fields)
            self.lsr.handled = True
        self.lsr.save()
        return message
    def _get_build_message(self, user, force=None, handled=False, qp=None):
        """Compose the human-readable notification message for an event."""
        parts = []
        if force:
            parts.append("Forced build trigger:")
        if self.tag:
            parts.append("Tag %s" % self.tag)
        else:
            parts.append(self.revision)
        parts.append(
            "by %s in %s branch of %s" % (
                user, self.branch, self.repourl,
            )
        )
        if not self.mapped:
            parts.append("- which is not mapped yet. Please map it.")
        elif self.build:
            parts.append(
                "- which will trigger build in project %s package "
                "%s (%s/package/show/%s/%s)" % (
                    self.project, self.package, self.obs.weburl,
                    self.package, self.project,
                )
            )
        elif handled:
            parts.append("- which was already handled; skipping")
        elif qp:
            parts.append("- which will be delayed by %s" % qp)
            if qp.comment:
                parts.append("(%s)" % qp.comment)
        return " ".join(parts)
    def handle_commit(self, user=None, notify=None):
        """Record a pushed commit (reset tag/handled) and optionally notify."""
        if not self.pk:
            raise RuntimeError(
                "handle_commit() on unsaved WebHookMapping"
            )
        if user is None:
            user = self.user.username
        if notify is None:
            notify = self.notify
        # A new commit invalidates any previously seen tag/handled state.
        self.lsr.tag = ""
        self.lsr.handled = False
        self.lsr.save()
        if not notify:
            return
        message = "Commit(s) pushed by %s to %s branch of %s" % (
            user, self.branch, self.repourl
        )
        if not self.mapped:
            message = "%s, which is not mapped yet. Please map it." % message
        fields = self.to_fields()
        fields['msg'] = message
        print message
        launch_notify(fields)
    def to_fields(self):
        """Serialize this mapping into a plain dict for BOSS processes.

        Keep in sync with the model fields (see class-level comment).
        """
        fields = {}
        fields['repourl'] = self.repourl
        fields['branch'] = self.branch
        fields['pk'] = self.pk
        if self.project:
            fields['project'] = self.project
            fields['package'] = self.package
            fields['ev'] = {
                'namespace': self.obs.namespace
            }
        if self.token:
            fields['token'] = self.token
        if self.debian:
            fields['debian'] = self.debian
        if self.dumb:
            fields['dumb'] = self.dumb
        if self.revision:
            fields['revision'] = self.revision
        if self.tag:
            fields['tag'] = self.tag
        return fields
class LastSeenRevision(models.Model):
    """Tracks the latest revision/tag seen for a WebHookMapping."""
    mapping = models.ForeignKey(
        WebHookMapping,
    )
    # Identifier of the last seen revision (e.g. a commit SHA).
    revision = models.CharField(
        max_length=250,
    )
    # Tag name associated with the revision, if any.
    tag = models.CharField(
        max_length=50,
        blank=True,
        null=True
    )
    # Whether this revision has already been processed (build triggered).
    handled = models.BooleanField(
        default=False,
        editable=False,
    )
    # Updated automatically on every save.
    timestamp = models.DateTimeField(
        auto_now=True,
    )
    # Email addresses for this revision -- presumably collected from the
    # webhook payload; confirm against the payload handler.
    emails = models.TextField(
        blank=True,
        null=True,
        editable=False,
    )
    # Raw webhook payload kept for debugging/auditing.
    payload = models.TextField(
        blank=True,
        null=True,
        editable=False,
    )
    def __unicode__(self):
        return "%s @ %s/%s" % (
            self.revision, self.mapping.repourl, self.mapping.branch
        )
class QueuePeriod(models.Model):
    """A time window during which builds for the associated projects
    are held back (queued) instead of triggered immediately.
    """
    start_time = models.TimeField(
        default=timezone.now,
    )
    end_time = models.TimeField(
        default=timezone.now,
    )
    start_date = models.DateField(
        blank=True,
        null=True,
    )
    end_date = models.DateField(
        blank=True,
        null=True,
    )
    recurring = models.BooleanField(
        default=False,
    )
    comment = models.TextField(
        blank=True,
        null=True,
    )
    projects = models.ManyToManyField(
        Project,
    )
    class Meta:
        permissions = (
            ("can_override_queueperiod", "Can override queue periods"),
        )
    def __unicode__(self):
        return "Queue period from %s %s to %s %s for %s" % (
            self.start_date or "", self.start_time, self.end_date or "",
            self.end_time,
            ",".join([str(prj) for prj in self.projects.all()])
        )
    def override(self, user=None, webuser=None):
        """Return True if the given user may override this queue period.

        Accepts the user either positionally or via the ``webuser``
        keyword, which is how WebHookMapping.trigger_build() calls it;
        the original signature only accepted ``user``, so that call
        raised TypeError.
        """
        if user is None:
            user = webuser
        if not user:
            return False
        # trigger_build() passes a username string, which cannot be
        # permission-checked; treat anything without has_perm() as
        # not allowed to override.
        perm_check = getattr(user, "has_perm", None)
        if perm_check is None:
            return False
        if perm_check("app.can_override_queueperiod"):
            return True
        return False
    def delay(self, dto=None):
        """Return True if builds should be delayed at datetime ``dto``.

        ``dto`` defaults to the current time at *call* time. The
        original signature used ``dto=timezone.now()``, which is
        evaluated once at import time and therefore stale for every
        subsequent call.
        """
        if dto is None:
            dto = timezone.now()
        if self.start_time <= self.end_time:
            if not (self.start_time <= dto.time() <= self.end_time):
                # wrong time of day
                return False
        if self.start_time >= self.end_time:
            if (self.start_time >= dto.time() >= self.end_time):
                # wrong time of day
                return False
        if self.start_date and (dto.date() < self.start_date):
            # not started yet
            return False
        if self.end_date and (dto.date() > self.end_date):
            # already ended
            return False
        return True
class RelayTarget(models.Model):
    """An external HTTP endpoint that matching GIT events are relayed to."""
    active = models.BooleanField(
        default=True,
        help_text="Whether this relay will fire on matching events",
    )
    name = models.CharField(
        max_length=50,
        help_text="Friendly name of recipient, for example: Organization name",
    )
    url = models.CharField(
        max_length=200,
        help_text="HTTP(S) endpoint which will receive POST of GIT events "
        "(for example http://webhook.example.com/webhook/)",
    )
    verify_SSL = models.BooleanField(
        default=True,
        help_text="Turn on SSL certificate verification",
    )
    sources = models.ManyToManyField(
        VCSNameSpace,
        help_text="List of VCS namespaces "
        "(for example github organization or gitlab groups)",
    )
    def __unicode__(self):
        return "%s webhook relay" % self.name
|
Thank you for a fantastic talk, which everyone really loved. Several people even described it as the best session in their evaluation of the entire summit. And the message has clearly got through. On day two there were loads of references to "Yes, and ...", "Open Mind" and "Ready for anything, busy doing nothing".
|
# -*- coding: utf-8 -*-
"""
99. Recover Binary Search Tree
Two elements of a binary search tree (BST) are swapped by mistake.
Recover the tree without changing its structure.
Note:
A solution using O(n) space is pretty straight forward. Could you devise a constant space solution?
"""
# Definition for a binary tree node.
class TreeNode(object):
    """Binary tree node with a level-order (BFS) repr.

    Children default to None; absent children serialize as "#", with
    trailing "#" placeholders trimmed from the repr.
    """
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
    def __repr__(self):
        # Breadth-first serialization of the tree rooted at self.
        serial = []
        queue = [self]
        head = 0  # index pointer; the original's queue = queue[1:] was O(n^2)
        while head < len(queue):
            cur = queue[head]
            head += 1
            if cur:
                serial.append(cur.val)
                queue.append(cur.left)
                queue.append(cur.right)
            else:
                serial.append("#")
        # Drop trailing "#" placeholders; guard against an empty list so
        # serial[-1] cannot raise IndexError.
        while serial and serial[-1] == "#":
            serial.pop()
        # __repr__ must return a str: the original returned None on its
        # (unreachable) falsy branch, which would raise TypeError.
        return repr(serial)
class Solution(object):
    def recoverTree(self, root):
        """Recover a BST in which exactly two nodes' values were swapped.

        An in-order traversal of a valid BST is ascending; with two
        values swapped it is not. Collect the in-order values and node
        references, sort the values, then write them back into the
        nodes in order. Uses O(n) extra space.

        (Locals were renamed from ``list``/``listp``, which shadowed
        the ``list`` builtin.)

        :type root: TreeNode
        :rtype: TreeNode -- the same tree, modified in place
        """
        values = []  # in-order sequence of node values (possibly unsorted)
        nodes = []   # in-order sequence of node references
        self.inorder(root, values, nodes)
        values.sort()
        for i in range(len(values)):
            nodes[i].val = values[i]
        return root
    def inorder(self, root, values, nodes):
        """Append each node's value and reference in in-order."""
        if root:
            self.inorder(root.left, values, nodes)
            values.append(root.val)
            nodes.append(root)
            self.inorder(root.right, values, nodes)
if __name__ == "__main__":
    # Ad-hoc smoke test: build a two-node tree (0 with left child 1 --
    # an invalid BST) and show it before and after recovery.
    # NOTE: Python 2 print statements (file-wide style).
    root = TreeNode(0)
    root.left = TreeNode(1)
    print root
    print Solution().recoverTree(root)
|
I had occasion today to set up a transparent bridging firewall. It's basically a computer that sits on the wire and silently drops packets matching certain rules.
In my case I wanted to restrict traffic to certain subnets without disturbing anything. So I took a Pentium Pro-based HP Vectra (I'm telling you, those old HPs never die!) and put in two PCI-based ethernet cards: an Intel card and a trusty 3Com 905. It was nice that I had two different cards lying around, since they can be easily identified within FreeBSD by their separate interfaces (fxp0 and xl0, respectively). I installed FreeBSD 6.2 from a CD with minimal settings -- in particular, I declined to configure the network interface cards.
Added the following lines to /etc/rc.conf to force the two ethernet interfaces up and set up the bridge.
ifconfig_bridge0="addm fxp0 addm xl0 up"
Then I added ipfw rules to /etc/rc.firewall.local.
Note that I did not assign an IP address to either network card. That means the only way into the machine is through the console, and the only way to tell that it's there on the wire is to guess because response time is a tiny bit longer.
So far it looks like even a Pentium Pro is overkill.
|
"""
desisim.scripts.pixsim
======================
This is a module.
"""
from __future__ import absolute_import, division, print_function
import os,sys
import os.path
import shutil
import random
from time import asctime
import numpy as np
import desimodel.io
from desiutil.log import get_logger
import desispec.io
from desispec.parallel import stdouterr_redirected
from ..pixsim import simulate_exposure
from .. import io
log = get_logger()
def expand_args(args):
    '''expand camera string into list of cameras

    Also fills in derived defaults: the simspec input from
    --night/--expid, and raw/simpix output paths next to the simspec.
    '''
    # Without an explicit simspec file, derive it from night + expid.
    if args.simspec is None:
        if args.night is None or args.expid is None:
            message = 'Must set --simspec or both --night and --expid'
            log.error(message)
            raise ValueError(message)
        args.simspec = io.findfile('simspec', args.night, args.expid)
    # Turn the comma separated --cameras string into a list.
    if args.cameras is not None:
        args.cameras = args.cameras.split(',')
    # Default raw output lands in the same directory as the simspec input.
    if args.rawfile is None:
        raw_name = os.path.basename(
            desispec.io.findfile('raw', args.night, args.expid))
        args.rawfile = os.path.join(
            os.path.dirname(args.simspec), raw_name)
    # Default simpix truth image lands next to the raw file.
    if args.simpixfile is None:
        raw_dir = os.path.dirname(os.path.abspath(args.rawfile))
        args.simpixfile = io.findfile(
            'simpix', night=args.night, expid=args.expid, outdir=raw_dir)
#-------------------------------------------------------------------------
#- Parse options
def parse(options=None):
    """Parse pixsim command line options.

    Args:
        options: optional list of command line arguments to parse
            instead of sys.argv (items are str()-converted first)

    Returns:
        argparse.Namespace with derived defaults filled in by
        expand_args().
    """
    import argparse
    parser = argparse.ArgumentParser(
        description = 'Generates simulated DESI pixel-level raw data',
        )
    #- Inputs
    parser.add_argument("--simspec", type=str, help="input simspec file")
    parser.add_argument("--psf", type=str, help="PSF filename")
    parser.add_argument("--cosmics", action="store_true", help="Add cosmics")
    # parser.add_argument("--cosmics_dir", type=str,
    #     help="Input directory with cosmics templates")
    # parser.add_argument("--cosmics_file", type=str,
    #     help="Input file with cosmics templates")
    #- Outputs
    parser.add_argument("--rawfile", type=str, help="output raw data file")
    parser.add_argument("--simpixfile", type=str,
        help="output truth image file")
    #- Alternately derive inputs/outputs from night, expid, and cameras
    parser.add_argument("--night", type=str, help="YEARMMDD")
    parser.add_argument("--expid", type=int, help="exposure id")
    parser.add_argument("--cameras", type=str, help="cameras, e.g. b0,r5,z9")
    parser.add_argument("--ccd_npix_x", type=int,
        help="for testing; number of x (columns) to include in output",
        default=None)
    parser.add_argument("--ccd_npix_y", type=int,
        help="for testing; number of y (rows) to include in output",
        default=None)
    parser.add_argument("--verbose", action="store_true",
        help="Include debug log info")
    parser.add_argument("--overwrite", action="store_true",
        help="Overwrite existing raw and simpix files")
    #- Not yet supported so don't pretend it is
    ### parser.add_argument("--seed", type=int, help="random number seed")
    parser.add_argument("--ncpu", type=int,
        help="Number of cpu cores per thread to use", default=0)
    parser.add_argument("--wavemin", type=float,
        help="Minimum wavelength to simulate")
    parser.add_argument("--wavemax", type=float,
        help="Maximum wavelength to simulate")
    parser.add_argument("--nspec", type=int,
        help="Number of spectra to simulate per camera")
    if options is None:
        args = parser.parse_args()
    else:
        # Tolerate non-string items (e.g. ints) in a provided options list.
        options = [str(x) for x in options]
        args = parser.parse_args(options)
    expand_args(args)
    return args
def main(args, comm=None):
    """Run the pixel-level simulation for one exposure.

    Args:
        args: parsed options from parse()
        comm: optional MPI communicator; rank 0 handles the startup log
            message and removal of a pre-existing raw file
    """
    if args.verbose:
        import logging
        log.setLevel(logging.DEBUG)
    if comm is None or comm.rank == 0:
        log.info('Starting pixsim at {}'.format(asctime()))
        # --overwrite: remove a stale raw file before simulating.
        if args.overwrite and os.path.exists(args.rawfile):
            log.debug('Removing {}'.format(args.rawfile))
            os.remove(args.rawfile)
    simulate_exposure(args.simspec, args.rawfile, cameras=args.cameras,
        simpixfile=args.simpixfile, addcosmics=args.cosmics,
        nspec=args.nspec, wavemin=args.wavemin, wavemax=args.wavemax,
        comm=comm)
|
Pollinators are crucial contributors to our environment and society by enhancing plant diversity in wild lands and providing food for humans in agricultural settings. Some three-fourths of all native plants in the world require pollination by an animal, most often an insect, and most often a native bee.
decision tools such as the National Protocol Framework for the Inventory and Monitoring of Bees.
Our projects are coordinated through the Federal Pollinator Health Task Force and the Monarch Butterfly High Level Working Group as identified in the 2014 Presidential Memorandum on Pollinator Health.
Pollinators, most often honey bees, are responsible for one in every three bites of food you take, and increase our nation’s crop values each year by more than 15 billion dollars.
More information about Status and Trends program pollinator research is available from the "Related Science" tab on the top navigation or from the links below.
DNA Barcoding, based on sequence variation between taxa in the cytochrome c oxidase subunit 1 (COI) of mitochondrial DNA or other loci, has provided an alternative, genetically-based methodology for distinguishing bee specimens. The application of Next-Generation Sequencing (NGS) technologies to DNA Barcoding has permitted the sequencing of many more samples at one time, at lower cost, and increased sequencing depth per specimen.
Beginning in 2012, the USGS collaborated with the USDA to assess the effectiveness of pollinator plantings and how alteration of landscapes has affected native pollinators and potentially contributed to their decline. The 2008 Farm Bill recognized contributions made by pollinators and made conservation of pollinator habitat a priority. The USGS is assessing native bee habitat, diversity, and richness in eastern Colorado grasslands and croplands to evaluate the extent to which they provide food and refuge.
Native bees also face challenges due to loss of the plants, from which they gather nectar and pollen, and from introduced diseases and general loss of habitat. Additionally, both Mountain Tops and Dunes contain bees that are only found in these isolated habitats. As regions warm and seas rise these species may be trapped without a place to go.
Honey bee colony failure is primarily due to the infestation of mites and agricultural pesticides, including neonicotinoids. USGS researches the impacts these have on honey bee reproductive capabilities.
USGS studies related to pollinators that are part of the Status and Trends program are listed below.
Bogan, Michael A.; Cryan, Paul; Weise, Christa D.; Valdez, Ernest W.
Bogan, M.A., P.M. Cryan, C.D. Weise, and E.W. Valdez. 2017. Landscape movements by two species of migratory nectar-feeding bats (Leptonycteris) in a northern area of seasonal sympatry. Western North American Naturalist 77(3):317-330.
Otto, C. R. V., S. O’Dell, R. B. Bryant, N. H. Euliss Jr., R. Bush, and M. D. Smart. 2017. Using publicly available data to quantify plant-pollinator interactions and evaluate conservation seeding mixes in the Northern Great Plains. Environmental Entomology, doi:10.1093/ee/nvx070.
Hladik, M.L., Bradbury, S., Schulte, L.A., Helmers, M., Witte, C., Kolpin, D.W., Garrett, J.D. and Harris, M., 2017, Neonicotinoid insecticide removal by prairie strips in row-cropped watersheds with historical seed coating use, Agric. Ecosyst. Environ., v. 241, pp. 160-167.
Tick and mosquito control provides important public health protection, but can also affect pollinator populations. The effects are often dependent on specific local conditions, such as how close the pesticide application is to places pollinators frequent, and when they frequent them.
|
from django.contrib import admin
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.models import Session
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from .forms import BlacklistForm
from .models import Blacklist
def nuke_users(modeladmin, request, queryset):
    """
    Deactivates user, removes their comments, deletes their session,
    and leaves a record of what they did to get nuked.
    This action can be used from user or comment admin.
    If you would like to use it in other model admins,
    you'll need to add appropriate content type handling.
    """
    users = None
    form = BlacklistForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
    contenttype = ContentType.objects.get_for_model(queryset.model)
    # Because we want this action available from comments or user admin lists, sort out content type
    # NOTE(review): unicode() is Python 2 only; under Python 3 this raises
    # NameError -- confirm the target runtime before reusing this code.
    ctype_as_string = unicode(contenttype)
    if ctype_as_string == 'user':
        users = queryset
    if ctype_as_string == 'comment':
        # build list of unique users within comment list.
        users = []
        for comment in queryset:
            if not comment.user in users:
                users.append(comment.user)
    if ctype_as_string == 'contact':
        # build list of unique users from contact list.
        users = []
        for c in queryset:
            if c.user and c.user not in users:
                users.append(c.user)
    if not users:
        # we haven't built out a content-type appropriate user list.
        return HttpResponse("Error finding content type: %s" % contenttype)
    if 'apply_blacklist' in request.POST:  # we're returning from the intermediate page and are ready to do some work.
        form = BlacklistForm(request.POST)
        if form.is_valid():
            reason = form.cleaned_data['reason']
            spammer = form.cleaned_data['is_spammer']
            for user in users:
                # Deactivate user accounts
                # Note: Update is more efficient,
                # but we can't use it because we may have a list (from comments)
                # rather than a proper queryset.
                user.is_active = False
                user.save()
                for c in user.comment_comments.all():  # remove their comments from public view.
                    if spammer:
                        c.delete()
                    else:
                        c.is_public = False
                        c.is_removed = True
                        c.save()
                for c in user.contact_set.all():  # and contact messages
                    if spammer:
                        c.delete()
                    else:
                        c.publish = False
                        c.save()
                # remove their session. -- Is there a more efficient way than looping through all sessions? That can become a mighty big table.
                for s in Session.objects.all():
                    decoded_session = s.get_decoded()
                    # NOTE(review): newer Django stores _auth_user_id as a
                    # string; comparing with == against the integer user.id
                    # may never match there -- confirm the Django version.
                    if '_auth_user_id' in decoded_session and decoded_session['_auth_user_id'] == user.id:
                        s.delete()
                # and add them to the blacklist
                blacklist = Blacklist(
                    user = user,
                    blacklister = request.user,
                    reason = reason,
                )
                blacklist.save()
            if spammer:
                resp_str = 'Any related accounts will still be visible, but related comments have been deleted.'
            else:
                resp_str = 'Any related accounts and comments will still be visible in the admin.'
            count = len(users)
            if count == 1:
                modeladmin.message_user(request, "%s was removed and blocked from the site. %s" % (users[0].username, resp_str))
            else:
                modeladmin.message_user(request, "%s users were removed and blocked from the site. %s" % (count, resp_str))
            return HttpResponseRedirect(request.get_full_path())
        else:
            return HttpResponse("error!")
    # We haven't captured intermediate page data. Go there...
    return render(request, 'admin/blacklist.html', {'users': users, 'form': form})
nuke_users.short_description = "Blacklist Users"
|
Instead of the header or paragraph value they submit, it returns "Field("Header One")" and " Field("Paragraph One")"(attached is a screenshot showing it is the right color and font, but not the value).
Am I missing a tag of some sort to return the value of the fields instead of the field names, or am I missing this all together in my approach?
Thank you for any guidance you can offer.
Thank you so much for your help! This solved the problem.
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the analysis plugins CLI arguments helper."""
import argparse
import unittest
from plaso.cli import tools
from plaso.cli.helpers import analysis_plugins
from plaso.lib import errors
from tests.cli import test_lib as cli_test_lib
class AnalysisPluginsArgumentsHelperTest(cli_test_lib.CLIToolTestCase):
  """Tests for the analysis plugins CLI arguments helper."""

  # pylint: disable=no-member,protected-access

  # Expected argparse --help output for a parser with only the
  # --analysis argument added.
  _EXPECTED_OUTPUT = """\
usage: cli_helper.py [--analysis PLUGIN_LIST]

Test argument parser.

optional arguments:
  --analysis PLUGIN_LIST
                        A comma separated list of analysis plugin names to be
                        loaded or "--analysis list" to see a list of available
                        plugins.
"""

  def testAddArguments(self):
    """Tests the AddArguments function."""
    argument_parser = argparse.ArgumentParser(
        prog='cli_helper.py', description='Test argument parser.',
        add_help=False,
        formatter_class=cli_test_lib.SortedArgumentsHelpFormatter)

    analysis_plugins.AnalysisPluginsArgumentsHelper.AddArguments(
        argument_parser)

    output = self._RunArgparseFormatHelp(argument_parser)
    self.assertEqual(output, self._EXPECTED_OUTPUT)

  def testParseOptions(self):
    """Tests the ParseOptions function."""
    options = cli_test_lib.TestOptions()
    options.analysis_plugins = 'tagging'

    test_tool = tools.CLITool()
    analysis_plugins.AnalysisPluginsArgumentsHelper.ParseOptions(
        options, test_tool)

    # The comma separated option string is parsed into a list.
    self.assertEqual(test_tool._analysis_plugins, ['tagging'])

    # A non-CLITool configuration object is rejected.
    with self.assertRaises(errors.BadConfigObject):
      analysis_plugins.AnalysisPluginsArgumentsHelper.ParseOptions(
          options, None)

    # An unknown plugin name is rejected.
    options.analysis_plugins = 'bogus'
    with self.assertRaises(errors.BadConfigOption):
      analysis_plugins.AnalysisPluginsArgumentsHelper.ParseOptions(
          options, test_tool)

  # TODO: add test for '--analysis list'
  # TODO: improve test coverage.
if __name__ == '__main__':
  # Run the tests when invoked directly.
  unittest.main()
|
No major difficulties. Mostly an easy scramble with some minor route finding. We skied it which makes it slightly more difficult.
On Saturday, February 10 I finally bagged my first summit of the year 2007 and first summit since October 14 2006! That's almost 4 months without a summit. No wonder it felt so good. ;-) The skiing was fantastic and we took about 5.5 hours for the ascent / descent.
!!Attention!! explor8ion.com is being updated and trip reports migrated to a new site while this one is still operational. The new version of this trip report can be found at https://verndewit.com/2007/02/10/paget-peak/ and contains more photos in a modern format. For more information on this move and possible future changes please click here.
|
"""organizer
organize movies by:
release date:
current decade - by year: 2015, 2014, 2013...
previous decards - by decade: 2000, 1990, 1980
imdb rating:
5.0 and below
7.0 and below
7.5, 8.0, 8.5 ... (0.5 increments)
"""
# TODO: propogate error to browser if thread fails
from datetime import datetime
from math import floor
from ntpath import splitext
from os import makedirs
from os import path
from os import walk
from shutil import move
from shutil import rmtree
from threading import Thread
from hdd_settings.models import HDDRoot
from hdd_settings.models import MovieFolder
from movie_metadata.models import Movie
import logging
log = logging.getLogger('organize')
log.info(72 * '-')
log.info('organize module loaded')
def organizer_status(key=None, value=None):
    """Read or update the module-wide organizer state.

    The state dict is lazily created and stored as an attribute on the
    function itself, so it acts as a process-wide singleton.

    Args:
        key: state key to look up (e.g. 'STATUS', 'FILES_EVALUATED')
        value: if given, new value to store under ``key``

    Returns:
        The value under ``key`` when only ``key`` is given and the key
        holds a non-None value; otherwise the overall 'STATUS' flag
        (True for ON, False for OFF).
    """
    state = getattr(organizer_status, 'status', None)
    if state is None:
        state = {
            'STATUS': False,
            'FILES_EVALUATED': 0,
        }
        organizer_status.status = state
    if state.get(key) is not None:
        if value is None:
            return state[key]
        log.info('organizer status: %s -> %s' % (key, value))
        # TODO: check if key and value are valid
        state[key] = value
    return state['STATUS']
def make_fname(title, relpath):
    """Build the on-disk filename for a movie from its database title.

    Combines the stored movie title with the extension taken from the
    file's current relative path.

    Args:
        title(str): title of the movie
        relpath(str): path stored in database
    Returns:
        str: new filename.ext
    Raises:
        None
    """
    # TODO: validate relpath contains filename.ext
    # TODO: validate that this is a valid/legal filename
    _, extension = splitext(relpath)
    return '%s%s' % (title, extension)
def _criterion_tools(criterion):
    """select the organization criteria

    Attaches functions based on user choice for organization criterion.
    Supported criterions are: release date, imdb score

    Args:
        criterion(str): choice selected by user
    Returns:
        tuple: (_get_folder, _field_exists) callables for the criterion
    Raises:
        TypeError: criterion is not a string
        ValueError: invalid criterion
    """
    # Accept both str and unicode on Python 2; on Python 3 there is no
    # separate unicode type (the original `type(criterion) == unicode`
    # check raised NameError there for non-str input).
    try:
        text_types = (str, unicode)
    except NameError:
        text_types = (str,)
    if not isinstance(criterion, text_types):
        raise TypeError('criterion must be a string: %r' % (criterion,))
    log.info('organization criterion: %s' % criterion)
    if criterion == 'release':
        _get_folder = _folder_by_release_date
        _field_exists = lambda m: m.release is not None
    elif criterion == 'imdb_score':
        _get_folder = _folder_by_imdb_score
        _field_exists = lambda m: m.imdb_score is not None
    else:
        raise ValueError('Invalid organization criterion: %s' % criterion)
    return _get_folder, _field_exists
def _organize(criterion):
    """organize movies on disk/database by provided criterion

    Selects all movies and updates their filenames based on their
    metadata titles. Moves their files to organized folders whose
    name and hierarchy are based on criterion selected.

    Args:
        criterion(str): user choice of organization criterion
    Returns:
        None
    Raises:
        None
    """
    def create_folder(folder):
        """ creates a folder if it does not exist

        Args:
            folder(str): path of the folder, relative to destination
                (an absolute path makes path.join collapse to it)
        Returns:
            None
        Raises:
            None
        """
        # TODO: check if path is valid
        if not path.exists(path.join(destination, folder)):
            log.info('created directory %s' % folder)
            makedirs(path.join(destination, folder))
    # functions for selected criterion
    _get_folder, _field_exists = _criterion_tools(criterion)
    # temporary folder for holding created folders
    tempname = 'tmp'
    log.debug('temporary folder set to ./%s' % tempname)
    uncategorized = 'uncategorized'
    log.debug('uncategorized folder set to ./%s/%s' % (
        tempname, uncategorized))
    parentpath = path.join(
        HDDRoot.get_solo().path,
        MovieFolder.get_solo().relpath)
    destination = path.join(parentpath, tempname)
    # destination is absolute, so the join inside create_folder
    # collapses to destination itself
    create_folder(destination)
    movies = Movie.objects.all()
    for movie in movies:
        # parent folder for the movie file
        if _field_exists(movie):
            folder = _get_folder(movie)
        else:
            folder = uncategorized
        log.debug('folder: %s' % folder)
        create_folder(folder)
        # create new filename -> title with extension
        fname = make_fname(movie.title, movie.relpath)
        # move the file to its new location
        newpath = path.join(
            path.join(destination, folder),
            fname)
        oldpath = path.join(parentpath, movie.relpath)
        move(oldpath, newpath)
        log.debug('%s moved from %s to %s' % (
            movie.title, movie.relpath, newpath))
        # update movie path to the newpath
        movie.relpath = path.join(folder, fname)
        # save updated movie to database
        movie.save()
    # move other files from movie_folder to new folder
    other_files = path.join(destination, 'other_files')
    create_folder(other_files)
    for root, directory, files in walk(parentpath):
        # don't go into the temporary folder
        if not root.startswith(destination):
            for somefile in files:
                move(
                    path.join(root, somefile),
                    path.join(other_files, somefile))
    log.info('moved other files into %s' % other_files)
    # remove all directories from movie folder
    # next(...) instead of the Python 2-only .next() method so this
    # also runs on Python 3
    for directory in next(walk(parentpath))[1]:
        if directory != tempname:
            rmtree(path.join(parentpath, directory))
    log.info('removed all directories from movie folder')
    # move all new folders into movie folder directory
    for directory in next(walk(destination))[1]:
        move(
            path.join(destination, directory),
            path.join(parentpath, directory))
    # delete temporary directory
    rmtree(destination)
    # update status of organizer
    organizer_status('STATUS', False)
def _folder_by_release_date(movie):
"""identifies the correct folder from movie release date
If the movie's release date is in the current decade, it assigns
the release year as its folder name. Otherwise, the decade year
is assigned as its folder name.
E.g. release dates in 2015 (now) will be stored in '2015'
release dates (2001, 2006, ...) will be stored in '2000'
Args:
movie(Movie): movie object from database
Returns:
str: foldername for the movie file
Raises:
None
"""
# TODO: check if movie is a valid Movie object
# TODO: check if movie has a valid release date
if 'this_decade' not in _folder_by_release_date.__dict__:
_folder_by_release_date.this_decade = \
datetime.now().year - datetime.now().year % 10
if 'get_decade' not in _folder_by_release_date.__dict__:
_folder_by_release_date.get_decade = lambda year: year - year % 10
if movie.release.year < _folder_by_release_date.this_decade:
folder = _folder_by_release_date.get_decade(movie.release.year)
else:
folder = movie.release.year
return str(folder)
def _folder_by_imdb_score(movie):
"""identifies the correct folder from movie score
If the movie's score is below a certain threshold, dumps all such
movies together. Otherwise saves each movie in folder based on
IMDb score with 0.5 incrememnts.
For e.g. movie with score 4.5, 3.2, ... go into 'below 5.0'
movie with score 5.1, 6.2, 6.9, ... go into 'below 7.0'
movie with score 7.3 go into '7.0', 7.8 go into '7.5'
Args:
movie(Movie): movie object from database
Returns:
str: foldername for movie file
Raises:
None
"""
imdb_score = movie.imdb_score
# movies rated 5.0 and below
if imdb_score < 5.0:
folder = 'below 5.0'
# movies rated 7.0 and below
elif imdb_score < 7.0:
folder = 'below 7.0'
else:
# 8.2 -> 8.2 + 0.5 -> floor(8.7) -> 8.0 -> 8.0
# 8.7 -> 8.7 + 0.5 -> floot(9.2) -> 9.0 -> 8.5
base = floor(imdb_score + 0.5)
# movie is rated something like x.y
if imdb_score < base:
# y > 0.5, e.g. score:8.7 -> folder:8.5
folder = str(base - 0.5) + ' and above'
else:
# y < 0.5 e.g. score:8.2 -> folder:8.0
folder = str(base) + ' and above'
return folder
def start_organizer(criterion='release'):
    """Launch the organizer on a background daemon thread.

    Args:
        criterion(str): specifies organization structure
    Returns:
        None
    Raises:
        None
    """
    log.info('Started organizer with criterion: %s' % criterion)
    worker = Thread(target=_organize, args=(criterion, ))
    worker.daemon = True
    worker.start()
    log.info('organizer started on daemon thread')
    organizer_status('STATUS', True)
def stop_organizer():
    """Mark the organizer as stopped by clearing the 'STATUS' flag.

    Args:
        None
    Returns:
        None
    Raises:
        None
    """
    log.info('Stopped organizer')
    organizer_status('STATUS', False)
|
Sign up for future Shakers, Mixers & Bottle news!
Click the button below to sign up for future Shakers, Mixers & Bottle news, deals, coupons, and reviews!
Sign up for future Shakers, Mixers & Bottle deals and coupons!
Click the button below to sign up for future Shakers, Mixers & Bottle deals, news, and coupons!
Subscribe for more Shakers, Mixers & Bottle news and alerts!
Subscribe to PricePlow on YouTube or click the button below to sign up for our latest Shakers, Mixers & Bottle news and reviews!
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
TauDEMUtils.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
from PyQt4.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import isMac
class TauDEMUtils:
    """Helpers for locating and running TauDEM / mpiexec binaries."""

    TAUDEM_FOLDER = 'TAUDEM_FOLDER'
    TAUDEM_MULTIFILE_FOLDER = 'TAUDEM_MULTIFILE_FOLDER'
    TAUDEM_USE_SINGLEFILE = 'TAUDEM_USE_SINGLEFILE'
    TAUDEM_USE_MULTIFILE = 'TAUDEM_USE_MULTIFILE'
    MPIEXEC_FOLDER = 'MPIEXEC_FOLDER'
    MPI_PROCESSES = 'MPI_PROCESSES'

    @staticmethod
    def _folderFromSetting(setting, marker):
        """Return the folder configured under *setting*, with a macOS
        fallback probing the QGIS prefix bin directory and
        /usr/local/bin for *marker* (a known executable name).

        NOTE(review): as in the original per-tool methods, a successful
        macOS probe overrides any configured setting value.
        """
        folder = ProcessingConfig.getSetting(setting)
        if folder is None:
            folder = ''

        if isMac():
            testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
            if os.path.exists(os.path.join(testfolder, marker)):
                folder = testfolder
            else:
                testfolder = '/usr/local/bin'
                if os.path.exists(os.path.join(testfolder, marker)):
                    folder = testfolder
        return folder

    @staticmethod
    def taudemPath():
        """Folder of the single-file TauDEM tools."""
        return TauDEMUtils._folderFromSetting(
            TauDEMUtils.TAUDEM_FOLDER, 'slopearea')

    @staticmethod
    def taudemMultifilePath():
        """Folder of the multi-file TauDEM tools."""
        return TauDEMUtils._folderFromSetting(
            TauDEMUtils.TAUDEM_MULTIFILE_FOLDER, 'slopearea')

    @staticmethod
    def mpiexecPath():
        """Folder of the mpiexec binary."""
        return TauDEMUtils._folderFromSetting(
            TauDEMUtils.MPIEXEC_FOLDER, 'mpiexec')

    @staticmethod
    def taudemDescriptionPath():
        """Path to the bundled algorithm description files."""
        return os.path.normpath(
            os.path.join(os.path.dirname(__file__), 'description'))

    @staticmethod
    def executeTauDEM(command, progress):
        """Run a TauDEM command list, streaming console output to
        *progress* and recording it in the processing log."""
        loglines = []
        loglines.append(TauDEMUtils.tr('TauDEM execution console output'))
        fused_command = ''.join(['"%s" ' % c for c in command])
        progress.setInfo(TauDEMUtils.tr('TauDEM command:'))
        progress.setCommand(fused_command.replace('" "', ' ').strip('"'))
        # NOTE(review): shell=True with a string command; each element is
        # quoted above, but command contents must be trusted.
        proc = subprocess.Popen(
            fused_command,
            shell=True,
            stdout=subprocess.PIPE,
            stdin=open(os.devnull),
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        ).stdout
        for line in iter(proc.readline, ''):
            progress.setConsoleInfo(line)
            loglines.append(line)
        ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)

    @staticmethod
    def tr(string, context=''):
        """Translate *string* in the given Qt context (defaults to this
        class's name)."""
        if context == '':
            context = 'TauDEMUtils'
        return QCoreApplication.translate(context, string)
|
"Beating depression was the hardest thing for me to do, but I did it, and feel as powerful as never before."
Beating depression is never easy. However, it can be done if you are prepared to take the control over your life.
Your therapist cannot do everything for you. He or she can guide you and provide you with support and appropriate treatment.
You on the other hand, have to work hard and take the responsibility for your life.
Nobody is responsible for your actions but yourself. Therefore, instead of blaming others for your depression and the suffering it is causing you, you should take the responsibility and face it. You should never feel guilty or ashamed about it because there is nothing to be ashamed of.
Being depressed is not your fault, but it is in your power to work with your therapist to make yourself feel better and cured. Choosing the right therapist for you is very important so take your time. If you feel uncomfortable or are not satisfied with your therapist for whatever the reason, change him or her and look for another.
Beating depression on your own is almost impossible, so do not try it. An appropriate treatment is mandatory for many reasons. Therefore, make sure you have the best treatment you can afford.
Any kind of self-medicating is wrong. It will leave you even more depressed and vulnerable. Alcohol is not the answer to depression and if you have alcohol problems, you must be in alcohol counseling as well as in psychotherapy. It is the same with illegal and legal drugs. Any kind of substance addiction must be addressed appropriately.
James was severely depressed and had almost lost hope that he would ever get better. Unfortunately, he is not the only one that had lost hope. It is very common for severely depressed people to feel hopeless and desperate, which makes their treatment very difficult. However, just because something is difficult that does not mean that it is impossible.
After two years of intense psychotherapy, James got much better and today he is a happy husband and father living without depression. Once a month he visits his therapist for depression prevention. He is planning to become a psychotherapist himself.
After beating depression, it is important to make sure that it does not take over your life again. It is very common for depression to reoccur. This is especially true with certain types of depression such as major depressive disorder. For this reason, after you overcome the depressive episode, you need to make sure that it is the last one.
You can do by always taking care of yourself and not reverting to the old way of thinking and behaving that made you depressed in first place. For example, if you used to be overweight do not eat too much and keep exercising. If you had alcohol problems do not go back to the bottle when you feel stressed and so forth.
Depression as any other illness is a call for change, but not just for the period of your treatment but for as long as you live. Make no mistake, if after your successful depression treatment, you go back to the destructive old habits, you will find yourself depressed again. Do not do that to yourself.
Use your depression to improve yourself and the quality of your life. Make the best out of beating depression and leave the worse of it forever behind.
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The Linux Foundation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import sys
import os
import logging
import stubs
import email.header
# logging
logging.basicConfig()
logger = logging.getLogger('cmdtestlib')
# uncomment to get debug logs
# logger.setLevel(logging.DEBUG)
# Note: these prompts are regexps, escape accordingly!
PROMPT = 'master@data >'
# there's some odd word wrapping happening (pexpect?) so had to cut this
PROMPT_REVIEW_STATE = 'aPplicable/rFc/aBort\? '
PROMPT_REVIEW_REASON = 'Reason \(RET for no mail\): '
PROMPT_COMMIT_ALL = 'commit All/aBort\?'
# there's some odd word wrapping happening (pexpect?) so had to cut this
PROMPT_COMMIT_ACCEPT = 'aPplicable/rFc/aBort\? '
PROMPT_REPLY = 'Send/Edit/aBort?'
PROMPT_REPLY_RETRY = 'Retry/aBort?'
PROMPT_REVIEW_ACCEPT = 'Apply \d+ patches to the pending branch\? \[Apply/Skip/aBort\]'
# NOTE(review): the brackets in '[y/N]' below are unescaped, so the
# regexp engine treats them as a character class -- confirm intended
PROMPT_ACCEPT_CONFIRM = 'Are you sure want to ACCEPT these patches [y/N]: '
PROMPT_UNDER_REVIEW_CONFIRM = 'Are you sure want to set these patches to UNDER REVIEW? [y/N]: '
# the toplevel source directory
srcdir = os.environ['SRCDIR']
# the directory where the tests can store temporary data
testdatadir = os.environ['DATADIR']
# stub executables live alongside the sources
stubsdir = os.path.join(srcdir, 'stubs')
logger.debug('srcdir=%r' % (srcdir))
logger.debug('testdatadir=%r' % (testdatadir))
logger.debug('stubsdir=%r' % (stubsdir))
def decode_mime_encoded_words(text):
    """Decode RFC 2047 MIME encoded-words (e.g. '=?utf-8?q?...?=') in
    *text*, returning the result as a plain string."""
    decoded_parts = email.header.decode_header(text)
    return str(email.header.make_header(decoded_parts))
class StubContext():
    """Bundles the daemon and command stubs (git, stgit, smtpd,
    patchwork, editor, builder) plus the pwcli process under test and
    manages their combined lifecycle.
    """
    def __init__(self, start=False, debug=False, stgit=False, builder='builder'):
        # start: when True, immediately start the stubs and pwcli
        # debug: pass --debug to pwcli
        # stgit: also create (and later start) an stgit stub
        # builder: builder command name handed to pwcli's config
        self.debug = debug
        self.git = stubs.GitStub()
        if stgit:
            self.stgit = stubs.StgStub()
        else:
            self.stgit = None
        self.smtpd = stubs.SmtpdStub()
        self.patchwork = stubs.PatchworkStub()
        self.editor = stubs.EditorStub()
        self.pwcli = None
        self.builder = stubs.BuilderStub()
        self.builder_cmd = builder
        # move to the fake git repository before starting pwcli
        os.chdir(testdatadir)
        if start:
            self.start()
    @staticmethod
    def run_test(func, stgit=False, builder='builder'):
        # Run func(ctxt, pwcli) inside a fully started stub context and
        # always stop/clean up afterwards.
        # NOTE(review): exceptions from func are printed, not re-raised,
        # so a failing test relies on its output being checked.
        ctxt = StubContext(start=True, stgit=stgit, builder=builder)
        pwcli = ctxt.pwcli
        try:
            func(ctxt, pwcli)
        except Exception as e:
            print(e)
        ctxt.stop_and_cleanup()
    def start(self):
        # Start all stub daemons, then pwcli; on any failure tear
        # everything down and exit the process.
        stgit = False
        try:
            self.git.start()
            if self.stgit:
                stgit = True
                self.stgit.start()
            self.smtpd.start()
            self.patchwork.start()
            # FIXME: should this be start()?
            self.editor.stop()
            # must be instantiated only after daemon stubs are running,
            # as this immediately starts pwcli
            self.pwcli = PwcliSpawn(debug=self.debug, stgit=stgit,
                                    builder=self.builder_cmd)
        except Exception as e:
            print('Failed to start stubs: %s' % (e))
            self.stop_and_cleanup()
            sys.exit(1)
    def stop(self):
        # Stop the running stubs; pwcli itself is not stopped here.
        self.git.stop()
        if self.stgit:
            # stop() presumably returns None, clearing the reference --
            # TODO confirm
            self.stgit = self.stgit.stop()
        self.smtpd.stop()
        self.patchwork.stop()
        self.editor.stop()
    def cleanup(self):
        # Remove temporary state created by each stub and by pwcli.
        if self.pwcli:
            self.pwcli.cleanup()
        if self.git:
            self.git.cleanup()
        if self.stgit:
            self.stgit = self.stgit.cleanup()
        if self.smtpd:
            self.smtpd.cleanup()
        if self.patchwork:
            self.patchwork.cleanup()
        if self.editor:
            self.editor.cleanup()
        if self.builder:
            self.builder.cleanup()
    def stop_and_cleanup(self):
        # Convenience wrapper: stop() followed by cleanup().
        self.stop()
        self.cleanup()
class PwcliSpawn(pexpect.spawn):
    """pexpect wrapper that writes a stub config and launches pwcli."""
    def __init__(self, debug=False, stgit=False, builder='builder',
                 signature='Sent by pwcli\n$URL\n'):
        command = 'pwcli' + (' --debug' if debug else '')
        self.pwcli_wrapper = stubs.PwcliWrapper(stgit=stgit, builder=builder,
                                                signature=signature)
        self.pwcli_wrapper.write_config()
        # use short timeout so that failures don't take too long to detect
        super(PwcliSpawn, self).__init__(os.path.join(srcdir, command),
                                         timeout=3,
                                         logfile=sys.stdout,
                                         encoding='utf-8')
    def cleanup(self):
        """Remove the wrapper's temporary configuration."""
        self.pwcli_wrapper.cleanup()
    def expect_prompt(self):
        """Block until the pwcli shell prompt appears."""
        return super(PwcliSpawn, self).expect(PROMPT)
|
WHORUFILE helps to protect your server from hacking.
It can defend against malware files being created on your server.
If you want to run a virus check on VirusTotal, you need a VirusTotal API key; check the link below.
2017.01.31 – Check file certificate and write in a log.
2017.01.23 – Offer detailed file information when a suspicious file is found.
“whorufile -u” : uninstall whorufile service type.
Syslog_IP=192.168.0.1 <– Send to log at syslog server, When input IP address.
File=false <– If you want logging on the local machine, set this to true.
DirectoryPath=ALL <– If you need to only audit some folder or drive, input here. Default option is all drive.
Trust_List= notepad.exe <– If you need to trust item, input here.
|
from ._compat import ord_byte
import math
import string
from logging import getLogger
_logger = getLogger('pwm.encoding')
# 'full' repeats digits twice, to increase the probablity of a digit appearing in a default 16
# character password, for sites that suck at estimating entropy and requires digits to be present
PRESETS = {
'full': string.ascii_letters + 2 * string.digits + '!#$%&()*+,-./:;=?@[]^_|~',
'alpha': string.ascii_letters,
'numeric': string.digits,
'alphanumeric': string.ascii_letters + string.digits,
}
def ceildiv(dividend, divisor):
    ''' integer division that rounds any remainder upwards '''
    shifted = dividend + divisor - 1
    return shifted // divisor
def calc_chunklen(alph_len):
    '''
    computes the ideal conversion ratio for the given alphabet.

    A ratio is considered ideal when the number of bits in one output
    encoding chunk that don't add up to one input encoding chunk is minimal.
    '''
    bits_per_char = math.log(alph_len, 2)
    best = None
    for binlen in range(1, 7):
        enclen = binlen * 8 / bits_per_char
        wasted = enclen % 1
        # strict < keeps the first minimum, matching min()'s tie-breaking
        if best is None or wasted < best[2]:
            best = (binlen, enclen, wasted)
    return best[0], int(best[1])
class Encoder(object):
    '''
    general-purpose encoder. Encodes arbitrary binary data with a given
    specific base ("alphabet").
    '''
    def __init__(self, alphabet):
        self.alphabet = alphabet
        # (input bytes per chunk, output chars per chunk) pair chosen by
        # calc_chunklen to minimise wasted bits
        self.chunklen = calc_chunklen(len(alphabet))
    def encode(self, digest, total_len):
        '''
        encodes the byte string *digest*, zero-padding it to a whole
        number of chunks, and truncates the result to total_len chars
        '''
        nchunks = ceildiv(len(digest), self.chunklen[0])
        binstr = digest.ljust(nchunks * self.chunklen[0], b'\0')
        return ''.join([
            self._encode_chunk(binstr, i) for i in range(0, nchunks)
        ])[:total_len]
    def _encode_chunk(self, data, index):
        '''
        gets a chunk from the input data, converts it to a number and
        encodes that number
        '''
        chunk = self._get_chunk(data, index)
        return self._encode_long(self._chunk_to_long(chunk))
    def _encode_long(self, val):
        '''
        encodes an integer of 8*self.chunklen[0] bits using the specified
        alphabet
        '''
        # most significant "digit" first (reversed range)
        return ''.join([
            self.alphabet[(val//len(self.alphabet)**i) % len(self.alphabet)]
            for i in reversed(range(self.chunklen[1]))
        ])
    def _chunk_to_long(self, chunk):
        '''
        parses a chunk of bytes to integer using big-endian representation
        '''
        return sum([
            256**(self.chunklen[0]-1-i) * ord_byte(chunk[i])
            for i in range(self.chunklen[0])
        ])
    def _get_chunk(self, data, index):
        '''
        partition the data into chunks and retrieve the chunk at the given index
        '''
        return data[index*self.chunklen[0]:(index+1)*self.chunklen[0]]
def lookup_alphabet(charset):
    '''
    retrieves a named charset or treats the input as a custom alphabet and use that
    '''
    preset = PRESETS.get(charset)
    if preset is not None:
        return preset
    if len(charset) < 16:
        _logger.warning('very small alphabet in use, possibly a failed lookup?')
    return charset
|
Alejandro de la Puente, a graduate student in the laboratory of Antonio Delgado, was awarded a Fermilab Fellowship in Theoretical Physics. The fellowship will support his graduate studies with a 12-month stipend of $30,000 as well as cover his health insurance and tuition.
Fermilab provides fellowships for graduate students at American universities who are conducting research studies in theoretical particle physics or theoretical astrophysics. Fellows remain matriculated at their home universities but take residence at Fermilab. The theoretical physics and theoretical astrophysics departments offer strong and exciting research opportunities in collider physics, model building, neutrino physics, perturbative QCD, lattice gauge theory, astrophysics, and cosmology. The research environment at Fermilab benefits from close connections with the laboratory’s experimental programs.
|
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
from __future__ import absolute_import
from urllib import unquote_plus
from importlib import import_module
import falcon
import re
from beaker.middleware import SessionMiddleware
from falcon_cors import CORS
from . import db, constants, iris, auth
import logging
logger = logging.getLogger('oncall.app')
# Browser-hardening headers added to every response by
# SecurityHeaderMiddleware; init() appends a Content-Security-Policy
# in non-debug deployments.
security_headers = [
    ('X-Frame-Options', 'SAMEORIGIN'),
    ('X-Content-Type-Options', 'nosniff'),
    ('X-XSS-Protection', '1; mode=block'),
    ('Strict-Transport-Security', 'max-age=31536000; includeSubDomains'),
]
def json_error_serializer(req, resp, exception):
    """Serialize falcon HTTP errors as JSON response bodies."""
    resp.content_type = 'application/json'
    resp.body = exception.to_json()
class SecurityHeaderMiddleware(object):
    """Falcon middleware stamping the module-level security_headers
    list onto every response."""
    def process_request(self, req, resp):
        resp.set_headers(security_headers)
class ReqBodyMiddleware(object):
    '''
    Reads the POST body once, up front, and stashes it in the request
    context as req.context['body'].

    Falcon's req.stream can only be consumed a single time, but the body
    is often needed twice (authentication plus the handler method);
    caching it here keeps both consumers working.

    IMPORTANT NOTE: Because we use stream.read() here, all other uses of this method will return '', not the post body.
    '''
    def process_request(self, req, resp):
        req.context['body'] = req.stream.read()
class AuthMiddleware(object):
    """Authenticates every request unless the resource opts out via an
    allow_no_auth attribute."""
    def process_resource(self, req, resp, resource, params):
        # resources may opt out of authentication entirely
        if getattr(resource, 'allow_no_auth', False):
            return
        auth_token = req.get_header('AUTHORIZATION')
        if auth_token:
            auth.authenticate_application(auth_token, req)
        else:
            auth.authenticate_user(req)
# module-level WSGI application, populated by init_falcon_api()/init()
application = None
def init_falcon_api(config):
    """Build the falcon API: middleware, error serializer, and the
    auth/ui/api/healthcheck route modules, plus any configured
    post-init hooks. Stores and returns the application."""
    global application
    cors = CORS(allow_origins_list=config.get('allow_origins_list', []))
    middlewares = [
        SecurityHeaderMiddleware(),
        ReqBodyMiddleware(),
        cors.middleware
    ]
    if config.get('require_auth'):
        middlewares.append(AuthMiddleware())
    application = falcon.API(middleware=middlewares)
    # auto-parsing would consume req.stream, which ReqBodyMiddleware
    # needs to read intact
    application.req_options.auto_parse_form_urlencoded = False
    application.set_error_serializer(json_error_serializer)
    from .auth import init as init_auth
    init_auth(application, config['auth'])
    from .ui import init as init_ui
    init_ui(application, config)
    from .api import init as init_api
    init_api(application, config)
    from .healthcheck import init as init_hc
    init_hc(application, config)
    for hook in config.get('post_init_hook', []):
        try:
            logger.debug('loading post init hook <%s>', hook)
            getattr(import_module(hook), 'init')(application, config)
        except:
            # NOTE(review): bare except deliberately keeps the app up
            # when a hook fails; the failure is logged with traceback
            logger.exception('Failed loading post init hook <%s>', hook)
    return application
class RawPathPatcher(object):
    """WSGI wrapper restoring percent-encoded slashes in the path."""
    slash_re = re.compile(r'%2[Ff]')
    def __init__(self, app):
        self.app = app
    def __call__(self, env, start_response):
        """
        Patch PATH_INFO wsgi variable so that '/api/v0/teams/foo%2Fbar' is not
        treated as '/api/v0/teams/foo/bar'

        List of extensions for raw URI:
          * REQUEST_URI (uwsgi)
          * RAW_URI (gunicorn)
        """
        raw_uri = env.get('REQUEST_URI', env.get('RAW_URI'))
        raw_path = raw_uri.split('?', 1)[0]
        # double-escape %2F so unquote_plus leaves it percent-encoded
        env['PATH_INFO'] = unquote_plus(self.slash_re.sub('%252F', raw_path))
        return self.app(env, start_response)
def init(config):
    """Wire up the whole application from *config*: database, constants,
    optional iris integration, logging, the falcon API, then wrap the
    app in beaker session middleware and the raw-path patcher."""
    db.init(config['db'])
    constants.init(config)
    if 'iris_plan_integration' in config:
        iris.init(config['iris_plan_integration'])
    if not config.get('debug', False):
        # production mode: add a Content-Security-Policy header and
        # keep logging at INFO
        security_headers.append(
            ("Content-Security-Policy",
             # unsafe-eval is required for handlebars without precompiled templates
             "default-src 'self' %s 'unsafe-eval' ; "
             "font-src 'self' data: blob; img-src data: uri https: http:; "
             "style-src 'unsafe-inline' https: http:;" %
             config.get('iris_plan_integration', {}).get('api_host', '')))
        logging.basicConfig(level=logging.INFO)
        logger.info('%s', security_headers)
    else:
        logging.basicConfig(level=logging.DEBUG)
    init_falcon_api(config)
    global application
    session_opts = {
        'session.type': 'cookie',
        'session.cookie_expires': True,
        'session.key': 'oncall-auth',
        'session.encrypt_key': config['session']['encrypt_key'],
        'session.validate_key': config['session']['sign_key'],
        # only mark the cookie Secure when serving over https
        'session.secure': not (config.get('debug', False) or config.get('allow_http', False)),
        'session.httponly': True,
        'session.crypto_type': 'cryptography'
    }
    application = SessionMiddleware(application, session_opts)
    application = RawPathPatcher(application)
def get_wsgi_app():
    """WSGI entry point: read config from argv[1], initialize the app
    stack via init(), and return the wrapped application."""
    import sys
    from . import utils
    init(utils.read_config(sys.argv[1]))
    return application
|
Blades made from selected carbon steel, heat treated with plastic grip.
In order to meet the diversified needs of clients, we are instrumental in providing Hedge Shear.
With rich industry experience and knowledge, we are providing an excellent quality range of Hedge Shear.
Hedge Shear - Wooden Handle 10"
- High carbon steel cutting blade - 7" & 9"
Hedge shear is most commonly known as hedge trimmer. The main use of this gardening tool is trimming of hedges or bushes. There are different designs of hedge shears available. Also both handheld and machine operated hedge shears exist.
hardened and tempered blades with oxidized finish comfortable wooden handle grip.
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import template
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon.utils import filters as utils_filters
# canonical service states as reported by the API
SERVICE_ENABLED = "enabled"
SERVICE_DISABLED = "disabled"
# mapping from raw state value to translated display label
SERVICE_STATUS_DISPLAY_CHOICES = (
    (SERVICE_ENABLED, _("Enabled")),
    (SERVICE_DISABLED, _("Disabled")),
)
class ServiceFilterAction(tables.FilterAction):
    """Table search box filtering services by one attribute."""
    filter_field = 'type'

    def filter(self, table, services, filter_string):
        """Case-insensitive substring match on ``filter_field``."""
        q = filter_string.lower()

        def comp(service):
            attr = getattr(service, self.filter_field, '')
            if attr is not None and q in attr.lower():
                return True
            return False

        # list comprehension instead of the filter() builtin: on
        # Python 3 filter() returns a lazy iterator, while callers
        # expect a list of rows
        return [service for service in services if comp(service)]
class SubServiceFilterAction(ServiceFilterAction):
    # same substring search, but matched against the service binary name
    filter_field = 'binary'
def get_stats(service):
    """Render the per-service stats cell template for *service*."""
    return template.loader.render_to_string('admin/services/_stats.html',
                                            {'service': service})
def get_status(service):
    """Map a service record to its raw status value, or None when the
    service is not configured in this region."""
    # if not configured in this region, neither option makes sense
    if not service.host:
        return None
    return SERVICE_DISABLED if service.disabled else SERVICE_ENABLED
class ServicesTable(tables.DataTable):
    """Identity service listing; status is derived from host presence
    and the disabled flag (see get_status)."""
    id = tables.Column('id', hidden=True)
    name = tables.Column("name", verbose_name=_('Name'))
    service_type = tables.Column('__unicode__', verbose_name=_('Service'))
    host = tables.Column('host', verbose_name=_('Host'))
    status = tables.Column(get_status,
                           verbose_name=_('Status'),
                           status=True,
                           display_choices=SERVICE_STATUS_DISPLAY_CHOICES)
    class Meta:
        name = "services"
        verbose_name = _("Services")
        table_actions = (ServiceFilterAction,)
        multi_select = False
        status_columns = ["status"]
def get_available(zone):
    """Return the availability flag from a zone's zoneState mapping."""
    state = zone.zoneState
    return state['available']
def get_nova_agent_status(agent):
    """Render the nova service status cell (status plus any disabled
    reason) from its template."""
    template_name = 'admin/info/_cell_status.html'
    context = {
        'status': agent.status,
        'disabled_reason': agent.disabled_reason
    }
    return template.loader.render_to_string(template_name, context)
class NovaServicesTable(tables.DataTable):
    """Compute (nova-*) service listing: binary, host, zone, status,
    state and last-updated time."""
    binary = tables.Column("binary", verbose_name=_('Name'))
    host = tables.Column('host', verbose_name=_('Host'))
    zone = tables.Column('zone', verbose_name=_('Zone'))
    status = tables.Column(get_nova_agent_status, verbose_name=_('Status'))
    state = tables.Column('state', verbose_name=_('State'),
                          filters=(filters.title,))
    updated_at = tables.Column('updated_at',
                               verbose_name=pgettext_lazy(
                                   'Time since the last update',
                                   u'Last Updated'),
                               filters=(utils_filters.parse_isotime,
                                        filters.timesince))
    def get_object_id(self, obj):
        # no natural id; binary+host+zone identifies a service row
        return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)
    class Meta:
        name = "nova_services"
        verbose_name = _("Compute Services")
        table_actions = (SubServiceFilterAction,)
        multi_select = False
class CinderServicesTable(tables.DataTable):
    """Table of block storage services, uniquely keyed by binary-host-zone."""

    binary = tables.Column("binary", verbose_name=_('Name'))
    host = tables.Column('host', verbose_name=_('Host'))
    zone = tables.Column('zone', verbose_name=_('Zone'))
    status = tables.Column('status', verbose_name=_('Status'),
                           filters=(filters.title, ))
    state = tables.Column('state', verbose_name=_('State'),
                          filters=(filters.title, ))
    updated_at = tables.Column('updated_at',
                               verbose_name=pgettext_lazy(
                                   'Time since the last update',
                                   u'Last Updated'),
                               filters=(utils_filters.parse_isotime,
                                        filters.timesince))

    def get_object_id(self, obj):
        # Services carry no id field; build a unique row key instead.
        return "%s-%s-%s" % (obj.binary, obj.host, obj.zone)

    class Meta:
        name = "cinder_services"
        verbose_name = _("Block Storage Services")
        table_actions = (SubServiceFilterAction,)
        multi_select = False
class NetworkAgentsFilterAction(tables.FilterAction):
    """Client-side filter matching agents by substring of their type."""

    def filter(self, table, agents, filter_string):
        """Return the agents whose agent_type contains the query
        (case-insensitive)."""
        query = filter_string.lower()

        def matches(agent):
            return query in agent.agent_type.lower()

        return filter(matches, agents)
def get_network_agent_status(agent):
    """Translate an agent's admin_state_up flag into Enabled/Disabled."""
    return _('Enabled') if agent.admin_state_up else _('Disabled')
def get_network_agent_state(agent):
    """Translate an agent's alive flag into Up/Down."""
    return _('Up') if agent.alive else _('Down')
class NetworkAgentsTable(tables.DataTable):
    """Table of network agents, uniquely keyed by binary-host."""

    agent_type = tables.Column('agent_type', verbose_name=_('Type'))
    binary = tables.Column("binary", verbose_name=_('Name'))
    host = tables.Column('host', verbose_name=_('Host'))
    status = tables.Column(get_network_agent_status, verbose_name=_('Status'))
    state = tables.Column(get_network_agent_state, verbose_name=_('State'))
    heartbeat_timestamp = tables.Column('heartbeat_timestamp',
                                        verbose_name=pgettext_lazy(
                                            'Time since the last update',
                                            u'Last Updated'),
                                        filters=(utils_filters.parse_isotime,
                                                 filters.timesince))

    def get_object_id(self, obj):
        # Agents carry no id field; build a unique row key instead.
        return "%s-%s" % (obj.binary, obj.host)

    class Meta:
        name = "network_agents"
        verbose_name = _("Network Agents")
        table_actions = (NetworkAgentsFilterAction,)
        multi_select = False
|
After her center went bald the daisy was left to stand in a room owned by the sun; she knew from the feel of his rays on the dimples where seeds had been, by the riffling of his heat against her sere under-leaves like a tattered collar about her throat, by the ache in her roots for water, and by the absence of any green bodily smell—an absence she rued, and with great discontent, though she wasn’t certain why. It had been with her from near her beginning, this odor that came with her greening, with her first frail reach through a crack in the loam, a spindly pale curl unbending, reaching up until light filtered in and with the light there was bird song. The songs were air-shifted into notes by other air shuffled by leaves in high trees until they bloated, becoming plump melodies too heavy to stay airborne and sank like the knees of a nun near a narrow cot, where they whispered Grow, grow to the mere thread of her, before sinking further like rain. And the songs tilted grit in the dark earth as they went, easing the way for root hairs to flow from her source, to cling and rest, feast on worm castings, move on. It had been an adventure, a two-way fun ride, in which the opening curl of her and the smell of her greening shot ever upwards while her footing slipped by increments through a darkness she trusted without knowing why.
She’s been standing for days in this room of the sun’s and knows it belongs to him by how hot it is kept in the long corridors of July, and by the way the old songs rise out of the earth in waving undulations, silent in their evaporation. She is exhausted, tired of this bed, this room. The burred clover crowding her stalk is too green, its small yellow blooms too yellow, but she should not judge, she knows she should not, those lives unlike her own. Her face lowers. With every lowering there is no going back. Her last petals, poor darlings, have loved her too dearly, have clung to her chin like a beard, all their fired bright life wrung out by the bully who rides up the sky every day to lash all he owns with his infernal rays. There is no going back. Daily, her neck atrophies against upward motion and she is a crippled thing, unable to lift her head or turn, unable to spot from what direction her landlord may approach or which door he may take to leave. Death is slow in arriving, a tardy guest, and the emptiness of her face looks on the floor of her home, now usurped by the massing clover. She is anxious to know the outcome of all her flown seeds. Then the roots, idle for such a long time, release what they’ve held and are released. She is light as a shaft of airborne song, she is song, she is rain, she is earth.
|
import pygame
from directions import Directions
from utils import *
class Enemy(pygame.sprite.Sprite):
    """Basic left/right patrolling enemy with simple player aggro.

    The enemy walks along the platform it stands on, turning around at
    platform edges and walls, and moves toward the player whenever the
    player is within ``aggroRange`` pixels horizontally.

    Subclasses are expected to set ``self.speed`` and to implement
    ``get_sprites()`` returning the list of animation frames.
    """

    def __init__(self, width, height, x, y):
        """Create an enemy of the given pixel size at position (x, y)."""
        pygame.sprite.Sprite.__init__(self)
        if width <= BLOCK_WIDTH:
            # NOTE(review): a sprite this narrow was flagged as risky for
            # the edge-detection logic in update() — confirm before relying
            # on sub-block-width enemies.
            # print "WARNING WIDTH MAY CAUSE PROBLEMS"
            pass
        # Placeholder solid-red surface until a subclass supplies sprites.
        self.image = pygame.Surface((width, height))
        self.image.fill((255, 0, 0))
        self.rect = self.image.get_rect()
        self.delta_y = 0
        self.delta_x = 0
        self.rect.x = x
        self.rect.y = y
        self.aggroRange = 300
        # Default heading
        self.heading = Directions.Right
        # Current walk direction, "R" or "L".  Bug fix: this used to be set
        # only inside checkAggro(), so update() raised AttributeError on the
        # first frame whenever the player started outside aggro range.
        self.dir = "R"
        # Sprite animation counters
        self.curr_sprite_index = 0
        self.frame_counter = 0
        self.frames_per_sprite = 4

    def checkAggro(self, c, default):
        """Report (and optionally act on) player proximity.

        c -- game context exposing ``c.player``.
        default -- when True, also turn toward the player and apply the
                   horizontal speed change; when False, only test range.
        Returns True if the player is within ``aggroRange`` pixels
        horizontally.
        """
        dist = c.player.rect.x - self.rect.x
        if abs(dist) < self.aggroRange:
            # Close enough, set direction
            if default:
                if dist > 0:
                    self.dir = "R"
                    self.delta_x += self.speed
                else:
                    self.dir = "L"
                    self.delta_x -= self.speed
            return True
        return False

    # Basic left right mob update
    def update(self, c):
        """Advance one frame: animate, apply gravity, walk, and collide."""
        self.update_sprites()
        self.gravity()
        # Check aggro; if the player is out of range, keep patrolling in
        # the current direction instead.
        if not self.checkAggro(c, True):
            if self.dir == "R":
                self.delta_x += self.speed
            else:  # self.dir == "L"
                self.delta_x -= self.speed
        pl = c.lvl_current.platform_list
        # collision detection in y
        # check first so mob is positioned properly on top of platform
        self.rect.y += self.delta_y
        collide_list = pygame.sprite.spritecollide(self, pl, False)
        for platform in collide_list:
            if self.delta_y > 0:
                self.rect.bottom = platform.rect.top
            elif self.delta_y < 0:
                self.rect.top = platform.rect.bottom
            self.delta_y = 0
        # Check to see if mob will fall off:
        # find the platform the mob is standing on (p_cand), judged by the
        # leading edge in the current walk direction.
        p_cand = None
        # If right, check right of rectangle against platforms
        if self.dir == "R":
            for platform in pl:
                if platform.rect.left < self.rect.right \
                        and platform.rect.right >= self.rect.right \
                        and self.rect.bottom == platform.rect.top:
                    p_cand = platform
        else:  # dir = "L" check left of rectangle against platforms
            for platform in pl:
                if platform.rect.right > self.rect.left \
                        and platform.rect.left <= self.rect.left \
                        and self.rect.bottom == platform.rect.top:
                    p_cand = platform
        # Not standing on anything (mid-fall): skip edge/wall handling.
        if p_cand is None:
            return
        # Look for an adjoining platform at the same height in the walk
        # direction; if none exists we are approaching an edge.
        p_found = False
        if self.dir == "R":
            for platform in pl:
                if platform.rect.left == p_cand.rect.right and platform.rect.top == p_cand.rect.top:
                    p_found = True
                    break
        else:  # dir = "L"
            for platform in pl:
                if platform.rect.right == p_cand.rect.left and platform.rect.top == p_cand.rect.top:
                    p_found = True
                    break
        # Reverse directions if at edge
        if not p_found:
            if self.dir == 'R':
                if self.rect.right >= p_cand.rect.right:
                    self.dir = 'L'
                    self.delta_x = 0
            else:
                if self.rect.left <= p_cand.rect.left:
                    self.dir = 'R'
                    self.delta_x = 0
        # collision detection in x
        # If collide with wall, reverse direction
        self.rect.x += self.delta_x
        collide_list = pygame.sprite.spritecollide(self, pl, False)
        for platform in collide_list:
            if self.delta_x > 0:  # dir = "R"
                self.rect.right = platform.rect.left
                self.dir = "L"
            elif self.delta_x < 0:  # dir = "L"
                self.rect.left = platform.rect.right
                self.dir = "R"
            self.delta_x = 0

    def get_sprites(self):
        """Return the animation frame list; subclasses must override."""
        raise NotImplementedError("Please implement this method")

    def gravity(self):
        """Apply constant downward acceleration to delta_y (1 px/frame²)."""
        if self.delta_y == 0:
            self.delta_y = 1
        else:
            self.delta_y += 1

    def update_sprites(self):
        """Advance the animation every ``frames_per_sprite`` update calls."""
        if self.get_sprites():
            self.frame_counter = (self.frame_counter + 1) % self.frames_per_sprite
            if self.frame_counter == 0:
                self.curr_sprite_index = (self.curr_sprite_index + 1) % len(self.get_sprites())
                self.image = self.get_sprites()[self.curr_sprite_index]
|
This version adds a new character, Usopp, modifies one type of attack, and makes the barrier mode a continuous fight. There are many more fighting modes to choose from: single mode, 2 players, fighting against the computer, and so on. With dazzling movements, splendid fighting scenes and cool characters, you should not miss this game! If you like One Piece Hot Fight 0.6, you can add it to your favorites, so it will be convenient for you to play later!
One Piece Hot Fight 0.6 has the mobile version.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.