| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
IITBinterns13/edx-platform-dev | lms/djangoapps/foldit/tests.py | 2 | 12192 | import json
import logging
from functools import partial
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from foldit.views import foldit_ops, verify_code
from foldit.models import PuzzleComplete, Score
from student.models import UserProfile, unique_id_for_user
from datetime import datetime, timedelta
from pytz import UTC
log = logging.getLogger(__name__)
class FolditTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.url = reverse('foldit_ops')
pwd = 'abc'
self.user = User.objects.create_user('testuser', 'test@test.com', pwd)
self.user2 = User.objects.create_user('testuser2', 'test2@test.com', pwd)
self.unique_user_id = unique_id_for_user(self.user)
self.unique_user_id2 = unique_id_for_user(self.user2)
now = datetime.now(UTC)
self.tomorrow = now + timedelta(days=1)
self.yesterday = now - timedelta(days=1)
UserProfile.objects.create(user=self.user)
UserProfile.objects.create(user=self.user2)
def make_request(self, post_data, user=None):
request = self.factory.post(self.url, post_data)
request.user = self.user if not user else user
return request
def make_puzzle_score_request(self, puzzle_ids, best_scores, user=None):
"""
Given lists of puzzle_ids and best_scores (must have same length), make a
SetPlayerPuzzleScores request and return the response.
"""
if not isinstance(best_scores, list):
best_scores = [best_scores]
if not isinstance(puzzle_ids, list):
puzzle_ids = [puzzle_ids]
user = self.user if not user else user
def score_dict(puzzle_id, best_score):
return {"PuzzleID": puzzle_id,
"ScoreType": "score",
"BestScore": best_score,
# current scores don't actually matter
"CurrentScore": best_score + 0.01,
"ScoreVersion": 23}
scores = [score_dict(pid, bs) for pid, bs in zip(puzzle_ids, best_scores)]
scores_str = json.dumps(scores)
verify = {"Verify": verify_code(user.email, scores_str),
"VerifyMethod": "FoldItVerify"}
data = {'SetPlayerPuzzleScoresVerify': json.dumps(verify),
'SetPlayerPuzzleScores': scores_str}
request = self.make_request(data, user)
response = foldit_ops(request)
self.assertEqual(response.status_code, 200)
return response
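# Illustrative payload shape (a sketch based on the helper above, not part of
# the original test file): for puzzle_ids=[994391] and best_scores=[0.5], the
# POST body carries
#   SetPlayerPuzzleScores: '[{"PuzzleID": 994391, "ScoreType": "score",
#       "BestScore": 0.5, "CurrentScore": 0.51, "ScoreVersion": 23}]'
#   SetPlayerPuzzleScoresVerify: '{"Verify": verify_code(user.email, scores_str),
#       "VerifyMethod": "FoldItVerify"}'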
def test_SetPlayerPuzzleScores(self):
puzzle_id = 994391
best_score = 0.078034
response = self.make_puzzle_score_request(puzzle_id, [best_score])
self.assertEqual(response.content, json.dumps(
[{"OperationID": "SetPlayerPuzzleScores",
"Value": [{
"PuzzleID": puzzle_id,
"Status": "Success"}]}]))
# There should now be a score in the db.
top_10 = Score.get_tops_n(10, puzzle_id)
self.assertEqual(len(top_10), 1)
self.assertEqual(top_10[0]['score'], Score.display_score(best_score))
def test_SetPlayerPuzzleScores_many(self):
response = self.make_puzzle_score_request([1, 2], [0.078034, 0.080000])
self.assertEqual(response.content, json.dumps(
[{
"OperationID": "SetPlayerPuzzleScores",
"Value": [
{
"PuzzleID": 1,
"Status": "Success"
}, {
"PuzzleID": 2,
"Status": "Success"
}
]
}]
))
def test_SetPlayerPuzzleScores_multiple(self):
"""
Check that multiple posts with the same id are handled properly
(keep latest for each user, have multiple users work properly)
"""
orig_score = 0.07
puzzle_id = '1'
response = self.make_puzzle_score_request([puzzle_id], [orig_score])
# There should now be a score in the db.
top_10 = Score.get_tops_n(10, puzzle_id)
self.assertEqual(len(top_10), 1)
self.assertEqual(top_10[0]['score'], Score.display_score(orig_score))
# Reporting a better score should overwrite
better_score = 0.06
response = self.make_puzzle_score_request([1], [better_score])
top_10 = Score.get_tops_n(10, puzzle_id)
self.assertEqual(len(top_10), 1)
# Floating-point values make exact comparison unreliable, so use assertAlmostEqual
self.assertAlmostEqual(
top_10[0]['score'],
Score.display_score(better_score),
delta=0.5
)
# Reporting a worse score shouldn't overwrite the better one
worse_score = 0.065
response = self.make_puzzle_score_request([1], [worse_score])
top_10 = Score.get_tops_n(10, puzzle_id)
self.assertEqual(len(top_10), 1)
# should still be the better score
self.assertAlmostEqual(
top_10[0]['score'],
Score.display_score(better_score),
delta=0.5
)
def test_SetPlayerPuzzleScores_manyplayers(self):
"""
Check that when we send scores from multiple users, the correct order
of scores is displayed. Note that, before being processed by
display_score, lower scores are better.
"""
puzzle_id = ['1']
player1_score = 0.08
player2_score = 0.02
response1 = self.make_puzzle_score_request(
puzzle_id, player1_score, self.user
)
# There should now be a score in the db.
top_10 = Score.get_tops_n(10, puzzle_id)
self.assertEqual(len(top_10), 1)
self.assertEqual(top_10[0]['score'], Score.display_score(player1_score))
response2 = self.make_puzzle_score_request(
puzzle_id, player2_score, self.user2
)
# There should now be two scores in the db
top_10 = Score.get_tops_n(10, puzzle_id)
self.assertEqual(len(top_10), 2)
# Top score should be player2_score. Second should be player1_score
self.assertAlmostEqual(
top_10[0]['score'],
Score.display_score(player2_score),
delta=0.5
)
self.assertAlmostEqual(
top_10[1]['score'],
Score.display_score(player1_score),
delta=0.5
)
# Top score user should be self.user2.username
self.assertEqual(top_10[0]['username'], self.user2.username)
def test_SetPlayerPuzzleScores_error(self):
scores = [{"PuzzleID": 994391,
"ScoreType": "score",
"BestScore": 0.078034,
"CurrentScore": 0.080035,
"ScoreVersion": 23}]
validation_str = json.dumps(scores)
verify = {"Verify": verify_code(self.user.email, validation_str),
"VerifyMethod": "FoldItVerify"}
# change the real string -- should get an error
scores[0]['ScoreVersion'] = 22
scores_str = json.dumps(scores)
data = {'SetPlayerPuzzleScoresVerify': json.dumps(verify),
'SetPlayerPuzzleScores': scores_str}
request = self.make_request(data)
response = foldit_ops(request)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response.content,
json.dumps([{
"OperationID": "SetPlayerPuzzleScores",
"Success": "false",
"ErrorString": "Verification failed",
"ErrorCode": "VerifyFailed"}]))
def make_puzzles_complete_request(self, puzzles):
"""
Make a puzzles complete request, given an array of
puzzles. E.g.
[ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
{"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]
"""
puzzles_str = json.dumps(puzzles)
verify = {"Verify": verify_code(self.user.email, puzzles_str),
"VerifyMethod":"FoldItVerify"}
data = {'SetPuzzlesCompleteVerify': json.dumps(verify),
'SetPuzzlesComplete': puzzles_str}
request = self.make_request(data)
response = foldit_ops(request)
self.assertEqual(response.status_code, 200)
return response
@staticmethod
def set_puzzle_complete_response(values):
return json.dumps([{"OperationID":"SetPuzzlesComplete",
"Value": values}])
def test_SetPlayerPuzzlesComplete(self):
puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
{"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]
response = self.make_puzzles_complete_request(puzzles)
self.assertEqual(response.content,
self.set_puzzle_complete_response([13, 53524]))
def test_SetPlayerPuzzlesComplete_multiple(self):
"""Check that state is stored properly"""
puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
{"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]
response = self.make_puzzles_complete_request(puzzles)
self.assertEqual(response.content,
self.set_puzzle_complete_response([13, 53524]))
puzzles = [ {"PuzzleID": 14, "Set": 1, "SubSet": 3},
{"PuzzleID": 15, "Set": 1, "SubSet": 1} ]
response = self.make_puzzles_complete_request(puzzles)
self.assertEqual(response.content,
self.set_puzzle_complete_response([13, 14, 15, 53524]))
def test_SetPlayerPuzzlesComplete_level_complete(self):
"""Check that the level complete function works"""
puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
{"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]
response = self.make_puzzles_complete_request(puzzles)
self.assertEqual(response.content,
self.set_puzzle_complete_response([13, 53524]))
puzzles = [ {"PuzzleID": 14, "Set": 1, "SubSet": 3},
{"PuzzleID": 15, "Set": 1, "SubSet": 1} ]
response = self.make_puzzles_complete_request(puzzles)
self.assertEqual(response.content,
self.set_puzzle_complete_response([13, 14, 15, 53524]))
is_complete = partial(
PuzzleComplete.is_level_complete, self.unique_user_id)
self.assertTrue(is_complete(1, 1))
self.assertTrue(is_complete(1, 3))
self.assertTrue(is_complete(1, 2))
self.assertFalse(is_complete(4, 5))
puzzles = [ {"PuzzleID": 74, "Set": 4, "SubSet": 5} ]
response = self.make_puzzles_complete_request(puzzles)
self.assertTrue(is_complete(4, 5))
# Now check due dates
self.assertTrue(is_complete(1, 1, due=self.tomorrow))
self.assertFalse(is_complete(1, 1, due=self.yesterday))
def test_SetPlayerPuzzlesComplete_error(self):
puzzles = [ {"PuzzleID": 13, "Set": 1, "SubSet": 2},
{"PuzzleID": 53524, "Set": 1, "SubSet": 1} ]
puzzles_str = json.dumps(puzzles)
verify = {"Verify": verify_code(self.user.email, puzzles_str + "x"),
"VerifyMethod":"FoldItVerify"}
data = {'SetPuzzlesCompleteVerify': json.dumps(verify),
'SetPuzzlesComplete': puzzles_str}
request = self.make_request(data)
response = foldit_ops(request)
self.assertEqual(response.status_code, 200)
response_data = json.loads(response.content)
self.assertEqual(response.content,
json.dumps([{
"OperationID": "SetPuzzlesComplete",
"Success": "false",
"ErrorString": "Verification failed",
"ErrorCode": "VerifyFailed"}]))
| agpl-3.0 |
whummer/moto | moto/redshift/responses.py | 1 | 24847 | from __future__ import unicode_literals
import json
import xmltodict
from jinja2 import Template
from six import iteritems
from moto.core.responses import BaseResponse
from .models import redshift_backends
def convert_json_error_to_xml(json_error):
error = json.loads(json_error)
code = error['Error']['Code']
message = error['Error']['Message']
template = Template("""
<RedshiftClientError>
<Error>
<Code>{{ code }}</Code>
<Message>{{ message }}</Message>
<Type>Sender</Type>
</Error>
<RequestId>6876f774-7273-11e4-85dc-39e55ca848d1</RequestId>
</RedshiftClientError>""")
return template.render(code=code, message=message)
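# Illustrative round-trip for the converter above (not part of the original
# module): given
#   json_error = '{"Error": {"Code": "ClusterNotFound",
#                            "Message": "Cluster snap not found."}}'
# convert_json_error_to_xml(json_error) renders the <RedshiftClientError>
# template with Code and Message filled in from the parsed JSON.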
def itemize(data):
"""
xmltodict.unparse requires that we modify the shape of the input dictionary slightly. Instead of a dict of the form:
{'key': ['value1', 'value2']}
We must provide:
{'key': {'item': ['value1', 'value2']}}
"""
if isinstance(data, dict):
ret = {}
for key in data:
ret[key] = itemize(data[key])
return ret
elif isinstance(data, list):
return {'item': [itemize(value) for value in data]}
else:
return data
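# A quick shape illustration for itemize (added for this excerpt, not in the
# original module):
#   itemize({'Clusters': [{'Name': 'a'}, {'Name': 'b'}]})
#   -> {'Clusters': {'item': [{'Name': 'a'}, {'Name': 'b'}]}}
# xmltodict.unparse then serializes the inner 'item' key as repeated
# <item>...</item> elements.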
class RedshiftResponse(BaseResponse):
@property
def redshift_backend(self):
return redshift_backends[self.region]
def get_response(self, response):
if self.request_json:
return json.dumps(response)
else:
xml = xmltodict.unparse(itemize(response), full_document=False)
if hasattr(xml, 'decode'):
xml = xml.decode('utf-8')
return xml
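# Shape illustration for get_response (added for this excerpt): a payload like
# {'DeleteTagsResponse': {...}} is returned verbatim as JSON when the client
# requested JSON; otherwise it is itemized and rendered by xmltodict.unparse as
# <DeleteTagsResponse>...</DeleteTagsResponse>, with full_document=False
# suppressing the leading <?xml ...?> declaration.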
def call_action(self):
status, headers, body = super(RedshiftResponse, self).call_action()
if status >= 400 and not self.request_json:
body = convert_json_error_to_xml(body)
return status, headers, body
def unpack_complex_list_params(self, label, names):
unpacked_list = list()
count = 1
while self._get_param('{0}.{1}.{2}'.format(label, count, names[0])):
param = dict()
for i in range(len(names)):
param[names[i]] = self._get_param(
'{0}.{1}.{2}'.format(label, count, names[i]))
unpacked_list.append(param)
count += 1
return unpacked_list
def unpack_list_params(self, label):
unpacked_list = list()
count = 1
while self._get_param('{0}.{1}'.format(label, count)):
unpacked_list.append(self._get_param(
'{0}.{1}'.format(label, count)))
count += 1
return unpacked_list
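# Query-string shapes these two helpers iterate over (an illustration
# consistent with the format strings above, not a statement of the full AWS
# wire protocol):
#   unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) reads
#     Tags.Tag.1.Key, Tags.Tag.1.Value, Tags.Tag.2.Key, ... until a count
#     with no matching parameter is found.
#   unpack_list_params('TagKeys.TagKey') reads TagKeys.TagKey.1,
#     TagKeys.TagKey.2, ... in the same way.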
def _get_cluster_security_groups(self):
cluster_security_groups = self._get_multi_param('ClusterSecurityGroups.member')
if not cluster_security_groups:
cluster_security_groups = self._get_multi_param('ClusterSecurityGroups.ClusterSecurityGroupName')
return cluster_security_groups
def _get_vpc_security_group_ids(self):
vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.member')
if not vpc_security_group_ids:
vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.VpcSecurityGroupId')
return vpc_security_group_ids
def _get_iam_roles(self):
iam_roles = self._get_multi_param('IamRoles.member')
if not iam_roles:
iam_roles = self._get_multi_param('IamRoles.IamRoleArn')
return iam_roles
def _get_subnet_ids(self):
subnet_ids = self._get_multi_param('SubnetIds.member')
if not subnet_ids:
subnet_ids = self._get_multi_param('SubnetIds.SubnetIdentifier')
return subnet_ids
def create_cluster(self):
cluster_kwargs = {
"cluster_identifier": self._get_param('ClusterIdentifier'),
"node_type": self._get_param('NodeType'),
"master_username": self._get_param('MasterUsername'),
"master_user_password": self._get_param('MasterUserPassword'),
"db_name": self._get_param('DBName'),
"cluster_type": self._get_param('ClusterType'),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
"availability_zone": self._get_param('AvailabilityZone'),
"preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
"cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
"automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
"port": self._get_int_param('Port'),
"cluster_version": self._get_param('ClusterVersion'),
"allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
"number_of_nodes": self._get_int_param('NumberOfNodes'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"region_name": self.region,
"tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')),
"iam_roles_arn": self._get_iam_roles(),
}
cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json()
cluster['ClusterStatus'] = 'creating'
return self.get_response({
"CreateClusterResponse": {
"CreateClusterResult": {
"Cluster": cluster,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def restore_from_cluster_snapshot(self):
restore_kwargs = {
"snapshot_identifier": self._get_param('SnapshotIdentifier'),
"cluster_identifier": self._get_param('ClusterIdentifier'),
"port": self._get_int_param('Port'),
"availability_zone": self._get_param('AvailabilityZone'),
"allow_version_upgrade": self._get_bool_param(
'AllowVersionUpgrade'),
"cluster_subnet_group_name": self._get_param(
'ClusterSubnetGroupName'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"cluster_parameter_group_name": self._get_param(
'ClusterParameterGroupName'),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"preferred_maintenance_window": self._get_param(
'PreferredMaintenanceWindow'),
"automated_snapshot_retention_period": self._get_int_param(
'AutomatedSnapshotRetentionPeriod'),
"region_name": self.region,
"iam_roles_arn": self._get_iam_roles(),
}
cluster = self.redshift_backend.restore_from_cluster_snapshot(**restore_kwargs).to_json()
cluster['ClusterStatus'] = 'creating'
return self.get_response({
"RestoreFromClusterSnapshotResponse": {
"RestoreFromClusterSnapshotResult": {
"Cluster": cluster,
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_clusters(self):
cluster_identifier = self._get_param("ClusterIdentifier")
clusters = self.redshift_backend.describe_clusters(cluster_identifier)
return self.get_response({
"DescribeClustersResponse": {
"DescribeClustersResult": {
"Clusters": [cluster.to_json() for cluster in clusters]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def modify_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param('ClusterIdentifier'),
"new_cluster_identifier": self._get_param('NewClusterIdentifier'),
"node_type": self._get_param('NodeType'),
"master_user_password": self._get_param('MasterUserPassword'),
"cluster_type": self._get_param('ClusterType'),
"cluster_security_groups": self._get_cluster_security_groups(),
"vpc_security_group_ids": self._get_vpc_security_group_ids(),
"cluster_subnet_group_name": self._get_param('ClusterSubnetGroupName'),
"preferred_maintenance_window": self._get_param('PreferredMaintenanceWindow'),
"cluster_parameter_group_name": self._get_param('ClusterParameterGroupName'),
"automated_snapshot_retention_period": self._get_int_param('AutomatedSnapshotRetentionPeriod'),
"cluster_version": self._get_param('ClusterVersion'),
"allow_version_upgrade": self._get_bool_param('AllowVersionUpgrade'),
"number_of_nodes": self._get_int_param('NumberOfNodes'),
"publicly_accessible": self._get_param("PubliclyAccessible"),
"encrypted": self._get_param("Encrypted"),
"iam_roles_arn": self._get_iam_roles(),
}
cluster_kwargs = {}
# We only want parameters that were actually passed in, otherwise
# we'll stomp all over our cluster metadata with None values.
for (key, value) in iteritems(request_kwargs):
if value is not None and value != []:
cluster_kwargs[key] = value
cluster = self.redshift_backend.modify_cluster(**cluster_kwargs)
return self.get_response({
"ModifyClusterResponse": {
"ModifyClusterResult": {
"Cluster": cluster.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster(self):
request_kwargs = {
"cluster_identifier": self._get_param("ClusterIdentifier"),
"final_cluster_snapshot_identifier": self._get_param("FinalClusterSnapshotIdentifier"),
"skip_final_snapshot": self._get_bool_param("SkipFinalClusterSnapshot")
}
cluster = self.redshift_backend.delete_cluster(**request_kwargs)
return self.get_response({
"DeleteClusterResponse": {
"DeleteClusterResult": {
"Cluster": cluster.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_subnet_group(self):
cluster_subnet_group_name = self._get_param('ClusterSubnetGroupName')
description = self._get_param('Description')
subnet_ids = self._get_subnet_ids()
tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
subnet_group = self.redshift_backend.create_cluster_subnet_group(
cluster_subnet_group_name=cluster_subnet_group_name,
description=description,
subnet_ids=subnet_ids,
region_name=self.region,
tags=tags
)
return self.get_response({
"CreateClusterSubnetGroupResponse": {
"CreateClusterSubnetGroupResult": {
"ClusterSubnetGroup": subnet_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_subnet_groups(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
subnet_groups = self.redshift_backend.describe_cluster_subnet_groups(
subnet_identifier)
return self.get_response({
"DescribeClusterSubnetGroupsResponse": {
"DescribeClusterSubnetGroupsResult": {
"ClusterSubnetGroups": [subnet_group.to_json() for subnet_group in subnet_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_subnet_group(self):
subnet_identifier = self._get_param("ClusterSubnetGroupName")
self.redshift_backend.delete_cluster_subnet_group(subnet_identifier)
return self.get_response({
"DeleteClusterSubnetGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_security_group(self):
cluster_security_group_name = self._get_param(
'ClusterSecurityGroupName')
description = self._get_param('Description')
tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
security_group = self.redshift_backend.create_cluster_security_group(
cluster_security_group_name=cluster_security_group_name,
description=description,
region_name=self.region,
tags=tags
)
return self.get_response({
"CreateClusterSecurityGroupResponse": {
"CreateClusterSecurityGroupResult": {
"ClusterSecurityGroup": security_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_security_groups(self):
cluster_security_group_name = self._get_param(
"ClusterSecurityGroupName")
security_groups = self.redshift_backend.describe_cluster_security_groups(
cluster_security_group_name)
return self.get_response({
"DescribeClusterSecurityGroupsResponse": {
"DescribeClusterSecurityGroupsResult": {
"ClusterSecurityGroups": [security_group.to_json() for security_group in security_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_security_group(self):
security_group_identifier = self._get_param("ClusterSecurityGroupName")
self.redshift_backend.delete_cluster_security_group(
security_group_identifier)
return self.get_response({
"DeleteClusterSecurityGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param('ParameterGroupName')
group_family = self._get_param('ParameterGroupFamily')
description = self._get_param('Description')
tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
parameter_group = self.redshift_backend.create_cluster_parameter_group(
cluster_parameter_group_name,
group_family,
description,
self.region,
tags
)
return self.get_response({
"CreateClusterParameterGroupResponse": {
"CreateClusterParameterGroupResult": {
"ClusterParameterGroup": parameter_group.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_parameter_groups(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
parameter_groups = self.redshift_backend.describe_cluster_parameter_groups(
cluster_parameter_group_name)
return self.get_response({
"DescribeClusterParameterGroupsResponse": {
"DescribeClusterParameterGroupsResult": {
"ParameterGroups": [parameter_group.to_json() for parameter_group in parameter_groups]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_parameter_group(self):
cluster_parameter_group_name = self._get_param("ParameterGroupName")
self.redshift_backend.delete_cluster_parameter_group(
cluster_parameter_group_name)
return self.get_response({
"DeleteClusterParameterGroupResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_cluster_snapshot(self):
cluster_identifier = self._get_param('ClusterIdentifier')
snapshot_identifier = self._get_param('SnapshotIdentifier')
tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
snapshot = self.redshift_backend.create_cluster_snapshot(cluster_identifier,
snapshot_identifier,
self.region,
tags)
return self.get_response({
'CreateClusterSnapshotResponse': {
"CreateClusterSnapshotResult": {
"Snapshot": snapshot.to_json(),
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_cluster_snapshots(self):
cluster_identifier = self._get_param('ClusterIdentifier')
snapshot_identifier = self._get_param('SnapshotIdentifier')
snapshots = self.redshift_backend.describe_cluster_snapshots(cluster_identifier,
snapshot_identifier)
return self.get_response({
"DescribeClusterSnapshotsResponse": {
"DescribeClusterSnapshotsResult": {
"Snapshots": [snapshot.to_json() for snapshot in snapshots]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_cluster_snapshot(self):
snapshot_identifier = self._get_param('SnapshotIdentifier')
snapshot = self.redshift_backend.delete_cluster_snapshot(snapshot_identifier)
return self.get_response({
"DeleteClusterSnapshotResponse": {
"DeleteClusterSnapshotResult": {
"Snapshot": snapshot.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_snapshot_copy_grant(self):
copy_grant_kwargs = {
'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'),
'kms_key_id': self._get_param('KmsKeyId'),
'region_name': self._get_param('Region'),
}
copy_grant = self.redshift_backend.create_snapshot_copy_grant(**copy_grant_kwargs)
return self.get_response({
"CreateSnapshotCopyGrantResponse": {
"CreateSnapshotCopyGrantResult": {
"SnapshotCopyGrant": copy_grant.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_snapshot_copy_grant(self):
copy_grant_kwargs = {
'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'),
}
self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs)
return self.get_response({
"DeleteSnapshotCopyGrantResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_snapshot_copy_grants(self):
copy_grant_kwargs = {
'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'),
}
copy_grants = self.redshift_backend.describe_snapshot_copy_grants(**copy_grant_kwargs)
return self.get_response({
"DescribeSnapshotCopyGrantsResponse": {
"DescribeSnapshotCopyGrantsResult": {
"SnapshotCopyGrants": [copy_grant.to_json() for copy_grant in copy_grants]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def create_tags(self):
resource_name = self._get_param('ResourceName')
tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value'))
self.redshift_backend.create_tags(resource_name, tags)
return self.get_response({
"CreateTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def describe_tags(self):
resource_name = self._get_param('ResourceName')
resource_type = self._get_param('ResourceType')
tagged_resources = self.redshift_backend.describe_tags(resource_name,
resource_type)
return self.get_response({
"DescribeTagsResponse": {
"DescribeTagsResult": {
"TaggedResources": tagged_resources
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def delete_tags(self):
resource_name = self._get_param('ResourceName')
tag_keys = self.unpack_list_params('TagKeys.TagKey')
self.redshift_backend.delete_tags(resource_name, tag_keys)
return self.get_response({
"DeleteTagsResponse": {
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def enable_snapshot_copy(self):
snapshot_copy_kwargs = {
'cluster_identifier': self._get_param('ClusterIdentifier'),
'destination_region': self._get_param('DestinationRegion'),
'retention_period': self._get_param('RetentionPeriod', 7),
'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'),
}
cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response({
"EnableSnapshotCopyResponse": {
"EnableSnapshotCopyResult": {
"Cluster": cluster.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def disable_snapshot_copy(self):
snapshot_copy_kwargs = {
'cluster_identifier': self._get_param('ClusterIdentifier'),
}
cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs)
return self.get_response({
"DisableSnapshotCopyResponse": {
"DisableSnapshotCopyResult": {
"Cluster": cluster.to_json()
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
def modify_snapshot_copy_retention_period(self):
snapshot_copy_kwargs = {
'cluster_identifier': self._get_param('ClusterIdentifier'),
'retention_period': self._get_param('RetentionPeriod'),
}
cluster = self.redshift_backend.modify_snapshot_copy_retention_period(**snapshot_copy_kwargs)
return self.get_response({
"ModifySnapshotCopyRetentionPeriodResponse": {
"ModifySnapshotCopyRetentionPeriodResult": {
"Clusters": [cluster.to_json()]
},
"ResponseMetadata": {
"RequestId": "384ac68d-3775-11df-8963-01868b7c937a",
}
}
})
| apache-2.0 |
HonzaKral/mongo-connector | tests/test_mongo_doc_manager.py | 34 | 8774 | # Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests each of the functions in mongo_doc_manager
"""
import time
import sys
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path[0:0] = [""]
from mongo_connector.doc_managers.mongo_doc_manager import DocManager
from pymongo import MongoClient
from tests import mongo_host
from tests.setup_cluster import start_mongo_proc, kill_mongo_proc
class MongoDocManagerTester(unittest.TestCase):
"""Test class for MongoDocManager
"""
@classmethod
def setUpClass(cls):
cls.standalone_port = start_mongo_proc(options=['--nojournal',
'--noprealloc'])
cls.standalone_pair = '%s:%d' % (mongo_host, cls.standalone_port)
cls.MongoDoc = DocManager(cls.standalone_pair)
cls.mongo_conn = MongoClient(cls.standalone_pair)
cls.mongo = cls.mongo_conn['test']['test']
cls.namespaces_inc = ["test.test_include1", "test.test_include2"]
cls.namespaces_exc = ["test.test_exclude1", "test.test_exclude2"]
cls.choosy_docman = DocManager(
cls.standalone_pair,
namespace_set=MongoDocManagerTester.namespaces_inc
)
@classmethod
def tearDownClass(cls):
kill_mongo_proc(cls.standalone_port)
def setUp(self):
"""Empty Mongo at the start of every test
"""
self.mongo_conn.drop_database("__mongo_connector")
self.mongo.remove()
conn = MongoClient('%s:%d' % (mongo_host, self.standalone_port))
for ns in self.namespaces_inc + self.namespaces_exc:
db, coll = ns.split('.', 1)
conn[db][coll].remove()
def test_namespaces(self):
"""Ensure that a DocManager instantiated with a namespace set
has the correct namespaces
"""
self.assertEqual(set(self.namespaces_inc),
set(self.choosy_docman._namespaces()))
def test_update(self):
doc = {"_id": '1', "ns": "test.test", "_ts": 1,
"a": 1, "b": 2}
self.mongo.insert(doc)
# $set only
update_spec = {"$set": {"a": 1, "b": 2}}
doc = self.choosy_docman.update(doc, update_spec)
self.assertEqual(doc, {"_id": '1', "ns": "test.test", "_ts": 1,
"a": 1, "b": 2})
# $unset only
update_spec = {"$unset": {"a": True}}
doc = self.choosy_docman.update(doc, update_spec)
self.assertEqual(doc, {"_id": '1', "ns": "test.test", "_ts": 1,
"b": 2})
# mixed $set/$unset
update_spec = {"$unset": {"b": True}, "$set": {"c": 3}}
doc = self.choosy_docman.update(doc, update_spec)
self.assertEqual(doc, {"_id": '1', "ns": "test.test", "_ts": 1,
"c": 3})
def test_upsert(self):
"""Ensure we can properly insert into Mongo via DocManager.
"""
docc = {'_id': '1', 'name': 'John', 'ns': 'test.test',
'_ts': 5767301236327972865}
self.MongoDoc.upsert(docc)
time.sleep(3)
res = self.mongo.find()
self.assertTrue(res.count() == 1)
for doc in res:
self.assertTrue(doc['_id'] == '1' and doc['name'] == 'John')
docc = {'_id': '1', 'name': 'Paul', 'ns': 'test.test',
'_ts': 5767301236327972865}
self.MongoDoc.upsert(docc)
time.sleep(1)
res = self.mongo.find()
self.assertTrue(res.count() == 1)
for doc in res:
self.assertTrue(doc['_id'] == '1' and doc['name'] == 'Paul')
def test_remove(self):
"""Ensure we can properly delete from Mongo via DocManager.
"""
docc = {'_id': '1', 'name': 'John', 'ns': 'test.test',
'_ts': 5767301236327972865}
self.MongoDoc.upsert(docc)
time.sleep(3)
res = self.mongo.find()
self.assertTrue(res.count() == 1)
if "ns" not in docc:
docc["ns"] = 'test.test'
self.MongoDoc.remove(docc)
time.sleep(1)
res = self.mongo.find()
self.assertTrue(res.count() == 0)
def test_full_search(self):
"""Query Mongo for all docs via API and via DocManager's
_search(), compare.
"""
docc = {'_id': '1', 'name': 'John', 'ns': 'test.test',
'_ts': 5767301236327972865}
self.MongoDoc.upsert(docc)
docc = {'_id': '2', 'name': 'Paul', 'ns': 'test.test',
'_ts': 5767301236327972865}
self.MongoDoc.upsert(docc)
self.MongoDoc.commit()
search = list(self.MongoDoc._search())
search2 = list(self.mongo.find())
self.assertTrue(len(search) == len(search2))
self.assertTrue(len(search) != 0)
self.assertTrue(all(x in search for x in search2) and
all(y in search2 for y in search))
def test_search(self):
"""Query Mongo for docs in a timestamp range.
We use API and DocManager's search(start_ts,end_ts), and then compare.
"""
docc = {'_id': '1', 'name': 'John', '_ts': 5767301236327972865,
'ns': 'test.test'}
self.MongoDoc.upsert(docc)
docc2 = {'_id': '2', 'name': 'John Paul', '_ts': 5767301236327972866,
'ns': 'test.test'}
self.MongoDoc.upsert(docc2)
docc3 = {'_id': '3', 'name': 'Paul', '_ts': 5767301236327972870,
'ns': 'test.test'}
self.MongoDoc.upsert(docc3)
search = list(self.MongoDoc.search(5767301236327972865,
5767301236327972866))
self.assertTrue(len(search) == 2)
result_id = [result.get("_id") for result in search]
self.assertIn('1', result_id)
self.assertIn('2', result_id)
def test_search_namespaces(self):
"""Test search within timestamp range with a given namespace set
"""
for ns in self.namespaces_inc + self.namespaces_exc:
for i in range(100):
self.choosy_docman.upsert({"_id": i, "ns": ns, "_ts": i})
results = list(self.choosy_docman.search(0, 49))
self.assertEqual(len(results), 100)
for r in results:
self.assertIn(r["ns"], self.namespaces_inc)
def test_get_last_doc(self):
"""Insert documents, verify that get_last_doc() returns the one with
the latest timestamp.
"""
docc = {'_id': '4', 'name': 'Hare', '_ts': 3, 'ns': 'test.test'}
self.MongoDoc.upsert(docc)
docc = {'_id': '5', 'name': 'Tortoise', '_ts': 2, 'ns': 'test.test'}
self.MongoDoc.upsert(docc)
docc = {'_id': '6', 'name': 'Mr T.', '_ts': 1, 'ns': 'test.test'}
self.MongoDoc.upsert(docc)
time.sleep(1)
doc = self.MongoDoc.get_last_doc()
self.assertTrue(doc['_id'] == '4')
docc = {'_id': '6', 'name': 'HareTwin', '_ts': 4, 'ns': 'test.test'}
self.MongoDoc.upsert(docc)
time.sleep(3)
doc = self.MongoDoc.get_last_doc()
self.assertTrue(doc['_id'] == '6')
def test_get_last_doc_namespaces(self):
"""Ensure that get_last_doc returns the latest document in one of
the given namespaces
"""
# latest document is not in included namespace
for i in range(100):
ns = (self.namespaces_inc, self.namespaces_exc)[i % 2][0]
self.choosy_docman.upsert({
"_id": i,
"ns": ns,
"_ts": i
})
last_doc = self.choosy_docman.get_last_doc()
self.assertEqual(last_doc["ns"], self.namespaces_inc[0])
self.assertEqual(last_doc["_id"], 98)
# remove latest document so last doc is in included namespace,
# shouldn't change result
db, coll = self.namespaces_inc[0].split(".", 1)
MongoClient(self.standalone_pair)[db][coll].remove({"_id": 99})
last_doc = self.choosy_docman.get_last_doc()
self.assertEqual(last_doc["ns"], self.namespaces_inc[0])
self.assertEqual(last_doc["_id"], 98)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
tanium/pytan | lib/libs_external/any/urllib3/connectionpool.py | 121 | 34940 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
HTTPException, BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import get_host, Url
if six.PY2:
# Queue is imported for side effects on MS Windows
import Queue as _unused_module_Queue # noqa: F401
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = queue.LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _ipv6_host(host).lower()
self.port = port
def __str__(self):
return '%s(host=%r, port=%r)' % (type(self).__name__,
self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = 'http'
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(self, host, port=None, strict=False,
timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
headers=None, retries=None,
_proxy=None, _proxy_headers=None,
**conn_kw):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault('socket_options', [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug("Starting new HTTP connection (%d): %s",
self.num_connections, self.host)
conn = self.ConnectionCls(host=self.host, port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(self,
"Pool reached maximum size and no more "
"connections are allowed.")
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, 'auto_open', 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning(
"Connection pool is full, discarding connection: %s",
self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, 'errno') and err.errno in _blocking_errnos:
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
**httplib_request_kw):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, 'sock', None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try: # Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError: # Python 2.6 and older, Python 3
try:
httplib_response = conn.getresponse()
except Exception as e:
# Remove the TypeError from the exception chain in Python 3;
# otherwise it looks like a programming error was the cause.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
method, url, http_version, httplib_response.status,
httplib_response.length)
try:
assert_header_parsing(httplib_response.msg)
except HeaderParsingError as hpe: # Platform-specific: Python 3
log.warning(
'Failed to parse headers (url=%s): %s',
self._absolute_url(url), hpe, exc_info=True)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith('/'):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
host = _ipv6_host(host).lower()
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
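# Worked example for is_same_host (illustrative): with scheme 'http', host
# 'example.com' and port 80, both '/index.html' and
# 'http://example.com/index.html' compare as the same host, because the URL's
# missing port falls back to port_by_scheme['http'] == 80.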
def urlopen(self, method, url, body=None, headers=None, retries=None,
redirect=True, assert_same_host=True, timeout=_Default,
pool_timeout=None, release_conn=None, chunked=False,
body_pos=None, **response_kw):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get('preload_content', True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/shazow/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == 'http':
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(conn, method, url,
timeout=timeout_obj,
body=body, headers=headers,
chunked=chunked)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw['request_method'] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (BaseSSLError, CertificateError) as e:
# Close the connection. If a connection is reused on which there
# was a Certificate error, the next request will certainly raise
# another Certificate error.
clean_exit = False
raise SSLError(e)
except SSLError:
# Treat SSLError separately from BaseSSLError to preserve
# traceback.
clean_exit = False
raise
except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
# Discard the connection for these exceptions. It will be
# be replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError('Cannot connect to proxy.', e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError('Connection aborted.', e)
retries = retries.increment(method, url, error=e, _pool=self,
_stacktrace=sys.exc_info()[2])
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning("Retrying (%r) after connection "
"broken by '%r': %s", retries, err, url)
return self.urlopen(method, url, body, headers, retries,
redirect, assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, body_pos=body_pos,
**response_kw)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Release the connection for this response, since we're not
# returning it to be released manually.
response.release_conn()
raise
return response
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method, redirect_location, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn, body_pos=body_pos,
**response_kw)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader('Retry-After'))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Release the connection for this response, since we're not
# returning it to be released manually.
response.release_conn()
raise
return response
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method, url, body, headers,
retries=retries, redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout, pool_timeout=pool_timeout,
release_conn=release_conn,
body_pos=body_pos, **response_kw)
return response
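# --- Illustrative sketch (not part of the original module) ---
# The redirect and status-retry branches above are driven by the Retry
# object passed to urlopen(). A minimal, hedged usage example (assumes the
# public urllib3 API; the host and limits are placeholders):
#
#     from urllib3 import HTTPConnectionPool
#     from urllib3.util.retry import Retry
#
#     retries = Retry(total=3, redirect=2, status_forcelist=[500, 503],
#                     raise_on_redirect=False)
#     pool = HTTPConnectionPool('example.com', maxsize=2)
#     response = pool.urlopen('GET', '/', retries=retries)
#     # Each followed redirect or retried status consumes one attempt via
#     # retries.increment(); when exhausted, MaxRetryError is raised or the
#     # last response is returned, depending on the raise_on_* flags.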
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
"""
scheme = 'https'
ConnectionCls = HTTPSConnection
def __init__(self, host, port=None,
strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
block=False, headers=None, retries=None,
_proxy=None, _proxy_headers=None,
key_file=None, cert_file=None, cert_reqs=None,
ca_certs=None, ssl_version=None,
assert_hostname=None, assert_fingerprint=None,
ca_cert_dir=None, **conn_kw):
HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
block, headers, retries, _proxy, _proxy_headers,
**conn_kw)
if ca_certs and cert_reqs is None:
cert_reqs = 'CERT_REQUIRED'
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(key_file=self.key_file,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
# Python 2.7+
try:
set_tunnel = conn.set_tunnel
except AttributeError: # Platform-specific: Python 2.6
set_tunnel = conn._set_tunnel
if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
set_tunnel(self.host, self.port)
else:
set_tunnel(self.host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.debug("Starting new HTTPS connection (%d): %s",
self.num_connections, self.host)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError("Can't connect to HTTPS URL because the SSL "
"module is not available.")
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(host=actual_host, port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict, **self.conn_kw)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn((
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly advised. See: '
'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
'#ssl-warnings'),
InsecureRequestWarning)
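# --- Illustrative sketch (not part of the original module) ---
# A hedged example of building a verifying pool; the CA bundle comes from
# the optional ``certifi`` package and the host is a placeholder:
#
#     import certifi
#     pool = HTTPSConnectionPool('example.com', port=443,
#                                cert_reqs='CERT_REQUIRED',
#                                ca_certs=certifi.where())
#     r = pool.request('GET', '/')
#     # With cert_reqs='CERT_REQUIRED' and a valid CA bundle, conn.is_verified
#     # is True and _validate_conn() emits no InsecureRequestWarning.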
def connection_from_url(url, **kw):
"""
    Given a url, return a :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
    of the url before creating a :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == 'https':
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def _ipv6_host(host):
"""
Process IPv6 address literals
"""
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
#
# Also if an IPv6 address literal has a zone identifier, the
    # percent sign might be URI-encoded; convert it back into ASCII
if host.startswith('[') and host.endswith(']'):
host = host.replace('%25', '%').strip('[]')
return host
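# --- Illustrative sketch (not part of the original module) ---
# _ipv6_host() strips the brackets and un-escapes a percent-encoded zone id:
#
#     >>> _ipv6_host('[2001:db8::1]')
#     '2001:db8::1'
#     >>> _ipv6_host('[fe80::1%25eth0]')   # '%25' is a URI-encoded '%'
#     'fe80::1%eth0'
#     >>> _ipv6_host('example.com')        # non-bracketed hosts pass through
#     'example.com'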
| mit |
dstndstn/astrometry.net | sdss/dr9.py | 2 | 1887 | # This file is part of the Astrometry.net suite.
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function
from __future__ import absolute_import
from .common import *
from .dr8 import *
class DR9(DR8):
def __init__(self, **kwargs):
'''
Useful kwargs:
basedir : (string) - local directory where data will be stored.
'''
DR8.__init__(self, **kwargs)
self.dasurl = 'http://data.sdss3.org/sas/dr9/boss/'
def getDRNumber(self):
return 9
def _get_runlist_filename(self):
return self._get_data_file('runList-dr9.par')
if __name__ == '__main__':
sdss = DR9()
rcfb = (2873, 3, 211, 'r')
r,c,f,b = rcfb
bandnum = band_index(b)
sdss.retrieve('psField', *rcfb)
psfield = sdss.readPsField(r,c,f)
dg = psfield.getDoubleGaussian(bandnum, normalize=True)
psf = psfield.getPsfAtPoints(bandnum, 2048/2., 1489./2.)
import matplotlib
matplotlib.use('Agg')
import pylab as plt
import numpy as np
H,W = psf.shape
cx,cy = (W/2, H/2)
DX,DY = np.meshgrid(np.arange(W)-cx, np.arange(H)-cy)
(a1,s1, a2,s2) = dg
R2 = (DX**2 + DY**2)
G = (a1 / (2.*np.pi*s1**2) * np.exp(-R2/(2.*s1**2)) +
a2 / (2.*np.pi*s2**2) * np.exp(-R2/(2.*s2**2)))
print('G sum', G.sum())
print('psf sum', psf.sum())
psf /= psf.sum()
plt.clf()
plt.subplot(2,2,1)
ima = dict(interpolation='nearest', origin='lower')
plt.imshow(psf, **ima)
plt.subplot(2,2,2)
plt.imshow(G, **ima)
plt.subplot(2,2,3)
plt.plot(psf[H/2,:], 'rs-', mec='r', mfc='none')
plt.plot(G[H/2,:], 'gx-')
plt.subplot(2,2,4)
plt.semilogy(np.maximum(1e-6, psf[H/2,:]), 's-', mec='r', mfc='none')
plt.semilogy(np.maximum(1e-6, G[H/2,:]), 'gx-')
plt.savefig('psf1.png')
| bsd-3-clause |
avneesh91/django | django/conf/locale/sk/formats.py | 65 | 1106 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
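# --- Illustrative sketch (not part of the original module) ---
# The *_INPUT_FORMATS entries are plain strftime patterns, so the stdlib can
# exercise them directly:
#
#     >>> import datetime
#     >>> datetime.datetime.strptime('25.10.2006', DATE_INPUT_FORMATS[0]).date()
#     datetime.date(2006, 10, 25)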
| bsd-3-clause |
polimediaupv/edx-platform | common/djangoapps/embargo/migrations/0003_add_countries.py | 102 | 6889 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django_countries import countries
class Migration(DataMigration):
def forwards(self, orm):
"""Populate the available countries with all 2-character ISO country codes. """
for country_code, __ in list(countries):
orm.Country.objects.get_or_create(country=country_code)
def backwards(self, orm):
"""Clear all available countries. """
orm.Country.objects.all().delete()
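    # --- Illustrative note (not part of the original migration) ---
    # Under South, this data migration would typically be applied with:
    #     ./manage.py migrate embargo 0003
    # and reversed (clearing the countries) with:
    #     ./manage.py migrate embargo 0002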
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'rule_type'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['embargo']
symmetrical = True
| agpl-3.0 |
bertrand-l/numpy | numpy/polynomial/_polybase.py | 37 | 30184 | """
Abstract base class for the various polynomial Classes.
The ABCPolyBase class provides the methods needed to implement the common API
for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
from __future__ import division, absolute_import, print_function
from abc import ABCMeta, abstractmethod, abstractproperty
from numbers import Number
import numpy as np
from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(object):
"""An abstract base class for series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
methods listed below.
.. versionadded:: 1.9.0
Parameters
----------
coef : array_like
Series coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
``P_i`` is the basis polynomials of degree ``i``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is the derived class domain.
window : (2,) array_like, optional
Window, see domain for its use. The default value is the
derived class window.
Attributes
----------
coef : (N,) ndarray
Series coefficients in order of increasing degree.
domain : (2,) ndarray
Domain that is mapped to window.
window : (2,) ndarray
Window that domain is mapped to.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
window : (2,) ndarray
Default window of the class.
"""
__metaclass__ = ABCMeta
# Not hashable
__hash__ = None
    # Don't let instances participate in array operations. Value doesn't matter.
__array_priority__ = 1000
# Limit runaway size. T_n^m has degree n*m
maxpower = 100
@abstractproperty
def domain(self):
pass
@abstractproperty
def window(self):
pass
@abstractproperty
def nickname(self):
pass
@abstractmethod
def _add(self):
pass
@abstractmethod
def _sub(self):
pass
@abstractmethod
def _mul(self):
pass
@abstractmethod
def _div(self):
pass
@abstractmethod
def _pow(self):
pass
@abstractmethod
def _val(self):
pass
@abstractmethod
def _int(self):
pass
@abstractmethod
def _der(self):
pass
@abstractmethod
def _fit(self):
pass
@abstractmethod
def _line(self):
pass
@abstractmethod
def _roots(self):
pass
@abstractmethod
def _fromroots(self):
pass
def has_samecoef(self, other):
"""Check if coefficients match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
if len(self.coef) != len(other.coef):
return False
elif not np.all(self.coef == other.coef):
return False
else:
return True
def has_samedomain(self, other):
"""Check if domains match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``domain`` attribute.
Returns
-------
bool : boolean
True if the domains are the same, False otherwise.
"""
return np.all(self.domain == other.domain)
def has_samewindow(self, other):
"""Check if windows match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``window`` attribute.
Returns
-------
bool : boolean
True if the windows are the same, False otherwise.
"""
return np.all(self.window == other.window)
def has_sametype(self, other):
"""Check if types match.
.. versionadded:: 1.7.0
Parameters
----------
other : object
Class instance.
Returns
-------
bool : boolean
True if other is same class as self
"""
return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
.. versionadded:: 1.9.0
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef
            The coefficients of `other` if it is a compatible instance
            of ABCPolyBase, otherwise `other`.
Raises
------
TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None):
[coef] = pu.as_series([coef], trim=False)
self.coef = coef
if domain is not None:
[domain] = pu.as_series([domain], trim=False)
if len(domain) != 2:
raise ValueError("Domain has wrong number of elements.")
self.domain = domain
if window is not None:
[window] = pu.as_series([window], trim=False)
if len(window) != 2:
raise ValueError("Window has wrong number of elements.")
self.window = window
def __repr__(self):
format = "%s(%s, %s, %s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
return format % (name, coef, domain, window)
def __str__(self):
format = "%s(%s)"
coef = str(self.coef)
name = self.nickname
return format % (name, coef)
# Pickle and copy
def __getstate__(self):
ret = self.__dict__.copy()
ret['coef'] = self.coef.copy()
ret['domain'] = self.domain.copy()
ret['window'] = self.window.copy()
return ret
def __setstate__(self, dict):
self.__dict__ = dict
# Call
def __call__(self, arg):
off, scl = pu.mapparms(self.domain, self.window)
arg = off + scl*arg
return self._val(arg, self.coef)
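    # --- Illustrative sketch (not part of the original module) ---
    # __call__ first maps its argument from ``domain`` onto ``window``. With
    # domain=[0, 2] and the default window [-1, 1], off=-1.0 and scl=1.0, so
    # the basis is evaluated at x - 1:
    #
    #     >>> from numpy.polynomial import Polynomial   # concrete subclass
    #     >>> p = Polynomial([0, 1], domain=[0, 2])
    #     >>> float(p(2.0))
    #     1.0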
def __iter__(self):
return iter(self.coef)
def __len__(self):
return len(self.coef)
# Numeric properties.
def __neg__(self):
return self.__class__(-self.coef, self.domain, self.window)
def __pos__(self):
return self
def __add__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._add(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __sub__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._sub(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __mul__(self, other):
try:
othercoef = self._get_coefficients(other)
coef = self._mul(self.coef, othercoef)
except TypeError as e:
raise e
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __div__(self, other):
# set to __floordiv__, /, for now.
return self.__floordiv__(other)
def __truediv__(self, other):
# there is no true divide if the rhs is not a Number, although it
# could return the first n elements of an infinite series.
# It is hard to see where n would come from, though.
if not isinstance(other, Number) or isinstance(other, bool):
form = "unsupported types for true division: '%s', '%s'"
raise TypeError(form % (type(self), type(other)))
return self.__floordiv__(other)
def __floordiv__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[0]
def __mod__(self, other):
res = self.__divmod__(other)
if res is NotImplemented:
return res
return res[1]
def __divmod__(self, other):
try:
othercoef = self._get_coefficients(other)
quo, rem = self._div(self.coef, othercoef)
except (TypeError, ZeroDivisionError) as e:
raise e
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
def __pow__(self, other):
coef = self._pow(self.coef, other, maxpower=self.maxpower)
res = self.__class__(coef, self.domain, self.window)
return res
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
except:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rdiv__(self, other):
# set to __floordiv__ /.
return self.__rfloordiv__(other)
def __rtruediv__(self, other):
# An instance of ABCPolyBase is not considered a
# Number.
return NotImplemented
def __rfloordiv__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[0]
def __rmod__(self, other):
res = self.__rdivmod__(other)
if res is NotImplemented:
return res
return res[1]
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
except ZeroDivisionError as e:
raise e
except:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
return quo, rem
# Enhance me
# some augmented arithmetic operations could be added here
def __eq__(self, other):
res = (isinstance(other, self.__class__) and
np.all(self.domain == other.domain) and
np.all(self.window == other.window) and
(self.coef.shape == other.coef.shape) and
np.all(self.coef == other.coef))
return res
def __ne__(self, other):
return not self.__eq__(other)
#
# Extra methods.
#
def copy(self):
"""Return a copy.
Returns
-------
new_series : series
Copy of self.
"""
return self.__class__(self.coef, self.domain, self.window)
def degree(self):
"""The degree of the series.
.. versionadded:: 1.5.0
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
"""
return len(self) - 1
def cutdeg(self, deg):
"""Truncate series to the given degree.
Reduce the degree of the series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
.. versionadded:: 1.5.0
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_series : series
New instance of series with reduced degree.
"""
return self.truncate(deg + 1)
def trim(self, tol=0):
"""Remove trailing coefficients
Remove trailing coefficients until a coefficient is reached whose
        absolute value is greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set
to ``[0]``. A new series instance is returned with the new
coefficients. The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_series : series
Contains the new set of coefficients.
"""
coef = pu.trimcoef(self.coef, tol)
return self.__class__(coef, self.domain, self.window)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window)
def convert(self, domain=None, kind=None, window=None):
"""Convert series to a different kind and/or domain and/or window.
Parameters
----------
domain : array_like, optional
The domain of the converted series. If the value is None,
the default domain of `kind` is used.
kind : class, optional
The polynomial series type class to which the current instance
should be converted. If kind is None, then the class of the
current instance is used.
window : array_like, optional
The window of the converted series. If the value is None,
the default window of `kind` is used.
Returns
-------
new_series : series
The returned class can be of different type than the current
instance and/or have a different domain and/or different
window.
Notes
-----
Conversion between domains and class types can result in
        numerically ill-defined series.
Examples
--------
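        A minimal, hedged sketch (repr formatting varies across NumPy
        versions)::

            >>> from numpy.polynomial import Polynomial, Chebyshev
            >>> p = Polynomial([0, 1])       # p(x) = x
            >>> p.convert(kind=Chebyshev)    # x in the Chebyshev basis
            Chebyshev([ 0.,  1.], [-1.,  1.], [-1.,  1.])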
"""
if kind is None:
kind = self.__class__
if domain is None:
domain = kind.domain
if window is None:
window = kind.window
return self(kind.identity(domain, window=window))
def mapparms(self):
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
map depends on the ``domain`` and ``window``; if the current
``domain`` is equal to the ``window`` the resulting map is the
identity. If the coefficients of the series instance are to be
used by themselves outside this class, then the linear function
must be substituted for the ``x`` in the standard representation of
the base polynomials.
Returns
-------
off, scl : float or complex
The mapping function is defined by ``off + scl*x``.
Notes
-----
If the current domain is the interval ``[l1, r1]`` and the window
is ``[l2, r2]``, then the linear mapping function ``L`` is
defined by the equations::
L(l1) = l2
L(r1) = r2
"""
return pu.mapparms(self.domain, self.window)
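    # --- Illustrative note (not part of the original module) ---
    # Solving L(l1) = l2 and L(r1) = r2 for the affine map L(x) = off + scl*x
    # gives the closed form used throughout this class:
    #     scl = (r2 - l2) / (r1 - l1)
    #     off = l2 - scl * l1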
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
            list of values must be less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl*lbnd
coef = self._int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain, self.window)
def deriv(self, m=1):
"""Differentiate.
        Return a series instance that is the derivative of the current
series.
Parameters
----------
m : non-negative int
Find the derivative of order `m`.
Returns
-------
new_series : series
A new series representing the derivative. The domain is the same
as the domain of the differentiated series.
"""
off, scl = self.mapparms()
coef = self._der(self.coef, m, scl)
return self.__class__(coef, self.domain, self.window)
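    # --- Illustrative sketch (not part of the original module) ---
    # deriv() and integ() with a concrete subclass (output shown indicatively):
    #
    #     >>> from numpy.polynomial import Polynomial
    #     >>> p = Polynomial([0, 0, 1])   # x**2
    #     >>> p.deriv().coef              # d/dx -> 2*x
    #     array([ 0.,  2.])
    #     >>> p.integ(lbnd=0).coef        # integral from 0 -> x**3/3
    #     array([ 0.        ,  0.        ,  0.        ,  0.33333333])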
def roots(self):
"""Return the roots of the series polynomial.
Compute the roots for the series. Note that the accuracy of the
        roots decreases the further outside the domain they lie.
Returns
-------
roots : ndarray
Array containing the roots of the series.
"""
roots = self._roots(self.coef)
return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
.. versionadded:: 1.5.0
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
            The default is None, in which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
            y is the series evaluated at each element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
@classmethod
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
window=None):
"""Least squares fit to data.
Return a series instance that is the least squares fit to the data
`y` sampled at `x`. The domain of the returned instance can be
specified and this will often result in a superior fit with less
chance of ill conditioning.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
domain : {None, [beg, end], []}, optional
Domain to use for the returned series. If ``None``,
then a minimal domain that covers the points `x` is chosen. If
``[]`` the class domain is used. The default value was the
class domain in NumPy 1.4 and ``None`` in later versions.
The ``[]`` option was added in numpy 1.5.0.
rcond : float, optional
Relative condition number of the fit. Singular values smaller
than this relative to the largest singular value will be
ignored. The default value is len(x)*eps, where eps is the
relative precision of the float type, about 2e-16 in most
cases.
full : bool, optional
Switch determining nature of return value. When it is False
(the default) just the coefficients are returned, when True
diagnostic information from the singular value decomposition is
also returned.
w : array_like, shape (M,), optional
Weights. If not None the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products
``w[i]*y[i]`` all have the same variance. The default value is
None.
.. versionadded:: 1.5.0
window : {[beg, end]}, optional
Window to use for the returned series. The default
            value is the default class domain.
.. versionadded:: 1.6.0
Returns
-------
new_series : series
A series that represents the least squares fit to the data and
has the domain specified in the call.
[resid, rank, sv, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
"""
if domain is None:
domain = pu.getdomain(x)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
xnew = pu.mapdomain(x, domain, window)
res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
if full:
[coef, status] = res
return cls(coef, domain=domain, window=window), status
else:
coef = res
return cls(coef, domain=domain, window=window)
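    # --- Illustrative sketch (not part of the original module) ---
    # A hedged least squares example with a concrete subclass:
    #
    #     >>> import numpy as np
    #     >>> from numpy.polynomial import Polynomial
    #     >>> x = np.linspace(0, 1, 50)
    #     >>> p = Polynomial.fit(x, 3*x**2 + 1, deg=2)  # domain inferred from x
    #     >>> p.domain
    #     array([ 0.,  1.])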
@classmethod
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl*roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
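    # --- Illustrative sketch (not part of the original module) ---
    # Roots 1 and -1 give (x - 1)*(x + 1) = x**2 - 1, i.e. power-basis
    # coefficients [-1, 0, 1]:
    #
    #     >>> from numpy.polynomial import Polynomial
    #     >>> Polynomial.fromroots([1, -1]).coef
    #     array([-1.,  0.,  1.])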
@classmethod
def identity(cls, domain=None, window=None):
"""Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
            If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
            Series representing the identity.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
off, scl = pu.mapparms(window, domain)
coef = cls._line(off, scl)
return cls(coef, domain, window)
@classmethod
def basis(cls, deg, domain=None, window=None):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
.. versionadded:: 1.7.0
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
            If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0]*ideg + [1], domain, window)
@classmethod
def cast(cls, series, domain=None, window=None):
"""Convert series to series of this class.
The `series` is expected to be an instance of some polynomial
        series of one of the types supported by the numpy.polynomial
module, but could be some other class that supports the convert
method.
.. versionadded:: 1.7.0
Parameters
----------
series : series
The series instance to be converted.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
            If given, the resulting array must be of the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series of the same kind as the calling class and equal to
`series` when evaluated.
See Also
--------
convert : similar instance method
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
return series.convert(domain, cls, window)
| bsd-3-clause |
Arakmar/Sick-Beard | lib/requests/packages/oauthlib/oauth1/rfc5849/__init__.py | 69 | 38632 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
"""
oauthlib.oauth1.rfc5849
~~~~~~~~~~~~~~
This module is an implementation of various logic needed
for signing and checking OAuth 1.0 RFC 5849 requests.
"""
import logging
import time
import urlparse
from oauthlib.common import Request, urlencode, generate_nonce
from oauthlib.common import generate_timestamp
from . import parameters, signature, utils
logger = logging.getLogger(__name__)
SIGNATURE_HMAC = u"HMAC-SHA1"
SIGNATURE_RSA = u"RSA-SHA1"
SIGNATURE_PLAINTEXT = u"PLAINTEXT"
SIGNATURE_METHODS = (SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_PLAINTEXT)
SIGNATURE_TYPE_AUTH_HEADER = u'AUTH_HEADER'
SIGNATURE_TYPE_QUERY = u'QUERY'
SIGNATURE_TYPE_BODY = u'BODY'
CONTENT_TYPE_FORM_URLENCODED = u'application/x-www-form-urlencoded'
class Client(object):
"""A client used to sign OAuth 1.0 RFC 5849 requests"""
def __init__(self, client_key,
client_secret=None,
resource_owner_key=None,
resource_owner_secret=None,
callback_uri=None,
signature_method=SIGNATURE_HMAC,
signature_type=SIGNATURE_TYPE_AUTH_HEADER,
rsa_key=None, verifier=None):
self.client_key = client_key
self.client_secret = client_secret
self.resource_owner_key = resource_owner_key
self.resource_owner_secret = resource_owner_secret
self.signature_method = signature_method
self.signature_type = signature_type
self.callback_uri = callback_uri
self.rsa_key = rsa_key
self.verifier = verifier
if self.signature_method == SIGNATURE_RSA and self.rsa_key is None:
raise ValueError('rsa_key is required when using RSA signature method.')
def get_oauth_signature(self, request):
"""Get an OAuth signature to be used in signing a request
"""
if self.signature_method == SIGNATURE_PLAINTEXT:
# fast-path
return signature.sign_plaintext(self.client_secret,
self.resource_owner_secret)
uri, headers, body = self._render(request)
collected_params = signature.collect_parameters(
uri_query=urlparse.urlparse(uri).query,
body=body,
headers=headers)
logger.debug("Collected params: {0}".format(collected_params))
normalized_params = signature.normalize_parameters(collected_params)
normalized_uri = signature.normalize_base_string_uri(request.uri)
logger.debug("Normalized params: {0}".format(normalized_params))
logger.debug("Normalized URI: {0}".format(normalized_uri))
base_string = signature.construct_base_string(request.http_method,
normalized_uri, normalized_params)
logger.debug("Base signing string: {0}".format(base_string))
if self.signature_method == SIGNATURE_HMAC:
sig = signature.sign_hmac_sha1(base_string, self.client_secret,
self.resource_owner_secret)
elif self.signature_method == SIGNATURE_RSA:
sig = signature.sign_rsa_sha1(base_string, self.rsa_key)
else:
sig = signature.sign_plaintext(self.client_secret,
self.resource_owner_secret)
logger.debug("Signature: {0}".format(sig))
return sig
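    # --- Illustrative sketch (not part of the original module) ---
    # The HMAC-SHA1 branch above follows RFC 5849 section 3.4.2: the key is
    # "escape(client_secret)&escape(token_secret)" and the digest is base64
    # encoded. A standalone rendering with the Python 2 stdlib (all names
    # below are placeholders, not part of this module):
    #
    #     import base64, hmac
    #     from hashlib import sha1
    #     from urllib import quote
    #
    #     def hmac_sha1_signature(base_string, client_secret, token_secret=''):
    #         key = '&'.join(quote(s, safe='') for s in
    #                        (client_secret, token_secret))
    #         return base64.b64encode(hmac.new(key, base_string, sha1).digest())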
def get_oauth_params(self):
"""Get the basic OAuth parameters to be used in generating a signature.
"""
params = [
(u'oauth_nonce', generate_nonce()),
(u'oauth_timestamp', generate_timestamp()),
(u'oauth_version', u'1.0'),
(u'oauth_signature_method', self.signature_method),
(u'oauth_consumer_key', self.client_key),
]
if self.resource_owner_key:
params.append((u'oauth_token', self.resource_owner_key))
if self.callback_uri:
params.append((u'oauth_callback', self.callback_uri))
if self.verifier:
params.append((u'oauth_verifier', self.verifier))
return params
def _render(self, request, formencode=False):
"""Render a signed request according to signature type
Returns a 3-tuple containing the request URI, headers, and body.
If the formencode argument is True and the body contains parameters, it
is escaped and returned as a valid formencoded string.
"""
# TODO what if there are body params on a header-type auth?
# TODO what if there are query params on a body-type auth?
uri, headers, body = request.uri, request.headers, request.body
# TODO: right now these prepare_* methods are very narrow in scope--they
# only affect their little thing. In some cases (for example, with
# header auth) it might be advantageous to allow these methods to touch
# other parts of the request, like the headers—so the prepare_headers
# method could also set the Content-Type header to x-www-form-urlencoded
# like the spec requires. This would be a fundamental change though, and
# I'm not sure how I feel about it.
if self.signature_type == SIGNATURE_TYPE_AUTH_HEADER:
headers = parameters.prepare_headers(request.oauth_params, request.headers)
elif self.signature_type == SIGNATURE_TYPE_BODY and request.decoded_body is not None:
body = parameters.prepare_form_encoded_body(request.oauth_params, request.decoded_body)
if formencode:
body = urlencode(body)
headers['Content-Type'] = u'application/x-www-form-urlencoded'
elif self.signature_type == SIGNATURE_TYPE_QUERY:
uri = parameters.prepare_request_uri_query(request.oauth_params, request.uri)
else:
raise ValueError('Unknown signature type specified.')
return uri, headers, body
def sign(self, uri, http_method=u'GET', body=None, headers=None):
"""Sign a request
Signs an HTTP request with the specified parts.
Returns a 3-tuple of the signed request's URI, headers, and body.
Note that http_method is not returned as it is unaffected by the OAuth
signing process.
The body argument may be a dict, a list of 2-tuples, or a formencoded
string. The Content-Type header must be 'application/x-www-form-urlencoded'
if it is present.
If the body argument is not one of the above, it will be returned
verbatim as it is unaffected by the OAuth signing process. Attempting to
sign a request with non-formencoded data using the OAuth body signature
type is invalid and will raise an exception.
If the body does contain parameters, it will be returned as a properly-
formatted formencoded string.
All string data MUST be unicode. This includes strings inside body
dicts, for example.
"""
# normalize request data
request = Request(uri, http_method, body, headers)
# sanity check
content_type = request.headers.get('Content-Type', None)
multipart = content_type and content_type.startswith('multipart/')
should_have_params = content_type == CONTENT_TYPE_FORM_URLENCODED
has_params = request.decoded_body is not None
# 3.4.1.3.1. Parameter Sources
# [Parameters are collected from the HTTP request entity-body, but only
# if [...]:
# * The entity-body is single-part.
if multipart and has_params:
raise ValueError("Headers indicate a multipart body but body contains parameters.")
# * The entity-body follows the encoding requirements of the
# "application/x-www-form-urlencoded" content-type as defined by
# [W3C.REC-html40-19980424].
elif should_have_params and not has_params:
raise ValueError("Headers indicate a formencoded body but body was not decodable.")
# * The HTTP request entity-header includes the "Content-Type"
# header field set to "application/x-www-form-urlencoded".
elif not should_have_params and has_params:
raise ValueError("Body contains parameters but Content-Type header was not set.")
# 3.5.2. Form-Encoded Body
# Protocol parameters can be transmitted in the HTTP request entity-
# body, but only if the following REQUIRED conditions are met:
# o The entity-body is single-part.
# o The entity-body follows the encoding requirements of the
# "application/x-www-form-urlencoded" content-type as defined by
# [W3C.REC-html40-19980424].
# o The HTTP request entity-header includes the "Content-Type" header
# field set to "application/x-www-form-urlencoded".
elif self.signature_type == SIGNATURE_TYPE_BODY and not (
should_have_params and has_params and not multipart):
raise ValueError('Body signatures may only be used with form-urlencoded content')
# generate the basic OAuth parameters
request.oauth_params = self.get_oauth_params()
# generate the signature
request.oauth_params.append((u'oauth_signature', self.get_oauth_signature(request)))
# render the signed request and return it
return self._render(request, formencode=True)
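# --- Illustrative sketch (not part of the original module) ---
# Hedged usage of Client.sign(); the credentials and URL are placeholders:
#
#     client = Client(u'my-client-key', client_secret=u'my-client-secret')
#     uri, headers, body = client.sign(u'https://api.example.com/resource')
#     # headers now carries the OAuth Authorization header (the default
#     # SIGNATURE_TYPE_AUTH_HEADER); uri and body are unchanged for a GET.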
class Server(object):
"""A server base class used to verify OAuth 1.0 RFC 5849 requests
OAuth providers should inherit from Server and implement the methods
and properties outlined below. Further details are provided in the
documentation for each method and property.
Methods used to check the format of input parameters. Common tests include
length, character set, membership, range or pattern. These tests are
referred to as `whitelisting or blacklisting`_. Whitelisting is better
    but blacklisting can be useful to spot malicious activity.
    The following methods have a default implementation:
- check_client_key
- check_request_token
- check_access_token
- check_nonce
- check_verifier
- check_realm
    The methods above default to whitelisting input parameters, checking that they
are alphanumerical and between a minimum and maximum length. Rather than
overloading the methods a few properties can be used to configure these
methods.
@ safe_characters -> (character set)
@ client_key_length -> (min, max)
@ request_token_length -> (min, max)
@ access_token_length -> (min, max)
@ nonce_length -> (min, max)
@ verifier_length -> (min, max)
@ realms -> [list, of, realms]
Methods used to validate input parameters. These checks usually hit either
persistent or temporary storage such as databases or the filesystem. See
each methods documentation for detailed usage.
The following methods must be implemented:
- validate_client
- validate_request_token
- validate_access_token
- validate_nonce_and_timestamp
- validate_redirect_uri
- validate_requested_realm
- validate_realm
- validate_verifier
Method used to retrieve sensitive information from storage.
The following methods must be implemented:
- get_client_secret
- get_request_token_secret
- get_access_token_secret
- get_rsa_key
To prevent timing attacks it is necessary to not exit early even if the
client key or resource owner key is invalid. Instead dummy values should
be used during the remaining verification process. It is very important
that the dummy client and token are valid input parameters to the methods
get_client_secret, get_rsa_key and get_(access/request)_token_secret and
that the running time of those methods when given a dummy value remain
equivalent to the running time when given a valid client/resource owner.
The following properties must be implemented:
@ dummy_client
@ dummy_request_token
@ dummy_access_token
.. _`whitelisting or blacklisting`: http://www.schneier.com/blog/archives/2011/01/whitelisting_vs.html
"""
def __init__(self):
pass
@property
def allowed_signature_methods(self):
return SIGNATURE_METHODS
@property
def safe_characters(self):
return set(utils.UNICODE_ASCII_CHARACTER_SET)
@property
def client_key_length(self):
return 20, 30
@property
def request_token_length(self):
return 20, 30
@property
def access_token_length(self):
return 20, 30
@property
def timestamp_lifetime(self):
return 600
@property
def nonce_length(self):
return 20, 30
@property
def verifier_length(self):
return 20, 30
@property
def realms(self):
return []
@property
def enforce_ssl(self):
return True
def check_client_key(self, client_key):
"""Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper)
def check_request_token(self, request_token):
"""Checks that the request token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.request_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_access_token(self, request_token):
"""Checks that the token contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.access_token_length
return (set(request_token) <= self.safe_characters and
lower <= len(request_token) <= upper)
def check_nonce(self, nonce):
"""Checks that the nonce only contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.nonce_length
return (set(nonce) <= self.safe_characters and
lower <= len(nonce) <= upper)
def check_verifier(self, verifier):
"""Checks that the verifier contains only safe characters
and is no shorter than lower and no longer than upper.
"""
lower, upper = self.verifier_length
return (set(verifier) <= self.safe_characters and
lower <= len(verifier) <= upper)
def check_realm(self, realm):
"""Check that the realm is one of a set allowed realms.
"""
return realm in self.realms
def get_client_secret(self, client_key):
"""Retrieves the client secret associated with the client key.
This method must allow the use of a dummy client_key value.
Fetching the secret using the dummy key must take the same amount of
time as fetching a secret for a valid client.
Note that the returned key must be in plaintext.
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_client(self):
"""Dummy client used when an invalid client key is supplied.
The dummy client should be associated with either a client secret,
a rsa key or both depending on which signature methods are supported.
Providers should make sure that
get_client_secret(dummy_client)
get_rsa_key(dummy_client)
return a valid secret or key for the dummy client.
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_request_token_secret(self, client_key, request_token):
"""Retrieves the shared secret associated with the request token.
This method must allow the use of a dummy values and the running time
must be roughly equivalent to that of the running time of valid values.
Note that the returned key must be in plaintext.
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_access_token_secret(self, client_key, access_token):
"""Retrieves the shared secret associated with the access token.
This method must allow the use of a dummy values and the running time
must be roughly equivalent to that of the running time of valid values.
Note that the returned key must be in plaintext.
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_request_token(self):
"""Dummy request token used when an invalid token was supplied.
The dummy request token should be associated with a request token
secret such that get_request_token_secret(.., dummy_request_token)
returns a valid secret.
"""
raise NotImplementedError("Subclasses must implement this function.")
@property
def dummy_access_token(self):
"""Dummy access token used when an invalid token was supplied.
The dummy access token should be associated with an access token
secret such that get_access_token_secret(.., dummy_access_token)
returns a valid secret.
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_rsa_key(self, client_key):
"""Retrieves a previously stored client provided RSA key.
This method must allow the use of a dummy client_key value. Fetching
        the rsa key using the dummy key must take the same amount of time
as fetching a key for a valid client.
Note that the key must be returned in plaintext.
"""
raise NotImplementedError("Subclasses must implement this function.")
def get_signature_type_and_params(self, request):
"""Extracts parameters from query, headers and body. Signature type
is set to the source in which parameters were found.
"""
header_params = signature.collect_parameters(headers=request.headers,
exclude_oauth_signature=False)
body_params = signature.collect_parameters(body=request.body,
exclude_oauth_signature=False)
query_params = signature.collect_parameters(uri_query=request.uri_query,
exclude_oauth_signature=False)
params = []
params.extend(header_params)
params.extend(body_params)
params.extend(query_params)
signature_types_with_oauth_params = filter(lambda s: s[2], (
(SIGNATURE_TYPE_AUTH_HEADER, params,
utils.filter_oauth_params(header_params)),
(SIGNATURE_TYPE_BODY, params,
utils.filter_oauth_params(body_params)),
(SIGNATURE_TYPE_QUERY, params,
utils.filter_oauth_params(query_params))
))
if len(signature_types_with_oauth_params) > 1:
raise ValueError('oauth_ params must come from only 1 signature type but were found in %s' % ', '.join(
[s[0] for s in signature_types_with_oauth_params]))
try:
signature_type, params, oauth_params = signature_types_with_oauth_params[0]
except IndexError:
raise ValueError('oauth_ params are missing. Could not determine signature type.')
return signature_type, params, oauth_params
def validate_client_key(self, client_key):
"""Validates that supplied client key is a registered and valid client.
        Note that if the dummy client is supplied it should validate in the same
or nearly the same amount of time as a valid one.
Bad:
if client_key == self.dummy_client:
return False
else:
return storage.has_client(client_key)
Good:
return storage.has_client(client_key) and client_key != self.dummy_client
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_request_token(self, client_key, request_token):
"""Validates that supplied request token is registered and valid.
Note that if the dummy request_token is supplied it should validate in
        the same or nearly the same amount of time as a valid one.
Bad:
if request_token == self.dummy_request_token:
return False
else:
return storage.has_request_token(request_token)
Good:
return (storage.has_request_token(request_token) and
request_token != self.dummy_request_token)
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_access_token(self, client_key, access_token):
"""Validates that supplied access token is registered and valid.
Note that if the dummy access token is supplied it should validate in
the same or nearly the same amount of time as a valid one.
Bad:
if access_token == self.dummy_access_token:
return False
else:
return storage.has_access_token(access_token)
Good:
return (storage.has_access_token(access_token) and
access_token != self.dummy_access_token)
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request_token=None, access_token=None):
"""Validates that the nonce has not been used before.
Per `Section 3.3`_ of the spec.
"A nonce is a random string, uniquely generated by the client to allow
the server to verify that a request has never been made before and
helps prevent replay attacks when requests are made over a non-secure
channel. The nonce value MUST be unique across all requests with the
same timestamp, client credentials, and token combinations."
.. _`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_redirect_uri(self, client_key, redirect_uri):
"""Validates the client supplied redirection URI.
It is highly recommended that OAuth providers require their clients
to register all redirection URIs prior to using them in requests and
register them as absolute URIs. See `CWE-601`_ for more information
about open redirection attacks.
By requiring registration of all redirection URIs it should be
straightforward for the provider to verify whether the supplied
redirect_uri is valid or not.
.. _`CWE-601`: http://cwe.mitre.org/top25/index.html#CWE-601
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_requested_realm(self, client_key, realm):
"""Validates that the client may request access to the realm.
This method is invoked when obtaining a request token and should
tie a realm to the request token and after user authorization
this realm restriction should transfer to the access token.
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_realm(self, client_key, access_token, uri=None,
required_realm=None):
"""Validates access to the request realm.
How providers choose to use the realm parameter is outside the OAuth
specification but it is commonly used to restrict access to a subset
of protected resources such as "photos".
required_realm is a convenience parameter which can be used to provide
a per view method pre-defined list of allowed realms.
"""
raise NotImplementedError("Subclasses must implement this function.")
def validate_verifier(self, client_key, request_token, verifier):
"""Validates a verification code.
OAuth providers issue a verification code to clients after the
resource owner authorizes access. This code is used by the client to
obtain token credentials and the provider must verify that the
verifier is valid and associated with the client as well as the
resource owner.
"""
raise NotImplementedError("Subclasses must implement this function.")
def verify_request(self, uri, http_method=u'GET', body=None,
headers=None, require_resource_owner=True, require_verifier=False,
require_realm=False, required_realm=None):
"""Verifies a request ensuring that the following is true:
Per `section 3.2`_ of the spec.
- all mandated OAuth parameters are supplied
- parameters are only supplied in one source which may be the URI
query, the Authorization header or the body
- all parameters are checked and validated, see comments and the
methods and properties of this class for further details.
- the supplied signature is verified against a recalculated one
A ValueError will be raised if any parameter is missing,
supplied twice or invalid. A HTTP 400 Response should be returned
upon catching an exception.
A HTTP 401 Response should be returned if verify_request returns False.
`Timing attacks`_ are prevented through the use of dummy credentials to
create near constant time verification even if an invalid credential
is used. Early exit on invalid credentials would enable attackers
to perform `enumeration attacks`_. Near constant time string comparison
is used to prevent secret key guessing. Note that timing attacks can
only be prevented through near constant time execution, not by adding
a random delay which would only require more samples to be gathered.
.. _`section 3.2`: http://tools.ietf.org/html/rfc5849#section-3.2
.. _`Timing attacks`: http://rdist.root.org/2010/07/19/exploiting-remote-timing-attacks/
.. _`enumeration attacks`: http://www.sans.edu/research/security-laboratory/article/attacks-browsing
"""
# Only include body data from x-www-form-urlencoded requests
headers = headers or {}
if (u"Content-Type" in headers and
headers[u"Content-Type"] == CONTENT_TYPE_FORM_URLENCODED):
request = Request(uri, http_method, body, headers)
else:
request = Request(uri, http_method, u'', headers)
if self.enforce_ssl and not request.uri.lower().startswith("https://"):
raise ValueError("Insecure transport, only HTTPS is allowed.")
signature_type, params, oauth_params = self.get_signature_type_and_params(request)
# The server SHOULD return a 400 (Bad Request) status code when
# receiving a request with duplicated protocol parameters.
if len(dict(oauth_params)) != len(oauth_params):
raise ValueError("Duplicate OAuth entries.")
oauth_params = dict(oauth_params)
request_signature = oauth_params.get(u'oauth_signature')
client_key = oauth_params.get(u'oauth_consumer_key')
resource_owner_key = oauth_params.get(u'oauth_token')
nonce = oauth_params.get(u'oauth_nonce')
timestamp = oauth_params.get(u'oauth_timestamp')
callback_uri = oauth_params.get(u'oauth_callback')
verifier = oauth_params.get(u'oauth_verifier')
signature_method = oauth_params.get(u'oauth_signature_method')
realm = dict(params).get(u'realm')
# The server SHOULD return a 400 (Bad Request) status code when
# receiving a request with missing parameters.
if not all((request_signature, client_key, nonce,
timestamp, signature_method)):
raise ValueError("Missing OAuth parameters.")
# OAuth does not mandate a particular signature method, as each
# implementation can have its own unique requirements. Servers are
# free to implement and document their own custom methods.
# Recommending any particular method is beyond the scope of this
# specification. Implementers should review the Security
# Considerations section (`Section 4`_) before deciding on which
# method to support.
# .. _`Section 4`: http://tools.ietf.org/html/rfc5849#section-4
if signature_method not in self.allowed_signature_methods:
raise ValueError("Invalid signature method.")
# Servers receiving an authenticated request MUST validate it by:
# If the "oauth_version" parameter is present, ensuring its value is
# "1.0".
if u'oauth_version' in oauth_params and oauth_params[u'oauth_version'] != u'1.0':
raise ValueError("Invalid OAuth version.")
# The timestamp value MUST be a positive integer. Unless otherwise
# specified by the server's documentation, the timestamp is expressed
# in the number of seconds since January 1, 1970 00:00:00 GMT.
if len(timestamp) != 10:
raise ValueError("Invalid timestamp size")
try:
ts = int(timestamp)
except ValueError:
raise ValueError("Timestamp must be an integer")
else:
# To avoid the need to retain an infinite number of nonce values for
# future checks, servers MAY choose to restrict the time period after
# which a request with an old timestamp is rejected.
if time.time() - ts > self.timestamp_lifetime:
raise ValueError("Request too old, over 10 minutes.")
# Provider specific validation of parameters, used to enforce
# restrictions such as character set and length.
if not self.check_client_key(client_key):
raise ValueError("Invalid client key.")
if not resource_owner_key and require_resource_owner:
raise ValueError("Missing resource owner.")
if (require_resource_owner and not require_verifier and
not self.check_access_token(resource_owner_key)):
raise ValueError("Invalid resource owner key.")
if (require_resource_owner and require_verifier and
not self.check_request_token(resource_owner_key)):
raise ValueError("Invalid resource owner key.")
if not self.check_nonce(nonce):
raise ValueError("Invalid nonce.")
if realm and not self.check_realm(realm):
raise ValueError("Invalid realm. Allowed are %s" % self.realms)
if not verifier and require_verifier:
raise ValueError("Missing verifier.")
if require_verifier and not self.check_verifier(verifier):
raise ValueError("Invalid verifier.")
# Servers receiving an authenticated request MUST validate it by:
# If using the "HMAC-SHA1" or "RSA-SHA1" signature methods, ensuring
# that the combination of nonce/timestamp/token (if present)
# received from the client has not been used before in a previous
# request (the server MAY reject requests with stale timestamps as
# described in `Section 3.3`_).
# .._`Section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
#
# We check this before validating client and resource owner for
# increased security and performance, both gained by doing less work.
if require_verifier:
token = {"request_token": resource_owner_key}
else:
token = {"access_token": resource_owner_key}
if not self.validate_timestamp_and_nonce(client_key, timestamp,
nonce, **token):
return False
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid client credentials.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy client is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable client enumeration
valid_client = self.validate_client_key(client_key)
if not valid_client:
client_key = self.dummy_client
# Ensure a valid redirection uri is used
valid_redirect = self.validate_redirect_uri(client_key, callback_uri)
# The server SHOULD return a 401 (Unauthorized) status code when
# receiving a request with invalid or expired token.
# Note: This is postponed in order to avoid timing attacks, instead
# a dummy token is assigned and used to maintain near constant
# time request verification.
#
# Note that early exit would enable resource owner enumeration
if resource_owner_key:
if require_verifier:
valid_resource_owner = self.validate_request_token(
client_key, resource_owner_key)
else:
valid_resource_owner = self.validate_access_token(
client_key, resource_owner_key)
if not valid_resource_owner:
resource_owner_key = self.dummy_resource_owner
else:
valid_resource_owner = True
# Note that `realm`_ is only used in authorization headers and how
# it should be interpreted is not included in the OAuth spec.
# However they could be seen as a scope or realm to which the
# client has access and as such every client should be checked
# to ensure it is authorized access to that scope or realm.
# .. _`realm`: http://tools.ietf.org/html/rfc2617#section-1.2
#
# Note that early exit would enable client realm access enumeration.
#
# The require_realm indicates this is the first step in the OAuth
# workflow where a client requests access to a specific realm.
#
# Clients obtaining an access token will not supply a realm and it will
# not be checked. Instead the previously requested realm should be
# transferred from the request token to the access token.
#
# Access to protected resources will always validate the realm but note
# that the realm is now tied to the access token and not provided by
# the client.
if require_realm and not resource_owner_key:
valid_realm = self.validate_requested_realm(client_key, realm)
elif require_verifier:
valid_realm = True
else:
valid_realm = self.validate_realm(client_key, resource_owner_key,
uri=request.uri, required_realm=required_realm)
# The server MUST verify (Section 3.2) the validity of the request,
# ensure that the resource owner has authorized the provisioning of
# token credentials to the client, and ensure that the temporary
# credentials have not expired or been used before. The server MUST
# also verify the verification code received from the client.
# .. _`Section 3.2`: http://tools.ietf.org/html/rfc5849#section-3.2
#
# Note that early exit would enable resource owner authorization
# verifier enumeration.
if verifier:
valid_verifier = self.validate_verifier(client_key,
resource_owner_key, verifier)
else:
valid_verifier = True
# Parameters to Client depend on signature method which may vary
# for each request. Note that HMAC-SHA1 and PLAINTEXT share parameters
request.params = filter(lambda x: x[0] != "oauth_signature", params)
request.signature = request_signature
# ---- RSA Signature verification ----
if signature_method == SIGNATURE_RSA:
# The server verifies the signature per `[RFC3447] section 8.2.2`_
# .. _`[RFC3447] section 8.2.2`: http://tools.ietf.org/html/rfc3447#section-8.2.2
rsa_key = self.get_rsa_key(client_key)
valid_signature = signature.verify_rsa_sha1(request, rsa_key)
# ---- HMAC or Plaintext Signature verification ----
else:
# Servers receiving an authenticated request MUST validate it by:
# Recalculating the request signature independently as described in
# `Section 3.4`_ and comparing it to the value received from the
# client via the "oauth_signature" parameter.
# .. _`Section 3.4`: http://tools.ietf.org/html/rfc5849#section-3.4
client_secret = self.get_client_secret(client_key)
if require_verifier:
resource_owner_secret = self.get_request_token_secret(
client_key, resource_owner_key)
else:
resource_owner_secret = self.get_access_token_secret(
client_key, resource_owner_key)
if signature_method == SIGNATURE_HMAC:
valid_signature = signature.verify_hmac_sha1(request,
client_secret, resource_owner_secret)
else:
valid_signature = signature.verify_plaintext(request,
client_secret, resource_owner_secret)
# We delay checking validity until the very end, using dummy values for
# calculations and fetching secrets/keys to ensure the flow of every
# request remains almost identical regardless of whether valid values
# have been supplied. This ensures near constant time execution and
# prevents malicious users from guessing sensitive information
v = all((valid_client, valid_resource_owner, valid_realm,
valid_redirect, valid_verifier, valid_signature))
logger = logging.getLogger("oauthlib")
if not v:
logger.info("[Failure] OAuthLib request verification failed.")
logger.info("Valid client:\t%s" % valid_client)
logger.info("Valid token:\t%s\t(Required: %s" % (valid_resource_owner, require_resource_owner))
logger.info("Valid realm:\t%s\t(Required: %s)" % (valid_realm, require_realm))
logger.info("Valid callback:\t%s" % valid_redirect)
logger.info("Valid verifier:\t%s\t(Required: %s)" % (valid_verifier, require_verifier))
logger.info("Valid signature:\t%s" % valid_signature)
return v
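# Editor's note: a deliberately minimal provider sketch built on the
# validator interface above. The ExampleServer name, the in-memory stores
# and the assumption that the enclosing class is called Server are all
# illustrative; a real provider needs persistent storage and the
# constant-time behaviour the docstrings describe.
#
#     class ExampleServer(Server):
#         dummy_client = u'dummy-client'
#
#         def __init__(self):
#             self.clients = {u'registered-client-key'}
#             self.nonces = set()
#
#         def validate_client_key(self, client_key):
#             return (client_key in self.clients and
#                     client_key != self.dummy_client)
#
#         def validate_timestamp_and_nonce(self, client_key, timestamp,
#                 nonce, request_token=None, access_token=None):
#             key = (client_key, timestamp, nonce,
#                    request_token or access_token)
#             if key in self.nonces:
#                 return False
#             self.nonces.add(key)
#             return True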
| gpl-3.0 |
diofant/diofant | diofant/__main__.py | 1 | 3841 | """
Python shell for Diofant.
This is just a normal Python shell (IPython shell if you have the
IPython package installed) that adds default imports and runs
some initialization code.
"""
import argparse
import ast
import atexit
import code
import os
import readline
from diofant.interactive.session import (AutomaticSymbols,
IntegerDivisionWrapper)
__all__ = ()
parser = argparse.ArgumentParser(description=__doc__,
prog='python -m diofant')
parser.add_argument('--no-wrap-division',
help="Don't wrap integer divisions with Fraction",
action='store_true')
parser.add_argument('-a', '--auto-symbols',
help="Automatically create missing Symbol's",
action='store_true')
parser.add_argument('--no-ipython', help="Don't use IPython",
action='store_true')
def main():
args, ipython_args = parser.parse_known_args()
lines = ['from diofant import *',
'init_printing()',
"x, y, z, t = symbols('x y z t')",
"k, m, n = symbols('k m n', integer=True)",
"f, g, h = symbols('f g h', cls=Function)",
'init_printing(pretty_print=True, use_unicode=True)']
try:
import IPython
import traitlets
except ImportError:
args.no_ipython = True
if not args.no_ipython:
config = traitlets.config.loader.Config()
shell = config.InteractiveShell
ast_transformers = shell.ast_transformers
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
shell.confirm_exit = False
config.TerminalIPythonApp.display_banner = False
app = IPython.terminal.ipapp.TerminalIPythonApp.instance(config=config)
app.initialize(ipython_args)
shell = app.shell
for l in lines:
shell.run_cell(l, silent=True)
if args.auto_symbols:
shell.run_cell('from diofant.interactive.session import AutomaticSymbols')
shell.run_cell('ip = get_ipython()')
shell.run_cell('ip.ast_transformers.append(AutomaticSymbols(ip.user_ns))')
shell.run_cell('del ip')
app.start()
else:
ast_transformers = []
ns = {}
if not args.no_wrap_division:
ast_transformers.append(IntegerDivisionWrapper())
if args.auto_symbols:
ast_transformers.append(AutomaticSymbols(ns))
class DiofantConsole(code.InteractiveConsole):
"""An interactive console with readline support."""
def __init__(self, ast_transformers=None, **kwargs):
super().__init__(**kwargs)
readline.parse_and_bind('tab: complete')
history = os.path.expanduser('~/.python_history')
try:
readline.read_history_file(history)
except FileNotFoundError:
pass
atexit.register(readline.write_history_file, history)
self.ast_transformers = ast_transformers if ast_transformers is not None else []
def runsource(self, source, filename='<input>', symbol='single'):
try:
tree = ast.parse(source)
except SyntaxError:
return True
for t in self.ast_transformers:
tree = t.visit(tree)
ast.fix_missing_locations(tree)
source = ast.unparse(tree)
source = source.split('\n')
source = ';'.join(source)
return super().runsource(source, filename=filename, symbol=symbol)
c = DiofantConsole(ast_transformers=ast_transformers, locals=ns)
for l in lines:
c.push(l)
c.interact('', '')
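# Editor's note (illustrative sketch, not diofant's actual implementation):
# an AST transformer in the spirit of IntegerDivisionWrapper could rewrite
# ``a / b`` into ``Fraction(a, b)``, assuming Fraction is importable in the
# namespace that executes the transformed code:
#
#     class WrapDivision(ast.NodeTransformer):
#         def visit_BinOp(self, node):
#             self.generic_visit(node)
#             if isinstance(node.op, ast.Div):
#                 return ast.Call(
#                     func=ast.Name(id='Fraction', ctx=ast.Load()),
#                     args=[node.left, node.right], keywords=[])
#             return node
#
# runsource() above applies such transformers and then calls
# ast.fix_missing_locations() to repair positions on the new nodes.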
if __name__ == '__main__': # pragma: no branch
main()
| bsd-3-clause |
jcristau/yapps | yapps/runtime.py | 1 | 14747 | # Yapps 2 Runtime, part of Yapps 2 - yet another python parser system
# Copyright 1999-2003 by Amit J. Patel <amitp@cs.stanford.edu>
# Enhancements copyright 2003-2004 by Matthias Urlichs <smurf@debian.org>
#
# This version of the Yapps 2 Runtime can be distributed under the
# terms of the MIT open source license, either found in the LICENSE file
# included with the Yapps distribution
# <http://theory.stanford.edu/~amitp/yapps/> or at
# <http://www.opensource.org/licenses/mit-license.php>
#
"""Run time libraries needed to run parsers generated by Yapps.
This module defines parse-time exception classes, a scanner class, a
base class for parsers produced by Yapps, and a context class that
keeps track of the parse stack.
"""
from __future__ import print_function
import sys, re
MIN_WINDOW=4096
# File lookup window
class SyntaxError(Exception):
"""When we run into an unexpected token, this is the exception to use"""
def __init__(self, pos=None, msg="Bad Token", context=None):
Exception.__init__(self)
self.pos = pos
self.msg = msg
self.context = context
def __str__(self):
if not self.pos: return 'SyntaxError'
else: return 'SyntaxError@%s(%s)' % (repr(self.pos), self.msg)
class NoMoreTokens(Exception):
"""Another exception object, for when we run out of tokens"""
pass
class Token(object):
"""Yapps token.
This is a container for a scanned token.
"""
def __init__(self, type, value, pos=None):
"""Initialize a token."""
self.type = type
self.value = value
self.pos = pos
def __repr__(self):
output = '<%s: %s' % (self.type, repr(self.value))
if self.pos:
output += " @ "
if self.pos[0]:
output += "%s:" % self.pos[0]
if self.pos[1]:
output += "%d" % self.pos[1]
if self.pos[2] is not None:
output += ".%d" % self.pos[2]
output += ">"
return output
in_name=0
class Scanner(object):
"""Yapps scanner.
The Yapps scanner can work in context sensitive or context
insensitive modes. The token(i) method is used to retrieve the
i-th token. It takes a restrict set that limits the set of tokens
it is allowed to return. In context sensitive mode, this restrict
set guides the scanner. In context insensitive mode, there is no
restriction (the set is always the full set of tokens).
"""
def __init__(self, patterns, ignore, input="",
file=None,filename=None,stacked=False):
"""Initialize the scanner.
Parameters:
patterns : [(terminal, uncompiled regex), ...] or None
ignore : {terminal:None, ...}
input : string
If patterns is None, we assume that the subclass has
defined self.patterns : [(terminal, compiled regex), ...].
Note that the patterns parameter expects uncompiled regexes,
whereas the self.patterns field expects compiled regexes.
Each value in the 'ignore' dict is either None or a callable; the
callable is invoked with the scanner and the to-be-ignored match
object, and can be used for include file or comment handling.
"""
if not filename:
global in_name
filename="<f.%d>" % in_name
in_name += 1
self.input = input
self.ignore = ignore
self.file = file
self.filename = filename
self.pos = 0
self.del_pos = 0 # skipped
self.line = 1
self.del_line = 0 # skipped
self.col = 0
self.tokens = []
self.stack = None
self.stacked = stacked
self.last_read_token = None
self.last_token = None
self.last_types = None
if patterns is not None:
# Compile the regex strings into regex objects
self.patterns = []
for terminal, regex in patterns:
self.patterns.append( (terminal, re.compile(regex)) )
def stack_input(self, input="", file=None, filename=None):
"""Temporarily parse from a second file."""
# Already reading from somewhere else: Go on top of that, please.
if self.stack:
# autogenerate a recursion-level-identifying filename
if not filename:
filename = 1
else:
try:
filename += 1
except TypeError:
pass
# now pass off to the include file
self.stack.stack_input(input,file,filename)
else:
try:
filename += 0
except TypeError:
pass
else:
filename = "<str_%d>" % filename
# self.stack = object.__new__(self.__class__)
# Scanner.__init__(self.stack,self.patterns,self.ignore,input,file,filename, stacked=True)
# Note that the pattern+ignore are added by the generated
# scanner code
self.stack = self.__class__(input,file,filename, stacked=True)
def get_pos(self):
"""Return a file/line/char tuple."""
if self.stack: return self.stack.get_pos()
return (self.filename, self.line+self.del_line, self.col)
# def __repr__(self):
# """Print the last few tokens that have been scanned in"""
# output = ''
# for t in self.tokens:
# output += '%s\n' % (repr(t),)
# return output
def print_line_with_pointer(self, pos, length=0, out=None):
"""Print the line of 'text' that includes position 'p',
along with a second line with a single caret (^) at position p"""
if out is None:
out = sys.stderr
file,line,p = pos
if file != self.filename:
if self.stack: return self.stack.print_line_with_pointer(pos,length=length,out=out)
print("(%s: not in input buffer)" % file, file=out)
return
text = self.input
p += length-1 # starts at pos 1
origline=line
line -= self.del_line
spos=0
if line > 0:
while 1:
line = line - 1
try:
cr = text.index("\n",spos)
except ValueError:
if line:
text = ""
break
if line == 0:
text = text[spos:cr]
break
spos = cr+1
else:
print("(%s:%d not in input buffer)" % (file,origline), file=out)
return
# Now try printing part of the line
text = text[max(p-80, 0):p+80]
p = p - max(p-80, 0)
# Strip to the left
i = text[:p].rfind('\n')
j = text[:p].rfind('\r')
if i < 0 or (0 <= j < i): i = j
if 0 <= i < p:
p = p - i - 1
text = text[i+1:]
# Strip to the right
i = text.find('\n', p)
j = text.find('\r', p)
if i < 0 or (0 <= j < i): i = j
if i >= 0:
text = text[:i]
# Now shorten the text
while len(text) > 70 and p > 60:
# Cut off 10 chars
text = "..." + text[10:]
p = p - 7
# Now print the string, along with an indicator
print('> ',text, file=out)
print('> ',' '*p + '^', file=out)
def grab_input(self):
"""Get more input if possible."""
if not self.file: return
if len(self.input) - self.pos >= MIN_WINDOW: return
data = self.file.read(MIN_WINDOW)
if data is None or data == "":
self.file = None
# Drop bytes from the start, if necessary.
if self.pos > 2*MIN_WINDOW:
self.del_pos += MIN_WINDOW
self.del_line += self.input[:MIN_WINDOW].count("\n")
self.pos -= MIN_WINDOW
self.input = self.input[MIN_WINDOW:] + data
else:
self.input = self.input + data
def getchar(self):
"""Return the next character."""
self.grab_input()
c = self.input[self.pos]
self.pos += 1
return c
def token(self, restrict, context=None):
"""Scan for another token."""
while 1:
if self.stack:
try:
return self.stack.token(restrict, context)
except StopIteration:
self.stack = None
# Keep looking for a token, ignoring any in self.ignore
self.grab_input()
# special handling for end-of-file
if self.stacked and self.pos == len(self.input):
raise StopIteration
# Search the patterns for the longest match, with earlier
# tokens in the list having preference
best_match = -1
best_pat = '(error)'
best_m = None
for p, regexp in self.patterns:
# First check to see if we're ignoring this token
if restrict and p not in restrict and p not in self.ignore:
continue
m = regexp.match(self.input, self.pos)
if m and m.end()-m.start() > best_match:
# We got a match that's better than the previous one
best_pat = p
best_match = m.end()-m.start()
best_m = m
# If we didn't find anything, raise an error
if best_pat == '(error)' and best_match < 0:
msg = 'Bad Token'
if restrict:
msg = 'Trying to find one of '+', '.join(restrict)
raise SyntaxError(self.get_pos(), msg, context=context)
ignore = best_pat in self.ignore
value = self.input[self.pos:self.pos+best_match]
if not ignore:
tok=Token(type=best_pat, value=value, pos=self.get_pos())
self.pos += best_match
npos = value.rfind("\n")
if npos > -1:
self.col = best_match-npos
self.line += value.count("\n")
else:
self.col += best_match
# If we found something that isn't to be ignored, return it
if not ignore:
if len(self.tokens) >= 10:
del self.tokens[0]
self.tokens.append(tok)
self.last_read_token = tok
# print repr(tok)
return tok
else:
ignore = self.ignore[best_pat]
if ignore:
ignore(self, best_m)
def peek(self, *types, **kw):
"""Returns the token type for lookahead; if there are any args
then the list of args is the set of token types to allow"""
context = kw.get("context",None)
if self.last_token is None:
self.last_types = types
self.last_token = self.token(types,context)
elif self.last_types:
for t in types:
if t not in self.last_types:
raise NotImplementedError("Unimplemented: restriction set changed")
return self.last_token.type
def scan(self, type, **kw):
"""Returns the matched text, and moves to the next token"""
context = kw.get("context",None)
if self.last_token is None:
tok = self.token([type],context)
else:
if self.last_types and type not in self.last_types:
raise NotImplementedError("Unimplemented: restriction set changed")
tok = self.last_token
self.last_token = None
if tok.type != type:
if not self.last_types: self.last_types=[]
raise SyntaxError(tok.pos, 'Trying to find '+type+': '+ ', '.join(self.last_types)+", got "+tok.type, context=context)
return tok.value
class Parser(object):
"""Base class for Yapps-generated parsers.
"""
def __init__(self, scanner):
self._scanner = scanner
def _stack(self, input="",file=None,filename=None):
"""Temporarily read from someplace else"""
self._scanner.stack_input(input,file,filename)
self._tok = None
def _peek(self, *types, **kw):
"""Returns the token type for lookahead; if there are any args
then the list of args is the set of token types to allow"""
return self._scanner.peek(*types, **kw)
def _scan(self, type, **kw):
"""Returns the matched text, and moves to the next token"""
return self._scanner.scan(type, **kw)
class Context(object):
"""Class to represent the parser's call stack.
Every rule creates a Context that links to its parent rule. The
contexts can be used for debugging.
"""
def __init__(self, parent, scanner, rule, args=()):
"""Create a new context.
Args:
parent: Context object or None
scanner: Scanner object
rule: string (name of the rule)
args: tuple listing parameters to the rule
"""
self.parent = parent
self.scanner = scanner
self.rule = rule
self.args = args
while scanner.stack: scanner = scanner.stack
self.token = scanner.last_read_token
def __str__(self):
output = ''
if self.parent: output = str(self.parent) + ' > '
output += self.rule
return output
def print_error(err, scanner, max_ctx=None):
"""Print error messages, the parser stack, and the input text -- for human-readable error messages."""
# NOTE: this function assumes 80 columns :-(
# Figure out the line number
pos = err.pos
if not pos:
pos = scanner.get_pos()
file_name, line_number, column_number = pos
print('%s:%d:%d: %s' % (file_name, line_number, column_number, err.msg), file=sys.stderr)
scanner.print_line_with_pointer(pos)
context = err.context
token = None
while context:
print('while parsing %s%s:' % (context.rule, tuple(context.args)), file=sys.stderr)
if context.token:
token = context.token
if token:
scanner.print_line_with_pointer(token.pos, length=len(token.value))
context = context.parent
if max_ctx:
max_ctx = max_ctx-1
if not max_ctx:
break
def wrap_error_reporter(parser, rule, *args,**kw):
try:
return getattr(parser, rule)(*args,**kw)
except SyntaxError as e:
print_error(e, parser._scanner)
except NoMoreTokens:
print('Could not complete parsing; stopped around here:', file=sys.stderr)
print(parser._scanner, file=sys.stderr)
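# Editor's note -- illustrative usage of this runtime (the token names and
# input are invented for the example):
#
#     patterns = [('NUM', r'[0-9]+'), ('PLUS', r'\+'), ('WS', r'\s+')]
#     scanner = Scanner(patterns, ignore={'WS': None}, input='1 + 2')
#     scanner.token(['NUM']).value    # -> '1'
#     scanner.token(['PLUS']).value   # -> '+'  (whitespace was skipped)
#
# A Yapps-generated Parser subclass drives the same scanner indirectly
# through _peek() and _scan().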
| mit |
karan1276/servo | tests/wpt/web-platform-tests/tools/py/py/_code/code.py | 180 | 27436 | import py
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
if sys.version_info[0] >= 3:
from traceback import format_exception_only
else:
from py._code._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = py.code.getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" %(rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a py.code.Source object for the full source file of the code
"""
from py._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a py.code.Source object for the code object's source only
"""
# return source only for that part of code
return py.code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
# handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
argcount += raw.co_flags & CO_VARARGS
argcount += raw.co_flags & CO_VARKEYWORDS
return raw.co_varnames[:argcount]
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = py.code.Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
if self.code.fullsource is None:
return py.code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals )
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
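# Editor's note -- illustrative use of the Frame wrapper above:
#
#     frame = py.code.Frame(sys._getframe())
#     frame.eval('a + b', a=1, b=2)    # -> 3; 'a' and 'b' are injected
#                                      #    into a copy of f_locals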
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry):
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
return py.code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
@property
def statement(self):
""" py.code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
locals = property(getlocals, None, None, "locals of underlying frame")
def reinterpret(self):
"""Reinterpret the failing statement and returns a detailed information
about what operations are performed."""
if self.exprinfo is None:
source = str(self.statement).strip()
x = py.code._reinterpret(source, self.frame, should_fail=True)
if not isinstance(x, str):
raise TypeError("interpret returned non-string %r" % (x,))
self.exprinfo = x
return self.exprinfo
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from py._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
mostly for internal use
"""
try:
return self.frame.f_locals['__tracebackhide__']
except KeyError:
try:
return self.frame.f_globals['__tracebackhide__']
except KeyError:
return False
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except:
line = "???"
return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
def name(self):
return self.frame.code.raw.co_name
name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb):
""" initialize from given python traceback object. """
if hasattr(tb, 'tb_next'):
def f(cur):
while cur is not None:
yield self.Entry(cur)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
by providing any combination of path, lineno and firstlineno, the
first frame to start the returned traceback is determined
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
return Traceback(x._rawentry)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackItem
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackItems which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self))
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self)-1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackItem where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
#XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
#print "checking for recursion at", key
l = cache.setdefault(key, [])
if l:
f = entry.frame
loc = f.f_locals
for otherloc in l:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
return i
l.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
def __init__(self, tup=None, exprinfo=None):
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
if exprinfo is None:
exprinfo = str(tup[1])
if exprinfo and exprinfo.startswith('assert '):
self._striptext = 'AssertionError: '
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (py.code.Traceback instance)
self.traceback = py.code.Traceback(self.tb)
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
py.code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno+1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
in case of style==native, tbfilter and showlocals is ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
py.std.traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return unicode(loc)
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
self.showlocals = showlocals
self.style = style
self.tbfilter = tbfilter
self.funcargs = funcargs
self.abspath = abspath
self.astcache = {}
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source)-1))
except KeyboardInterrupt:
raise
except:
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except:
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
lines = []
if source is None or line_index >= len(source.lines):
source = py.code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index+1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split('\n')
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == '__builtins__':
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
#if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" %(name, str_repr))
#else:
# self._line("%-10s =\\" % (name,))
# # XXX
# py.std.pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
source = self._getentrysource(entry)
if source is None:
source = py.code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" %(entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
recursionindex = None
if excinfo.errisinstance(RuntimeError):
if "maximum recursion depth exceeded" in str(excinfo.value):
recursionindex = traceback.recursionindex()
last = traceback[-1]
entries = []
extraline = None
for index, entry in enumerate(traceback):
einfo = excinfo if last == entry else None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
if index == recursionindex:
extraline = "!!! Recursion detected (same locals & position)"
break
return ReprTraceback(entries, extraline, style=self.style)
def repr_excinfo(self, excinfo):
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
class TerminalRepr:
def __str__(self):
s = self.__unicode__()
if sys.version_info[0] < 3:
s = s.encode('utf-8')
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" %(self.__class__, id(self))
class ReprExceptionInfo(TerminalRepr):
def __init__(self, reprtraceback, reprcrash):
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
last_style = None
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i+1]
if entry.style == "long" or \
entry.style == "short" and next_entry.style == "long":
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
#tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
#tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines),
self.reprlocals,
self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
# using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" %(name, value)
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
oldbuiltins = {}
def patch_builtins(assertion=True, compile=True):
""" put compile and AssertionError builtins to Python's builtins. """
if assertion:
from py._code import assertion
l = oldbuiltins.setdefault('AssertionError', [])
l.append(py.builtin.builtins.AssertionError)
py.builtin.builtins.AssertionError = assertion.AssertionError
if compile:
l = oldbuiltins.setdefault('compile', [])
l.append(py.builtin.builtins.compile)
py.builtin.builtins.compile = py.code.compile
def unpatch_builtins(assertion=True, compile=True):
""" remove compile and AssertionError builtins from Python builtins. """
if assertion:
py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
if compile:
py.builtin.builtins.compile = oldbuiltins['compile'].pop()
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, 'im_func', obj)
obj = getattr(obj, 'func_code', obj)
obj = getattr(obj, 'f_code', obj)
obj = getattr(obj, '__code__', obj)
if trycall and not hasattr(obj, 'co_firstlineno'):
if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, 'co_firstlineno'):
return x
return obj
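# Editor's note -- typical (illustrative) usage of the wrappers above:
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         excinfo = py.code.ExceptionInfo()   # captures sys.exc_info()
#     excinfo.typename                        # -> 'ZeroDivisionError'
#     print(excinfo.getrepr(style='short'))   # formatted traceback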
| mpl-2.0 |
jbuchbinder/youtube-dl | youtube_dl/extractor/rtvnh.py | 17 | 2265 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class RTVNHIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rtvnh.nl/video/131946',
'md5': 'cdbec9f44550763c8afc96050fa747dc',
'info_dict': {
'id': '131946',
'ext': 'mp4',
'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
'thumbnail': 're:^https?:.*\.jpg$'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
meta = self._parse_json(self._download_webpage(
'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id)
status = meta.get('status')
if status != 200:
raise ExtractorError(
'%s returned error code %d' % (self.IE_NAME, status), expected=True)
formats = []
rtmp_formats = self._extract_smil_formats(
'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id)
formats.extend(rtmp_formats)
for rtmp_format in rtmp_formats:
rtmp_url = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
rtsp_format = rtmp_format.copy()
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'url': rtmp_url.replace('rtmp://', 'rtsp://'),
'protocol': 'rtsp',
})
formats.append(rtsp_format)
http_base_url = rtmp_url.replace('rtmp://', 'http://')
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': meta['title'].strip(),
'thumbnail': meta.get('image'),
'formats': formats
}
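# Editor's note -- the derived format URLs above, for an illustrative SMIL
# RTMP entry rtmp://host/app/path:
#
#     rtsp://host/app/path                    (RTSP variant)
#     http://host/app/path/playlist.m3u8      (HLS)
#     http://host/app/path/manifest.f4m       (HDS)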
| unlicense |
ampafdv/ampadb | extraescolars/forms.py | 2 | 4123 | import re
from ampadb.support import Forms
from django import forms
from django.core.exceptions import ValidationError
from .models import Extraescolar, Inscripcio
class _ExtraescolarMeta: # pylint: disable=too-few-public-methods
model = Extraescolar
fields = [
'nom', 'id_interna', 'descripcio_curta', 'descripcio',
'inscripcio_des_de', 'inscripcio_fins_a', 'preu', 'cursos'
]
class ExtraescolarForms: # pylint: disable=too-few-public-methods
class AddForm(Forms.ModelForm):
class Meta(_ExtraescolarMeta):
pass
class EditForm(Forms.ModelForm):
class Meta(_ExtraescolarMeta):
pass
id_interna = forms.CharField(disabled=True, required=False)
# See http://www.interior.gob.es/web/servicios-al-ciudadano/dni/calculo-del-digito-de-control-del-nif-nie # pylint: disable=line-too-long
def validate_dni(dni):
lletres = "TRWAGMYFPDXBNJZSQVHLCKE"
dni_regex = re.compile(r'''
([XYZ]|[0-9]) # X, Y, Z (NIE) or a digit
[0-9]{7}
[ABCDEFGHJKLMNPQRSTVWXYZ] # check letter
''', re.VERBOSE)
if re.fullmatch(dni_regex, dni) is None:
raise ValidationError('El format del DNI no és vàlid')
# NIEs
if dni[0].upper() == 'X':
dni = '0' + dni[1:]
elif dni[0].upper() == 'Y':
dni = '1' + dni[1:]
elif dni[0].upper() == 'Z':
dni = '2' + dni[1:]
num = int(dni[:-1])
if dni[-1] != lletres[num % 23]:
raise ValidationError('No és un DNI vàlid (la lletra no és correcta).')
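# Editor's note -- a worked example of the modulo-23 check digit computed
# above (sample numbers, illustrative only):
#
#     >>> "TRWAGMYFPDXBNJZSQVHLCKE"[12345678 % 23]
#     'Z'     # so 12345678Z is a valid DNI
#     >>> "TRWAGMYFPDXBNJZSQVHLCKE"[1234567 % 23]   # NIE X -> leading 0
#     'L'     # so X1234567L is a valid NIE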
class InscripcioForm(Forms.Form):
dni_tutor_1 = forms.CharField(
label="DNI del tutor 1",
max_length=9,
help_text='DNI o NIE del tutor 1 (no es guardarà).',
validators=[validate_dni])
dni_tutor_2 = forms.CharField(
label="DNI del tutor 2",
max_length=9,
help_text='DNI o NIE del tutor 2 (no es guardarà).',
validators=[validate_dni])
catsalut = forms.CharField(label="Núm. targeta sanitària (Catsalut)")
iban = forms.CharField(
label="IBAN",
required=False,
help_text="Núm. de compte (si cal) (no es guardarà)")
nif_titular = forms.CharField(
label="NIF del titular del compte",
required=False,
help_text="Necessari si cal l'IBAN (no es guardarà)")
drets_imatge = forms.BooleanField(
label="Drets d'imatge",
required=False,
help_text=(
"Segons l’article 18.1 de la Constitució i regulat per la "
"llei 5/1982, de 5 de maig, sobre el dret a l’honor, a la "
"intimitat personal i familiar a la pròpia imatge, en el cas que "
"<strong>NO VOLGUEU</strong> que el vostre fill/a aparegui en "
"fotografies, CD’s o vídeos que es realitzin a les activitats "
"extraescolars cal que marqueu aquesta casella."))
observacions = forms.CharField(
widget=forms.Textarea,
required=False,
help_text="En l’apartat d’observacions poseu qualsevol suggeriment "
"que intentarem tenir en compte.")
def clean(self):
cleaned_data = super().clean()
if cleaned_data.get('iban') and not cleaned_data.get('nif_titular'):
self.add_error(
'nif_titular',
ValidationError(
"S'ha d'introduïr el NIF del titular amb el compte."))
def validate_inscripcio_exists(pk_inscripcio):
try:
pk_inscripcio = int(pk_inscripcio)
except ValueError:
raise ValidationError('Clau invàlida: ' + pk_inscripcio)
if not Inscripcio.objects.filter(pk=pk_inscripcio).exists():
raise ValidationError('No existeix la inscripció ' +
str(pk_inscripcio))
class SearchInscripcioForm(Forms.Form):
q = forms.CharField( # pylint: disable=invalid-name
label='Id inscripció',
validators=[validate_inscripcio_exists])
class InscripcioAdminForm(Forms.ModelForm):
class Meta:
model = Inscripcio
fields = ['confirmat', 'pagat']
| mit |
kushG/osf.io | website/search/share_search.py | 1 | 8112 | from __future__ import unicode_literals
from time import gmtime
from calendar import timegm
from datetime import datetime
import pytz
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from elasticsearch import Elasticsearch
from website import settings
from util import generate_color
share_es = Elasticsearch(
settings.SHARE_ELASTIC_URI,
request_timeout=settings.ELASTIC_TIMEOUT
)
def search(query, raw=False):
# Run the real query and get the results
results = share_es.search(index='share', doc_type=None, body=query)
return results if raw else {
'results': [hit['_source'] for hit in results['hits']['hits']],
'count': results['hits']['total'],
}
def count(query):
if query.get('from') is not None:
del query['from']
if query.get('size') is not None:
del query['size']
count = share_es.count(index='share', body=query)
return {
'results': [],
'count': count['count']
}
def providers():
provider_map = share_es.search(index='share_providers', doc_type=None, body={
'query': {
'match_all': {}
},
'size': 10000
})
return {
'providerMap': {
hit['_source']['short_name']: hit['_source'] for hit in provider_map['hits']['hits']
}
}
def stats(query=None):
query = query or {"query": {"match_all": {}}}
three_months_ago = timegm((datetime.now() + relativedelta(months=-3)).timetuple()) * 1000
query['aggs'] = {
"sources": {
"terms": {
"field": "_type",
"size": 0,
"min_doc_count": 0,
}
},
"doisMissing": {
"filter": {
"missing": {
"field": "id.doi"
}
},
"aggs": {
"sources": {
"terms": {
"field": "_type",
"size": 0
}
}
}
},
"dois": {
"filter": {
"exists": {
"field": "id.doi"
}
},
"aggs": {
"sources": {
"terms": {
"field": "_type",
"size": 0
}
}
}
},
"earlier_documents": {
"filter": {
"range": {
"dateUpdated": {
"lt": three_months_ago
}
}
},
"aggs": {
"sources": {
"terms": {
"field": "_type",
"size": 0,
"min_doc_count": 0
}
}
}
}
}
date_histogram_query = {
'query': {
'filtered': {
'query': query['query'],
'filter': {
'range': {
'dateUpdated': {
'gt': three_months_ago
}
}
}
}
}
}
date_histogram_query['aggs'] = {
"date_chunks": {
"terms": {
"field": "_type",
"size": 0,
"exclude": "of|and|or"
},
"aggs": {
"articles_over_time": {
"date_histogram": {
"field": "dateUpdated",
"interval": "week",
"min_doc_count": 0,
"extended_bounds": {
"min": three_months_ago,
"max": timegm(gmtime()) * 1000
}
}
}
}
}
}
results = share_es.search(index='share', body=query)
date_results = share_es.search(index='share', body=date_histogram_query)
results['aggregations']['date_chunks'] = date_results['aggregations']['date_chunks']
chart_results = data_for_charts(results)
return chart_results
def data_for_charts(elastic_results):
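"""Reshape aggregation results into the column-oriented structure the
front-end charts consume (the 'columns'/'groups'/'colors' layout suggests
a c3.js-style config, though that is an assumption)."""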
source_data = elastic_results['aggregations']['sources']['buckets']
for_charts = {}
## for the donut graph list of many lists, source and count
source_and_counts = [[item['key'], item['doc_count']] for item in source_data]
for_charts['shareDonutGraph'] = source_and_counts
r = generate_color()
stats = {}
colors = {}
for bucket in elastic_results['aggregations']['sources']['buckets']:
stats[bucket['key']] = {
'doc_count': bucket['doc_count'],
}
colors[bucket['key']] = r.next()
for bucket in elastic_results['aggregations']['earlier_documents']['sources']['buckets']:
stats[bucket['key']]['earlier_documents'] = bucket['doc_count']
default_buckets = []
for bucket in elastic_results['aggregations']['date_chunks']['buckets']:
default_buckets = bucket['articles_over_time']['buckets']
stats[bucket['key']]['articles_over_time'] = bucket['articles_over_time']['buckets']
max_len = 0
for key, value in stats.iteritems():
if not stats[key].get('earlier_documents'):
stats[key]['earlier_documents'] = 0
if not stats[key].get('articles_over_time'):
stats[key]['articles_over_time'] = [
{
'key_as_string': item['key_as_string'],
'key': item['key'],
'doc_count': 0
}
for item in default_buckets
]
if len(stats[key]['articles_over_time']) > max_len:
max_len = len(stats[key]['articles_over_time'])
names = ['x']
numbers = [['x']]
for _ in stats[stats.keys()[0]]['articles_over_time']:
numbers[0].append(' ')
for key, value in stats.iteritems():
try:
names.append(key)
x = [item['doc_count'] for item in value['articles_over_time']]
if len(x) < max_len:
x += [0] * (max_len - len(x))
x[0] += stats[key].get('earlier_documents', 0)
numbers.append([key] + [sum(x[0:i + 1]) for i in range(len(x))])
except IndexError:
pass
date_totals = {
'date_numbers': numbers,
'group_names': names
}
for_charts['date_totals'] = date_totals
all_data = {}
all_data['raw_aggregations'] = elastic_results['aggregations']
all_data['charts'] = {
'shareDonutGraph': {
'type': 'donut',
'columns': for_charts['shareDonutGraph'],
'colors': colors
},
'shareTimeGraph': {
'x': 'x',
'type': 'area-spline',
'columns': for_charts['date_totals']['date_numbers'],
'groups': [for_charts['date_totals']['group_names']],
'colors': colors
}
}
return all_data
def to_atom(result):
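"""Map a single SHARE search hit to the dict used when rendering an Atom feed entry."""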
return {
'title': result.get('title') or 'No title provided.',
'summary': result.get('description') or 'No summary provided.',
'id': result['id']['url'],
'updated': get_date_updated(result),
'links': [
{'href': result['id']['url'], 'rel': 'alternate'}
],
'author': format_contributors_for_atom(result['contributors']),
'categories': [{"term": tag} for tag in result.get('tags')],
'published': parse(result.get('dateUpdated'))
}
def format_contributors_for_atom(contributors_list):
return [
{
'name': '{} {}'.format(entry['given'], entry['family'])
}
for entry in contributors_list
]
def get_date_updated(result):
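# pytz.utc.localize() raises ValueError when the parsed datetime is
# already timezone-aware; in that case the parsed value is used as-is.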
try:
updated = pytz.utc.localize(parse(result.get('dateUpdated')))
except ValueError:
updated = parse(result.get('dateUpdated'))
return updated
| apache-2.0 |
fkie-cad/FACT_core | src/init_database.py | 1 | 1426 | #!/usr/bin/python3
'''
Firmware Analysis and Comparison Tool (FACT)
Copyright (C) 2015-2018 Fraunhofer FKIE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import sys
from storage.MongoMgr import MongoMgr
from helperFunctions.program_setup import program_setup
PROGRAM_NAME = 'FACT Database Initializer'
PROGRAM_DESCRIPTION = 'Initialize authentication and users for FACT\'s Database'
def main(command_line_options=sys.argv):
_, config = program_setup(PROGRAM_NAME, PROGRAM_DESCRIPTION, command_line_options=command_line_options)
logging.info('Trying to start Mongo Server and initializing users...')
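# Connect without authentication so the initial users can be created.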
mongo_manager = MongoMgr(config=config, auth=False)
mongo_manager.init_users()
mongo_manager.shutdown()
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
kanteshraj/ansible | lib/ansible/plugins/lookup/shelvefile.py | 132 | 2906 | # (c) 2015, Alejandro Guirao <lekumberri@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import shelve
import os
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def read_shelve(self, shelve_filename, key):
"""
Read the value of "key" from a shelve file
"""
d = shelve.open(shelve_filename)
res = d.get(key, None)
d.close()
return res
def run(self, terms, variables=None, **kwargs):
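# Each term is expected to be of the form "file=<shelve file> key=<key>";
# both parameters are mandatory.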
if not isinstance(terms, list):
terms = [terms]
ret = []
for term in terms:
playbook_path = None
relative_path = None
paramvals = {"file": None, "key": None}
params = term.split()
try:
for param in params:
name, value = param.split('=')
assert(name in paramvals)
paramvals[name] = value
except (ValueError, AssertionError) as e:
# In case "file" or "key" are not present
raise AnsibleError(e)
file = paramvals['file']
key = paramvals['key']
basedir_path = self._loader.path_dwim(file)
# Search also in the role/files directory and in the playbook directory
if 'role_path' in variables:
relative_path = self._loader.path_dwim_relative(variables['role_path'], 'files', file)
if 'playbook_dir' in variables:
playbook_path = self._loader.path_dwim_relative(variables['playbook_dir'],'files', file)
for path in (basedir_path, relative_path, playbook_path):
if path and os.path.exists(path):
res = self.read_shelve(path, key)
if res is None:
raise AnsibleError("Key %s not found in shelve file %s" % (key, file))
# Convert the value read to string
ret.append(str(res))
break
else:
raise AnsibleError("Could not locate shelve file in lookup: %s" % file)
return ret
| gpl-3.0 |
9p0le/simiki | tests/test_parse_config.py | 1 | 1923 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os.path
import unittest
import datetime
from simiki.config import parse_config, get_default_config
test_path = os.path.dirname(os.path.abspath(__file__))
class TestParseConfig(unittest.TestCase):
def setUp(self):
wiki_path = os.path.join(test_path, 'mywiki_for_others')
self.expected_config = get_default_config()
self.expected_config.update({
"author": "Tanky Woo",
"debug": True,
"default_ext": "markdown",
"description": "This is a simiki's config sample, \u6d4b\u8bd5\u6837\u4f8b",
"destination": "destination",
"keywords": "wiki, simiki, python, \u7ef4\u57fa",
"root": "/wiki/",
"source": "source",
"attach": "attach",
"theme": "mytheme",
"themes_dir": "simiki_themes",
"title": "\u6211\u7684Wiki",
"url": "http://wiki.tankywoo.com"
})
self.config_file = os.path.join(wiki_path, "config_sample.yml")
def test_parse_config(self):
config = parse_config(self.config_file)
self.expected_config.pop('time')
_date = config.pop('time')
if hasattr(unittest.TestCase, 'assertIsInstance'):
self.assertIsInstance(_date, datetime.datetime)
else:
assert isinstance(_date, datetime.datetime), \
'%s is not an instance of %r' % \
(repr(_date), datetime.datetime)
self.assertEqual(
config,
self.expected_config
)
def test_parse_config_not_exist(self):
not_exist_config_file = os.path.join(self.config_file, "not_exist")
self.assertRaises(Exception,
lambda: parse_config(not_exist_config_file))
if __name__ == "__main__":
unittest.main()
| mit |
kjc88/sl4a | python/src/Lib/lib-tk/tkFont.py | 146 | 6104 | # Tkinter font wrapper
#
# written by Fredrik Lundh, February 1998
#
# FIXME: should add 'displayof' option where relevant (actual, families,
# measure, and metrics)
#
__version__ = "0.9"
import Tkinter
# weight/slant
NORMAL = "normal"
ROMAN = "roman"
BOLD = "bold"
ITALIC = "italic"
def nametofont(name):
"""Given the name of a tk named font, returns a Font representation.
"""
return Font(name=name, exists=True)
class Font:
"""Represents a named font.
Constructor options are:
font -- font specifier (name, system font, or (family, size, style)-tuple)
name -- name to use for this font configuration (defaults to a unique name)
exists -- does a named font by this name already exist?
Creates a new named font if False, points to the existing font if True.
Raises _Tkinter.TclError if the assertion is false.
the following are ignored if font is specified:
family -- font 'family', e.g. Courier, Times, Helvetica
size -- font size in points
weight -- font thickness: NORMAL, BOLD
slant -- font slant: ROMAN, ITALIC
underline -- font underlining: false (0), true (1)
overstrike -- font strikeout: false (0), true (1)
"""
def _set(self, kw):
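# Convert an options dict into the flat ("-option", "value", ...) tuple
# form expected by the Tk 'font' command.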
options = []
for k, v in kw.items():
options.append("-"+k)
options.append(str(v))
return tuple(options)
def _get(self, args):
options = []
for k in args:
options.append("-"+k)
return tuple(options)
def _mkdict(self, args):
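# Convert a flat ("-option", "value", ...) sequence back into a dict,
# stripping the leading dash from each option name.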
options = {}
for i in range(0, len(args), 2):
options[args[i][1:]] = args[i+1]
return options
def __init__(self, root=None, font=None, name=None, exists=False, **options):
if not root:
root = Tkinter._default_root
if font:
# get actual settings corresponding to the given font
font = root.tk.splitlist(root.tk.call("font", "actual", font))
else:
font = self._set(options)
if not name:
name = "font" + str(id(self))
self.name = name
if exists:
self.delete_font = False
# confirm font exists
if self.name not in root.tk.call("font", "names"):
raise Tkinter._tkinter.TclError, "named font %s does not already exist" % (self.name,)
# if font config info supplied, apply it
if font:
root.tk.call("font", "configure", self.name, *font)
else:
# create new font (raises TclError if the font exists)
root.tk.call("font", "create", self.name, *font)
self.delete_font = True
# backlinks!
self._root = root
self._split = root.tk.splitlist
self._call = root.tk.call
def __str__(self):
return self.name
def __eq__(self, other):
return isinstance(other, Font) and self.name == other.name
def __getitem__(self, key):
return self.cget(key)
def __setitem__(self, key, value):
self.configure(**{key: value})
def __del__(self):
try:
if self.delete_font:
self._call("font", "delete", self.name)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
def copy(self):
"Return a distinct copy of the current font"
return Font(self._root, **self.actual())
def actual(self, option=None):
"Return actual font attributes"
if option:
return self._call("font", "actual", self.name, "-"+option)
else:
return self._mkdict(
self._split(self._call("font", "actual", self.name))
)
def cget(self, option):
"Get font attribute"
return self._call("font", "config", self.name, "-"+option)
def config(self, **options):
"Modify font attributes"
if options:
self._call("font", "config", self.name,
*self._set(options))
else:
return self._mkdict(
self._split(self._call("font", "config", self.name))
)
configure = config
def measure(self, text):
"Return text width"
return int(self._call("font", "measure", self.name, text))
def metrics(self, *options):
"""Return font metrics.
For best performance, create a dummy widget
using this font before calling this method."""
if options:
return int(
self._call("font", "metrics", self.name, self._get(options))
)
else:
res = self._split(self._call("font", "metrics", self.name))
options = {}
for i in range(0, len(res), 2):
options[res[i][1:]] = int(res[i+1])
return options
def families(root=None):
"Get font families (as a tuple)"
if not root:
root = Tkinter._default_root
return root.tk.splitlist(root.tk.call("font", "families"))
def names(root=None):
"Get names of defined fonts (as a tuple)"
if not root:
root = Tkinter._default_root
return root.tk.splitlist(root.tk.call("font", "names"))
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
root = Tkinter.Tk()
# create a font
f = Font(family="times", size=30, weight=NORMAL)
print f.actual()
print f.actual("family")
print f.actual("weight")
print f.config()
print f.cget("family")
print f.cget("weight")
print names()
print f.measure("hello"), f.metrics("linespace")
print f.metrics()
f = Font(font=("Courier", 20, "bold"))
print f.measure("hello"), f.metrics("linespace")
w = Tkinter.Label(root, text="Hello, world", font=f)
w.pack()
w = Tkinter.Button(root, text="Quit!", command=root.destroy)
w.pack()
fb = Font(font=w["font"]).copy()
fb.config(weight=BOLD)
w.config(font=fb)
Tkinter.mainloop()
| apache-2.0 |
nuwainfo/treeio | events/forms.py | 1 | 5581 | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Events module forms
"""
from django import forms
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from treeio.events.models import Event
from treeio.core.models import Object, Location
from treeio.core.decorators import preprocess_form
import datetime
preprocess_form()
class MassActionForm(forms.Form):
""" Mass action form for Reports """
delete = forms.ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')),
('trash', _('Move to Trash'))), required=False)
instance = None
def __init__(self, user, *args, **kwargs):
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(MassActionForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
"Process form"
if self.instance:
if self.is_valid():
if self.cleaned_data['delete']:
if self.cleaned_data['delete'] == 'delete':
self.instance.delete()
if self.cleaned_data['delete'] == 'trash':
self.instance.trash = True
self.instance.save()
class EventForm(forms.ModelForm):
""" Event form """
def _set_initial(self, field, value):
"Sets initial value"
def __init__(self, user=None, date=None, hour=None, *args, **kwargs):
super(EventForm, self).__init__(*args, **kwargs)
self.fields['name'].label = _('Title')
self.fields['name'].widget = forms.TextInput(attrs={'size': '30'})
self.fields['location'].queryset = Object.filter_permitted(
user, Location.objects, mode='x')
self.fields['location'].widget.attrs.update(
{'popuplink': reverse('identities_location_add')})
self.fields['location'].widget.attrs.update({'class': 'autocomplete',
'callback': reverse('identities_ajax_location_lookup')})
self.fields['location'].label = _("Location")
self.fields['start'].label = _("Start")
self.fields['end'].label = _("End")
self.fields['details'].label = _("Details")
self.fields['details'].widget.attrs.update({'class': 'full-editor'})
if date:
rdate = None
try:
rdate = datetime.datetime.strptime(date, "%Y-%m-%d")
if hour:
hour = int(hour)
else:
hour = 12
rdate = datetime.datetime(year=rdate.year,
month=rdate.month,
day=rdate.day,
hour=hour)
self.fields['end'].initial = rdate
except ValueError:
pass
# Set datepicker
self.fields['start'].widget.attrs.update({'class': 'datetimepicker'})
self.fields['end'].widget.attrs.update({'class': 'datetimepicker'})
if self.fields['start'].initial:
self.fields['start'].widget.attrs.update(
{'initial': self.fields['start'].initial.strftime('%s')})
if self.fields['end'].initial:
self.fields['end'].widget.attrs.update(
{'initial': self.fields['end'].initial.strftime('%s')})
if 'instance' in kwargs:
instance = kwargs['instance']
if instance.start:
self.fields['start'].widget.attrs.update(
{'initial': instance.start.strftime('%s')})
if instance.end:
self.fields['end'].widget.attrs.update(
{'initial': instance.end.strftime('%s')})
def clean_end(self):
"Make sure end date is greater than start date, when specified"
start = self.cleaned_data.get('start')
end = self.cleaned_data.get('end')
if start and end and end < start:
raise forms.ValidationError(
_("End date can not be before the start date"))
return end
class Meta:
"Event"
model = Event
fields = ('name', 'location', 'start', 'end', 'details')
class GoToDateForm(forms.Form):
""" Go to date form definition """
def __init__(self, date, *args, **kwargs):
super(GoToDateForm, self).__init__(*args, **kwargs)
self.fields['goto'] = forms.DateField(
label=_("Go to date"), required=False)
self.fields['goto'].widget.attrs.update({'class': 'datepicker'})
class FilterForm(forms.Form):
""" Filters for Events """
def __init__(self, *args, **kwargs):
super(FilterForm, self).__init__(*args, **kwargs)
self.fields['datefrom'] = forms.DateField(label=_("Date From"))
self.fields['datefrom'].widget.attrs.update({'class': 'datepicker'})
self.fields['dateto'] = forms.DateField(label=_("Date To"))
self.fields['dateto'].widget.attrs.update({'class': 'datepicker'})
def clean_dateto(self):
"Clean date_to"
datefrom = self.cleaned_data.get('datefrom')
dateto = self.cleaned_data.get('dateto')
if datefrom and dateto and dateto < datefrom:
raise forms.ValidationError(
_("From date can not be greater than To date."))
return dateto
| mit |
up2wing/fox-qemu-comment | qemu-2.2.0/scripts/qapi-commands.py | 73 | 12372 | #
# QAPI command marshaller generator
#
# Copyright IBM, Corp. 2011
# Copyright (C) 2014 Red Hat, Inc.
#
# Authors:
# Anthony Liguori <aliguori@us.ibm.com>
# Michael Roth <mdroth@linux.vnet.ibm.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from ordereddict import OrderedDict
from qapi import *
import re
import sys
import os
import getopt
import errno
def type_visitor(name):
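# A list type is represented as a one-element list holding the element
# type name; it maps to the generated visit_type_<T>List visitor.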
if type(name) == list:
return 'visit_type_%sList' % name[0]
else:
return 'visit_type_%s' % name
def generate_command_decl(name, args, ret_type):
arglist=""
for argname, argtype, optional, structured in parse_args(args):
argtype = c_type(argtype, is_param=True)
if optional:
arglist += "bool has_%s, " % c_var(argname)
arglist += "%s %s, " % (argtype, c_var(argname))
return mcgen('''
%(ret_type)s qmp_%(name)s(%(args)sError **errp);
''',
ret_type=c_type(ret_type), name=c_fun(name), args=arglist).strip()
def gen_err_check(errvar):
if errvar:
return mcgen('''
if (local_err) {
goto out;
}
''')
return ''
def gen_sync_call(name, args, ret_type, indent=0):
ret = ""
arglist=""
retval=""
if ret_type:
retval = "retval = "
for argname, argtype, optional, structured in parse_args(args):
if optional:
arglist += "has_%s, " % c_var(argname)
arglist += "%s, " % (c_var(argname))
push_indent(indent)
ret = mcgen('''
%(retval)sqmp_%(name)s(%(args)s&local_err);
''',
name=c_fun(name), args=arglist, retval=retval).rstrip()
if ret_type:
ret += "\n" + gen_err_check('local_err')
ret += "\n" + mcgen(''''
%(marshal_output_call)s
''',
marshal_output_call=gen_marshal_output_call(name, ret_type)).rstrip()
pop_indent(indent)
return ret.rstrip()
def gen_marshal_output_call(name, ret_type):
if not ret_type:
return ""
return "qmp_marshal_output_%s(retval, ret, &local_err);" % c_fun(name)
def gen_visitor_input_containers_decl(args, obj):
ret = ""
push_indent()
if len(args) > 0:
ret += mcgen('''
QmpInputVisitor *mi = qmp_input_visitor_new_strict(%(obj)s);
QapiDeallocVisitor *md;
Visitor *v;
''',
obj=obj)
pop_indent()
return ret.rstrip()
def gen_visitor_input_vars_decl(args):
ret = ""
push_indent()
for argname, argtype, optional, structured in parse_args(args):
if optional:
ret += mcgen('''
bool has_%(argname)s = false;
''',
argname=c_var(argname))
if is_c_ptr(argtype):
ret += mcgen('''
%(argtype)s %(argname)s = NULL;
''',
argname=c_var(argname), argtype=c_type(argtype))
else:
ret += mcgen('''
%(argtype)s %(argname)s = {0};
''',
argname=c_var(argname), argtype=c_type(argtype))
pop_indent()
return ret.rstrip()
def gen_visitor_input_block(args, dealloc=False):
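# Generate the code that visits each argument: with dealloc=False the QMP
# input visitor decodes them (propagating errors); with dealloc=True a
# dealloc visitor frees them and errors are ignored.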
ret = ""
errparg = '&local_err'
errarg = 'local_err'
if len(args) == 0:
return ret
push_indent()
if dealloc:
errparg = 'NULL'
errarg = None
ret += mcgen('''
qmp_input_visitor_cleanup(mi);
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
''')
else:
ret += mcgen('''
v = qmp_input_get_visitor(mi);
''')
for argname, argtype, optional, structured in parse_args(args):
if optional:
ret += mcgen('''
visit_optional(v, &has_%(c_name)s, "%(name)s", %(errp)s);
''',
c_name=c_var(argname), name=argname, errp=errparg)
ret += gen_err_check(errarg)
ret += mcgen('''
if (has_%(c_name)s) {
''',
c_name=c_var(argname))
push_indent()
ret += mcgen('''
%(visitor)s(v, &%(c_name)s, "%(name)s", %(errp)s);
''',
c_name=c_var(argname), name=argname, argtype=argtype,
visitor=type_visitor(argtype), errp=errparg)
ret += gen_err_check(errarg)
if optional:
pop_indent()
ret += mcgen('''
}
''')
if dealloc:
ret += mcgen('''
qapi_dealloc_visitor_cleanup(md);
''')
pop_indent()
return ret.rstrip()
def gen_marshal_output(name, args, ret_type, middle_mode):
if not ret_type:
return ""
ret = mcgen('''
static void qmp_marshal_output_%(c_name)s(%(c_ret_type)s ret_in, QObject **ret_out, Error **errp)
{
Error *local_err = NULL;
QmpOutputVisitor *mo = qmp_output_visitor_new();
QapiDeallocVisitor *md;
Visitor *v;
v = qmp_output_get_visitor(mo);
%(visitor)s(v, &ret_in, "unused", &local_err);
if (local_err) {
goto out;
}
*ret_out = qmp_output_get_qobject(mo);
out:
error_propagate(errp, local_err);
qmp_output_visitor_cleanup(mo);
md = qapi_dealloc_visitor_new();
v = qapi_dealloc_get_visitor(md);
%(visitor)s(v, &ret_in, "unused", NULL);
qapi_dealloc_visitor_cleanup(md);
}
''',
c_ret_type=c_type(ret_type), c_name=c_fun(name),
visitor=type_visitor(ret_type))
return ret
def gen_marshal_input_decl(name, args, ret_type, middle_mode):
if middle_mode:
return 'int qmp_marshal_input_%s(Monitor *mon, const QDict *qdict, QObject **ret)' % c_fun(name)
else:
return 'static void qmp_marshal_input_%s(QDict *args, QObject **ret, Error **errp)' % c_fun(name)
def gen_marshal_input(name, args, ret_type, middle_mode):
hdr = gen_marshal_input_decl(name, args, ret_type, middle_mode)
ret = mcgen('''
%(header)s
{
Error *local_err = NULL;
''',
header=hdr)
if middle_mode:
ret += mcgen('''
QDict *args = (QDict *)qdict;
''')
if ret_type:
if is_c_ptr(ret_type):
retval = " %s retval = NULL;" % c_type(ret_type)
else:
retval = " %s retval;" % c_type(ret_type)
ret += mcgen('''
%(retval)s
''',
retval=retval)
if len(args) > 0:
ret += mcgen('''
%(visitor_input_containers_decl)s
%(visitor_input_vars_decl)s
%(visitor_input_block)s
''',
visitor_input_containers_decl=gen_visitor_input_containers_decl(args, "QOBJECT(args)"),
visitor_input_vars_decl=gen_visitor_input_vars_decl(args),
visitor_input_block=gen_visitor_input_block(args))
else:
ret += mcgen('''
(void)args;
''')
ret += mcgen('''
%(sync_call)s
''',
sync_call=gen_sync_call(name, args, ret_type, indent=4))
if re.search('^ *goto out\\;', ret, re.MULTILINE):
ret += mcgen('''
out:
''')
if not middle_mode:
ret += mcgen('''
error_propagate(errp, local_err);
''')
ret += mcgen('''
%(visitor_input_block_cleanup)s
''',
visitor_input_block_cleanup=gen_visitor_input_block(args,
dealloc=True))
if middle_mode:
ret += mcgen('''
if (local_err) {
qerror_report_err(local_err);
error_free(local_err);
return -1;
}
return 0;
''')
else:
ret += mcgen('''
return;
''')
ret += mcgen('''
}
''')
return ret
def option_value_matches(opt, val, cmd):
if opt in cmd and cmd[opt] == val:
return True
return False
def gen_registry(commands):
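# Emit a qmp_register_command() call per command plus the qapi_init()
# constructor hook that performs the registration at startup.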
registry=""
push_indent()
for cmd in commands:
options = 'QCO_NO_OPTIONS'
if option_value_matches('success-response', 'no', cmd):
options = 'QCO_NO_SUCCESS_RESP'
registry += mcgen('''
qmp_register_command("%(name)s", qmp_marshal_input_%(c_name)s, %(opts)s);
''',
name=cmd['command'], c_name=c_fun(cmd['command']),
opts=options)
pop_indent()
ret = mcgen('''
static void qmp_init_marshal(void)
{
%(registry)s
}
qapi_init(qmp_init_marshal);
''',
registry=registry.rstrip())
return ret
def gen_command_decl_prologue(header, guard, prefix=""):
ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QAPI function prototypes
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#ifndef %(guard)s
#define %(guard)s
#include "%(prefix)sqapi-types.h"
#include "qapi/qmp/qdict.h"
#include "qapi/error.h"
''',
header=basename(header), guard=guardname(header), prefix=prefix)
return ret
def gen_command_def_prologue(prefix="", proxy=False):
ret = mcgen('''
/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT MODIFY */
/*
* schema-defined QMP->QAPI command dispatch
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
#include "qemu-common.h"
#include "qemu/module.h"
#include "qapi/qmp/qerror.h"
#include "qapi/qmp/types.h"
#include "qapi/qmp/dispatch.h"
#include "qapi/visitor.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-input-visitor.h"
#include "qapi/dealloc-visitor.h"
#include "%(prefix)sqapi-types.h"
#include "%(prefix)sqapi-visit.h"
''',
prefix=prefix)
if not proxy:
ret += '#include "%sqmp-commands.h"' % prefix
return ret + "\n\n"
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "chp:i:o:m",
["source", "header", "prefix=",
"input-file=", "output-dir=",
"type=", "middle"])
except getopt.GetoptError, err:
print str(err)
sys.exit(1)
output_dir = ""
prefix = ""
dispatch_type = "sync"
c_file = 'qmp-marshal.c'
h_file = 'qmp-commands.h'
middle_mode = False
do_c = False
do_h = False
for o, a in opts:
if o in ("-p", "--prefix"):
prefix = a
elif o in ("-i", "--input-file"):
input_file = a
elif o in ("-o", "--output-dir"):
output_dir = a + "/"
elif o in ("-t", "--type"):
dispatch_type = a
elif o in ("-m", "--middle"):
middle_mode = True
elif o in ("-c", "--source"):
do_c = True
elif o in ("-h", "--header"):
do_h = True
if not do_c and not do_h:
do_c = True
do_h = True
c_file = output_dir + prefix + c_file
h_file = output_dir + prefix + h_file
def maybe_open(really, name, opt):
if really:
return open(name, opt)
else:
import StringIO
return StringIO.StringIO()
try:
os.makedirs(output_dir)
except os.error, e:
if e.errno != errno.EEXIST:
raise
exprs = parse_schema(input_file)
commands = filter(lambda expr: expr.has_key('command'), exprs)
commands = filter(lambda expr: not expr.has_key('gen'), commands)
if dispatch_type == "sync":
fdecl = maybe_open(do_h, h_file, 'w')
fdef = maybe_open(do_c, c_file, 'w')
ret = gen_command_decl_prologue(header=basename(h_file), guard=guardname(h_file), prefix=prefix)
fdecl.write(ret)
ret = gen_command_def_prologue(prefix=prefix)
fdef.write(ret)
for cmd in commands:
arglist = []
ret_type = None
if cmd.has_key('data'):
arglist = cmd['data']
if cmd.has_key('returns'):
ret_type = cmd['returns']
ret = generate_command_decl(cmd['command'], arglist, ret_type) + "\n"
fdecl.write(ret)
if ret_type:
ret = gen_marshal_output(cmd['command'], arglist, ret_type, middle_mode) + "\n"
fdef.write(ret)
if middle_mode:
fdecl.write('%s;\n' % gen_marshal_input_decl(cmd['command'], arglist, ret_type, middle_mode))
ret = gen_marshal_input(cmd['command'], arglist, ret_type, middle_mode) + "\n"
fdef.write(ret)
fdecl.write("\n#endif\n");
if not middle_mode:
ret = gen_registry(commands)
fdef.write(ret)
fdef.flush()
fdef.close()
fdecl.flush()
fdecl.close()
| gpl-2.0 |
likaiwalkman/phantomjs | src/qt/qtwebkit/Source/WebKit2/Scripts/webkit2/messages_unittest.py | 115 | 31899 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from StringIO import StringIO
import messages
import parser
_messages_file_contents = """# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "config.h"
#if ENABLE(WEBKIT2)
messages -> WebPage LegacyReceiver {
LoadURL(WTF::String url)
#if ENABLE(TOUCH_EVENTS)
TouchEvent(WebKit::WebTouchEvent event)
#endif
DidReceivePolicyDecision(uint64_t frameID, uint64_t listenerID, uint32_t policyAction)
Close()
PreferencesDidChange(WebKit::WebPreferencesStore store)
SendDoubleAndFloat(double d, float f)
SendInts(Vector<uint64_t> ints, Vector<Vector<uint64_t>> intVectors)
CreatePlugin(uint64_t pluginInstanceID, WebKit::Plugin::Parameters parameters) -> (bool result)
RunJavaScriptAlert(uint64_t frameID, WTF::String message) -> ()
GetPlugins(bool refresh) -> (Vector<WebCore::PluginInfo> plugins)
GetPluginProcessConnection(WTF::String pluginPath) -> (CoreIPC::Connection::Handle connectionHandle) Delayed
TestMultipleAttributes() -> () WantsConnection Delayed
TestParameterAttributes([AttributeOne AttributeTwo] uint64_t foo, double bar, [AttributeThree] double baz)
TemplateTest(WTF::HashMap<String, std::pair<String, uint64_t>> a)
#if PLATFORM(MAC)
DidCreateWebProcessConnection(CoreIPC::MachPort connectionIdentifier)
#endif
#if PLATFORM(MAC)
# Keyboard support
InterpretKeyEvent(uint32_t type) -> (Vector<WebCore::KeypressCommand> commandName)
#endif
#if ENABLE(DEPRECATED_FEATURE)
DeprecatedOperation(CoreIPC::DummyType dummy)
#endif
#if ENABLE(EXPERIMENTAL_FEATURE)
ExperimentalOperation(CoreIPC::DummyType dummy)
#endif
}
#endif
"""
_expected_results = {
'name': 'WebPage',
'conditions': ('ENABLE(WEBKIT2)'),
'messages': (
{
'name': 'LoadURL',
'parameters': (
('WTF::String', 'url'),
),
'conditions': (None),
},
{
'name': 'TouchEvent',
'parameters': (
('WebKit::WebTouchEvent', 'event'),
),
'conditions': ('ENABLE(TOUCH_EVENTS)'),
},
{
'name': 'DidReceivePolicyDecision',
'parameters': (
('uint64_t', 'frameID'),
('uint64_t', 'listenerID'),
('uint32_t', 'policyAction'),
),
'conditions': (None),
},
{
'name': 'Close',
'parameters': (),
'conditions': (None),
},
{
'name': 'PreferencesDidChange',
'parameters': (
('WebKit::WebPreferencesStore', 'store'),
),
'conditions': (None),
},
{
'name': 'SendDoubleAndFloat',
'parameters': (
('double', 'd'),
('float', 'f'),
),
'conditions': (None),
},
{
'name': 'SendInts',
'parameters': (
('Vector<uint64_t>', 'ints'),
('Vector<Vector<uint64_t>>', 'intVectors')
),
'conditions': (None),
},
{
'name': 'CreatePlugin',
'parameters': (
('uint64_t', 'pluginInstanceID'),
('WebKit::Plugin::Parameters', 'parameters')
),
'reply_parameters': (
('bool', 'result'),
),
'conditions': (None),
},
{
'name': 'RunJavaScriptAlert',
'parameters': (
('uint64_t', 'frameID'),
('WTF::String', 'message')
),
'reply_parameters': (),
'conditions': (None),
},
{
'name': 'GetPlugins',
'parameters': (
('bool', 'refresh'),
),
'reply_parameters': (
('Vector<WebCore::PluginInfo>', 'plugins'),
),
'conditions': (None),
},
{
'name': 'GetPluginProcessConnection',
'parameters': (
('WTF::String', 'pluginPath'),
),
'reply_parameters': (
('CoreIPC::Connection::Handle', 'connectionHandle'),
),
'conditions': (None),
},
{
'name': 'TestMultipleAttributes',
'parameters': (
),
'reply_parameters': (
),
'conditions': (None),
},
{
'name': 'TestParameterAttributes',
'parameters': (
('uint64_t', 'foo', ('AttributeOne', 'AttributeTwo')),
('double', 'bar'),
('double', 'baz', ('AttributeThree',)),
),
'conditions': (None),
},
{
'name': 'TemplateTest',
'parameters': (
('WTF::HashMap<String, std::pair<String, uint64_t>>', 'a'),
),
'conditions': (None),
},
{
'name': 'DidCreateWebProcessConnection',
'parameters': (
('CoreIPC::MachPort', 'connectionIdentifier'),
),
'conditions': ('PLATFORM(MAC)'),
},
{
'name': 'InterpretKeyEvent',
'parameters': (
('uint32_t', 'type'),
),
'reply_parameters': (
('Vector<WebCore::KeypressCommand>', 'commandName'),
),
'conditions': ('PLATFORM(MAC)'),
},
{
'name': 'DeprecatedOperation',
'parameters': (
('CoreIPC::DummyType', 'dummy'),
),
'conditions': ('ENABLE(DEPRECATED_FEATURE)'),
},
{
'name': 'ExperimentalOperation',
'parameters': (
('CoreIPC::DummyType', 'dummy'),
),
'conditions': ('ENABLE(EXPERIMENTAL_FEATURE)'),
}
),
}
class MessagesTest(unittest.TestCase):
def setUp(self):
self.receiver = parser.parse(StringIO(_messages_file_contents))
class ParsingTest(MessagesTest):
def check_message(self, message, expected_message):
self.assertEquals(message.name, expected_message['name'])
self.assertEquals(len(message.parameters), len(expected_message['parameters']))
for index, parameter in enumerate(message.parameters):
expected_parameter = expected_message['parameters'][index]
self.assertEquals(parameter.type, expected_parameter[0])
self.assertEquals(parameter.name, expected_parameter[1])
if len(expected_parameter) > 2:
self.assertEquals(parameter.attributes, frozenset(expected_parameter[2]))
for attribute in expected_parameter[2]:
self.assertTrue(parameter.has_attribute(attribute))
else:
self.assertEquals(parameter.attributes, frozenset())
if message.reply_parameters != None:
for index, parameter in enumerate(message.reply_parameters):
self.assertEquals(parameter.type, expected_message['reply_parameters'][index][0])
self.assertEquals(parameter.name, expected_message['reply_parameters'][index][1])
else:
self.assertFalse('reply_parameters' in expected_message)
self.assertEquals(message.condition, expected_message['conditions'])
def test_receiver(self):
"""Receiver should be parsed as expected"""
self.assertEquals(self.receiver.name, _expected_results['name'])
self.assertEquals(self.receiver.condition, _expected_results['conditions'])
self.assertEquals(len(self.receiver.messages), len(_expected_results['messages']))
for index, message in enumerate(self.receiver.messages):
self.check_message(message, _expected_results['messages'][index])
_expected_header = """/*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef WebPageMessages_h
#define WebPageMessages_h
#if ENABLE(WEBKIT2)
#include "Arguments.h"
#include "Connection.h"
#include "MessageEncoder.h"
#include "Plugin.h"
#include "StringReference.h"
#include <WebCore/KeyboardEvent.h>
#include <WebCore/PluginData.h>
#include <utility>
#include <wtf/HashMap.h>
#include <wtf/ThreadSafeRefCounted.h>
#include <wtf/Vector.h>
namespace CoreIPC {
class Connection;
class DummyType;
class MachPort;
}
namespace WTF {
class String;
}
namespace WebKit {
struct WebPreferencesStore;
class WebTouchEvent;
}
namespace Messages {
namespace WebPage {
static inline CoreIPC::StringReference messageReceiverName()
{
return CoreIPC::StringReference("WebPage");
}
struct LoadURL : CoreIPC::Arguments1<const WTF::String&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("LoadURL"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const WTF::String&> DecodeType;
explicit LoadURL(const WTF::String& url)
: CoreIPC::Arguments1<const WTF::String&>(url)
{
}
};
#if ENABLE(TOUCH_EVENTS)
struct TouchEvent : CoreIPC::Arguments1<const WebKit::WebTouchEvent&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("TouchEvent"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const WebKit::WebTouchEvent&> DecodeType;
explicit TouchEvent(const WebKit::WebTouchEvent& event)
: CoreIPC::Arguments1<const WebKit::WebTouchEvent&>(event)
{
}
};
#endif
struct DidReceivePolicyDecision : CoreIPC::Arguments3<uint64_t, uint64_t, uint32_t> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("DidReceivePolicyDecision"); }
static const bool isSync = false;
typedef CoreIPC::Arguments3<uint64_t, uint64_t, uint32_t> DecodeType;
DidReceivePolicyDecision(uint64_t frameID, uint64_t listenerID, uint32_t policyAction)
: CoreIPC::Arguments3<uint64_t, uint64_t, uint32_t>(frameID, listenerID, policyAction)
{
}
};
struct Close : CoreIPC::Arguments0 {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("Close"); }
static const bool isSync = false;
typedef CoreIPC::Arguments0 DecodeType;
};
struct PreferencesDidChange : CoreIPC::Arguments1<const WebKit::WebPreferencesStore&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("PreferencesDidChange"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const WebKit::WebPreferencesStore&> DecodeType;
explicit PreferencesDidChange(const WebKit::WebPreferencesStore& store)
: CoreIPC::Arguments1<const WebKit::WebPreferencesStore&>(store)
{
}
};
struct SendDoubleAndFloat : CoreIPC::Arguments2<double, float> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("SendDoubleAndFloat"); }
static const bool isSync = false;
typedef CoreIPC::Arguments2<double, float> DecodeType;
SendDoubleAndFloat(double d, float f)
: CoreIPC::Arguments2<double, float>(d, f)
{
}
};
struct SendInts : CoreIPC::Arguments2<const Vector<uint64_t>&, const Vector<Vector<uint64_t>>&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("SendInts"); }
static const bool isSync = false;
typedef CoreIPC::Arguments2<const Vector<uint64_t>&, const Vector<Vector<uint64_t>>&> DecodeType;
SendInts(const Vector<uint64_t>& ints, const Vector<Vector<uint64_t>>& intVectors)
: CoreIPC::Arguments2<const Vector<uint64_t>&, const Vector<Vector<uint64_t>>&>(ints, intVectors)
{
}
};
struct CreatePlugin : CoreIPC::Arguments2<uint64_t, const WebKit::Plugin::Parameters&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("CreatePlugin"); }
static const bool isSync = true;
typedef CoreIPC::Arguments1<bool&> Reply;
typedef CoreIPC::Arguments2<uint64_t, const WebKit::Plugin::Parameters&> DecodeType;
CreatePlugin(uint64_t pluginInstanceID, const WebKit::Plugin::Parameters& parameters)
: CoreIPC::Arguments2<uint64_t, const WebKit::Plugin::Parameters&>(pluginInstanceID, parameters)
{
}
};
struct RunJavaScriptAlert : CoreIPC::Arguments2<uint64_t, const WTF::String&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("RunJavaScriptAlert"); }
static const bool isSync = true;
typedef CoreIPC::Arguments0 Reply;
typedef CoreIPC::Arguments2<uint64_t, const WTF::String&> DecodeType;
RunJavaScriptAlert(uint64_t frameID, const WTF::String& message)
: CoreIPC::Arguments2<uint64_t, const WTF::String&>(frameID, message)
{
}
};
struct GetPlugins : CoreIPC::Arguments1<bool> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("GetPlugins"); }
static const bool isSync = true;
typedef CoreIPC::Arguments1<Vector<WebCore::PluginInfo>&> Reply;
typedef CoreIPC::Arguments1<bool> DecodeType;
explicit GetPlugins(bool refresh)
: CoreIPC::Arguments1<bool>(refresh)
{
}
};
struct GetPluginProcessConnection : CoreIPC::Arguments1<const WTF::String&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("GetPluginProcessConnection"); }
static const bool isSync = true;
struct DelayedReply : public ThreadSafeRefCounted<DelayedReply> {
DelayedReply(PassRefPtr<CoreIPC::Connection>, PassOwnPtr<CoreIPC::MessageEncoder>);
~DelayedReply();
bool send(const CoreIPC::Connection::Handle& connectionHandle);
private:
RefPtr<CoreIPC::Connection> m_connection;
OwnPtr<CoreIPC::MessageEncoder> m_encoder;
};
typedef CoreIPC::Arguments1<CoreIPC::Connection::Handle&> Reply;
typedef CoreIPC::Arguments1<const WTF::String&> DecodeType;
explicit GetPluginProcessConnection(const WTF::String& pluginPath)
: CoreIPC::Arguments1<const WTF::String&>(pluginPath)
{
}
};
struct TestMultipleAttributes : CoreIPC::Arguments0 {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("TestMultipleAttributes"); }
static const bool isSync = true;
struct DelayedReply : public ThreadSafeRefCounted<DelayedReply> {
DelayedReply(PassRefPtr<CoreIPC::Connection>, PassOwnPtr<CoreIPC::MessageEncoder>);
~DelayedReply();
bool send();
private:
RefPtr<CoreIPC::Connection> m_connection;
OwnPtr<CoreIPC::MessageEncoder> m_encoder;
};
typedef CoreIPC::Arguments0 Reply;
typedef CoreIPC::Arguments0 DecodeType;
};
struct TestParameterAttributes : CoreIPC::Arguments3<uint64_t, double, double> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("TestParameterAttributes"); }
static const bool isSync = false;
typedef CoreIPC::Arguments3<uint64_t, double, double> DecodeType;
TestParameterAttributes(uint64_t foo, double bar, double baz)
: CoreIPC::Arguments3<uint64_t, double, double>(foo, bar, baz)
{
}
};
struct TemplateTest : CoreIPC::Arguments1<const WTF::HashMap<String, std::pair<String, uint64_t>>&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("TemplateTest"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const WTF::HashMap<String, std::pair<String, uint64_t>>&> DecodeType;
explicit TemplateTest(const WTF::HashMap<String, std::pair<String, uint64_t>>& a)
: CoreIPC::Arguments1<const WTF::HashMap<String, std::pair<String, uint64_t>>&>(a)
{
}
};
#if PLATFORM(MAC)
struct DidCreateWebProcessConnection : CoreIPC::Arguments1<const CoreIPC::MachPort&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("DidCreateWebProcessConnection"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const CoreIPC::MachPort&> DecodeType;
explicit DidCreateWebProcessConnection(const CoreIPC::MachPort& connectionIdentifier)
: CoreIPC::Arguments1<const CoreIPC::MachPort&>(connectionIdentifier)
{
}
};
#endif
#if PLATFORM(MAC)
struct InterpretKeyEvent : CoreIPC::Arguments1<uint32_t> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("InterpretKeyEvent"); }
static const bool isSync = true;
typedef CoreIPC::Arguments1<Vector<WebCore::KeypressCommand>&> Reply;
typedef CoreIPC::Arguments1<uint32_t> DecodeType;
explicit InterpretKeyEvent(uint32_t type)
: CoreIPC::Arguments1<uint32_t>(type)
{
}
};
#endif
#if ENABLE(DEPRECATED_FEATURE)
struct DeprecatedOperation : CoreIPC::Arguments1<const CoreIPC::DummyType&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("DeprecatedOperation"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const CoreIPC::DummyType&> DecodeType;
explicit DeprecatedOperation(const CoreIPC::DummyType& dummy)
: CoreIPC::Arguments1<const CoreIPC::DummyType&>(dummy)
{
}
};
#endif
#if ENABLE(EXPERIMENTAL_FEATURE)
struct ExperimentalOperation : CoreIPC::Arguments1<const CoreIPC::DummyType&> {
static CoreIPC::StringReference receiverName() { return messageReceiverName(); }
static CoreIPC::StringReference name() { return CoreIPC::StringReference("ExperimentalOperation"); }
static const bool isSync = false;
typedef CoreIPC::Arguments1<const CoreIPC::DummyType&> DecodeType;
explicit ExperimentalOperation(const CoreIPC::DummyType& dummy)
: CoreIPC::Arguments1<const CoreIPC::DummyType&>(dummy)
{
}
};
#endif
} // namespace WebPage
} // namespace Messages
#endif // ENABLE(WEBKIT2)
#endif // WebPageMessages_h
"""
_expected_receiver_implementation = """/*
* Copyright (C) 2010 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#if ENABLE(WEBKIT2)
#include "WebPage.h"
#include "ArgumentCoders.h"
#include "Connection.h"
#if ENABLE(DEPRECATED_FEATURE) || ENABLE(EXPERIMENTAL_FEATURE)
#include "DummyType.h"
#endif
#include "HandleMessage.h"
#if PLATFORM(MAC)
#include "MachPort.h"
#endif
#include "MessageDecoder.h"
#include "Plugin.h"
#include "WebCoreArgumentCoders.h"
#if ENABLE(TOUCH_EVENTS)
#include "WebEvent.h"
#endif
#include "WebPageMessages.h"
#include "WebPreferencesStore.h"
#if PLATFORM(MAC)
#include <WebCore/KeyboardEvent.h>
#endif
#include <WebCore/PluginData.h>
#include <utility>
#include <wtf/HashMap.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>
namespace Messages {
namespace WebPage {
GetPluginProcessConnection::DelayedReply::DelayedReply(PassRefPtr<CoreIPC::Connection> connection, PassOwnPtr<CoreIPC::MessageEncoder> encoder)
: m_connection(connection)
, m_encoder(encoder)
{
}
GetPluginProcessConnection::DelayedReply::~DelayedReply()
{
ASSERT(!m_connection);
}
bool GetPluginProcessConnection::DelayedReply::send(const CoreIPC::Connection::Handle& connectionHandle)
{
ASSERT(m_encoder);
*m_encoder << connectionHandle;
bool result = m_connection->sendSyncReply(m_encoder.release());
m_connection = nullptr;
return result;
}
TestMultipleAttributes::DelayedReply::DelayedReply(PassRefPtr<CoreIPC::Connection> connection, PassOwnPtr<CoreIPC::MessageEncoder> encoder)
: m_connection(connection)
, m_encoder(encoder)
{
}
TestMultipleAttributes::DelayedReply::~DelayedReply()
{
ASSERT(!m_connection);
}
bool TestMultipleAttributes::DelayedReply::send()
{
ASSERT(m_encoder);
bool result = m_connection->sendSyncReply(m_encoder.release());
m_connection = nullptr;
return result;
}
} // namespace WebPage
} // namespace Messages
namespace WebKit {
void WebPage::didReceiveWebPageMessage(CoreIPC::Connection*, CoreIPC::MessageDecoder& decoder)
{
if (decoder.messageName() == Messages::WebPage::LoadURL::name()) {
CoreIPC::handleMessage<Messages::WebPage::LoadURL>(decoder, this, &WebPage::loadURL);
return;
}
#if ENABLE(TOUCH_EVENTS)
if (decoder.messageName() == Messages::WebPage::TouchEvent::name()) {
CoreIPC::handleMessage<Messages::WebPage::TouchEvent>(decoder, this, &WebPage::touchEvent);
return;
}
#endif
if (decoder.messageName() == Messages::WebPage::DidReceivePolicyDecision::name()) {
CoreIPC::handleMessage<Messages::WebPage::DidReceivePolicyDecision>(decoder, this, &WebPage::didReceivePolicyDecision);
return;
}
if (decoder.messageName() == Messages::WebPage::Close::name()) {
CoreIPC::handleMessage<Messages::WebPage::Close>(decoder, this, &WebPage::close);
return;
}
if (decoder.messageName() == Messages::WebPage::PreferencesDidChange::name()) {
CoreIPC::handleMessage<Messages::WebPage::PreferencesDidChange>(decoder, this, &WebPage::preferencesDidChange);
return;
}
if (decoder.messageName() == Messages::WebPage::SendDoubleAndFloat::name()) {
CoreIPC::handleMessage<Messages::WebPage::SendDoubleAndFloat>(decoder, this, &WebPage::sendDoubleAndFloat);
return;
}
if (decoder.messageName() == Messages::WebPage::SendInts::name()) {
CoreIPC::handleMessage<Messages::WebPage::SendInts>(decoder, this, &WebPage::sendInts);
return;
}
if (decoder.messageName() == Messages::WebPage::TestParameterAttributes::name()) {
CoreIPC::handleMessage<Messages::WebPage::TestParameterAttributes>(decoder, this, &WebPage::testParameterAttributes);
return;
}
if (decoder.messageName() == Messages::WebPage::TemplateTest::name()) {
CoreIPC::handleMessage<Messages::WebPage::TemplateTest>(decoder, this, &WebPage::templateTest);
return;
}
#if PLATFORM(MAC)
if (decoder.messageName() == Messages::WebPage::DidCreateWebProcessConnection::name()) {
CoreIPC::handleMessage<Messages::WebPage::DidCreateWebProcessConnection>(decoder, this, &WebPage::didCreateWebProcessConnection);
return;
}
#endif
#if ENABLE(DEPRECATED_FEATURE)
if (decoder.messageName() == Messages::WebPage::DeprecatedOperation::name()) {
CoreIPC::handleMessage<Messages::WebPage::DeprecatedOperation>(decoder, this, &WebPage::deprecatedOperation);
return;
}
#endif
#if ENABLE(EXPERIMENTAL_FEATURE)
if (decoder.messageName() == Messages::WebPage::ExperimentalOperation::name()) {
CoreIPC::handleMessage<Messages::WebPage::ExperimentalOperation>(decoder, this, &WebPage::experimentalOperation);
return;
}
#endif
ASSERT_NOT_REACHED();
}
void WebPage::didReceiveSyncWebPageMessage(CoreIPC::Connection* connection, CoreIPC::MessageDecoder& decoder, OwnPtr<CoreIPC::MessageEncoder>& replyEncoder)
{
if (decoder.messageName() == Messages::WebPage::CreatePlugin::name()) {
CoreIPC::handleMessage<Messages::WebPage::CreatePlugin>(decoder, *replyEncoder, this, &WebPage::createPlugin);
return;
}
if (decoder.messageName() == Messages::WebPage::RunJavaScriptAlert::name()) {
CoreIPC::handleMessage<Messages::WebPage::RunJavaScriptAlert>(decoder, *replyEncoder, this, &WebPage::runJavaScriptAlert);
return;
}
if (decoder.messageName() == Messages::WebPage::GetPlugins::name()) {
CoreIPC::handleMessage<Messages::WebPage::GetPlugins>(decoder, *replyEncoder, this, &WebPage::getPlugins);
return;
}
if (decoder.messageName() == Messages::WebPage::GetPluginProcessConnection::name()) {
CoreIPC::handleMessageDelayed<Messages::WebPage::GetPluginProcessConnection>(connection, decoder, replyEncoder, this, &WebPage::getPluginProcessConnection);
return;
}
if (decoder.messageName() == Messages::WebPage::TestMultipleAttributes::name()) {
CoreIPC::handleMessageDelayed<Messages::WebPage::TestMultipleAttributes>(connection, decoder, replyEncoder, this, &WebPage::testMultipleAttributes);
return;
}
#if PLATFORM(MAC)
if (decoder.messageName() == Messages::WebPage::InterpretKeyEvent::name()) {
CoreIPC::handleMessage<Messages::WebPage::InterpretKeyEvent>(decoder, *replyEncoder, this, &WebPage::interpretKeyEvent);
return;
}
#endif
ASSERT_NOT_REACHED();
}
} // namespace WebKit
#endif // ENABLE(WEBKIT2)
"""
class GeneratedFileContentsTest(unittest.TestCase):
def assertGeneratedFileContentsEqual(self, first, second):
first_list = first.split('\n')
second_list = second.split('\n')
for index, first_line in enumerate(first_list):
self.assertEquals(first_line, second_list[index])
self.assertEquals(len(first_list), len(second_list))
class HeaderTest(GeneratedFileContentsTest):
def test_header(self):
file_contents = messages.generate_messages_header(StringIO(_messages_file_contents))
self.assertGeneratedFileContentsEqual(file_contents, _expected_header)
class ReceiverImplementationTest(GeneratedFileContentsTest):
def test_receiver_implementation(self):
file_contents = messages.generate_message_handler(StringIO(_messages_file_contents))
self.assertGeneratedFileContentsEqual(file_contents, _expected_receiver_implementation)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
sbarnabas/gowhere | main/auth/auth.py | 1 | 13032 | # coding: utf-8
from __future__ import absolute_import
import functools
import re
from flask.ext import login
from flask.ext import wtf
from flask.ext.oauthlib import client as oauth
from google.appengine.ext import ndb
import flask
import unidecode
import wtforms
import cache
import config
import model
import task
import util
from main import app
_signals = flask.signals.Namespace()
###############################################################################
# Flask Login
###############################################################################
login_manager = login.LoginManager()
class AnonymousUser(login.AnonymousUserMixin):
id = 0
admin = False
name = 'Anonymous'
user_db = None
def key(self):
return None
def has_permission(self, permission):
return False
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
def __init__(self, user_db):
self.user_db = user_db
self.id = user_db.key.id()
self.name = user_db.name
self.admin = user_db.admin
def key(self):
return self.user_db.key.urlsafe()
def get_id(self):
return self.user_db.key.urlsafe()
def is_authenticated(self):
return True
def is_active(self):
return self.user_db.active
def is_anonymous(self):
return False
def has_permission(self, permission):
return self.user_db.has_permission(permission)
@login_manager.user_loader
def load_user(key):
user_db = ndb.Key(urlsafe=key).get()
if user_db:
return FlaskUser(user_db)
return None
login_manager.init_app(app)
def current_user_id():
return login.current_user.id
def current_user_key():
return login.current_user.user_db.key if login.current_user.user_db else None
def current_user_db():
return login.current_user.user_db
def is_logged_in():
return login.current_user.id != 0
###############################################################################
# Decorators
###############################################################################
def login_required(f):
decorator_order_guard(f, 'auth.login_required')
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if is_logged_in():
return f(*args, **kwargs)
if flask.request.path.startswith('/api/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return decorated_function
def admin_required(f):
decorator_order_guard(f, 'auth.admin_required')
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if is_logged_in() and current_user_db().admin:
return f(*args, **kwargs)
if not is_logged_in() and flask.request.path.startswith('/api/'):
return flask.abort(401)
if not is_logged_in():
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
permission_registered = _signals.signal('permission-registered')
def permission_required(permission=None, methods=None):
def permission_decorator(f):
decorator_order_guard(f, 'auth.permission_required')
# default to decorated function name as permission
perm = permission or f.func_name
meths = [m.upper() for m in methods] if methods else None
permission_registered.send(f, permission=perm)
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if meths and flask.request.method.upper() not in meths:
return f(*args, **kwargs)
if is_logged_in() and current_user_db().has_permission(perm):
return f(*args, **kwargs)
if not is_logged_in():
if flask.request.path.startswith('/api/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
return permission_decorator
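# Illustrative usage sketch (route and permission names are assumptions, not
# part of this module): with the methods filter above, only POST requests
# need the 'forum_moderator' permission; GET requests pass straight through.
#
#   @app.route('/forum/<int:topic_id>/', methods=['GET', 'POST'])
#   @permission_required('forum_moderator', methods=['POST'])
#   def forum_topic(topic_id):
#     ...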
###############################################################################
# Sign in stuff
###############################################################################
class SignInForm(wtf.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required()],
filters=[util.email_filter],
)
password = wtforms.StringField(
'Password',
[wtforms.validators.required()],
)
remember = wtforms.BooleanField(
'Keep me signed in',
[wtforms.validators.optional()],
)
recaptcha = wtf.RecaptchaField()
next_url = wtforms.HiddenField()
@app.route('/signin/', methods=['GET', 'POST'])
def signin():
next_url = util.get_next_url()
form = None
if config.CONFIG_DB.has_email_authentication:
form = form_with_recaptcha(SignInForm())
save_request_params()
if form.validate_on_submit():
result = get_user_db_from_email(
form.email.data, form.password.data)
if result:
cache.reset_auth_attempt()
return signin_user_db(result)
if result is None:
        form.email.errors.append('Email or password does not match.')
if result is False:
return flask.redirect(flask.url_for('welcome'))
if not form.errors:
form.next_url.data = next_url
if form and form.errors:
cache.bump_auth_attempt()
return flask.render_template(
'auth/auth.html',
title='Sign in',
html_class='auth',
next_url=next_url,
form=form,
form_type='signin' if config.CONFIG_DB.has_email_authentication else '',
**urls_for_oauth(next_url)
)
###############################################################################
# Sign up stuff
###############################################################################
class SignUpForm(wtf.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required(), wtforms.validators.email()],
filters=[util.email_filter],
)
recaptcha = wtf.RecaptchaField()
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
next_url = util.get_next_url()
form = None
if config.CONFIG_DB.has_email_authentication:
form = form_with_recaptcha(SignUpForm())
save_request_params()
if form.validate_on_submit():
user_db = model.User.get_by('email', form.email.data)
if user_db:
form.email.errors.append('This email is already taken.')
if not form.errors:
user_db = create_user_db(
None,
util.create_name_from_email(form.email.data),
form.email.data,
form.email.data,
)
user_db.put()
task.activate_user_notification(user_db)
cache.bump_auth_attempt()
return flask.redirect(flask.url_for('welcome'))
if form and form.errors:
cache.bump_auth_attempt()
title = 'Sign up' if config.CONFIG_DB.has_email_authentication else 'Sign in'
return flask.render_template(
'auth/auth.html',
title=title,
html_class='auth',
next_url=next_url,
form=form,
**urls_for_oauth(next_url)
)
###############################################################################
# Sign out stuff
###############################################################################
@app.route('/signout/')
def signout():
login.logout_user()
return flask.redirect(util.param('next') or flask.url_for('signin'))
###############################################################################
# Helpers
###############################################################################
def url_for_signin(service_name, next_url):
return flask.url_for('signin_%s' % service_name, next=next_url)
def urls_for_oauth(next_url):
return {
'bitbucket_signin_url': url_for_signin('bitbucket', next_url),
'dropbox_signin_url': url_for_signin('dropbox', next_url),
'facebook_signin_url': url_for_signin('facebook', next_url),
'github_signin_url': url_for_signin('github', next_url),
'google_signin_url': url_for_signin('google', next_url),
'gae_signin_url': url_for_signin('gae', next_url),
'instagram_signin_url': url_for_signin('instagram', next_url),
'linkedin_signin_url': url_for_signin('linkedin', next_url),
'microsoft_signin_url': url_for_signin('microsoft', next_url),
'reddit_signin_url': url_for_signin('reddit', next_url),
'twitter_signin_url': url_for_signin('twitter', next_url),
'vk_signin_url': url_for_signin('vk', next_url),
'yahoo_signin_url': url_for_signin('yahoo', next_url),
}
def create_oauth_app(service_config, name):
upper_name = name.upper()
app.config[upper_name] = service_config
service_oauth = oauth.OAuth()
service_app = service_oauth.remote_app(name, app_key=upper_name)
service_oauth.init_app(app)
return service_app
def decorator_order_guard(f, decorator_name):
if f in app.view_functions.values():
raise SyntaxError(
'Do not use %s above app.route decorators as it would not be checked. '
'Instead move the line below the app.route lines.' % decorator_name
)
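# Decorators apply bottom-up, so app.route must be written first (outermost)
# for the guard above to pass. Illustrative sketch (names are assumptions):
#
#   @app.route('/admin/')  # correct: registers the already-protected view
#   @admin_required
#   def admin_page():
#     ...
#
# Writing @admin_required above @app.route would register the unprotected
# function first, which is exactly what decorator_order_guard raises for.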
def save_request_params():
flask.session['auth-params'] = {
'next': util.get_next_url(),
'remember': util.param('remember', bool),
}
def signin_oauth(oauth_app, scheme=None):
try:
flask.session.pop('oauth_token', None)
save_request_params()
return oauth_app.authorize(callback=flask.url_for(
'%s_authorized' % oauth_app.name, _external=True, _scheme=scheme
))
except oauth.OAuthException:
flask.flash(
'Something went wrong with sign in. Please try again.',
category='danger',
)
return flask.redirect(flask.url_for('signin', next=util.get_next_url()))
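# Illustrative sketch (the config accessor and remote-app variable are
# assumptions): a provider-specific signin route usually just delegates to
# signin_oauth, which stashes the request params and redirects to the provider.
#
#   github_oauth_app = create_oauth_app(config.CONFIG_DB.github_config, 'github')
#
#   @app.route('/signin/github/')
#   def signin_github():
#     return signin_oauth(github_oauth_app)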
def form_with_recaptcha(form):
should_have_recaptcha = cache.get_auth_attempt() >= config.RECAPTCHA_LIMIT
if not (should_have_recaptcha and config.CONFIG_DB.has_recaptcha):
del form.recaptcha
return form
###############################################################################
# User related stuff
###############################################################################
def create_user_db(auth_id, name, username, email='', verified=False, **props):
email = email.lower() if email else ''
if verified and email:
user_dbs, cursors = model.User.get_dbs(
email=email, verified=True, limit=2)
if len(user_dbs) == 1:
user_db = user_dbs[0]
user_db.auth_ids.append(auth_id)
user_db.put()
task.new_user_notification(user_db)
return user_db
if isinstance(username, str):
username = username.decode('utf-8')
username = unidecode.unidecode(username.split('@')[0].lower()).strip()
username = re.sub(r'[\W_]+', '.', username)
new_username = username
n = 1
while not model.User.is_username_available(new_username):
new_username = '%s%d' % (username, n)
n += 1
user_db = model.User(
name=name,
email=email,
username=new_username,
auth_ids=[auth_id] if auth_id else [],
verified=verified,
token=util.uuid(),
**props
)
user_db.put()
task.new_user_notification(user_db)
return user_db
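# Worked example of the username derivation above (address is hypothetical):
# 'José.Smith@example.com' -> local part, lower-cased and transliterated ->
# 'jose.smith'; runs of non-word characters collapse to '.'; if 'jose.smith'
# is taken, the loop tries 'jose.smith1', 'jose.smith2', ... until
# model.User.is_username_available accepts one.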
@ndb.toplevel
def signin_user_db(user_db):
if not user_db:
return flask.redirect(flask.url_for('signin'))
flask_user_db = FlaskUser(user_db)
auth_params = flask.session.get('auth-params', {
'next': flask.url_for('welcome'),
'remember': False,
})
flask.session.pop('auth-params', None)
if login.login_user(flask_user_db, remember=auth_params['remember']):
user_db.put_async()
return flask.redirect(util.get_next_url(auth_params['next']))
flask.flash('Sorry, but you could not sign in.', category='danger')
return flask.redirect(flask.url_for('signin'))
def get_user_db_from_email(email, password):
user_dbs, cursors = model.User.get_dbs(email=email, active=True, limit=2)
if not user_dbs:
return None
if len(user_dbs) > 1:
flask.flash('''We are sorry but it looks like there is a conflict with
your account. Our support team is already informed and we will get
back to you as soon as possible.''', category='danger')
task.email_conflict_notification(email)
return False
user_db = user_dbs[0]
if user_db.password_hash == util.password_hash(user_db, password):
return user_db
return None
| mit |
gelisam/evercal | lib/evernote/edam/notestore/ttypes.py | 10 | 103402 | #
# Autogenerated by Thrift Compiler
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import evernote.edam.userstore.ttypes
import evernote.edam.type.ttypes
import evernote.edam.error.ttypes
import evernote.edam.limits.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except ImportError:
fastbinary = None
class SyncState(object):
"""
This structure encapsulates the information about the state of the
user's account for the purpose of "state based" synchronization.
<dl>
<dt>currentTime</dt>
<dd>
The server's current date and time.
</dd>
<dt>fullSyncBefore</dt>
<dd>
The cutoff date and time for client caches to be
updated via incremental synchronization. Any clients that were last
synched with the server before this date/time must do a full resync of all
objects. This cutoff point will change over time as archival data is
deleted or special circumstances on the service require resynchronization.
</dd>
<dt>updateCount</dt>
<dd>
Indicates the total number of transactions that have
been committed within the account. This reflects (for example) the
number of discrete additions or modifications that have been made to
the data in this account (tags, notes, resources, etc.).
This number is the "high water mark" for Update Sequence Numbers (USN)
within the account.
</dd>
<dt>uploaded</dt>
<dd>
The total number of bytes that have been uploaded to
this account in the current monthly period. This can be compared against
Accounting.uploadLimit (from the UserStore) to determine how close the user
is to their monthly upload limit.
This value may not be present if the SyncState has been retrieved by
a caller that only has read access to the account.
</dd>
</dl>
Attributes:
- currentTime
- fullSyncBefore
- updateCount
- uploaded
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'currentTime', None, None, ), # 1
(2, TType.I64, 'fullSyncBefore', None, None, ), # 2
(3, TType.I32, 'updateCount', None, None, ), # 3
(4, TType.I64, 'uploaded', None, None, ), # 4
)
def __init__(self, currentTime=None, fullSyncBefore=None, updateCount=None, uploaded=None,):
self.currentTime = currentTime
self.fullSyncBefore = fullSyncBefore
self.updateCount = updateCount
self.uploaded = uploaded
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.currentTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.fullSyncBefore = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.updateCount = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.uploaded = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SyncState')
if self.currentTime is not None:
oprot.writeFieldBegin('currentTime', TType.I64, 1)
oprot.writeI64(self.currentTime)
oprot.writeFieldEnd()
if self.fullSyncBefore is not None:
oprot.writeFieldBegin('fullSyncBefore', TType.I64, 2)
oprot.writeI64(self.fullSyncBefore)
oprot.writeFieldEnd()
if self.updateCount is not None:
oprot.writeFieldBegin('updateCount', TType.I32, 3)
oprot.writeI32(self.updateCount)
oprot.writeFieldEnd()
if self.uploaded is not None:
oprot.writeFieldBegin('uploaded', TType.I64, 4)
oprot.writeI64(self.uploaded)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.currentTime is None:
raise TProtocol.TProtocolException(message='Required field currentTime is unset!')
if self.fullSyncBefore is None:
raise TProtocol.TProtocolException(message='Required field fullSyncBefore is unset!')
if self.updateCount is None:
raise TProtocol.TProtocolException(message='Required field updateCount is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
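# Illustrative sketch, not generated code: how a client might interpret a
# SyncState per the docstring above. The client_* arguments are assumptions
# standing in for locally cached values from the previous synchronization.
def _example_choose_sync_strategy(sync_state, client_last_sync_time, client_last_usn):
  if client_last_sync_time < sync_state.fullSyncBefore:
    return 'full'         # cache predates the cutoff: full resync required
  if client_last_usn < sync_state.updateCount:
    return 'incremental'  # server holds USNs the client has not seen yet
  return 'up-to-date'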
class SyncChunk(object):
"""
This structure is given out by the NoteStore when a client asks to
receive the current state of an account. The client asks for the server's
state one chunk at a time in order to allow clients to retrieve the state
of a large account without needing to transfer the entire account in
a single message.
The server always gives SyncChunks using an ascending series of Update
Sequence Numbers (USNs).
<dl>
<dt>currentTime</dt>
<dd>
The server's current date and time.
</dd>
<dt>chunkHighUSN</dt>
<dd>
The highest USN for any of the data objects represented
in this sync chunk. If there are no objects in the chunk, this will not be
set.
</dd>
<dt>updateCount</dt>
<dd>
The total number of updates that have been performed in
the service for this account. This is equal to the highest USN within the
account at the point that this SyncChunk was generated. If updateCount
and chunkHighUSN are identical, that means that this is the last chunk
in the account ... there is no more recent information.
</dd>
<dt>notes</dt>
<dd>
If present, this is a list of non-expunged notes that
have a USN in this chunk. This will include notes that are "deleted"
but not expunged (i.e. in the trash). The notes will include their list
of tags and resources, but the note content, resource content, resource
recognition data and resource alternate data will not be supplied.
</dd>
<dt>notebooks</dt>
<dd>
If present, this is a list of non-expunged notebooks that
have a USN in this chunk. This will include notebooks that are "deleted"
but not expunged (i.e. in the trash).
</dd>
<dt>tags</dt>
<dd>
If present, this is a list of the non-expunged tags that have a
USN in this chunk.
</dd>
<dt>searches</dt>
<dd>
If present, this is a list of non-expunged searches that
have a USN in this chunk.
</dd>
<dt>resources</dt>
<dd>
If present, this is a list of the non-expunged resources
that have a USN in this chunk. This will include the metadata for each
resource, but not its binary contents or recognition data, which must be
retrieved separately.
</dd>
<dt>expungedNotes</dt>
<dd>
If present, the GUIDs of all of the notes that were
permanently expunged in this chunk.
</dd>
<dt>expungedNotebooks</dt>
<dd>
If present, the GUIDs of all of the notebooks that
were permanently expunged in this chunk. When a notebook is expunged,
this implies that all of its child notes (and their resources) were
also expunged.
</dd>
<dt>expungedTags</dt>
<dd>
If present, the GUIDs of all of the tags that were
permanently expunged in this chunk.
</dd>
<dt>expungedSearches</dt>
<dd>
If present, the GUIDs of all of the saved searches
that were permanently expunged in this chunk.
</dd>
<dt>linkedNotebooks</dt>
<dd>
If present, this is a list of non-expunged LinkedNotebooks that
have a USN in this chunk.
</dd>
<dt>expungedLinkedNotebooks</dt>
<dd>
If present, the GUIDs of all of the LinkedNotebooks
that were permanently expunged in this chunk.
</dd>
</dl>
Attributes:
- currentTime
- chunkHighUSN
- updateCount
- notes
- notebooks
- tags
- searches
- resources
- expungedNotes
- expungedNotebooks
- expungedTags
- expungedSearches
- linkedNotebooks
- expungedLinkedNotebooks
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'currentTime', None, None, ), # 1
(2, TType.I32, 'chunkHighUSN', None, None, ), # 2
(3, TType.I32, 'updateCount', None, None, ), # 3
(4, TType.LIST, 'notes', (TType.STRUCT,(evernote.edam.type.ttypes.Note, evernote.edam.type.ttypes.Note.thrift_spec)), None, ), # 4
(5, TType.LIST, 'notebooks', (TType.STRUCT,(evernote.edam.type.ttypes.Notebook, evernote.edam.type.ttypes.Notebook.thrift_spec)), None, ), # 5
(6, TType.LIST, 'tags', (TType.STRUCT,(evernote.edam.type.ttypes.Tag, evernote.edam.type.ttypes.Tag.thrift_spec)), None, ), # 6
(7, TType.LIST, 'searches', (TType.STRUCT,(evernote.edam.type.ttypes.SavedSearch, evernote.edam.type.ttypes.SavedSearch.thrift_spec)), None, ), # 7
(8, TType.LIST, 'resources', (TType.STRUCT,(evernote.edam.type.ttypes.Resource, evernote.edam.type.ttypes.Resource.thrift_spec)), None, ), # 8
(9, TType.LIST, 'expungedNotes', (TType.STRING,None), None, ), # 9
(10, TType.LIST, 'expungedNotebooks', (TType.STRING,None), None, ), # 10
(11, TType.LIST, 'expungedTags', (TType.STRING,None), None, ), # 11
(12, TType.LIST, 'expungedSearches', (TType.STRING,None), None, ), # 12
(13, TType.LIST, 'linkedNotebooks', (TType.STRUCT,(evernote.edam.type.ttypes.LinkedNotebook, evernote.edam.type.ttypes.LinkedNotebook.thrift_spec)), None, ), # 13
(14, TType.LIST, 'expungedLinkedNotebooks', (TType.STRING,None), None, ), # 14
)
def __init__(self, currentTime=None, chunkHighUSN=None, updateCount=None, notes=None, notebooks=None, tags=None, searches=None, resources=None, expungedNotes=None, expungedNotebooks=None, expungedTags=None, expungedSearches=None, linkedNotebooks=None, expungedLinkedNotebooks=None,):
self.currentTime = currentTime
self.chunkHighUSN = chunkHighUSN
self.updateCount = updateCount
self.notes = notes
self.notebooks = notebooks
self.tags = tags
self.searches = searches
self.resources = resources
self.expungedNotes = expungedNotes
self.expungedNotebooks = expungedNotebooks
self.expungedTags = expungedTags
self.expungedSearches = expungedSearches
self.linkedNotebooks = linkedNotebooks
self.expungedLinkedNotebooks = expungedLinkedNotebooks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.currentTime = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.chunkHighUSN = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.updateCount = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.notes = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = evernote.edam.type.ttypes.Note()
_elem5.read(iprot)
self.notes.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.notebooks = []
(_etype9, _size6) = iprot.readListBegin()
for _i10 in xrange(_size6):
_elem11 = evernote.edam.type.ttypes.Notebook()
_elem11.read(iprot)
self.notebooks.append(_elem11)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.LIST:
self.tags = []
(_etype15, _size12) = iprot.readListBegin()
for _i16 in xrange(_size12):
_elem17 = evernote.edam.type.ttypes.Tag()
_elem17.read(iprot)
self.tags.append(_elem17)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.searches = []
(_etype21, _size18) = iprot.readListBegin()
for _i22 in xrange(_size18):
_elem23 = evernote.edam.type.ttypes.SavedSearch()
_elem23.read(iprot)
self.searches.append(_elem23)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.LIST:
self.resources = []
(_etype27, _size24) = iprot.readListBegin()
for _i28 in xrange(_size24):
_elem29 = evernote.edam.type.ttypes.Resource()
_elem29.read(iprot)
self.resources.append(_elem29)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.expungedNotes = []
(_etype33, _size30) = iprot.readListBegin()
for _i34 in xrange(_size30):
_elem35 = iprot.readString();
self.expungedNotes.append(_elem35)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.LIST:
self.expungedNotebooks = []
(_etype39, _size36) = iprot.readListBegin()
for _i40 in xrange(_size36):
_elem41 = iprot.readString();
self.expungedNotebooks.append(_elem41)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.LIST:
self.expungedTags = []
(_etype45, _size42) = iprot.readListBegin()
for _i46 in xrange(_size42):
_elem47 = iprot.readString();
self.expungedTags.append(_elem47)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.LIST:
self.expungedSearches = []
(_etype51, _size48) = iprot.readListBegin()
for _i52 in xrange(_size48):
_elem53 = iprot.readString();
self.expungedSearches.append(_elem53)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.LIST:
self.linkedNotebooks = []
(_etype57, _size54) = iprot.readListBegin()
for _i58 in xrange(_size54):
_elem59 = evernote.edam.type.ttypes.LinkedNotebook()
_elem59.read(iprot)
self.linkedNotebooks.append(_elem59)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.LIST:
self.expungedLinkedNotebooks = []
(_etype63, _size60) = iprot.readListBegin()
for _i64 in xrange(_size60):
_elem65 = iprot.readString();
self.expungedLinkedNotebooks.append(_elem65)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SyncChunk')
if self.currentTime is not None:
oprot.writeFieldBegin('currentTime', TType.I64, 1)
oprot.writeI64(self.currentTime)
oprot.writeFieldEnd()
if self.chunkHighUSN is not None:
oprot.writeFieldBegin('chunkHighUSN', TType.I32, 2)
oprot.writeI32(self.chunkHighUSN)
oprot.writeFieldEnd()
if self.updateCount is not None:
oprot.writeFieldBegin('updateCount', TType.I32, 3)
oprot.writeI32(self.updateCount)
oprot.writeFieldEnd()
if self.notes is not None:
oprot.writeFieldBegin('notes', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.notes))
for iter66 in self.notes:
iter66.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.notebooks is not None:
oprot.writeFieldBegin('notebooks', TType.LIST, 5)
oprot.writeListBegin(TType.STRUCT, len(self.notebooks))
for iter67 in self.notebooks:
iter67.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.tags is not None:
oprot.writeFieldBegin('tags', TType.LIST, 6)
oprot.writeListBegin(TType.STRUCT, len(self.tags))
for iter68 in self.tags:
iter68.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.searches is not None:
oprot.writeFieldBegin('searches', TType.LIST, 7)
oprot.writeListBegin(TType.STRUCT, len(self.searches))
for iter69 in self.searches:
iter69.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.resources is not None:
oprot.writeFieldBegin('resources', TType.LIST, 8)
oprot.writeListBegin(TType.STRUCT, len(self.resources))
for iter70 in self.resources:
iter70.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.expungedNotes is not None:
oprot.writeFieldBegin('expungedNotes', TType.LIST, 9)
oprot.writeListBegin(TType.STRING, len(self.expungedNotes))
for iter71 in self.expungedNotes:
oprot.writeString(iter71)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.expungedNotebooks is not None:
oprot.writeFieldBegin('expungedNotebooks', TType.LIST, 10)
oprot.writeListBegin(TType.STRING, len(self.expungedNotebooks))
for iter72 in self.expungedNotebooks:
oprot.writeString(iter72)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.expungedTags is not None:
oprot.writeFieldBegin('expungedTags', TType.LIST, 11)
oprot.writeListBegin(TType.STRING, len(self.expungedTags))
for iter73 in self.expungedTags:
oprot.writeString(iter73)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.expungedSearches is not None:
oprot.writeFieldBegin('expungedSearches', TType.LIST, 12)
oprot.writeListBegin(TType.STRING, len(self.expungedSearches))
for iter74 in self.expungedSearches:
oprot.writeString(iter74)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.linkedNotebooks is not None:
oprot.writeFieldBegin('linkedNotebooks', TType.LIST, 13)
oprot.writeListBegin(TType.STRUCT, len(self.linkedNotebooks))
for iter75 in self.linkedNotebooks:
iter75.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.expungedLinkedNotebooks is not None:
oprot.writeFieldBegin('expungedLinkedNotebooks', TType.LIST, 14)
oprot.writeListBegin(TType.STRING, len(self.expungedLinkedNotebooks))
for iter76 in self.expungedLinkedNotebooks:
oprot.writeString(iter76)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.currentTime is None:
raise TProtocol.TProtocolException(message='Required field currentTime is unset!')
if self.updateCount is None:
raise TProtocol.TProtocolException(message='Required field updateCount is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
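# Illustrative sketch, not generated code: the docstring above says the chunk
# series is exhausted when chunkHighUSN equals updateCount (or when no chunk
# contains objects), so a download loop can terminate on those conditions.
# fetch_chunk stands in for a NoteStore call such as getFilteredSyncChunk.
def _example_drain_sync_chunks(fetch_chunk, after_usn=0):
  chunks = []
  while True:
    chunk = fetch_chunk(after_usn)
    if chunk.chunkHighUSN is None:
      break  # empty chunk: nothing newer than after_usn on the server
    chunks.append(chunk)
    after_usn = chunk.chunkHighUSN
    if chunk.chunkHighUSN == chunk.updateCount:
      break  # this was the last chunk in the account
  return chunks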
class SyncChunkFilter(object):
"""
This structure is used with the 'getFilteredSyncChunk' call to provide
fine-grained control over the data that's returned when a client needs
to synchronize with the service. Each flag in this structure specifies
whether to include one class of data in the results of that call.
<dl>
<dt>includeNotes</dt>
<dd>
If true, then the server will include the SyncChunks.notes field
</dd>
<dt>includeNoteResources</dt>
<dd>
If true, then the server will include the 'resources' field on all of
the Notes that are in SyncChunk.notes.
If 'includeNotes' is false, then this will have no effect.
</dd>
<dt>includeNoteAttributes</dt>
<dd>
If true, then the server will include the 'attributes' field on all of
the Notes that are in SyncChunks.notes.
If 'includeNotes' is false, then this will have no effect.
</dd>
<dt>includeNotebooks</dt>
<dd>
If true, then the server will include the SyncChunks.notebooks field
</dd>
<dt>includeTags</dt>
<dd>
If true, then the server will include the SyncChunks.tags field
</dd>
<dt>includeSearches</dt>
<dd>
If true, then the server will include the SyncChunks.searches field
</dd>
<dt>includeResources</dt>
<dd>
If true, then the server will include the SyncChunks.resources field.
Since the Resources are also provided with their Note
(in the Notes.resources list), this is primarily useful for clients that
want to watch for changes to individual Resources due to recognition data
being added.
</dd>
<dt>includeLinkedNotebooks</dt>
<dd>
If true, then the server will include the SyncChunks.linkedNotebooks field.
</dd>
<dt>includeExpunged</dt>
<dd>
If true, then the server will include the 'expunged' data for any type
of included data. For example, if 'includeTags' and 'includeExpunged'
are both true, then the SyncChunks.expungedTags field will be set with
the GUIDs of tags that have been expunged from the server.
</dd>
<dt>includeNoteApplicationDataFullMap</dt>
<dd>
If true, then the values for the applicationData map will be filled
in, assuming notes and note attributes are being returned. Otherwise,
only the keysOnly field will be filled in.
</dd>
<dt>includeResourceApplicationDataFullMap</dt>
<dd>
If true, then the fullMap values for the applicationData map will be
filled in, assuming resources and resource attributes are being returned
(includeResources is true). Otherwise, only the keysOnly field will be
filled in.
</dd>
<dt>includeNoteResourceApplicationDataFullMap</dt>
<dd>
If true, then the fullMap values for the applicationData map will be
filled in for resources found inside of notes, assuming resources are
being returned in notes (includeNoteResources is true). Otherwise,
only the keysOnly field will be filled in.
</dd>
<dt>requireNoteContentClass</dt>
<dd>
If set, then only send notes whose content class matches this value.
The value can be a literal match or, if the last character is an
asterisk, a prefix match.
</dd>
</dl>
Attributes:
- includeNotes
- includeNoteResources
- includeNoteAttributes
- includeNotebooks
- includeTags
- includeSearches
- includeResources
- includeLinkedNotebooks
- includeExpunged
- includeNoteApplicationDataFullMap
- includeResourceApplicationDataFullMap
- includeNoteResourceApplicationDataFullMap
- requireNoteContentClass
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'includeNotes', None, None, ), # 1
(2, TType.BOOL, 'includeNoteResources', None, None, ), # 2
(3, TType.BOOL, 'includeNoteAttributes', None, None, ), # 3
(4, TType.BOOL, 'includeNotebooks', None, None, ), # 4
(5, TType.BOOL, 'includeTags', None, None, ), # 5
(6, TType.BOOL, 'includeSearches', None, None, ), # 6
(7, TType.BOOL, 'includeResources', None, None, ), # 7
(8, TType.BOOL, 'includeLinkedNotebooks', None, None, ), # 8
(9, TType.BOOL, 'includeExpunged', None, None, ), # 9
(10, TType.BOOL, 'includeNoteApplicationDataFullMap', None, None, ), # 10
(11, TType.STRING, 'requireNoteContentClass', None, None, ), # 11
(12, TType.BOOL, 'includeResourceApplicationDataFullMap', None, None, ), # 12
(13, TType.BOOL, 'includeNoteResourceApplicationDataFullMap', None, None, ), # 13
)
def __init__(self, includeNotes=None, includeNoteResources=None, includeNoteAttributes=None, includeNotebooks=None, includeTags=None, includeSearches=None, includeResources=None, includeLinkedNotebooks=None, includeExpunged=None, includeNoteApplicationDataFullMap=None, includeResourceApplicationDataFullMap=None, includeNoteResourceApplicationDataFullMap=None, requireNoteContentClass=None,):
self.includeNotes = includeNotes
self.includeNoteResources = includeNoteResources
self.includeNoteAttributes = includeNoteAttributes
self.includeNotebooks = includeNotebooks
self.includeTags = includeTags
self.includeSearches = includeSearches
self.includeResources = includeResources
self.includeLinkedNotebooks = includeLinkedNotebooks
self.includeExpunged = includeExpunged
self.includeNoteApplicationDataFullMap = includeNoteApplicationDataFullMap
self.includeResourceApplicationDataFullMap = includeResourceApplicationDataFullMap
self.includeNoteResourceApplicationDataFullMap = includeNoteResourceApplicationDataFullMap
self.requireNoteContentClass = requireNoteContentClass
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.includeNotes = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.includeNoteResources = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.includeNoteAttributes = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.includeNotebooks = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.includeTags = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.includeSearches = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.includeResources = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.includeLinkedNotebooks = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.BOOL:
self.includeExpunged = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.includeNoteApplicationDataFullMap = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.BOOL:
self.includeResourceApplicationDataFullMap = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 13:
if ftype == TType.BOOL:
self.includeNoteResourceApplicationDataFullMap = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.requireNoteContentClass = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('SyncChunkFilter')
if self.includeNotes is not None:
oprot.writeFieldBegin('includeNotes', TType.BOOL, 1)
oprot.writeBool(self.includeNotes)
oprot.writeFieldEnd()
if self.includeNoteResources is not None:
oprot.writeFieldBegin('includeNoteResources', TType.BOOL, 2)
oprot.writeBool(self.includeNoteResources)
oprot.writeFieldEnd()
if self.includeNoteAttributes is not None:
oprot.writeFieldBegin('includeNoteAttributes', TType.BOOL, 3)
oprot.writeBool(self.includeNoteAttributes)
oprot.writeFieldEnd()
if self.includeNotebooks is not None:
oprot.writeFieldBegin('includeNotebooks', TType.BOOL, 4)
oprot.writeBool(self.includeNotebooks)
oprot.writeFieldEnd()
if self.includeTags is not None:
oprot.writeFieldBegin('includeTags', TType.BOOL, 5)
oprot.writeBool(self.includeTags)
oprot.writeFieldEnd()
if self.includeSearches is not None:
oprot.writeFieldBegin('includeSearches', TType.BOOL, 6)
oprot.writeBool(self.includeSearches)
oprot.writeFieldEnd()
if self.includeResources is not None:
oprot.writeFieldBegin('includeResources', TType.BOOL, 7)
oprot.writeBool(self.includeResources)
oprot.writeFieldEnd()
if self.includeLinkedNotebooks is not None:
oprot.writeFieldBegin('includeLinkedNotebooks', TType.BOOL, 8)
oprot.writeBool(self.includeLinkedNotebooks)
oprot.writeFieldEnd()
if self.includeExpunged is not None:
oprot.writeFieldBegin('includeExpunged', TType.BOOL, 9)
oprot.writeBool(self.includeExpunged)
oprot.writeFieldEnd()
if self.includeNoteApplicationDataFullMap is not None:
oprot.writeFieldBegin('includeNoteApplicationDataFullMap', TType.BOOL, 10)
oprot.writeBool(self.includeNoteApplicationDataFullMap)
oprot.writeFieldEnd()
if self.requireNoteContentClass is not None:
oprot.writeFieldBegin('requireNoteContentClass', TType.STRING, 11)
oprot.writeString(self.requireNoteContentClass)
oprot.writeFieldEnd()
if self.includeResourceApplicationDataFullMap is not None:
oprot.writeFieldBegin('includeResourceApplicationDataFullMap', TType.BOOL, 12)
oprot.writeBool(self.includeResourceApplicationDataFullMap)
oprot.writeFieldEnd()
if self.includeNoteResourceApplicationDataFullMap is not None:
oprot.writeFieldBegin('includeNoteResourceApplicationDataFullMap', TType.BOOL, 13)
oprot.writeBool(self.includeNoteResourceApplicationDataFullMap)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
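# Illustrative sketch (the chosen flags are an assumption): a minimal filter
# for a client that mirrors notes and tags but not resources or searches,
# including expunged GUIDs so local deletions can be applied.
def _example_minimal_sync_filter():
  return SyncChunkFilter(
    includeNotes=True,
    includeTags=True,
    includeExpunged=True)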
class NoteFilter(object):
"""
A list of criteria that are used to indicate which notes are desired from
the account. This is used in queries to the NoteStore to determine
which notes should be retrieved.
<dl>
<dt>order</dt>
<dd>
The NoteSortOrder value indicating what criterion should be
used to sort the results of the filter.
</dd>
<dt>ascending</dt>
<dd>
If true, the results will be ascending in the requested
sort order. If false, the results will be descending.
</dd>
<dt>words</dt>
<dd>
If present, a search query string that will filter the set of notes to be returned.
Accepts the full search grammar documented in the Evernote API Overview.
</dd>
<dt>notebookGuid</dt>
<dd>
If present, the Guid of the notebook that must contain
the notes.
</dd>
<dt>tagGuids</dt>
<dd>
If present, the list of tags (by GUID) that must be present
on the notes.
</dd>
<dt>timeZone</dt>
<dd>
The zone ID for the user, which will be used to interpret
any dates or times in the queries that do not include their desired zone
information.
For example, if a query requests notes created "yesterday", this
will be evaluated from the provided time zone, if provided.
The format must be encoded as a standard zone ID such as
"America/Los_Angeles".
</dd>
<dt>inactive</dt>
<dd>
If true, then only notes that are not active (i.e. notes in
the Trash) will be returned. Otherwise, only active notes will be returned.
There is no way to find both active and inactive notes in a single query.
</dd>
<dt>emphasized</dt>
<dd>
If present, a search query string that may or may not influence the notes
to be returned, both in terms of coverage as well as of order. Think of it
as a wish list, not a requirement.
Accepts the full search grammar documented in the Evernote API Overview.
</dd>
</dl>
Attributes:
- order
- ascending
- words
- notebookGuid
- tagGuids
- timeZone
- inactive
- emphasized
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'order', None, None, ), # 1
(2, TType.BOOL, 'ascending', None, None, ), # 2
(3, TType.STRING, 'words', None, None, ), # 3
(4, TType.STRING, 'notebookGuid', None, None, ), # 4
(5, TType.LIST, 'tagGuids', (TType.STRING,None), None, ), # 5
(6, TType.STRING, 'timeZone', None, None, ), # 6
(7, TType.BOOL, 'inactive', None, None, ), # 7
(8, TType.STRING, 'emphasized', None, None, ), # 8
)
def __init__(self, order=None, ascending=None, words=None, notebookGuid=None, tagGuids=None, timeZone=None, inactive=None, emphasized=None,):
self.order = order
self.ascending = ascending
self.words = words
self.notebookGuid = notebookGuid
self.tagGuids = tagGuids
self.timeZone = timeZone
self.inactive = inactive
self.emphasized = emphasized
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.order = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.BOOL:
self.ascending = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.words = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.notebookGuid = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.tagGuids = []
(_etype80, _size77) = iprot.readListBegin()
for _i81 in xrange(_size77):
_elem82 = iprot.readString();
self.tagGuids.append(_elem82)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.timeZone = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.inactive = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.emphasized = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoteFilter')
if self.order is not None:
oprot.writeFieldBegin('order', TType.I32, 1)
oprot.writeI32(self.order)
oprot.writeFieldEnd()
if self.ascending is not None:
oprot.writeFieldBegin('ascending', TType.BOOL, 2)
oprot.writeBool(self.ascending)
oprot.writeFieldEnd()
if self.words is not None:
oprot.writeFieldBegin('words', TType.STRING, 3)
oprot.writeString(self.words)
oprot.writeFieldEnd()
if self.notebookGuid is not None:
oprot.writeFieldBegin('notebookGuid', TType.STRING, 4)
oprot.writeString(self.notebookGuid)
oprot.writeFieldEnd()
if self.tagGuids is not None:
oprot.writeFieldBegin('tagGuids', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.tagGuids))
for iter83 in self.tagGuids:
oprot.writeString(iter83)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.timeZone is not None:
oprot.writeFieldBegin('timeZone', TType.STRING, 6)
oprot.writeString(self.timeZone)
oprot.writeFieldEnd()
if self.inactive is not None:
oprot.writeFieldBegin('inactive', TType.BOOL, 7)
oprot.writeBool(self.inactive)
oprot.writeFieldEnd()
if self.emphasized is not None:
oprot.writeFieldBegin('emphasized', TType.STRING, 8)
oprot.writeString(self.emphasized)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
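# Illustrative sketch (query, GUID and zone are hypothetical values): an
# ascending filter for active notes in one notebook, with the words field
# using the search grammar from the Evernote API Overview and timeZone
# resolving relative dates as described above.
def _example_note_filter():
  return NoteFilter(
    ascending=True,
    words='tag:todo created:day-1',
    notebookGuid='00000000-0000-0000-0000-000000000000',
    timeZone='America/Los_Angeles',
    inactive=False)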
class NoteList(object):
"""
A small structure for returning a list of notes out of a larger set.
<dl>
<dt>startIndex</dt>
<dd>
The starting index within the overall set of notes. This
is also the number of notes that are "before" this list in the set.
</dd>
<dt>totalNotes</dt>
<dd>
The number of notes in the larger set. This can be used
to calculate how many notes are "after" this note in the set.
(I.e. remaining = totalNotes - (startIndex + notes.length) )
</dd>
<dt>notes</dt>
<dd>
The list of notes from this range. The Notes will include all
metadata (attributes, resources, etc.), but will not include the ENML
content of the note or the binary contents of any resources.
</dd>
<dt>stoppedWords</dt>
<dd>
If the NoteList was produced using a text based search
query that included words that are not indexed or searched by the service,
this will include a list of those ignored words.
</dd>
<dt>searchedWords</dt>
<dd>
If the NoteList was produced using a text based search
query that included viable search words or quoted expressions, this will
include a list of those words. Any stopped words will not be included
in this list.
</dd>
<dt>updateCount</dt>
<dd>
Indicates the total number of transactions that have
been committed within the account. This reflects (for example) the
number of discrete additions or modifications that have been made to
the data in this account (tags, notes, resources, etc.).
This number is the "high water mark" for Update Sequence Numbers (USN)
within the account.
</dd>
</dl>
Attributes:
- startIndex
- totalNotes
- notes
- stoppedWords
- searchedWords
- updateCount
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'startIndex', None, None, ), # 1
(2, TType.I32, 'totalNotes', None, None, ), # 2
(3, TType.LIST, 'notes', (TType.STRUCT,(evernote.edam.type.ttypes.Note, evernote.edam.type.ttypes.Note.thrift_spec)), None, ), # 3
(4, TType.LIST, 'stoppedWords', (TType.STRING,None), None, ), # 4
(5, TType.LIST, 'searchedWords', (TType.STRING,None), None, ), # 5
(6, TType.I32, 'updateCount', None, None, ), # 6
)
def __init__(self, startIndex=None, totalNotes=None, notes=None, stoppedWords=None, searchedWords=None, updateCount=None,):
self.startIndex = startIndex
self.totalNotes = totalNotes
self.notes = notes
self.stoppedWords = stoppedWords
self.searchedWords = searchedWords
self.updateCount = updateCount
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.startIndex = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.totalNotes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.notes = []
(_etype87, _size84) = iprot.readListBegin()
for _i88 in xrange(_size84):
_elem89 = evernote.edam.type.ttypes.Note()
_elem89.read(iprot)
self.notes.append(_elem89)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.stoppedWords = []
(_etype93, _size90) = iprot.readListBegin()
for _i94 in xrange(_size90):
_elem95 = iprot.readString();
self.stoppedWords.append(_elem95)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.searchedWords = []
(_etype99, _size96) = iprot.readListBegin()
for _i100 in xrange(_size96):
_elem101 = iprot.readString();
self.searchedWords.append(_elem101)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.updateCount = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoteList')
if self.startIndex is not None:
oprot.writeFieldBegin('startIndex', TType.I32, 1)
oprot.writeI32(self.startIndex)
oprot.writeFieldEnd()
if self.totalNotes is not None:
oprot.writeFieldBegin('totalNotes', TType.I32, 2)
oprot.writeI32(self.totalNotes)
oprot.writeFieldEnd()
if self.notes is not None:
oprot.writeFieldBegin('notes', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.notes))
for iter102 in self.notes:
iter102.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.stoppedWords is not None:
oprot.writeFieldBegin('stoppedWords', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.stoppedWords))
for iter103 in self.stoppedWords:
oprot.writeString(iter103)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.searchedWords is not None:
oprot.writeFieldBegin('searchedWords', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.searchedWords))
for iter104 in self.searchedWords:
oprot.writeString(iter104)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.updateCount is not None:
oprot.writeFieldBegin('updateCount', TType.I32, 6)
oprot.writeI32(self.updateCount)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.startIndex is None:
raise TProtocol.TProtocolException(message='Required field startIndex is unset!')
if self.totalNotes is None:
raise TProtocol.TProtocolException(message='Required field totalNotes is unset!')
if self.notes is None:
raise TProtocol.TProtocolException(message='Required field notes is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
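# Illustrative sketch: the paging arithmetic from the docstring above,
# remaining = totalNotes - (startIndex + len(notes)).
def _example_notes_remaining(note_list):
  return note_list.totalNotes - (note_list.startIndex + len(note_list.notes))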
class NoteMetadata(object):
"""
This structure is used in the set of results returned by the
findNotesMetadata function. It represents the high-level information about
a single Note, without some of the larger deep structure. This allows
for the information about a list of Notes to be returned relatively quickly
with less marshalling and data transfer to remote clients.
Most fields in this structure are identical to the corresponding field in
the Note structure, with the exception of:
<dl>
<dt>largestResourceMime</dt>
<dd>If set, then this will contain the MIME type of the largest Resource
(in bytes) within the Note. This may be useful, for example, to choose
an appropriate icon or thumbnail to represent the Note.
</dd>
<dt>largestResourceSize</dt>
<dd>If set, this will contain the size of the largest Resource file, in
bytes, within the Note. This may be useful, for example, to decide whether
to ask the server for a thumbnail to represent the Note.
</dd>
</dl>
Attributes:
- guid
- title
- contentLength
- created
- updated
- deleted
- updateSequenceNum
- notebookGuid
- tagGuids
- attributes
- largestResourceMime
- largestResourceSize
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'guid', None, None, ), # 1
(2, TType.STRING, 'title', None, None, ), # 2
None, # 3
None, # 4
(5, TType.I32, 'contentLength', None, None, ), # 5
(6, TType.I64, 'created', None, None, ), # 6
(7, TType.I64, 'updated', None, None, ), # 7
(8, TType.I64, 'deleted', None, None, ), # 8
None, # 9
(10, TType.I32, 'updateSequenceNum', None, None, ), # 10
(11, TType.STRING, 'notebookGuid', None, None, ), # 11
(12, TType.LIST, 'tagGuids', (TType.STRING,None), None, ), # 12
None, # 13
(14, TType.STRUCT, 'attributes', (evernote.edam.type.ttypes.NoteAttributes, evernote.edam.type.ttypes.NoteAttributes.thrift_spec), None, ), # 14
None, # 15
None, # 16
None, # 17
None, # 18
None, # 19
(20, TType.STRING, 'largestResourceMime', None, None, ), # 20
(21, TType.I32, 'largestResourceSize', None, None, ), # 21
)
def __init__(self, guid=None, title=None, contentLength=None, created=None, updated=None, deleted=None, updateSequenceNum=None, notebookGuid=None, tagGuids=None, attributes=None, largestResourceMime=None, largestResourceSize=None,):
self.guid = guid
self.title = title
self.contentLength = contentLength
self.created = created
self.updated = updated
self.deleted = deleted
self.updateSequenceNum = updateSequenceNum
self.notebookGuid = notebookGuid
self.tagGuids = tagGuids
self.attributes = attributes
self.largestResourceMime = largestResourceMime
self.largestResourceSize = largestResourceSize
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.guid = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.title = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.contentLength = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.created = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.updated = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I64:
self.deleted = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.I32:
self.updateSequenceNum = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.STRING:
self.notebookGuid = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.LIST:
self.tagGuids = []
(_etype108, _size105) = iprot.readListBegin()
for _i109 in xrange(_size105):
_elem110 = iprot.readString();
self.tagGuids.append(_elem110)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.STRUCT:
self.attributes = evernote.edam.type.ttypes.NoteAttributes()
self.attributes.read(iprot)
else:
iprot.skip(ftype)
elif fid == 20:
if ftype == TType.STRING:
self.largestResourceMime = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 21:
if ftype == TType.I32:
self.largestResourceSize = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoteMetadata')
if self.guid is not None:
oprot.writeFieldBegin('guid', TType.STRING, 1)
oprot.writeString(self.guid)
oprot.writeFieldEnd()
if self.title is not None:
oprot.writeFieldBegin('title', TType.STRING, 2)
oprot.writeString(self.title)
oprot.writeFieldEnd()
if self.contentLength is not None:
oprot.writeFieldBegin('contentLength', TType.I32, 5)
oprot.writeI32(self.contentLength)
oprot.writeFieldEnd()
if self.created is not None:
oprot.writeFieldBegin('created', TType.I64, 6)
oprot.writeI64(self.created)
oprot.writeFieldEnd()
if self.updated is not None:
oprot.writeFieldBegin('updated', TType.I64, 7)
oprot.writeI64(self.updated)
oprot.writeFieldEnd()
if self.deleted is not None:
oprot.writeFieldBegin('deleted', TType.I64, 8)
oprot.writeI64(self.deleted)
oprot.writeFieldEnd()
if self.updateSequenceNum is not None:
oprot.writeFieldBegin('updateSequenceNum', TType.I32, 10)
oprot.writeI32(self.updateSequenceNum)
oprot.writeFieldEnd()
if self.notebookGuid is not None:
oprot.writeFieldBegin('notebookGuid', TType.STRING, 11)
oprot.writeString(self.notebookGuid)
oprot.writeFieldEnd()
if self.tagGuids is not None:
oprot.writeFieldBegin('tagGuids', TType.LIST, 12)
oprot.writeListBegin(TType.STRING, len(self.tagGuids))
for iter111 in self.tagGuids:
oprot.writeString(iter111)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.attributes is not None:
oprot.writeFieldBegin('attributes', TType.STRUCT, 14)
self.attributes.write(oprot)
oprot.writeFieldEnd()
if self.largestResourceMime is not None:
oprot.writeFieldBegin('largestResourceMime', TType.STRING, 20)
oprot.writeString(self.largestResourceMime)
oprot.writeFieldEnd()
if self.largestResourceSize is not None:
oprot.writeFieldBegin('largestResourceSize', TType.I32, 21)
oprot.writeI32(self.largestResourceSize)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.guid is None:
raise TProtocol.TProtocolException(message='Required field guid is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class NotesMetadataList(object):
"""
This structure is returned from calls to the findNotesMetadata function to
give the high-level metadata about a subset of Notes that are found to
match a specified NoteFilter in a search.
<dl>
<dt>startIndex</dt>
<dd>
The starting index within the overall set of notes. This
is also the number of notes that are "before" this list in the set.
</dd>
<dt>totalNotes</dt>
<dd>
The number of notes in the larger set. This can be used
to calculate how many notes are "after" this note in the set.
(I.e. remaining = totalNotes - (startIndex + notes.length) )
</dd>
<dt>notes</dt>
<dd>
The list of metadata for Notes in this range. The set of optional fields
that are set in each metadata structure will depend on the
NotesMetadataResultSpec provided by the caller when the search was
performed. Only the 'guid' field will be guaranteed to be set in each
Note.
</dd>
<dt>stoppedWords</dt>
<dd>
If the NoteList was produced using a text based search
query that included words that are not indexed or searched by the service,
this will include a list of those ignored words.
</dd>
<dt>searchedWords</dt>
<dd>
If the NoteList was produced using a text based search
query that included viable search words or quoted expressions, this will
include a list of those words. Any stopped words will not be included
in this list.
</dd>
<dt>updateCount</dt>
<dd>
Indicates the total number of transactions that have
been committed within the account. This reflects (for example) the
number of discrete additions or modifications that have been made to
the data in this account (tags, notes, resources, etc.).
This number is the "high water mark" for Update Sequence Numbers (USN)
within the account.
</dd>
</dl>
Attributes:
- startIndex
- totalNotes
- notes
- stoppedWords
- searchedWords
- updateCount
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'startIndex', None, None, ), # 1
(2, TType.I32, 'totalNotes', None, None, ), # 2
(3, TType.LIST, 'notes', (TType.STRUCT,(NoteMetadata, NoteMetadata.thrift_spec)), None, ), # 3
(4, TType.LIST, 'stoppedWords', (TType.STRING,None), None, ), # 4
(5, TType.LIST, 'searchedWords', (TType.STRING,None), None, ), # 5
(6, TType.I32, 'updateCount', None, None, ), # 6
)
def __init__(self, startIndex=None, totalNotes=None, notes=None, stoppedWords=None, searchedWords=None, updateCount=None,):
self.startIndex = startIndex
self.totalNotes = totalNotes
self.notes = notes
self.stoppedWords = stoppedWords
self.searchedWords = searchedWords
self.updateCount = updateCount
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.startIndex = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.totalNotes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.notes = []
(_etype115, _size112) = iprot.readListBegin()
for _i116 in xrange(_size112):
_elem117 = NoteMetadata()
_elem117.read(iprot)
self.notes.append(_elem117)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.stoppedWords = []
(_etype121, _size118) = iprot.readListBegin()
for _i122 in xrange(_size118):
_elem123 = iprot.readString();
self.stoppedWords.append(_elem123)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.searchedWords = []
(_etype127, _size124) = iprot.readListBegin()
for _i128 in xrange(_size124):
_elem129 = iprot.readString();
self.searchedWords.append(_elem129)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.updateCount = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NotesMetadataList')
if self.startIndex is not None:
oprot.writeFieldBegin('startIndex', TType.I32, 1)
oprot.writeI32(self.startIndex)
oprot.writeFieldEnd()
if self.totalNotes is not None:
oprot.writeFieldBegin('totalNotes', TType.I32, 2)
oprot.writeI32(self.totalNotes)
oprot.writeFieldEnd()
if self.notes is not None:
oprot.writeFieldBegin('notes', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.notes))
for iter130 in self.notes:
iter130.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.stoppedWords is not None:
oprot.writeFieldBegin('stoppedWords', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.stoppedWords))
for iter131 in self.stoppedWords:
oprot.writeString(iter131)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.searchedWords is not None:
oprot.writeFieldBegin('searchedWords', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.searchedWords))
for iter132 in self.searchedWords:
oprot.writeString(iter132)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.updateCount is not None:
oprot.writeFieldBegin('updateCount', TType.I32, 6)
oprot.writeI32(self.updateCount)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.startIndex is None:
raise TProtocol.TProtocolException(message='Required field startIndex is unset!')
if self.totalNotes is None:
raise TProtocol.TProtocolException(message='Required field totalNotes is unset!')
if self.notes is None:
raise TProtocol.TProtocolException(message='Required field notes is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
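# Illustrative sketch (not part of the generated Thrift code): the paging
# arithmetic described in the NotesMetadataList docstring above. `result` is
# assumed to be a NotesMetadataList returned by a findNotesMetadata call.
def _remaining_notes(result):
    # remaining = totalNotes - (startIndex + notes.length), per the docstring
    return result.totalNotes - (result.startIndex + len(result.notes))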
class NotesMetadataResultSpec(object):
"""
This structure is provided to the findNotesMetadata function to specify
the subset of fields that should be included in each NoteMetadata element
that is returned in the NotesMetadataList.
Each field on this structure is a boolean flag that indicates whether the
corresponding field should be included in the NoteMetadata structure when
it is returned. For example, if the 'includeTitle' field is set on this
structure when calling findNotesMetadata, then each NoteMetadata in the
list should have its 'title' field set.
If one of the fields in this spec is not set, then it will be treated as
'false' by the server, so the default behavior is to include nothing in
replies (except the mandatory GUID).
Attributes:
- includeTitle
- includeContentLength
- includeCreated
- includeUpdated
- includeDeleted
- includeUpdateSequenceNum
- includeNotebookGuid
- includeTagGuids
- includeAttributes
- includeLargestResourceMime
- includeLargestResourceSize
"""
thrift_spec = (
None, # 0
None, # 1
(2, TType.BOOL, 'includeTitle', None, None, ), # 2
None, # 3
None, # 4
(5, TType.BOOL, 'includeContentLength', None, None, ), # 5
(6, TType.BOOL, 'includeCreated', None, None, ), # 6
(7, TType.BOOL, 'includeUpdated', None, None, ), # 7
(8, TType.BOOL, 'includeDeleted', None, None, ), # 8
None, # 9
(10, TType.BOOL, 'includeUpdateSequenceNum', None, None, ), # 10
(11, TType.BOOL, 'includeNotebookGuid', None, None, ), # 11
(12, TType.BOOL, 'includeTagGuids', None, None, ), # 12
None, # 13
(14, TType.BOOL, 'includeAttributes', None, None, ), # 14
None, # 15
None, # 16
None, # 17
None, # 18
None, # 19
(20, TType.BOOL, 'includeLargestResourceMime', None, None, ), # 20
(21, TType.BOOL, 'includeLargestResourceSize', None, None, ), # 21
)
def __init__(self, includeTitle=None, includeContentLength=None, includeCreated=None, includeUpdated=None, includeDeleted=None, includeUpdateSequenceNum=None, includeNotebookGuid=None, includeTagGuids=None, includeAttributes=None, includeLargestResourceMime=None, includeLargestResourceSize=None,):
self.includeTitle = includeTitle
self.includeContentLength = includeContentLength
self.includeCreated = includeCreated
self.includeUpdated = includeUpdated
self.includeDeleted = includeDeleted
self.includeUpdateSequenceNum = includeUpdateSequenceNum
self.includeNotebookGuid = includeNotebookGuid
self.includeTagGuids = includeTagGuids
self.includeAttributes = includeAttributes
self.includeLargestResourceMime = includeLargestResourceMime
self.includeLargestResourceSize = includeLargestResourceSize
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 2:
if ftype == TType.BOOL:
self.includeTitle = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.includeContentLength = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.BOOL:
self.includeCreated = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.includeUpdated = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.includeDeleted = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.BOOL:
self.includeUpdateSequenceNum = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.BOOL:
self.includeNotebookGuid = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.BOOL:
self.includeTagGuids = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 14:
if ftype == TType.BOOL:
self.includeAttributes = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 20:
if ftype == TType.BOOL:
self.includeLargestResourceMime = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 21:
if ftype == TType.BOOL:
self.includeLargestResourceSize = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NotesMetadataResultSpec')
if self.includeTitle is not None:
oprot.writeFieldBegin('includeTitle', TType.BOOL, 2)
oprot.writeBool(self.includeTitle)
oprot.writeFieldEnd()
if self.includeContentLength is not None:
oprot.writeFieldBegin('includeContentLength', TType.BOOL, 5)
oprot.writeBool(self.includeContentLength)
oprot.writeFieldEnd()
if self.includeCreated is not None:
oprot.writeFieldBegin('includeCreated', TType.BOOL, 6)
oprot.writeBool(self.includeCreated)
oprot.writeFieldEnd()
if self.includeUpdated is not None:
oprot.writeFieldBegin('includeUpdated', TType.BOOL, 7)
oprot.writeBool(self.includeUpdated)
oprot.writeFieldEnd()
if self.includeDeleted is not None:
oprot.writeFieldBegin('includeDeleted', TType.BOOL, 8)
oprot.writeBool(self.includeDeleted)
oprot.writeFieldEnd()
if self.includeUpdateSequenceNum is not None:
oprot.writeFieldBegin('includeUpdateSequenceNum', TType.BOOL, 10)
oprot.writeBool(self.includeUpdateSequenceNum)
oprot.writeFieldEnd()
if self.includeNotebookGuid is not None:
oprot.writeFieldBegin('includeNotebookGuid', TType.BOOL, 11)
oprot.writeBool(self.includeNotebookGuid)
oprot.writeFieldEnd()
if self.includeTagGuids is not None:
oprot.writeFieldBegin('includeTagGuids', TType.BOOL, 12)
oprot.writeBool(self.includeTagGuids)
oprot.writeFieldEnd()
if self.includeAttributes is not None:
oprot.writeFieldBegin('includeAttributes', TType.BOOL, 14)
oprot.writeBool(self.includeAttributes)
oprot.writeFieldEnd()
if self.includeLargestResourceMime is not None:
oprot.writeFieldBegin('includeLargestResourceMime', TType.BOOL, 20)
oprot.writeBool(self.includeLargestResourceMime)
oprot.writeFieldEnd()
if self.includeLargestResourceSize is not None:
oprot.writeFieldBegin('includeLargestResourceSize', TType.BOOL, 21)
oprot.writeBool(self.includeLargestResourceSize)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
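# Illustrative sketch: building a NotesMetadataResultSpec. Only the flags set
# to True are filled in on each returned NoteMetadata; everything else
# (except the mandatory guid) is omitted, as documented above.
def _example_result_spec():
    return NotesMetadataResultSpec(includeTitle=True,
                                   includeUpdated=True,
                                   includeNotebookGuid=True)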
class NoteCollectionCounts(object):
"""
A data structure representing the number of notes for each notebook
and tag with a non-zero set of applicable notes.
<dl>
<dt>notebookCounts</dt>
<dd>
A mapping from the Notebook GUID to the number of
notes (from some selection) that are in the corresponding notebook.
</dd>
<dt>tagCounts</dt>
<dd>
A mapping from the Tag GUID to the number of notes (from some
selection) that have the corresponding tag.
</dd>
<dt>trashCount</dt>
<dd>
If this is set, then this is the number of notes that are in the trash.
If this is not set, then the number of notes in the trash hasn't been
reported. (I.e. if there are no notes in the trash, this will be set
to 0.)
</dd>
</dl>
Attributes:
- notebookCounts
- tagCounts
- trashCount
"""
thrift_spec = (
None, # 0
(1, TType.MAP, 'notebookCounts', (TType.STRING,None,TType.I32,None), None, ), # 1
(2, TType.MAP, 'tagCounts', (TType.STRING,None,TType.I32,None), None, ), # 2
(3, TType.I32, 'trashCount', None, None, ), # 3
)
def __init__(self, notebookCounts=None, tagCounts=None, trashCount=None,):
self.notebookCounts = notebookCounts
self.tagCounts = tagCounts
self.trashCount = trashCount
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.MAP:
self.notebookCounts = {}
(_ktype134, _vtype135, _size133 ) = iprot.readMapBegin()
for _i137 in xrange(_size133):
_key138 = iprot.readString();
_val139 = iprot.readI32();
self.notebookCounts[_key138] = _val139
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.tagCounts = {}
(_ktype141, _vtype142, _size140 ) = iprot.readMapBegin()
for _i144 in xrange(_size140):
_key145 = iprot.readString();
_val146 = iprot.readI32();
self.tagCounts[_key145] = _val146
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.trashCount = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoteCollectionCounts')
if self.notebookCounts is not None:
oprot.writeFieldBegin('notebookCounts', TType.MAP, 1)
oprot.writeMapBegin(TType.STRING, TType.I32, len(self.notebookCounts))
for kiter147,viter148 in self.notebookCounts.items():
oprot.writeString(kiter147)
oprot.writeI32(viter148)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.tagCounts is not None:
oprot.writeFieldBegin('tagCounts', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.I32, len(self.tagCounts))
for kiter149,viter150 in self.tagCounts.items():
oprot.writeString(kiter149)
oprot.writeI32(viter150)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.trashCount is not None:
oprot.writeFieldBegin('trashCount', TType.I32, 3)
oprot.writeI32(self.trashCount)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
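# Illustrative sketch: totaling a NoteCollectionCounts instance. Uses only
# the fields defined above; `counts` is assumed to come from a note-count
# query against the service. trashCount is reported separately, so it is not
# added to the per-notebook sum here.
def _notebook_note_total(counts):
    return sum((counts.notebookCounts or {}).values())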
class NoteEmailParameters(object):
"""
Parameters that must be given to the NoteStore emailNote call. These allow
the caller to specify the note to send, the recipient addresses, etc.
<dl>
<dt>guid</dt>
<dd>
If set, this must be the GUID of a note within the user's account that
should be retrieved from the service and sent as email. If not set,
the 'note' field must be provided instead.
</dd>
<dt>note</dt>
<dd>
If the 'guid' field is not set, this field must be provided, including
the full contents of the note (and all of its Resources) to send.
This can be used for a Note that has not been created in the service,
for example by a local client with local notes.
</dd>
<dt>toAddresses</dt>
<dd>
If provided, this should contain a list of the SMTP email addresses
that should be included in the "To:" line of the email.
Callers must specify at least one "to" or "cc" email address.
</dd>
<dt>ccAddresses</dt>
<dd>
If provided, this should contain a list of the SMTP email addresses
that should be included in the "Cc:" line of the email.
Callers must specify at least one "to" or "cc" email address.
</dd>
<dt>subject</dt>
<dd>
If provided, this should contain the subject line of the email that
will be sent. If not provided, the title of the note will be used
as the subject of the email.
</dd>
<dt>message</dt>
<dd>
If provided, this is additional personal text that should be included
into the email as a message from the owner to the recipient(s).
</dd>
</dl>
Attributes:
- guid
- note
- toAddresses
- ccAddresses
- subject
- message
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'guid', None, None, ), # 1
(2, TType.STRUCT, 'note', (evernote.edam.type.ttypes.Note, evernote.edam.type.ttypes.Note.thrift_spec), None, ), # 2
(3, TType.LIST, 'toAddresses', (TType.STRING,None), None, ), # 3
(4, TType.LIST, 'ccAddresses', (TType.STRING,None), None, ), # 4
(5, TType.STRING, 'subject', None, None, ), # 5
(6, TType.STRING, 'message', None, None, ), # 6
)
def __init__(self, guid=None, note=None, toAddresses=None, ccAddresses=None, subject=None, message=None,):
self.guid = guid
self.note = note
self.toAddresses = toAddresses
self.ccAddresses = ccAddresses
self.subject = subject
self.message = message
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.guid = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.note = evernote.edam.type.ttypes.Note()
self.note.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.toAddresses = []
(_etype154, _size151) = iprot.readListBegin()
for _i155 in xrange(_size151):
_elem156 = iprot.readString();
self.toAddresses.append(_elem156)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.ccAddresses = []
(_etype160, _size157) = iprot.readListBegin()
for _i161 in xrange(_size157):
_elem162 = iprot.readString();
self.ccAddresses.append(_elem162)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.subject = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.message = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoteEmailParameters')
if self.guid is not None:
oprot.writeFieldBegin('guid', TType.STRING, 1)
oprot.writeString(self.guid)
oprot.writeFieldEnd()
if self.note is not None:
oprot.writeFieldBegin('note', TType.STRUCT, 2)
self.note.write(oprot)
oprot.writeFieldEnd()
if self.toAddresses is not None:
oprot.writeFieldBegin('toAddresses', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.toAddresses))
for iter163 in self.toAddresses:
oprot.writeString(iter163)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ccAddresses is not None:
oprot.writeFieldBegin('ccAddresses', TType.LIST, 4)
oprot.writeListBegin(TType.STRING, len(self.ccAddresses))
for iter164 in self.ccAddresses:
oprot.writeString(iter164)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.subject is not None:
oprot.writeFieldBegin('subject', TType.STRING, 5)
oprot.writeString(self.subject)
oprot.writeFieldEnd()
if self.message is not None:
oprot.writeFieldBegin('message', TType.STRING, 6)
oprot.writeString(self.message)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
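# Illustrative sketch: preparing NoteEmailParameters for an existing note.
# Per the docstring, exactly one of 'guid' or 'note' should be set, and at
# least one "to" or "cc" address is required. The values below are
# hypothetical placeholders.
def _example_email_params(note_guid):
    return NoteEmailParameters(guid=note_guid,
                               toAddresses=['recipient@example.com'],
                               subject='Shared note')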
class NoteVersionId(object):
"""
Identifying information about previous versions of a note that are backed up
within Evernote's servers. Used in the return value of the listNoteVersions
call.
<dl>
<dt>updateSequenceNum</dt>
<dd>
The update sequence number for the Note when it last had this content.
This serves to uniquely identify each version of the note, since USN
values are unique within an account for each update.
</dd>
<dt>updated</dt>
<dd>
The 'updated' time that was set on the Note when it had this version
of the content. This is the user-modifiable modification time on the
note, so it's not reliable for guaranteeing the order of various
versions. (E.g. if someone modifies the note, then changes this time
manually into the past and then updates the note again.)
</dd>
<dt>saved</dt>
<dd>
A timestamp that holds the date and time when this version of the note
was backed up by Evernote's servers.
</dd>
<dt>title</dt>
<dd>
The title of the note when this particular version was saved. (The
current title of the note may differ from this value.)
</dd>
</dl>
Attributes:
- updateSequenceNum
- updated
- saved
- title
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'updateSequenceNum', None, None, ), # 1
(2, TType.I64, 'updated', None, None, ), # 2
(3, TType.I64, 'saved', None, None, ), # 3
(4, TType.STRING, 'title', None, None, ), # 4
)
def __init__(self, updateSequenceNum=None, updated=None, saved=None, title=None,):
self.updateSequenceNum = updateSequenceNum
self.updated = updated
self.saved = saved
self.title = title
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.updateSequenceNum = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.updated = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.saved = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.title = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('NoteVersionId')
if self.updateSequenceNum is not None:
oprot.writeFieldBegin('updateSequenceNum', TType.I32, 1)
oprot.writeI32(self.updateSequenceNum)
oprot.writeFieldEnd()
if self.updated is not None:
oprot.writeFieldBegin('updated', TType.I64, 2)
oprot.writeI64(self.updated)
oprot.writeFieldEnd()
if self.saved is not None:
oprot.writeFieldBegin('saved', TType.I64, 3)
oprot.writeI64(self.saved)
oprot.writeFieldEnd()
if self.title is not None:
oprot.writeFieldBegin('title', TType.STRING, 4)
oprot.writeString(self.title)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.updateSequenceNum is None:
raise TProtocol.TProtocolException(message='Required field updateSequenceNum is unset!')
if self.updated is None:
raise TProtocol.TProtocolException(message='Required field updated is unset!')
if self.saved is None:
raise TProtocol.TProtocolException(message='Required field saved is unset!')
if self.title is None:
raise TProtocol.TProtocolException(message='Required field title is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
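# Illustrative sketch: ordering NoteVersionId entries. As the docstring
# explains, 'updated' is user-modifiable, so the update sequence number is
# the reliable ordering key.
def _versions_newest_first(version_ids):
    return sorted(version_ids, key=lambda v: v.updateSequenceNum, reverse=True)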
class ClientUsageMetrics(object):
"""
This structure is passed from clients to the Evernote service when they wish
to relay coarse-grained usage metrics to the service to help improve
products.
<dl>
<dt>sessions</dt>
<dd>
This field contains a count of the number of usage "sessions" that have
occurred with this client which have not previously been reported to
the service.
A "session" is defined as one of the 96 fifteen-minute intervals of the
day when someone used Evernote's interface at least once.
So if a user interacts with an Evernote client at 12:18, 12:24, and 12:36,
and then the client synchronizes at 12:39, it would report that there were
two previously-unreported sessions (one session for the 12:15-12:30 time
period, and one for the 12:30-12:45 period).
If the user used Evernote again at 12:41 and synchronized at 12:43, it
would not report any new sessions, because the 12:30-12:45 session had
already been reported.
</dd>
</dl>
Attributes:
- sessions
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'sessions', None, None, ), # 1
)
def __init__(self, sessions=None,):
self.sessions = sessions
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.sessions = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ClientUsageMetrics')
if self.sessions is not None:
oprot.writeFieldBegin('sessions', TType.I32, 1)
oprot.writeI32(self.sessions)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
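# Illustrative sketch of the "session" bucketing described above: each of
# the 96 fifteen-minute intervals of a day counts at most once. Timestamps
# are assumed to be seconds since midnight, local time. For the docstring's
# example (12:18, 12:24, 12:36) this yields 2 sessions.
def _count_sessions(usage_timestamps):
    buckets = set(int(t // (15 * 60)) for t in usage_timestamps)
    return len(buckets)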
class RelatedQuery(object):
"""
A description of the thing for which we are searching for related
entities.
You must specify either <em>noteGuid</em> or <em>plainText</em>, but
not both. <em>filter</em> and <em>referenceUri</em> are optional.
<dl>
<dt>noteGuid</dt>
<dd>The GUID of an existing note in your account for which related
entities will be found.</dd>
<dt>plainText</dt>
<dd>A string of plain text for which to find related entities.
You should provide a text block with a number of characters between
EDAM_RELATED_PLAINTEXT_LEN_MIN and EDAM_RELATED_PLAINTEXT_LEN_MAX.
</dd>
<dt>filter</dt>
<dd>The list of criteria that will constrain the notes being considered
related.
Please note that some of the parameters may be ignored, such as
<em>order</em> and <em>ascending</em>.
</dd>
<dt>referenceUri</dt>
<dd>A URI string specifying a reference entity, around which "relatedness"
should be based. This can be a URL pointing to a web page, for example.
</dd>
</dl>
Attributes:
- noteGuid
- plainText
- filter
- referenceUri
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'noteGuid', None, None, ), # 1
(2, TType.STRING, 'plainText', None, None, ), # 2
(3, TType.STRUCT, 'filter', (NoteFilter, NoteFilter.thrift_spec), None, ), # 3
(4, TType.STRING, 'referenceUri', None, None, ), # 4
)
def __init__(self, noteGuid=None, plainText=None, filter=None, referenceUri=None,):
self.noteGuid = noteGuid
self.plainText = plainText
self.filter = filter
self.referenceUri = referenceUri
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.noteGuid = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.plainText = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.filter = NoteFilter()
self.filter.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.referenceUri = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RelatedQuery')
if self.noteGuid is not None:
oprot.writeFieldBegin('noteGuid', TType.STRING, 1)
oprot.writeString(self.noteGuid)
oprot.writeFieldEnd()
if self.plainText is not None:
oprot.writeFieldBegin('plainText', TType.STRING, 2)
oprot.writeString(self.plainText)
oprot.writeFieldEnd()
if self.filter is not None:
oprot.writeFieldBegin('filter', TType.STRUCT, 3)
self.filter.write(oprot)
oprot.writeFieldEnd()
if self.referenceUri is not None:
oprot.writeFieldBegin('referenceUri', TType.STRING, 4)
oprot.writeString(self.referenceUri)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
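# Illustrative sketch: the two mutually exclusive ways to build a
# RelatedQuery, per the docstring (set noteGuid or plainText, never both).
def _query_for_note(guid):
    return RelatedQuery(noteGuid=guid)

def _query_for_text(text):
    return RelatedQuery(plainText=text)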
class RelatedResult(object):
"""
The result of calling findRelated(). The contents of the notes,
notebooks, and tags fields will be in decreasing order of expected
relevance. It is possible that fewer results than requested will be
returned even if there are enough distinct entities in the account
in cases where the relevance is estimated to be low.
<dl>
<dt>notes</dt>
<dd>If notes have been requested to be included, this will be the
list of notes.</dd>
<dt>notebooks</dt>
<dd>If notebooks have been requested to be included, this will be the
list of notebooks.</dd>
<dt>tags</dt>
<dd>If tags have been requested to be included, this will be the list
of tags.</dd>
<dt>containingNotebooks</dt>
<dd>If <code>includeContainingNotebooks</code> is set to <code>true</code>
in the RelatedResultSpec, return the list of notebooks to
which the returned related notes belong. The notebooks in this
list will occur once per notebook GUID and are represented as
NotebookDescriptor objects.</dd>
</dl>
Attributes:
- notes
- notebooks
- tags
- containingNotebooks
"""
thrift_spec = (
None, # 0
(1, TType.LIST, 'notes', (TType.STRUCT,(evernote.edam.type.ttypes.Note, evernote.edam.type.ttypes.Note.thrift_spec)), None, ), # 1
(2, TType.LIST, 'notebooks', (TType.STRUCT,(evernote.edam.type.ttypes.Notebook, evernote.edam.type.ttypes.Notebook.thrift_spec)), None, ), # 2
(3, TType.LIST, 'tags', (TType.STRUCT,(evernote.edam.type.ttypes.Tag, evernote.edam.type.ttypes.Tag.thrift_spec)), None, ), # 3
(4, TType.LIST, 'containingNotebooks', (TType.STRUCT,(evernote.edam.type.ttypes.NotebookDescriptor, evernote.edam.type.ttypes.NotebookDescriptor.thrift_spec)), None, ), # 4
)
def __init__(self, notes=None, notebooks=None, tags=None, containingNotebooks=None,):
self.notes = notes
self.notebooks = notebooks
self.tags = tags
self.containingNotebooks = containingNotebooks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.notes = []
(_etype168, _size165) = iprot.readListBegin()
for _i169 in xrange(_size165):
_elem170 = evernote.edam.type.ttypes.Note()
_elem170.read(iprot)
self.notes.append(_elem170)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.notebooks = []
(_etype174, _size171) = iprot.readListBegin()
for _i175 in xrange(_size171):
_elem176 = evernote.edam.type.ttypes.Notebook()
_elem176.read(iprot)
self.notebooks.append(_elem176)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.tags = []
(_etype180, _size177) = iprot.readListBegin()
for _i181 in xrange(_size177):
_elem182 = evernote.edam.type.ttypes.Tag()
_elem182.read(iprot)
self.tags.append(_elem182)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.containingNotebooks = []
(_etype186, _size183) = iprot.readListBegin()
for _i187 in xrange(_size183):
_elem188 = evernote.edam.type.ttypes.NotebookDescriptor()
_elem188.read(iprot)
self.containingNotebooks.append(_elem188)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RelatedResult')
if self.notes is not None:
oprot.writeFieldBegin('notes', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.notes))
for iter189 in self.notes:
iter189.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.notebooks is not None:
oprot.writeFieldBegin('notebooks', TType.LIST, 2)
oprot.writeListBegin(TType.STRUCT, len(self.notebooks))
for iter190 in self.notebooks:
iter190.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.tags is not None:
oprot.writeFieldBegin('tags', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.tags))
for iter191 in self.tags:
iter191.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.containingNotebooks is not None:
oprot.writeFieldBegin('containingNotebooks', TType.LIST, 4)
oprot.writeListBegin(TType.STRUCT, len(self.containingNotebooks))
for iter192 in self.containingNotebooks:
iter192.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RelatedResultSpec(object):
"""
A description of the thing for which the service will find related
entities, via findRelated(), together with a description of what
type of entities and how many you are seeking in the
RelatedResult.
<dl>
<dt>maxNotes</dt>
<dd>Return notes that are related to the query, but no more than
this many. Any value greater than EDAM_RELATED_MAX_NOTES
will be silently capped. If you do not set this field, then
no notes will be returned.</dd>
<dt>maxNotebooks</dt>
<dd>Return notebooks that are related to the query, but no more than
this many. Any value greater than EDAM_RELATED_MAX_NOTEBOOKS
will be silently capped. If you do not set this field, then
no notebooks will be returned.</dd>
<dt>maxTags</dt>
<dd>Return tags that are related to the query, but no more than
this many. Any value greater than EDAM_RELATED_MAX_TAGS
will be silently capped. If you do not set this field, then
no tags will be returned.</dd>
<dt>writableNotebooksOnly</dt>
<dd>Require that all returned related notebooks are writable.
The user will be able to create notes in all returned notebooks.
However, individual notes returned may still belong to notebooks
in which the user lacks the ability to create notes.</dd>
<dt>includeContainingNotebooks</dt>
<dd>If set to <code>true</code>, return the containingNotebooks field
in the RelatedResult, which will contain the list of notebooks to
which the returned related notes belong.</dd>
</dl>
Attributes:
- maxNotes
- maxNotebooks
- maxTags
- writableNotebooksOnly
- includeContainingNotebooks
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'maxNotes', None, None, ), # 1
(2, TType.I32, 'maxNotebooks', None, None, ), # 2
(3, TType.I32, 'maxTags', None, None, ), # 3
(4, TType.BOOL, 'writableNotebooksOnly', None, None, ), # 4
(5, TType.BOOL, 'includeContainingNotebooks', None, None, ), # 5
)
def __init__(self, maxNotes=None, maxNotebooks=None, maxTags=None, writableNotebooksOnly=None, includeContainingNotebooks=None,):
self.maxNotes = maxNotes
self.maxNotebooks = maxNotebooks
self.maxTags = maxTags
self.writableNotebooksOnly = writableNotebooksOnly
self.includeContainingNotebooks = includeContainingNotebooks
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.maxNotes = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.maxNotebooks = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.maxTags = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.writableNotebooksOnly = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.includeContainingNotebooks = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RelatedResultSpec')
if self.maxNotes is not None:
oprot.writeFieldBegin('maxNotes', TType.I32, 1)
oprot.writeI32(self.maxNotes)
oprot.writeFieldEnd()
if self.maxNotebooks is not None:
oprot.writeFieldBegin('maxNotebooks', TType.I32, 2)
oprot.writeI32(self.maxNotebooks)
oprot.writeFieldEnd()
if self.maxTags is not None:
oprot.writeFieldBegin('maxTags', TType.I32, 3)
oprot.writeI32(self.maxTags)
oprot.writeFieldEnd()
if self.writableNotebooksOnly is not None:
oprot.writeFieldBegin('writableNotebooksOnly', TType.BOOL, 4)
oprot.writeBool(self.writableNotebooksOnly)
oprot.writeFieldEnd()
if self.includeContainingNotebooks is not None:
oprot.writeFieldBegin('includeContainingNotebooks', TType.BOOL, 5)
oprot.writeBool(self.includeContainingNotebooks)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
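# Illustrative sketch: asking findRelated() for up to five related notes plus
# their containing notebooks. Unset maxima mean "return none of that entity
# type", as documented above.
def _example_related_spec():
    return RelatedResultSpec(maxNotes=5, includeContainingNotebooks=True)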
| bsd-2-clause |
hongbin/magnum | magnum/db/sqlalchemy/alembic/env.py | 20 | 1767 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from logging import config as log_config
from alembic import context
from magnum.db.sqlalchemy import api as sqla_api
from magnum.db.sqlalchemy import models
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
log_config.fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
target_metadata = models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = sqla_api.get_engine()
with engine.connect() as connection:
context.configure(connection=connection,
target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
run_migrations_online()
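# Illustrative sketch (not part of this env.py): alembic also supports an
# 'offline' mode that renders SQL without a live connection. A minimal
# version, assuming the database URL is exposed as the standard
# "sqlalchemy.url" option in the alembic config:
def run_migrations_offline():
    url = config.get_main_option("sqlalchemy.url")
    context.configure(url=url, target_metadata=target_metadata)
    with context.begin_transaction():
        context.run_migrations()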
| apache-2.0 |
minhphung171093/GreenERP_V7 | openerp/report/render/rml.py | 457 | 3244 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
import rml2pdf
import rml2html as htmlizer
import rml2txt as txtizer
import odt2odt as odt
import html2html as html
import makohtml2html as makohtml
class rml(render.render):
def __init__(self, rml, localcontext = None, datas=None, path='.', title=None):
render.render.__init__(self, datas, path)
self.localcontext = localcontext
self.rml = rml
self.output_type = 'pdf'
self.title=title
def _render(self):
return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title)
class rml2html(render.render):
def __init__(self, rml,localcontext = None, datas=None):
super(rml2html, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return htmlizer.parseString(self.rml,self.localcontext)
class rml2txt(render.render):
def __init__(self, rml, localcontext= None, datas=None):
super(rml2txt, self).__init__(datas)
self.rml = rml
self.localcontext = localcontext
self.output_type = 'txt'
def _render(self):
return txtizer.parseString(self.rml, self.localcontext)
class odt2odt(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'odt'
def _render(self):
return odt.parseNode(self.rml_dom,self.localcontext)
class html2html(render.render):
def __init__(self, rml, localcontext=None, datas=None):
render.render.__init__(self, datas)
self.rml_dom = rml
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return html.parseString(self.rml_dom,self.localcontext)
class makohtml2html(render.render):
def __init__(self, html, localcontext = None):
render.render.__init__(self)
self.html = html
self.localcontext = localcontext
self.output_type = 'html'
def _render(self):
return makohtml.parseNode(self.html,self.localcontext)
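# Illustrative sketch: the renderer classes above share one small contract --
# construct with the markup plus an optional localcontext, then call
# _render() for the output. A hypothetical dispatcher over three of them:
def _render_report(source, target='pdf', localcontext=None):
    renderers = {'pdf': rml, 'html': rml2html, 'txt': rml2txt}
    return renderers[target](source, localcontext)._render()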
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ifduyue/tornado | maint/test/cython/cythonapp_test.py | 29 | 1196 | from tornado.testing import AsyncTestCase, gen_test
from tornado.util import ArgReplacer
import unittest
import cythonapp
class CythonCoroutineTest(AsyncTestCase):
@gen_test
def test_native_coroutine(self):
x = yield cythonapp.native_coroutine()
self.assertEqual(x, "goodbye")
@gen_test
def test_decorated_coroutine(self):
x = yield cythonapp.decorated_coroutine()
self.assertEqual(x, "goodbye")
class CythonArgReplacerTest(unittest.TestCase):
def test_arg_replacer_function(self):
replacer = ArgReplacer(cythonapp.function_with_args, 'two')
args = (1, 'old', 3)
kwargs = {}
self.assertEqual(replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(replacer.replace('new', args, kwargs),
('old', [1, 'new', 3], {}))
def test_arg_replacer_method(self):
replacer = ArgReplacer(cythonapp.AClass().method_with_args, 'two')
args = (1, 'old', 3)
kwargs = {}
self.assertEqual(replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(replacer.replace('new', args, kwargs),
('old', [1, 'new', 3], {}))
| apache-2.0 |
svn2github/suds | suds/sax/date.py | 28 | 10472 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Nathan Van Gheem (vangheem@gmail.com)
"""
The I{xdate} module provides classes for conversion
between XML dates and python objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
import time
import datetime as dt
import re
log = getLogger(__name__)
class Date:
"""
An XML date object.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
@ivar date: The object value.
@type date: B{datetime}.I{date}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (date|str)
@raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.date):
self.date = date
return
if isinstance(date, basestring):
self.date = self.__parse(date)
return
raise ValueError, type(date)
def year(self):
"""
Get the I{year} component.
@return: The year.
@rtype: int
"""
return self.date.year
def month(self):
"""
Get the I{month} component.
@return: The month.
@rtype: int
"""
return self.date.month
def day(self):
"""
Get the I{day} component.
@return: The day.
@rtype: int
"""
return self.date.day
def __parse(self, s):
"""
Parse the string date.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
Although, the TZ is ignored because it's meaningless
without the time, right?
@param s: A date string.
@type s: str
@return: A date object.
@rtype: I{date}
"""
try:
year, month, day = s[:10].split('-', 2)
year = int(year)
month = int(month)
day = int(day)
return dt.date(year, month, day)
except:
log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __str__(self):
return unicode(self)
def __unicode__(self):
return self.date.isoformat()
class Time:
"""
An XML time object.
Supported formats:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@ivar tz: The timezone
@type tz: L{Timezone}
@ivar date: The object value.
@type date: B{datetime}.I{time}
"""
def __init__(self, time, adjusted=True):
"""
@param time: The value of the object.
@type time: (time|str)
@param adjusted: Adjust for I{local} Timezone.
@type adjusted: boolean
@raise ValueError: When I{time} is invalid.
"""
self.tz = Timezone()
if isinstance(time, dt.time):
self.time = time
return
if isinstance(time, basestring):
self.time = self.__parse(time)
if adjusted:
self.__adjust()
return
raise ValueError, type(time)
def hour(self):
"""
Get the I{hour} component.
@return: The hour.
@rtype: int
"""
return self.time.hour
def minute(self):
"""
Get the I{minute} component.
@return: The minute.
@rtype: int
"""
return self.time.minute
def second(self):
"""
Get the I{seconds} component.
@return: The seconds.
@rtype: int
"""
return self.time.second
def microsecond(self):
"""
Get the I{microsecond} component.
@return: The microsecond.
@rtype: int
"""
return self.time.microsecond
def __adjust(self):
"""
Adjust for TZ offset.
"""
if hasattr(self, 'offset'):
today = dt.date.today()
delta = self.tz.adjustment(self.offset)
d = dt.datetime.combine(today, self.time)
d = ( d + delta )
self.time = d.time()
def __parse(self, s):
"""
Parse the string date.
Patterns:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@param s: A time string.
@type s: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
try:
offset = None
part = Timezone.split(s)
hour, minute, second = part[0].split(':', 2)
hour = int(hour)
minute = int(minute)
second, ms = self.__second(second)
if len(part) == 2:
self.offset = self.__offset(part[1])
if ms is None:
return dt.time(hour, minute, second)
else:
return dt.time(hour, minute, second, ms)
except:
log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __second(self, s):
"""
Parse the seconds and microseconds.
The microseconds are truncated to 999999 due to a restriction in
the python datetime.datetime object.
@param s: A string representation of the seconds.
@type s: str
@return: Tuple of (sec,ms)
@rtype: tuple.
"""
part = s.split('.')
if len(part) > 1:
return (int(part[0]), int(part[1][:6]))
else:
return (int(part[0]), None)
def __offset(self, s):
"""
Parse the TZ offset.
@param s: A string representation of the TZ offset.
@type s: str
@return: The signed offset in hours.
@rtype: str
"""
if len(s) == len('-00:00'):
return int(s[:3])
if len(s) == 0:
return self.tz.local
if len(s) == 1:
return 0
raise Exception()
def __str__(self):
return unicode(self)
def __unicode__(self):
time = self.time.isoformat()
if self.tz.local:
return '%s%+.2d:00' % (time, self.tz.local)
else:
return '%sZ' % time
class DateTime(Date,Time):
"""
An XML time object.
Supported formats:
- YYYY-MM-DDB{T}HH:MI:SS
- YYYY-MM-DDB{T}HH:MI:SS(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS.ms
- YYYY-MM-DDB{T}HH:MI:SS.ms(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS(+|-)06:00
- YYYY-MM-DDB{T}HH:MI:SS.ms(+|-)06:00
@ivar datetime: The object value.
@type datetime: B{datetime}.I{datedate}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (datetime|str)
@raise ValueError: When I{tm} is invalid.
"""
if isinstance(date, dt.datetime):
Date.__init__(self, date.date())
Time.__init__(self, date.time())
self.datetime = \
dt.datetime.combine(self.date, self.time)
return
if isinstance(date, basestring):
part = date.split('T')
Date.__init__(self, part[0])
Time.__init__(self, part[1], 0)
self.datetime = \
dt.datetime.combine(self.date, self.time)
self.__adjust()
return
raise ValueError, type(date)
def __adjust(self):
"""
Adjust for TZ offset.
"""
if not hasattr(self, 'offset'):
return
delta = self.tz.adjustment(self.offset)
try:
d = ( self.datetime + delta )
self.datetime = d
self.date = d.date()
self.time = d.time()
except OverflowError:
log.warn('"%s" caused overflow, not-adjusted', self.datetime)
def __str__(self):
return unicode(self)
def __unicode__(self):
s = []
s.append(Date.__unicode__(self))
s.append(Time.__unicode__(self))
return 'T'.join(s)
class UTC(DateTime):
"""
Represents current UTC time.
"""
def __init__(self, date=None):
if date is None:
date = dt.datetime.utcnow()
DateTime.__init__(self, date)
self.tz.local = 0
class Timezone:
"""
Timezone object used to do TZ conversions
@cvar local: The (A) local TZ offset.
@type local: int
@cvar pattern: The regex pattern to match TZ.
@type pattern: re.Pattern
"""
pattern = re.compile('([zZ])|([\-\+][0-9]{2}:[0-9]{2})')
LOCAL = ( 0-time.timezone/60/60 ) + time.daylight
def __init__(self, offset=None):
if offset is None:
offset = self.LOCAL
self.local = offset
@classmethod
def split(cls, s):
"""
Split the TZ from string.
@param s: A string containing a timezone
@type s: basestring
@return: The split parts.
@rtype: tuple
"""
m = cls.pattern.search(s)
if m is None:
return (s,)
x = m.start(0)
return (s[:x], s[x:])
def adjustment(self, offset):
"""
Get the adjustment to the I{local} TZ.
@return: The delta between I{offset} and local TZ.
@rtype: B{datetime}.I{timedelta}
"""
delta = ( self.local - offset )
return dt.timedelta(hours=delta)
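# Illustrative sketch: how Timezone.split() and adjustment() cooperate when
# parsing '10:30:00-06:00' with a hypothetical local offset of -5 hours.
def _example_tz_adjust():
    text, tz_part = Timezone.split('10:30:00-06:00')
    delta = Timezone(-5).adjustment(int(tz_part[:3]))
    return text, delta  # ('10:30:00', dt.timedelta(hours=1))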
| lgpl-3.0 |
40223123/finaltest2 | static/Brython3.1.1-20150328-091302/Lib/unittest/case.py | 743 | 48873 | """Test case implementation"""
import sys
import functools
import difflib
import pprint
import re
import warnings
import collections
from . import result
from .util import (strclass, safe_repr, _count_diff_all_purpose,
_count_diff_hashable)
__unittest = True
DIFF_OMITTED = ('\nDiff is %s characters long. '
'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
"""
Raise this exception in a test to skip it.
Usually you can use TestCase.skipTest() or one of the skipping decorators
instead of raising this directly.
"""
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
super(_ExpectedFailure, self).__init__()
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
class _Outcome(object):
def __init__(self):
self.success = True
self.skipped = None
self.unexpectedSuccess = None
self.expectedFailure = None
self.errors = []
self.failures = []
def _id(obj):
return obj
def skip(reason):
"""
Unconditionally skip a test.
"""
def decorator(test_item):
if not isinstance(test_item, type):
@functools.wraps(test_item)
def skip_wrapper(*args, **kwargs):
raise SkipTest(reason)
test_item = skip_wrapper
test_item.__unittest_skip__ = True
test_item.__unittest_skip_why__ = reason
return test_item
return decorator
def skipIf(condition, reason):
"""
Skip a test if the condition is true.
"""
if condition:
return skip(reason)
return _id
def skipUnless(condition, reason):
"""
Skip a test unless the condition is true.
"""
if not condition:
return skip(reason)
return _id
def expectedFailure(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
raise _ExpectedFailure(sys.exc_info())
raise _UnexpectedSuccess
return wrapper
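# Editor's sketch (not part of the stdlib source): how the decorators above
# compose on a throwaway case. Wrapped in a function so nothing executes at
# import time and so TestCase (defined further below) is resolved only when
# called; the platform condition is illustrative.
def _demo_skip_decorators():
    class _Demo(TestCase):
        @skip('demonstrating an unconditional skip')
        def test_always_skipped(self):
            self.fail('never reached')
        @skipUnless(sys.platform.startswith('linux'), 'requires Linux')
        def test_linux_only(self):
            pass
        @expectedFailure
        def test_known_bug(self):
            # The failure below is converted to _ExpectedFailure and later
            # reported as an expected failure by TestCase.run().
            assert 1 == 2
    return _Demo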
class _AssertRaisesBaseContext(object):
def __init__(self, expected, test_case, callable_obj=None,
expected_regex=None):
self.expected = expected
self.test_case = test_case
if callable_obj is not None:
try:
self.obj_name = callable_obj.__name__
except AttributeError:
self.obj_name = str(callable_obj)
else:
self.obj_name = None
if isinstance(expected_regex, (bytes, str)):
expected_regex = re.compile(expected_regex)
self.expected_regex = expected_regex
self.msg = None
def _raiseFailure(self, standardMsg):
msg = self.test_case._formatMessage(self.msg, standardMsg)
raise self.test_case.failureException(msg)
def handle(self, name, callable_obj, args, kwargs):
"""
If callable_obj is None, assertRaises/Warns is being used as a
context manager, so check for a 'msg' kwarg and return self.
If callable_obj is not None, call it passing args and kwargs.
"""
if callable_obj is None:
self.msg = kwargs.pop('msg', None)
return self
with self:
callable_obj(*args, **kwargs)
class _AssertRaisesContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
if self.obj_name:
self._raiseFailure("{} not raised by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
# store exception, without traceback, for later retrieval
self.exception = exc_value.with_traceback(None)
if self.expected_regex is None:
return True
expected_regex = self.expected_regex
if not expected_regex.search(str(exc_value)):
self._raiseFailure('"{}" does not match "{}"'.format(
expected_regex.pattern, str(exc_value)))
return True
class _AssertWarnsContext(_AssertRaisesBaseContext):
"""A context manager used to implement TestCase.assertWarns* methods."""
def __enter__(self):
# The __warningregistry__'s need to be in a pristine state for tests
# to work properly.
for v in sys.modules.values():
if getattr(v, '__warningregistry__', None):
v.__warningregistry__ = {}
self.warnings_manager = warnings.catch_warnings(record=True)
self.warnings = self.warnings_manager.__enter__()
warnings.simplefilter("always", self.expected)
return self
def __exit__(self, exc_type, exc_value, tb):
self.warnings_manager.__exit__(exc_type, exc_value, tb)
if exc_type is not None:
# let unexpected exceptions pass through
return
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
first_matching = None
for m in self.warnings:
w = m.message
if not isinstance(w, self.expected):
continue
if first_matching is None:
first_matching = w
if (self.expected_regex is not None and
not self.expected_regex.search(str(w))):
continue
# store warning for later retrieval
self.warning = w
self.filename = m.filename
self.lineno = m.lineno
return
# Now we simply try to choose a helpful failure message
if first_matching is not None:
self._raiseFailure('"{}" does not match "{}"'.format(
self.expected_regex.pattern, str(first_matching)))
if self.obj_name:
self._raiseFailure("{} not triggered by {}".format(exc_name,
self.obj_name))
else:
self._raiseFailure("{} not triggered".format(exc_name))
class TestCase(object):
"""A class whose instances are single test cases.
By default, the test code itself should be placed in a method named
'runTest'.
If the fixture may be used for many test cases, create as
many test methods as are needed. When instantiating such a TestCase
subclass, specify in the constructor arguments the name of the test method
that the instance is to execute.
Test authors should subclass TestCase for their own tests. Construction
and deconstruction of the test's environment ('fixture') can be
implemented by overriding the 'setUp' and 'tearDown' methods respectively.
If it is necessary to override the __init__ method, the base class
__init__ method must always be called. It is important that subclasses
should not change the signature of their __init__ method, since instances
of the classes are instantiated automatically by parts of the framework
in order to be run.
When subclassing TestCase, you can set these attributes:
* failureException: determines which exception will be raised when
the instance's assertion methods fail; test methods raising this
exception will be deemed to have 'failed' rather than 'errored'.
* longMessage: determines whether long messages (including repr of
objects used in assert methods) will be printed on failure in *addition*
to any explicit message passed.
* maxDiff: sets the maximum length of a diff in failure messages
by assert methods using difflib. It is looked up as an instance
attribute so can be configured by individual tests if required.
"""
failureException = AssertionError
longMessage = True
maxDiff = 80*8
# If a string is longer than _diffThreshold, use normal comparison instead
# of difflib. See #11763.
_diffThreshold = 2**16
# Attribute used by TestSuite for classSetUp
_classSetupFailed = False
def __init__(self, methodName='runTest'):
"""Create an instance of the class that will use the named test
method when executed. Raises a ValueError if the instance does
not have a method with the specified name.
"""
self._testMethodName = methodName
self._outcomeForDoCleanups = None
self._testMethodDoc = 'No test'
try:
testMethod = getattr(self, methodName)
except AttributeError:
if methodName != 'runTest':
# we allow instantiation with no explicit method name
# but not an *incorrect* or missing method name
raise ValueError("no such test method in %s: %s" %
(self.__class__, methodName))
else:
self._testMethodDoc = testMethod.__doc__
self._cleanups = []
# Map types to custom assertEqual functions that will compare
# instances of said type in more detail to generate a more useful
# error message.
self._type_equality_funcs = {}
self.addTypeEqualityFunc(dict, 'assertDictEqual')
self.addTypeEqualityFunc(list, 'assertListEqual')
self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
self.addTypeEqualityFunc(set, 'assertSetEqual')
self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
self.addTypeEqualityFunc(str, 'assertMultiLineEqual')
def addTypeEqualityFunc(self, typeobj, function):
"""Add a type specific assertEqual style function to compare a type.
This method is for use by TestCase subclasses that need to register
their own type equality functions to provide nicer error messages.
Args:
typeobj: The data type to call this function on when both values
are of the same type in assertEqual().
function: The callable taking two arguments and an optional
msg= argument that raises self.failureException with a
useful error message when the two arguments are not equal.
"""
self._type_equality_funcs[typeobj] = function
def addCleanup(self, function, *args, **kwargs):
"""Add a function, with arguments, to be called when the test is
completed. Functions added are called on a LIFO basis and are
called after tearDown on test failure or success.
Cleanup items are called even if setUp fails (unlike tearDown)."""
self._cleanups.append((function, args, kwargs))
def setUp(self):
"Hook method for setting up the test fixture before exercising it."
pass
def tearDown(self):
"Hook method for deconstructing the test fixture after testing it."
pass
@classmethod
def setUpClass(cls):
"Hook method for setting up class fixture before running tests in the class."
@classmethod
def tearDownClass(cls):
"Hook method for deconstructing the class fixture after running all tests in the class."
def countTestCases(self):
return 1
def defaultTestResult(self):
return result.TestResult()
def shortDescription(self):
"""Returns a one-line description of the test, or None if no
description has been provided.
The default implementation of this method returns the first line of
the specified test method's docstring.
"""
doc = self._testMethodDoc
return doc and doc.split("\n")[0].strip() or None
def id(self):
return "%s.%s" % (strclass(self.__class__), self._testMethodName)
def __eq__(self, other):
if type(self) is not type(other):
return NotImplemented
return self._testMethodName == other._testMethodName
def __hash__(self):
return hash((type(self), self._testMethodName))
def __str__(self):
return "%s (%s)" % (self._testMethodName, strclass(self.__class__))
def __repr__(self):
return "<%s testMethod=%s>" % \
(strclass(self.__class__), self._testMethodName)
def _addSkip(self, result, reason):
addSkip = getattr(result, 'addSkip', None)
if addSkip is not None:
addSkip(self, reason)
else:
warnings.warn("TestResult has no addSkip method, skips not reported",
RuntimeWarning, 2)
result.addSuccess(self)
def _executeTestPart(self, function, outcome, isTest=False):
try:
function()
except KeyboardInterrupt:
raise
except SkipTest as e:
outcome.success = False
outcome.skipped = str(e)
except _UnexpectedSuccess:
exc_info = sys.exc_info()
outcome.success = False
if isTest:
outcome.unexpectedSuccess = exc_info
else:
outcome.errors.append(exc_info)
except _ExpectedFailure:
outcome.success = False
exc_info = sys.exc_info()
if isTest:
outcome.expectedFailure = exc_info
else:
outcome.errors.append(exc_info)
except self.failureException:
outcome.success = False
outcome.failures.append(sys.exc_info())
exc_info = sys.exc_info()
except:
outcome.success = False
outcome.errors.append(sys.exc_info())
def run(self, result=None):
orig_result = result
if result is None:
result = self.defaultTestResult()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
if (getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)):
# If the class or method was skipped.
try:
skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
or getattr(testMethod, '__unittest_skip_why__', ''))
self._addSkip(result, skip_why)
finally:
result.stopTest(self)
return
try:
outcome = _Outcome()
self._outcomeForDoCleanups = outcome
self._executeTestPart(self.setUp, outcome)
if outcome.success:
self._executeTestPart(testMethod, outcome, isTest=True)
self._executeTestPart(self.tearDown, outcome)
self.doCleanups()
if outcome.success:
result.addSuccess(self)
else:
if outcome.skipped is not None:
self._addSkip(result, outcome.skipped)
for exc_info in outcome.errors:
result.addError(self, exc_info)
for exc_info in outcome.failures:
result.addFailure(self, exc_info)
if outcome.unexpectedSuccess is not None:
addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
if addUnexpectedSuccess is not None:
addUnexpectedSuccess(self)
else:
warnings.warn("TestResult has no addUnexpectedSuccess method, reporting as failures",
RuntimeWarning)
result.addFailure(self, outcome.unexpectedSuccess)
if outcome.expectedFailure is not None:
addExpectedFailure = getattr(result, 'addExpectedFailure', None)
if addExpectedFailure is not None:
addExpectedFailure(self, outcome.expectedFailure)
else:
warnings.warn("TestResult has no addExpectedFailure method, reporting as passes",
RuntimeWarning)
result.addSuccess(self)
return result
finally:
result.stopTest(self)
if orig_result is None:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
def doCleanups(self):
"""Execute all cleanup functions. Normally called for you after
tearDown."""
outcome = self._outcomeForDoCleanups or _Outcome()
while self._cleanups:
function, args, kwargs = self._cleanups.pop()
part = lambda: function(*args, **kwargs)
self._executeTestPart(part, outcome)
# return this for backwards compatibility
        # even though we no longer use it internally
return outcome.success
def __call__(self, *args, **kwds):
return self.run(*args, **kwds)
def debug(self):
"""Run the test without collecting errors in a TestResult"""
self.setUp()
getattr(self, self._testMethodName)()
self.tearDown()
while self._cleanups:
function, args, kwargs = self._cleanups.pop(-1)
function(*args, **kwargs)
def skipTest(self, reason):
"""Skip this test."""
raise SkipTest(reason)
def fail(self, msg=None):
"""Fail immediately, with the given message."""
raise self.failureException(msg)
def assertFalse(self, expr, msg=None):
"""Check that the expression is false."""
if expr:
msg = self._formatMessage(msg, "%s is not false" % safe_repr(expr))
raise self.failureException(msg)
def assertTrue(self, expr, msg=None):
"""Check that the expression is true."""
if not expr:
msg = self._formatMessage(msg, "%s is not true" % safe_repr(expr))
raise self.failureException(msg)
def _formatMessage(self, msg, standardMsg):
"""Honour the longMessage attribute when generating failure messages.
If longMessage is False this means:
* Use only an explicit message if it is provided
* Otherwise use the standard message for the assert
If longMessage is True:
* Use the standard message
        * If an explicit message is provided, append ' : ' and the explicit message
"""
if not self.longMessage:
return msg or standardMsg
if msg is None:
return standardMsg
try:
# don't switch to '{}' formatting in Python 2.X
# it changes the way unicode input is handled
return '%s : %s' % (standardMsg, msg)
except UnicodeDecodeError:
return '%s : %s' % (safe_repr(standardMsg), safe_repr(msg))
def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
"""Fail unless an exception of class excClass is raised
by callableObj when invoked with arguments args and keyword
arguments kwargs. If a different type of exception is
raised, it will not be caught, and the test case will be
deemed to have suffered an error, exactly as for an
unexpected exception.
If called with callableObj omitted or None, will return a
context object used like this::
with self.assertRaises(SomeException):
do_something()
An optional keyword argument 'msg' can be provided when assertRaises
is used as a context object.
The context manager keeps a reference to the exception as
the 'exception' attribute. This allows you to inspect the
exception after the assertion::
with self.assertRaises(SomeException) as cm:
do_something()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, 3)
"""
context = _AssertRaisesContext(excClass, self, callableObj)
return context.handle('assertRaises', callableObj, args, kwargs)
def assertWarns(self, expected_warning, callable_obj=None, *args, **kwargs):
"""Fail unless a warning of class warnClass is triggered
by callable_obj when invoked with arguments args and keyword
arguments kwargs. If a different type of warning is
triggered, it will not be handled: depending on the other
warning filtering rules in effect, it might be silenced, printed
out, or raised as an exception.
If called with callable_obj omitted or None, will return a
context object used like this::
with self.assertWarns(SomeWarning):
do_something()
An optional keyword argument 'msg' can be provided when assertWarns
is used as a context object.
The context manager keeps a reference to the first matching
warning as the 'warning' attribute; similarly, the 'filename'
and 'lineno' attributes give you information about the line
of Python code from which the warning was triggered.
This allows you to inspect the warning after the assertion::
with self.assertWarns(SomeWarning) as cm:
do_something()
the_warning = cm.warning
self.assertEqual(the_warning.some_attribute, 147)
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj)
return context.handle('assertWarns', callable_obj, args, kwargs)
def _getAssertEqualityFunc(self, first, second):
"""Get a detailed comparison function for the types of the two args.
Returns: A callable accepting (first, second, msg=None) that will
raise a failure exception if first != second with a useful human
readable error message for those types.
"""
#
# NOTE(gregory.p.smith): I considered isinstance(first, type(second))
# and vice versa. I opted for the conservative approach in case
# subclasses are not intended to be compared in detail to their super
# class instances using a type equality func. This means testing
# subtypes won't automagically use the detailed comparison. Callers
# should use their type specific assertSpamEqual method to compare
# subclasses if the detailed comparison is desired and appropriate.
# See the discussion in http://bugs.python.org/issue2578.
#
if type(first) is type(second):
asserter = self._type_equality_funcs.get(type(first))
if asserter is not None:
if isinstance(asserter, str):
asserter = getattr(self, asserter)
return asserter
return self._baseAssertEqual
def _baseAssertEqual(self, first, second, msg=None):
"""The default assertEqual implementation, not type specific."""
if not first == second:
standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertEqual(self, first, second, msg=None):
"""Fail if the two objects are unequal as determined by the '=='
operator.
"""
assertion_func = self._getAssertEqualityFunc(first, second)
assertion_func(first, second, msg=msg)
def assertNotEqual(self, first, second, msg=None):
"""Fail if the two objects are equal as determined by the '!='
operator.
"""
if not first != second:
msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
safe_repr(second)))
raise self.failureException(msg)
def assertAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are unequal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
between the two objects is more than the given delta.
Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
If the two objects compare equal then they will automatically
compare almost equal.
"""
if first == second:
# shortcut
return
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if abs(first - second) <= delta:
return
standardMsg = '%s != %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if round(abs(second-first), places) == 0:
return
standardMsg = '%s != %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertNotAlmostEqual(self, first, second, places=None, msg=None,
delta=None):
"""Fail if the two objects are equal as determined by their
difference rounded to the given number of decimal places
(default 7) and comparing to zero, or by comparing that the
between the two objects is less than the given delta.
Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most significant digit).
Objects that are equal automatically fail.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if delta is not None:
if not (first == second) and abs(first - second) > delta:
return
standardMsg = '%s == %s within %s delta' % (safe_repr(first),
safe_repr(second),
safe_repr(delta))
else:
if places is None:
places = 7
if not (first == second) and round(abs(second-first), places) != 0:
return
standardMsg = '%s == %s within %r places' % (safe_repr(first),
safe_repr(second),
places)
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
def assertSequenceEqual(self, seq1, seq2, msg=None, seq_type=None):
"""An equality assertion for ordered sequences (like lists and tuples).
For the purposes of this function, a valid ordered sequence type is one
which can be indexed, has a length, and has an equality operator.
Args:
seq1: The first sequence to compare.
seq2: The second sequence to compare.
seq_type: The expected datatype of the sequences, or None if no
datatype should be enforced.
msg: Optional message to use on failure instead of a list of
differences.
"""
if seq_type is not None:
seq_type_name = seq_type.__name__
if not isinstance(seq1, seq_type):
raise self.failureException('First sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq1)))
if not isinstance(seq2, seq_type):
raise self.failureException('Second sequence is not a %s: %s'
% (seq_type_name, safe_repr(seq2)))
else:
seq_type_name = "sequence"
differing = None
try:
len1 = len(seq1)
except (TypeError, NotImplementedError):
differing = 'First %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
try:
len2 = len(seq2)
except (TypeError, NotImplementedError):
differing = 'Second %s has no length. Non-sequence?' % (
seq_type_name)
if differing is None:
if seq1 == seq2:
return
seq1_repr = safe_repr(seq1)
seq2_repr = safe_repr(seq2)
if len(seq1_repr) > 30:
seq1_repr = seq1_repr[:30] + '...'
if len(seq2_repr) > 30:
seq2_repr = seq2_repr[:30] + '...'
elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
differing = '%ss differ: %s != %s\n' % elements
for i in range(min(len1, len2)):
try:
item1 = seq1[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of first %s\n' %
(i, seq_type_name))
break
try:
item2 = seq2[i]
except (TypeError, IndexError, NotImplementedError):
differing += ('\nUnable to index element %d of second %s\n' %
(i, seq_type_name))
break
if item1 != item2:
differing += ('\nFirst differing element %d:\n%s\n%s\n' %
(i, item1, item2))
break
else:
if (len1 == len2 and seq_type is None and
type(seq1) != type(seq2)):
# The sequences are the same, but have differing types.
return
if len1 > len2:
differing += ('\nFirst %s contains %d additional '
'elements.\n' % (seq_type_name, len1 - len2))
try:
differing += ('First extra element %d:\n%s\n' %
(len2, seq1[len2]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of first %s\n' % (len2, seq_type_name))
elif len1 < len2:
differing += ('\nSecond %s contains %d additional '
'elements.\n' % (seq_type_name, len2 - len1))
try:
differing += ('First extra element %d:\n%s\n' %
(len1, seq2[len1]))
except (TypeError, IndexError, NotImplementedError):
differing += ('Unable to index element %d '
'of second %s\n' % (len1, seq_type_name))
standardMsg = differing
diffMsg = '\n' + '\n'.join(
difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def _truncateMessage(self, message, diff):
max_diff = self.maxDiff
if max_diff is None or len(diff) <= max_diff:
return message + diff
return message + (DIFF_OMITTED % len(diff))
def assertListEqual(self, list1, list2, msg=None):
"""A list-specific equality assertion.
Args:
list1: The first list to compare.
list2: The second list to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(list1, list2, msg, seq_type=list)
def assertTupleEqual(self, tuple1, tuple2, msg=None):
"""A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.
"""
self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
def assertSetEqual(self, set1, set2, msg=None):
"""A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
        assertSetEqual uses duck typing to support different types of sets, and
        is optimized for sets specifically (parameters must support a
        difference method).
"""
try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('first argument does not support set difference: %s' % e)
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail('invalid type when attempting set difference: %s' % e)
except AttributeError as e:
self.fail('second argument does not support set difference: %s' % e)
if not (difference1 or difference2):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
def assertIn(self, member, container, msg=None):
"""Just like self.assertTrue(a in b), but with a nicer default message."""
if member not in container:
standardMsg = '%s not found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIn(self, member, container, msg=None):
"""Just like self.assertTrue(a not in b), but with a nicer default message."""
if member in container:
standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
safe_repr(container))
self.fail(self._formatMessage(msg, standardMsg))
def assertIs(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is b), but with a nicer default message."""
if expr1 is not expr2:
standardMsg = '%s is not %s' % (safe_repr(expr1),
safe_repr(expr2))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNot(self, expr1, expr2, msg=None):
"""Just like self.assertTrue(a is not b), but with a nicer default message."""
if expr1 is expr2:
standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictEqual(self, d1, d2, msg=None):
self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')
if d1 != d2:
standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
pprint.pformat(d1).splitlines(),
pprint.pformat(d2).splitlines())))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertDictContainsSubset(self, subset, dictionary, msg=None):
"""Checks whether dictionary is a superset of subset."""
warnings.warn('assertDictContainsSubset is deprecated',
DeprecationWarning)
missing = []
mismatched = []
for key, value in subset.items():
if key not in dictionary:
missing.append(key)
elif value != dictionary[key]:
mismatched.append('%s, expected: %s, actual: %s' %
(safe_repr(key), safe_repr(value),
safe_repr(dictionary[key])))
if not (missing or mismatched):
return
standardMsg = ''
if missing:
standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
missing)
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += 'Mismatched values: %s' % ','.join(mismatched)
self.fail(self._formatMessage(msg, standardMsg))
def assertCountEqual(self, first, second, msg=None):
"""An unordered sequence comparison asserting that the same elements,
regardless of order. If the same element occurs more than once,
it verifies that the elements occur the same number of times.
self.assertEqual(Counter(list(first)),
Counter(list(second)))
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
"""
first_seq, second_seq = list(first), list(second)
try:
first = collections.Counter(first_seq)
second = collections.Counter(second_seq)
except TypeError:
# Handle case with unhashable elements
differences = _count_diff_all_purpose(first_seq, second_seq)
else:
if first == second:
return
differences = _count_diff_hashable(first_seq, second_seq)
if differences:
standardMsg = 'Element counts were not equal:\n'
lines = ['First has %d, Second has %d: %r' % diff for diff in differences]
diffMsg = '\n'.join(lines)
standardMsg = self._truncateMessage(standardMsg, diffMsg)
msg = self._formatMessage(msg, standardMsg)
self.fail(msg)
def assertMultiLineEqual(self, first, second, msg=None):
"""Assert that two multi-line strings are equal."""
self.assertIsInstance(first, str, 'First argument is not a string')
self.assertIsInstance(second, str, 'Second argument is not a string')
if first != second:
# don't use difflib if the strings are too long
if (len(first) > self._diffThreshold or
len(second) > self._diffThreshold):
self._baseAssertEqual(first, second, msg)
firstlines = first.splitlines(keepends=True)
secondlines = second.splitlines(keepends=True)
if len(firstlines) == 1 and first.strip('\r\n') == first:
firstlines = [first + '\n']
secondlines = [second + '\n']
standardMsg = '%s != %s' % (safe_repr(first, True),
safe_repr(second, True))
diff = '\n' + ''.join(difflib.ndiff(firstlines, secondlines))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertLess(self, a, b, msg=None):
"""Just like self.assertTrue(a < b), but with a nicer default message."""
if not a < b:
standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertLessEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a <= b), but with a nicer default message."""
if not a <= b:
standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreater(self, a, b, msg=None):
"""Just like self.assertTrue(a > b), but with a nicer default message."""
if not a > b:
standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertGreaterEqual(self, a, b, msg=None):
"""Just like self.assertTrue(a >= b), but with a nicer default message."""
if not a >= b:
standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNone(self, obj, msg=None):
"""Same as self.assertTrue(obj is None), with a nicer default message."""
if obj is not None:
standardMsg = '%s is not None' % (safe_repr(obj),)
self.fail(self._formatMessage(msg, standardMsg))
def assertIsNotNone(self, obj, msg=None):
"""Included for symmetry with assertIsNone."""
if obj is None:
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
def assertIsInstance(self, obj, cls, msg=None):
"""Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message."""
if not isinstance(obj, cls):
standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertNotIsInstance(self, obj, cls, msg=None):
"""Included for symmetry with assertIsInstance."""
if isinstance(obj, cls):
standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
self.fail(self._formatMessage(msg, standardMsg))
def assertRaisesRegex(self, expected_exception, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception matches a regex.
Args:
expected_exception: Exception class expected to be raised.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertRaisesRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertRaisesContext(expected_exception, self, callable_obj,
expected_regex)
return context.handle('assertRaisesRegex', callable_obj, args, kwargs)
def assertWarnsRegex(self, expected_warning, expected_regex,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a triggered warning matches a regexp.
Basic functioning is similar to assertWarns() with the addition
that only warnings whose messages also match the regular expression
are considered successful matches.
Args:
expected_warning: Warning class expected to be triggered.
expected_regex: Regex (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
msg: Optional message used in case of failure. Can only be used
when assertWarnsRegex is used as a context manager.
args: Extra args.
kwargs: Extra kwargs.
"""
context = _AssertWarnsContext(expected_warning, self, callable_obj,
expected_regex)
return context.handle('assertWarnsRegex', callable_obj, args, kwargs)
def assertRegex(self, text, expected_regex, msg=None):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, bytes)):
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
msg = msg or "Regex didn't match"
msg = '%s: %r not found in %r' % (msg, expected_regex.pattern, text)
raise self.failureException(msg)
def assertNotRegex(self, text, unexpected_regex, msg=None):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, bytes)):
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
msg = msg or "Regex matched"
msg = '%s: %r matches %r in %r' % (msg,
text[match.start():match.end()],
unexpected_regex.pattern,
text)
raise self.failureException(msg)
def _deprecate(original_func):
def deprecated_func(*args, **kwargs):
warnings.warn(
'Please use {0} instead.'.format(original_func.__name__),
DeprecationWarning, 2)
return original_func(*args, **kwargs)
return deprecated_func
# see #9424
failUnlessEqual = assertEquals = _deprecate(assertEqual)
failIfEqual = assertNotEquals = _deprecate(assertNotEqual)
failUnlessAlmostEqual = assertAlmostEquals = _deprecate(assertAlmostEqual)
failIfAlmostEqual = assertNotAlmostEquals = _deprecate(assertNotAlmostEqual)
failUnless = assert_ = _deprecate(assertTrue)
failUnlessRaises = _deprecate(assertRaises)
failIf = _deprecate(assertFalse)
assertRaisesRegexp = _deprecate(assertRaisesRegex)
assertRegexpMatches = _deprecate(assertRegex)
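# Editor's sketch (not part of the stdlib source): drives a few of the
# assertion and cleanup APIs defined above; the temp-directory fixture is
# illustrative.
def _demo_testcase_features():
    import shutil
    import tempfile
    class _Demo(TestCase):
        def setUp(self):
            self.workdir = tempfile.mkdtemp()
            # Cleanups run LIFO after tearDown, even when setUp later fails.
            self.addCleanup(shutil.rmtree, self.workdir, ignore_errors=True)
        def test_assertions(self):
            with self.assertRaises(ZeroDivisionError) as cm:
                1 / 0
            self.assertIsInstance(cm.exception, ArithmeticError)
            self.assertAlmostEqual(0.1 + 0.2, 0.3, places=7)  # rounded diff, not ==
            self.assertCountEqual([0, 1, 1], [1, 0, 1])       # order-insensitive
    # run() exercises the full pipeline above and returns a TestResult.
    return _Demo('test_assertions').run()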
class FunctionTestCase(TestCase):
"""A test case that wraps a test function.
This is useful for slipping pre-existing test functions into the
unittest framework. Optionally, set-up and tidy-up functions can be
supplied. As with TestCase, the tidy-up ('tearDown') function will
always be called if the set-up ('setUp') function ran successfully.
"""
def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
super(FunctionTestCase, self).__init__()
self._setUpFunc = setUp
self._tearDownFunc = tearDown
self._testFunc = testFunc
self._description = description
def setUp(self):
if self._setUpFunc is not None:
self._setUpFunc()
def tearDown(self):
if self._tearDownFunc is not None:
self._tearDownFunc()
def runTest(self):
self._testFunc()
def id(self):
return self._testFunc.__name__
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self._setUpFunc == other._setUpFunc and \
self._tearDownFunc == other._tearDownFunc and \
self._testFunc == other._testFunc and \
self._description == other._description
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((type(self), self._setUpFunc, self._tearDownFunc,
self._testFunc, self._description))
def __str__(self):
return "%s (%s)" % (strclass(self.__class__),
self._testFunc.__name__)
def __repr__(self):
return "<%s tec=%s>" % (strclass(self.__class__),
self._testFunc)
def shortDescription(self):
if self._description is not None:
return self._description
doc = self._testFunc.__doc__
return doc and doc.split("\n")[0].strip() or None
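# Editor's sketch (not part of the stdlib source): FunctionTestCase lets a
# bare function join a unittest run; the fixture functions are illustrative.
def _demo_function_test_case():
    state = []
    def set_up():
        state.extend([1, 2])
    def tear_down():
        del state[:]
    def check():
        assert sum(state) == 3, 'setUp should have seeded the fixture'
    case = FunctionTestCase(check, setUp=set_up, tearDown=tear_down,
                            description='wraps a bare function as a test')
    return case.run()  # case.id() == 'check'; returns a TestResult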
| gpl-3.0 |
polaris/sol | lib/googletest-82b11b8/googlemock/scripts/generator/cpp/gmock_class.py | 520 | 8293 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sys
from cpp import ast
from cpp import utils
# Preserve compatibility with Python 2.3.
try:
_dummy = set
except NameError:
import sets
set = sets.Set
_VERSION = (1, 0, 1) # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
function_type = (ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL |
ast.FUNCTION_OVERRIDE)
ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
indent = ' ' * _INDENT
for node in class_node.body:
# We only care about virtual functions.
if (isinstance(node, ast.Function) and
node.modifiers & function_type and
not node.modifiers & ctor_or_dtor):
# Pick out all the elements we need from the original function.
const = ''
if node.modifiers & ast.FUNCTION_CONST:
const = 'CONST_'
return_type = 'void'
if node.return_type:
# Add modifiers like 'const'.
modifiers = ''
if node.return_type.modifiers:
modifiers = ' '.join(node.return_type.modifiers) + ' '
return_type = modifiers + node.return_type.name
template_args = [arg.name for arg in node.return_type.templated_types]
if template_args:
return_type += '<' + ', '.join(template_args) + '>'
if len(template_args) > 1:
for line in [
'// The following line won\'t really compile, as the return',
'// type has multiple template arguments. To fix it, use a',
'// typedef for the return type.']:
output_lines.append(indent + line)
if node.return_type.pointer:
return_type += '*'
if node.return_type.reference:
return_type += '&'
num_parameters = len(node.parameters)
if len(node.parameters) == 1:
first_param = node.parameters[0]
if source[first_param.start:first_param.end].strip() == 'void':
# We must treat T(void) as a function with no parameters.
num_parameters = 0
tmpl = ''
if class_node.templated_types:
tmpl = '_T'
mock_method_macro = 'MOCK_%sMETHOD%d%s' % (const, num_parameters, tmpl)
args = ''
if node.parameters:
# Due to the parser limitations, it is impossible to keep comments
# while stripping the default parameters. When defaults are
# present, we choose to strip them and comments (and produce
# compilable code).
# TODO(nnorwitz@google.com): Investigate whether it is possible to
# preserve parameter name when reconstructing parameter text from
# the AST.
if len([param for param in node.parameters if param.default]) > 0:
args = ', '.join(param.type.name for param in node.parameters)
else:
# Get the full text of the parameters from the start
# of the first parameter to the end of the last parameter.
start = node.parameters[0].start
end = node.parameters[-1].end
# Remove // comments.
args_strings = re.sub(r'//.*', '', source[start:end])
# Condense multiple spaces and eliminate newlines putting the
# parameters together on a single line. Ensure there is a
# space in an argument which is split by a newline without
# intervening whitespace, e.g.: int\nBar
args = re.sub(' +', ' ', args_strings.replace('\n', ' '))
# Create the mock method definition.
output_lines.extend(['%s%s(%s,' % (indent, mock_method_macro, node.name),
'%s%s(%s));' % (indent*3, return_type, args)])
def _GenerateMocks(filename, source, ast_list, desired_class_names):
processed_class_names = set()
lines = []
for node in ast_list:
if (isinstance(node, ast.Class) and node.body and
# desired_class_names being None means that all classes are selected.
(not desired_class_names or node.name in desired_class_names)):
class_name = node.name
parent_name = class_name
processed_class_names.add(class_name)
class_node = node
# Add namespace before the class.
if class_node.namespace:
lines.extend(['namespace %s {' % n for n in class_node.namespace]) # }
lines.append('')
# Add template args for templated classes.
if class_node.templated_types:
# TODO(paulchang): The AST doesn't preserve template argument order,
# so we have to make up names here.
# TODO(paulchang): Handle non-type template arguments (e.g.
# template<typename T, int N>).
template_arg_count = len(class_node.templated_types.keys())
template_args = ['T%d' % n for n in range(template_arg_count)]
template_decls = ['typename ' + arg for arg in template_args]
lines.append('template <' + ', '.join(template_decls) + '>')
parent_name += '<' + ', '.join(template_args) + '>'
# Add the class prolog.
lines.append('class Mock%s : public %s {' # }
% (class_name, parent_name))
lines.append('%spublic:' % (' ' * (_INDENT // 2)))
# Add all the methods.
_GenerateMethods(lines, source, class_node)
# Close the class.
if lines:
# If there are no virtual methods, no need for a public label.
if len(lines) == 2:
del lines[-1]
# Only close the class if there really is a class.
lines.append('};')
lines.append('') # Add an extra newline.
# Close the namespace.
if class_node.namespace:
for i in range(len(class_node.namespace)-1, -1, -1):
lines.append('} // namespace %s' % class_node.namespace[i])
lines.append('') # Add an extra newline.
if desired_class_names:
missing_class_name_list = list(desired_class_names - processed_class_names)
if missing_class_name_list:
missing_class_name_list.sort()
sys.stderr.write('Class(es) not found in %s: %s\n' %
(filename, ', '.join(missing_class_name_list)))
elif not processed_class_names:
sys.stderr.write('No class found in %s\n' % filename)
return lines
def main(argv=sys.argv):
if len(argv) < 2:
sys.stderr.write('Google Mock Class Generator v%s\n\n' %
'.'.join(map(str, _VERSION)))
sys.stderr.write(__doc__)
return 1
global _INDENT
try:
_INDENT = int(os.environ['INDENT'])
except KeyError:
pass
except:
sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))
filename = argv[1]
desired_class_names = None # None means all classes in the source file.
if len(argv) >= 3:
desired_class_names = set(argv[2:])
source = utils.ReadFile(filename)
if source is None:
return 1
builder = ast.BuilderFromSource(source, filename)
try:
entire_ast = filter(None, builder.Generate())
except KeyboardInterrupt:
return
except:
# An error message was already printed since we couldn't parse.
sys.exit(1)
else:
lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
sys.stdout.write('\n'.join(lines))
if __name__ == '__main__':
main(sys.argv)
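# Editor's sketch (not part of the original script): what a run produces.
# Given a hypothetical header painter.h containing
#     class Painter {
#      public:
#       virtual ~Painter() {}
#       virtual bool DrawCircle(int x, int y, int radius) = 0;
#     };
# `gmock_class.py painter.h Painter` emits, per _GenerateMocks above, roughly:
#     class MockPainter : public Painter {
#      public:
#       MOCK_METHOD3(DrawCircle,
#           bool(int x, int y, int radius));
#     };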
| mit |
sve-odoo/odoo | addons/product/report/product_pricelist.py | 341 | 5151 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class product_pricelist(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(product_pricelist, self).__init__(cr, uid, name, context=context)
self.pricelist=False
self.quantity=[]
self.localcontext.update({
'time': time,
'get_pricelist': self._get_pricelist,
'get_currency': self._get_currency,
'get_categories': self._get_categories,
'get_price': self._get_price,
'get_titles': self._get_titles,
})
def _get_titles(self, form):
lst = []
vals = {}
qtys = 1
for i in range(1,6):
if form['qty'+str(i)]!=0:
vals['qty'+str(qtys)] = str(form['qty'+str(i)]) + ' units'
qtys += 1
lst.append(vals)
return lst
def _set_quantity(self, form):
for i in range(1,6):
q = 'qty%d'%i
if form[q] >0 and form[q] not in self.quantity:
self.quantity.append(form[q])
else:
self.quantity.append(0)
return True
def _get_pricelist(self, pricelist_id):
pricelist = self.pool.get('product.pricelist').read(self.cr, self.uid, [pricelist_id], ['name'], context=self.localcontext)[0]
return pricelist['name']
def _get_currency(self, pricelist_id):
pricelist = self.pool.get('product.pricelist').read(self.cr, self.uid, [pricelist_id], ['currency_id'], context=self.localcontext)[0]
return pricelist['currency_id'][1]
def _get_categories(self, products, form):
cat_ids=[]
res=[]
self.pricelist = form['price_list']
self._set_quantity(form)
pro_ids=[]
for product in products:
pro_ids.append(product.id)
if product.categ_id.id not in cat_ids:
cat_ids.append(product.categ_id.id)
cats = self.pool.get('product.category').name_get(self.cr, self.uid, cat_ids, context=self.localcontext)
if not cats:
return res
for cat in cats:
product_ids=self.pool.get('product.product').search(self.cr, self.uid, [('id', 'in', pro_ids), ('categ_id', '=', cat[0])], context=self.localcontext)
products = []
for product in self.pool.get('product.product').read(self.cr, self.uid, product_ids, ['name', 'code'], context=self.localcontext):
val = {
'id':product['id'],
'name':product['name'],
'code':product['code']
}
i = 1
for qty in self.quantity:
if qty == 0:
val['qty'+str(i)] = 0.0
else:
val['qty'+str(i)]=self._get_price(self.pricelist, product['id'], qty)
i += 1
products.append(val)
res.append({'name':cat[1],'products': products})
return res
def _get_price(self, pricelist_id, product_id, qty):
sale_price_digits = self.get_digits(dp='Product Price')
pricelist = self.pool.get('product.pricelist').browse(self.cr, self.uid, [pricelist_id], context=self.localcontext)[0]
price_dict = self.pool.get('product.pricelist').price_get(self.cr, self.uid, [pricelist_id], product_id, qty, context=self.localcontext)
if price_dict[pricelist_id]:
price = self.formatLang(price_dict[pricelist_id], digits=sale_price_digits, currency_obj=pricelist.currency_id)
else:
res = self.pool.get('product.product').read(self.cr, self.uid, [product_id])
price = self.formatLang(res[0]['list_price'], digits=sale_price_digits, currency_obj=pricelist.currency_id)
return price
class report_product_pricelist(osv.AbstractModel):
_name = 'report.product.report_pricelist'
_inherit = 'report.abstract_report'
_template = 'product.report_pricelist'
_wrapped_report_class = product_pricelist
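# Editor's sketch (not part of the original module): the shape of the wizard
# form dict the parser methods above expect; the pricelist id and quantities
# are illustrative. With this input, _get_titles() builds one row of column
# headers and _set_quantity() records the non-zero quantities:
#     form = {'price_list': 1, 'qty1': 1, 'qty2': 5, 'qty3': 10,
#             'qty4': 0, 'qty5': 0}
#     _get_titles(form) -> [{'qty1': '1 units', 'qty2': '5 units',
#                            'qty3': '10 units'}]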
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wadda/Bari | udp_spitter.py | 1 | 1474 | #!/usr/bin/python3
# coding=utf-8
"""reads barometric pressure sensor and writes it to UDP socket with timestamp available
"""
import socket
from datetime import datetime
from struct import pack
from time import sleep
from time import time
from os import _exit as dumbnrun
import ms5637
__author__ = 'Moe'
__copyright__ = 'Copyright 2017-2018 Moe'
__license__ = 'MIT'
__version__ = '0.0.3'
# Bari sensor of MS5637
sensor = ms5637.Chip()
host = "192.168.0.2" # The BIG machine for the number grinding
port = 6421 # bari port
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
while True:
try:
epochtime = time()
humantime = datetime.fromtimestamp(epochtime).strftime('%Y-%m-%dT%H:%M:%S')
pressure, temperature = sensor.get_data()
print(humantime, pressure)
        outstring = pack('!d', pressure)  # one network-order double
# outstring = pack('!2d',*[pressure, temperature])
# outstring = pack('!2d',*[epochtime, pressure])
sock.sendto(outstring, (host, port))
sleep(.1)
except OSError:
sensor.__init__()
pressure, temperature = sensor.get_data()
except KeyboardInterrupt:
        sock.close()
dumbnrun(0) # https://bytes.com/topic/python/answers/156121-os-_exit-vs-sys-exit
#
# Someday a cleaner Python interface will live here
#
# End
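# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): a minimal companion
# receiver for the '!d' packets sent above. In practice it runs as its own
# process on the 192.168.0.2 host; nothing after the while loop above ever
# executes, so this is reference-only here.
# ---------------------------------------------------------------------------
def receive_pressure(bind_host='0.0.0.0', port=6421):
    """Block until one pressure sample arrives and return it as a float."""
    from struct import unpack
    rx = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rx.bind((bind_host, port))
    data, _addr = rx.recvfrom(8)       # one network-order double = 8 bytes
    rx.close()
    (pressure,) = unpack('!d', data)   # mirrors pack('!d', pressure) above
    return pressure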
| mit |
janusnic/youtube-dl-GUI | youtube_dl/extractor/bbccouk.py | 4 | 14360 | from __future__ import unicode_literals
import xml.etree.ElementTree
from .subtitles import SubtitlesInfoExtractor
from ..utils import ExtractorError
from ..compat import compat_HTTPError
class BBCCoUkIE(SubtitlesInfoExtractor):
IE_NAME = 'bbc.co.uk'
IE_DESC = 'BBC iPlayer'
_VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
_TESTS = [
{
'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
'info_dict': {
'id': 'b039d07m',
'ext': 'flv',
'title': 'Kaleidoscope, Leonard Cohen',
'description': 'The Canadian poet and songwriter reflects on his musical career.',
'duration': 1740,
},
'params': {
# rtmp download
'skip_download': True,
}
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Man in Black: Series 3: The Printed Name',
'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
'duration': 1800,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Episode is no longer available on BBC iPlayer Radio',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
'info_dict': {
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
'duration': 5100,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
},
{
'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
'info_dict': {
'id': 'b03k3pb7',
'ext': 'flv',
'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
'description': '2. Invasion',
'duration': 3600,
},
'params': {
# rtmp download
'skip_download': True,
},
'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
}, {
'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
'info_dict': {
'id': 'b04v209v',
'ext': 'flv',
'title': 'Pete Tong, The Essential New Tune Special',
'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
'duration': 10800,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
'note': 'Audio',
'info_dict': {
'id': 'p02frcch',
'ext': 'flv',
'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
'duration': 3507,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
'note': 'Video',
'info_dict': {
'id': 'p025c103',
'ext': 'flv',
'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
'duration': 226,
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
'only_matching': True,
}, {
'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
'only_matching': True,
}
]
def _extract_asx_playlist(self, connection, programme_id):
asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
return [ref.get('href') for ref in asx.findall('./Entry/ref')]
def _extract_connection(self, connection, programme_id):
formats = []
protocol = connection.get('protocol')
supplier = connection.get('supplier')
if protocol == 'http':
href = connection.get('href')
# ASX playlist
if supplier == 'asx':
for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
formats.append({
'url': ref,
'format_id': 'ref%s_%s' % (i, supplier),
})
# Direct link
else:
formats.append({
'url': href,
'format_id': supplier,
})
elif protocol == 'rtmp':
application = connection.get('application', 'ondemand')
auth_string = connection.get('authString')
identifier = connection.get('identifier')
server = connection.get('server')
formats.append({
'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
'play_path': identifier,
'app': '%s?%s' % (application, auth_string),
'page_url': 'http://www.bbc.co.uk',
'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
'rtmp_live': False,
'ext': 'flv',
'format_id': supplier,
})
return formats
def _extract_items(self, playlist):
return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')
def _extract_medias(self, media_selection):
error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
if error is not None:
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')
def _extract_connections(self, media):
return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')
def _extract_video(self, media, programme_id):
formats = []
vbr = int(media.get('bitrate'))
vcodec = media.get('encoding')
service = media.get('service')
width = int(media.get('width'))
height = int(media.get('height'))
file_size = int(media.get('media_file_size'))
for connection in self._extract_connections(media):
conn_formats = self._extract_connection(connection, programme_id)
for format in conn_formats:
format.update({
'format_id': '%s_%s' % (service, format['format_id']),
'width': width,
'height': height,
'vbr': vbr,
'vcodec': vcodec,
'filesize': file_size,
})
formats.extend(conn_formats)
return formats
def _extract_audio(self, media, programme_id):
formats = []
abr = int(media.get('bitrate'))
acodec = media.get('encoding')
service = media.get('service')
for connection in self._extract_connections(media):
conn_formats = self._extract_connection(connection, programme_id)
for format in conn_formats:
format.update({
'format_id': '%s_%s' % (service, format['format_id']),
'abr': abr,
'acodec': acodec,
})
formats.extend(conn_formats)
return formats
def _extract_captions(self, media, programme_id):
subtitles = {}
for connection in self._extract_connections(media):
captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
srt = ''
for pos, p in enumerate(ps):
srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'),
p.text.strip() if p.text is not None else '')
subtitles[lang] = srt
return subtitles
def _download_media_selector(self, programme_id):
try:
media_selection = self._download_xml(
'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
programme_id, 'Downloading media selection XML')
except ExtractorError as ee:
if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().encode('utf-8'))
else:
raise
formats = []
subtitles = None
for media in self._extract_medias(media_selection):
kind = media.get('kind')
if kind == 'audio':
formats.extend(self._extract_audio(media, programme_id))
elif kind == 'video':
formats.extend(self._extract_video(media, programme_id))
elif kind == 'captions':
subtitles = self._extract_captions(media, programme_id)
return formats, subtitles
def _download_playlist(self, playlist_id):
try:
playlist = self._download_json(
'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
playlist_id, 'Downloading playlist JSON')
version = playlist.get('defaultAvailableVersion')
if version:
smp_config = version['smpConfig']
title = smp_config['title']
description = smp_config['summary']
for item in smp_config['items']:
kind = item['kind']
if kind != 'programme' and kind != 'radioProgramme':
continue
programme_id = item.get('vpid')
duration = int(item.get('duration'))
formats, subtitles = self._download_media_selector(programme_id)
return programme_id, title, description, duration, formats, subtitles
except ExtractorError as ee:
if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
raise
# fallback to legacy playlist
playlist = self._download_xml(
'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id,
playlist_id, 'Downloading legacy playlist XML')
no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
if no_items is not None:
reason = no_items.get('reason')
if reason == 'preAvailability':
msg = 'Episode %s is not yet available' % playlist_id
elif reason == 'postAvailability':
msg = 'Episode %s is no longer available' % playlist_id
elif reason == 'noMedia':
msg = 'Episode %s is not currently available' % playlist_id
else:
msg = 'Episode %s is not available: %s' % (playlist_id, reason)
raise ExtractorError(msg, expected=True)
for item in self._extract_items(playlist):
kind = item.get('kind')
if kind != 'programme' and kind != 'radioProgramme':
continue
title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
programme_id = item.get('identifier')
duration = int(item.get('duration'))
formats, subtitles = self._download_media_selector(programme_id)
return programme_id, title, description, duration, formats, subtitles
def _real_extract(self, url):
group_id = self._match_id(url)
webpage = self._download_webpage(url, group_id, 'Downloading video page')
programme_id = self._search_regex(
r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)
if programme_id:
player = self._download_json(
'http://www.bbc.co.uk/iplayer/episode/%s.json' % group_id,
group_id)['jsConf']['player']
title = player['title']
description = player['subtitle']
duration = player['duration']
formats, subtitles = self._download_media_selector(programme_id)
else:
programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(programme_id, subtitles)
return
self._sort_formats(formats)
return {
'id': programme_id,
'title': title,
'description': description,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
| mit |
hickford/youtube-dl | youtube_dl/extractor/canalc2.py | 145 | 1258 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class Canalc2IE(InfoExtractor):
IE_NAME = 'canalc2.tv'
_VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'
_TEST = {
'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
'md5': '060158428b650f896c542dfbb3d6487f',
'info_dict': {
'id': '12163',
'ext': 'mp4',
'title': 'Terrasses du Numérique'
}
}
def _real_extract(self, url):
video_id = re.match(self._VALID_URL, url).group('id')
# We need to set the voir field for getting the file name
url = 'http://www.canalc2.tv/video.asp?idVideo=%s&voir=oui' % video_id
webpage = self._download_webpage(url, video_id)
file_name = self._search_regex(
r"so\.addVariable\('file','(.*?)'\);",
webpage, 'file name')
video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name
title = self._html_search_regex(
r'class="evenement8">(.*?)</a>', webpage, 'title')
return {
'id': video_id,
'ext': 'mp4',
'url': video_url,
'title': title,
}
| unlicense |
JoeLaMartina/aima-python | submissions/Fritz/c4-11-28/utils.py | 56 | 18937 | """Provides some utilities widely used by other modules"""
import bisect
import collections
import collections.abc
import functools
import operator
import os.path
import random
import math
# ______________________________________________________________________________
# Functions on Sequences and Iterables
def sequence(iterable):
"Coerce iterable to sequence, if it is not already one."
return (iterable if isinstance(iterable, collections.abc.Sequence)
else tuple(iterable))
def removeall(item, seq):
"""Return a copy of seq (or string) with all occurences of item removed."""
if isinstance(seq, str):
return seq.replace(item, '')
else:
return [x for x in seq if x != item]
def unique(seq): # TODO: replace with set
"""Remove duplicate elements from seq. Assumes hashable elements."""
return list(set(seq))
def count(seq):
"""Count the number of items in sequence that are interpreted as true."""
return sum(bool(x) for x in seq)
def product(numbers):
"""Return the product of the numbers, e.g. product([2, 3, 10]) == 60"""
result = 1
for x in numbers:
result *= x
return result
def first(iterable, default=None):
"Return the first element of an iterable or the next element of a generator; or default."
try:
return iterable[0]
except IndexError:
return default
except TypeError:
return next(iterable, default)
def is_in(elt, seq):
"""Similar to (elt in seq), but compares with 'is', not '=='."""
return any(x is elt for x in seq)
# ______________________________________________________________________________
# argmin and argmax
identity = lambda x: x
argmin = min
argmax = max
def argmin_random_tie(seq, key=identity):
"""Return a minimum element of seq; break ties at random."""
return argmin(shuffled(seq), key=key)
def argmax_random_tie(seq, key=identity):
"Return an element with highest fn(seq[i]) score; break ties at random."
return argmax(shuffled(seq), key=key)
def shuffled(iterable):
"Randomly shuffle a copy of iterable."
items = list(iterable)
random.shuffle(items)
return items
# ______________________________________________________________________________
# Statistical and mathematical functions
def histogram(values, mode=0, bin_function=None):
"""Return a list of (value, count) pairs, summarizing the input values.
Sorted by increasing value, or if mode=1, by decreasing count.
If bin_function is given, map it over values first."""
if bin_function:
values = map(bin_function, values)
bins = {}
for val in values:
bins[val] = bins.get(val, 0) + 1
if mode:
return sorted(list(bins.items()), key=lambda x: (x[1], x[0]),
reverse=True)
else:
return sorted(bins.items())
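# A quick illustration (not part of the original module):
# histogram([2, 2, 3, 3, 3]) returns [(2, 2), (3, 3)], while
# histogram([2, 2, 3, 3, 3], mode=1) returns [(3, 3), (2, 2)],
# i.e. sorted by decreasing count.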
def dotproduct(X, Y):
"""Return the sum of the element-wise product of vectors X and Y."""
return sum(x * y for x, y in zip(X, Y))
def element_wise_product(X, Y):
"""Return vector as an element-wise product of vectors X and Y"""
assert len(X) == len(Y)
return [x * y for x, y in zip(X, Y)]
def matrix_multiplication(X_M, *Y_M):
"""Return a matrix as a matrix-multiplication of X_M and arbitary number of matrices *Y_M"""
def _mat_mult(X_M, Y_M):
"""Return a matrix as a matrix-multiplication of two matrices X_M and Y_M
>>> matrix_multiplication([[1, 2, 3],
[2, 3, 4]],
[[3, 4],
[1, 2],
[1, 0]])
[[8, 8],[13, 14]]
"""
assert len(X_M[0]) == len(Y_M)
result = [[0 for i in range(len(Y_M[0]))] for j in range(len(X_M))]
for i in range(len(X_M)):
for j in range(len(Y_M[0])):
for k in range(len(Y_M)):
result[i][j] += X_M[i][k] * Y_M[k][j]
return result
result = X_M
for Y in Y_M:
result = _mat_mult(result, Y)
return result
def vector_to_diagonal(v):
"""Converts a vector to a diagonal matrix with vector elements
as the diagonal elements of the matrix"""
diag_matrix = [[0 for i in range(len(v))] for j in range(len(v))]
for i in range(len(v)):
diag_matrix[i][i] = v[i]
return diag_matrix
def vector_add(a, b):
"""Component-wise addition of two vectors."""
return tuple(map(operator.add, a, b))
def scalar_vector_product(X, Y):
"""Return vector as a product of a scalar and a vector"""
return [X * y for y in Y]
def scalar_matrix_product(X, Y):
return [scalar_vector_product(X, y) for y in Y]
def inverse_matrix(X):
"""Inverse a given square matrix of size 2x2"""
assert len(X) == 2
assert len(X[0]) == 2
det = X[0][0] * X[1][1] - X[0][1] * X[1][0]
assert det != 0
inv_mat = scalar_matrix_product(1.0/det, [[X[1][1], -X[0][1]], [-X[1][0], X[0][0]]])
return inv_mat
def probability(p):
"Return true with probability p."
return p > random.uniform(0.0, 1.0)
def weighted_sample_with_replacement(seq, weights, n):
"""Pick n samples from seq at random, with replacement, with the
probability of each element in proportion to its corresponding
weight."""
sample = weighted_sampler(seq, weights)
return [sample() for _ in range(n)]
def weighted_sampler(seq, weights):
"Return a random-sample function that picks from seq weighted by weights."
totals = []
for w in weights:
totals.append(w + totals[-1] if totals else w)
return lambda: seq[bisect.bisect(totals, random.uniform(0, totals[-1]))]
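# Usage sketch (weights are illustrative): build a sampler biased toward 'c'.
# sample = weighted_sampler(['a', 'b', 'c'], [1, 2, 7])
# draws = [sample() for _ in range(1000)] # 'c' appears ~70% of the time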
def rounder(numbers, d=4):
"Round a single number, or sequence of numbers, to d decimal places."
if isinstance(numbers, (int, float)):
return round(numbers, d)
else:
constructor = type(numbers) # Can be list, set, tuple, etc.
return constructor(rounder(n, d) for n in numbers)
def num_or_str(x):
"""The argument is a string; convert to a number if
possible, or strip it.
"""
try:
return int(x)
except ValueError:
try:
return float(x)
except ValueError:
return str(x).strip()
def normalize(dist):
"""Multiply each number by a constant such that the sum is 1.0"""
if isinstance(dist, dict):
total = sum(dist.values())
for key in dist:
dist[key] = dist[key] / total
assert 0 <= dist[key] <= 1, "Probabilities must be between 0 and 1."
return dist
total = sum(dist)
return [(n / total) for n in dist]
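# For example, normalize([1, 1, 2]) == [0.25, 0.25, 0.5], and the dict form is
# rescaled in place: normalize({'a': 2, 'b': 2}) == {'a': 0.5, 'b': 0.5}.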
def clip(x, lowest, highest):
"""Return x clipped to the range [lowest..highest]."""
return max(lowest, min(x, highest))
def sigmoid(x):
"""Return activation value of x with sigmoid function"""
return 1/(1 + math.exp(-x))
def step(x):
"""Return activation value of x with sign function"""
return 1 if x >= 0 else 0
try: # math.isclose was added in Python 3.5; but we might be in 3.4
from math import isclose
except ImportError:
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
"Return true if numbers a and b are close to each other."
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
# ______________________________________________________________________________
# Misc Functions
# TODO: Use functools.lru_cache memoization decorator
def memoize(fn, slot=None):
"""Memoize fn: make it remember the computed value for any argument list.
If slot is specified, store result in that slot of first argument.
If slot is false, store results in a dictionary."""
if slot:
def memoized_fn(obj, *args):
if hasattr(obj, slot):
return getattr(obj, slot)
else:
val = fn(obj, *args)
setattr(obj, slot, val)
return val
else:
def memoized_fn(*args):
if args not in memoized_fn.cache:
memoized_fn.cache[args] = fn(*args)
return memoized_fn.cache[args]
memoized_fn.cache = {}
return memoized_fn
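# A minimal sketch of the dictionary-backed path (slot=None): recursion goes
# through the memoized wrapper, so each distinct argument is computed once.
# fib = memoize(lambda n: n if n < 2 else fib(n - 1) + fib(n - 2))
# fib(30) # 31 cache entries instead of ~2.7 million naive calls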
def name(obj):
"Try to find some reasonable name for the object."
return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0) or
getattr(getattr(obj, '__class__', 0), '__name__', 0) or
str(obj))
def isnumber(x):
"Is x a number?"
return hasattr(x, '__int__')
def issequence(x):
"Is x a sequence?"
return isinstance(x, collections.abc.Sequence)
def print_table(table, header=None, sep=' ', numfmt='%g'):
"""Print a list of lists as a table, so that columns line up nicely.
header, if specified, will be printed as the first row.
numfmt is the format for all numbers; you might want e.g. '%6.2f'.
(If you want different formats in different columns,
don't use print_table.) sep is the separator between columns."""
justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]]
if header:
table.insert(0, header)
table = [[numfmt % x if isnumber(x) else x for x in row]
for row in table]
sizes = list(
map(lambda seq: max(map(len, seq)),
list(zip(*[map(str, row) for row in table]))))
for row in table:
print(sep.join(getattr(
str(x), j)(size) for (j, size, x) in zip(justs, sizes, row)))
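# Illustration (values made up): print_table([[1, 2.5], [10, 3.25]],
# header=['a', 'b'], numfmt='%6.2f') prints the numbers to two decimals,
# right-justified, with the header row on top.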
def AIMAFile(components, mode='r'):
"Open a file based at the AIMA root directory."
aima_root = os.path.dirname(__file__)
aima_file = os.path.join(aima_root, *components)
return open(aima_file, mode=mode)
def DataFile(name, mode='r'):
"Return a file in the AIMA /aima-data directory."
return AIMAFile(['aima-data', name], mode)
# ______________________________________________________________________________
# Expressions
# See https://docs.python.org/3/reference/expressions.html#operator-precedence
# See https://docs.python.org/3/reference/datamodel.html#special-method-names
class Expr(object):
"""A mathematical expression with an operator and 0 or more arguments.
op is a str like '+' or 'sin'; args are Expressions.
Expr('x') or Symbol('x') creates a symbol (a nullary Expr).
Expr('-', x) creates a unary; Expr('+', x, 1) creates a binary."""
def __init__(self, op, *args):
self.op = str(op)
self.args = args
# Operator overloads
def __neg__(self): return Expr('-', self)
def __pos__(self): return Expr('+', self)
def __invert__(self): return Expr('~', self)
def __add__(self, rhs): return Expr('+', self, rhs)
def __sub__(self, rhs): return Expr('-', self, rhs)
def __mul__(self, rhs): return Expr('*', self, rhs)
def __pow__(self, rhs): return Expr('**',self, rhs)
def __mod__(self, rhs): return Expr('%', self, rhs)
def __and__(self, rhs): return Expr('&', self, rhs)
def __xor__(self, rhs): return Expr('^', self, rhs)
def __rshift__(self, rhs): return Expr('>>', self, rhs)
def __lshift__(self, rhs): return Expr('<<', self, rhs)
def __truediv__(self, rhs): return Expr('/', self, rhs)
def __floordiv__(self, rhs): return Expr('//', self, rhs)
def __matmul__(self, rhs): return Expr('@', self, rhs)
def __or__(self, rhs):
"Allow both P | Q, and P |'==>'| Q."
if isinstance(rhs, Expression):
return Expr('|', self, rhs)
else:
return PartialExpr(rhs, self)
# Reverse operator overloads
def __radd__(self, lhs): return Expr('+', lhs, self)
def __rsub__(self, lhs): return Expr('-', lhs, self)
def __rmul__(self, lhs): return Expr('*', lhs, self)
def __rdiv__(self, lhs): return Expr('/', lhs, self)
def __rpow__(self, lhs): return Expr('**', lhs, self)
def __rmod__(self, lhs): return Expr('%', lhs, self)
def __rand__(self, lhs): return Expr('&', lhs, self)
def __rxor__(self, lhs): return Expr('^', lhs, self)
def __ror__(self, lhs): return Expr('|', lhs, self)
def __rrshift__(self, lhs): return Expr('>>', lhs, self)
def __rlshift__(self, lhs): return Expr('<<', lhs, self)
def __rtruediv__(self, lhs): return Expr('/', lhs, self)
def __rfloordiv__(self, lhs): return Expr('//', lhs, self)
def __rmatmul__(self, lhs): return Expr('@', lhs, self)
def __call__(self, *args):
"Call: if 'f' is a Symbol, then f(0) == Expr('f', 0)."
if self.args:
raise ValueError('can only do a call for a Symbol, not an Expr')
else:
return Expr(self.op, *args)
# Equality and repr
def __eq__(self, other):
"'x == y' evaluates to True or False; does not build an Expr."
return (isinstance(other, Expr)
and self.op == other.op
and self.args == other.args)
def __hash__(self): return hash(self.op) ^ hash(self.args)
def __repr__(self):
op = self.op
args = [str(arg) for arg in self.args]
if op.isidentifier(): # f(x) or f(x, y)
return '{}({})'.format(op, ', '.join(args)) if args else op
elif len(args) == 1: # -x or -(x + 1)
return op + args[0]
else: # (x - y)
opp = (' ' + op + ' ')
return '(' + opp.join(args) + ')'
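# A few illustrative reprs: Expr('+', Symbol('x'), 1) prints as '(x + 1)',
# Expr('-', Symbol('x')) as '-x', and Expr('sin', Symbol('x')) as 'sin(x)'.
# (Symbol is defined just below.)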
# An 'Expression' is either an Expr or a Number.
# Symbol is not an explicit type; it is any Expr with 0 args.
Number = (int, float, complex)
Expression = (Expr, Number)
def Symbol(name):
"A Symbol is just an Expr with no args."
return Expr(name)
def symbols(names):
"Return a tuple of Symbols; names is a comma/whitespace delimited str."
return tuple(Symbol(name) for name in names.replace(',', ' ').split())
def subexpressions(x):
"Yield the subexpressions of an Expression (including x itself)."
yield x
if isinstance(x, Expr):
for arg in x.args:
yield from subexpressions(arg)
def arity(expression):
"The number of sub-expressions in this expression."
if isinstance(expression, Expr):
return len(expression.args)
else: # expression is a number
return 0
# For operators that are not defined in Python, we allow new InfixOps:
class PartialExpr:
"""Given 'P |'==>'| Q, first form PartialExpr('==>', P), then combine with Q."""
def __init__(self, op, lhs): self.op, self.lhs = op, lhs
def __or__(self, rhs): return Expr(self.op, self.lhs, rhs)
def __repr__(self): return "PartialExpr('{}', {})".format(self.op, self.lhs)
def expr(x):
"""Shortcut to create an Expression. x is a str in which:
- identifiers are automatically defined as Symbols.
- ==> is treated as an infix |'==>'|, as are <== and <=>.
If x is already an Expression, it is returned unchanged. Example:
>>> expr('P & Q ==> Q')
((P & Q) ==> Q)
"""
if isinstance(x, str):
return eval(expr_handle_infix_ops(x), defaultkeydict(Symbol))
else:
return x
infix_ops = '==> <== <=>'.split()
def expr_handle_infix_ops(x):
"""Given a str, return a new str with ==> replaced by |'==>'|, etc.
>>> expr_handle_infix_ops('P ==> Q')
"P |'==>'| Q"
"""
for op in infix_ops:
x = x.replace(op, '|' + repr(op) + '|')
return x
class defaultkeydict(collections.defaultdict):
"""Like defaultdict, but the default_factory is a function of the key.
>>> d = defaultkeydict(len); d['four']
4
"""
def __missing__(self, key):
self[key] = result = self.default_factory(key)
return result
# ______________________________________________________________________________
# Queues: Stack, FIFOQueue, PriorityQueue
# TODO: Possibly use queue.Queue, queue.PriorityQueue
# TODO: Priority queues may not belong here -- see treatment in search.py
class Queue:
"""Queue is an abstract class/interface. There are three types:
Stack(): A Last In First Out Queue.
FIFOQueue(): A First In First Out Queue.
PriorityQueue(order, f): Queue in sorted order (default min-first).
Each type supports the following methods and functions:
q.append(item) -- add an item to the queue
q.extend(items) -- equivalent to: for item in items: q.append(item)
q.pop() -- return the top item from the queue
len(q) -- number of items in q (also q.__len())
item in q -- does q contain item?
Note that isinstance(Stack(), Queue) is false, because we implement stacks
as lists. If Python ever gets interfaces, Queue will be an interface."""
def __init__(self):
raise NotImplementedError
def extend(self, items):
for item in items:
self.append(item)
def Stack():
"""Return an empty list, suitable as a Last-In-First-Out Queue."""
return []
class FIFOQueue(Queue):
"""A First-In-First-Out Queue."""
def __init__(self):
self.A = []
self.start = 0
def append(self, item):
self.A.append(item)
def __len__(self):
return len(self.A) - self.start
def extend(self, items):
self.A.extend(items)
def pop(self):
e = self.A[self.start]
self.start += 1
if self.start > 5 and self.start > len(self.A) / 2:
self.A = self.A[self.start:]
self.start = 0
return e
def __contains__(self, item):
return item in self.A[self.start:]
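# Sketch of FIFOQueue in use; pop() compacts the backing list once more than
# half of it (and at least 6 slots) is dead space, keeping dequeues cheap on
# average:
# q = FIFOQueue(); q.extend([1, 2, 3]); q.pop() # -> 1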
class PriorityQueue(Queue):
"""A queue in which the minimum (or maximum) element (as determined by f and
order) is returned first. If order is min, the item with minimum f(x) is
returned first; if order is max, then it is the item with maximum f(x).
Also supports dict-like lookup."""
def __init__(self, order=min, f=lambda x: x):
self.A = []
self.order = order
self.f = f
def append(self, item):
bisect.insort(self.A, (self.f(item), item))
def __len__(self):
return len(self.A)
def pop(self):
if self.order == min:
return self.A.pop(0)[1]
else:
return self.A.pop()[1]
def __contains__(self, item):
return any(item == pair[1] for pair in self.A)
def __getitem__(self, key):
for _, item in self.A:
if item == key:
return item
def __delitem__(self, key):
for i, (value, item) in enumerate(self.A):
if item == key:
self.A.pop(i)
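# Illustrative usage: append() keeps self.A sorted by f via bisect.insort, so
# pop() simply takes the element at the front (or back, for order=max).
# pq = PriorityQueue(f=lambda x: -x); pq.extend([3, 1, 2]); pq.pop() # -> 3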
# ______________________________________________________________________________
# Useful Shorthands
class Bool(int):
"""Just like `bool`, except values display as 'T' and 'F' instead of 'True' and 'False'"""
__str__ = __repr__ = lambda self: 'T' if self else 'F'
T = Bool(True)
F = Bool(False)
| mit |
mycFelix/heron | integration_test/src/python/integration_test/core/test_topology_builder.py | 4 | 7601 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''integration test topology builder'''
import copy
import heronpy.api.api_constants as api_constants
from heronpy.api.stream import Stream, Grouping
from heronpy.api.topology import TopologyBuilder, Topology, TopologyType
from ..core import constants as integ_const
from .aggregator_bolt import AggregatorBolt
from .integration_test_spout import IntegrationTestSpout
from .integration_test_bolt import IntegrationTestBolt
class TestTopologyBuilder(TopologyBuilder):
"""Topology Builder for integration tests
The given spouts and bolts are wrapped by the IntegrationTestSpout and
IntegrationTestBolt classes, respectively.
"""
TERMINAL_BOLT_NAME = '__integration_test_aggregator_bolt'
TERMINAL_BOLT_CLASS = AggregatorBolt
DEFAULT_CONFIG = {api_constants.TOPOLOGY_DEBUG: True,
api_constants.TOPOLOGY_RELIABILITY_MODE:
api_constants.TopologyReliabilityMode.ATLEAST_ONCE,
api_constants.TOPOLOGY_PROJECT_NAME: "heron-integration-test"}
def __init__(self, name, http_server_url):
super(TestTopologyBuilder, self).__init__(name)
self.output_location = "%s/%s" % (http_server_url, self.topology_name)
self.set_config(self.DEFAULT_CONFIG)
# map <name -> spout's component spec>
self.spouts = {}
# map <name -> bolt's component spec>
self.bolts = {}
# map <name -> set of parents>
self.prev = {}
def add_spout(self, name, spout_cls, par, config=None,
optional_outputs=None, max_executions=None):
"""Add an integration_test spout"""
user_spec = spout_cls.spec(name)
spout_classpath = user_spec.python_class_path
if hasattr(spout_cls, 'outputs'):
user_outputs = spout_cls.outputs
else:
user_outputs = []
if optional_outputs is not None:
user_outputs.extend(optional_outputs)
if config is None:
_config = {}
else:
_config = copy.copy(config)
if max_executions is not None:
_config[integ_const.USER_MAX_EXECUTIONS] = max_executions
test_spec = IntegrationTestSpout.spec(name, par, _config,
user_spout_classpath=spout_classpath,
user_output_fields=user_outputs)
self.add_spec(test_spec)
self.spouts[name] = test_spec
return test_spec
def add_bolt(self, name, bolt_cls, par, inputs, config=None, optional_outputs=None):
"""Add an integration_test bolt
Only dict-based inputs are supported
"""
assert isinstance(inputs, dict)
user_spec = bolt_cls.spec(name)
bolt_classpath = user_spec.python_class_path
if hasattr(bolt_cls, 'outputs'):
user_outputs = bolt_cls.outputs
else:
user_outputs = []
if optional_outputs is not None:
user_outputs.extend(optional_outputs)
if config is None:
_config = {}
else:
_config = config
test_spec = IntegrationTestBolt.spec(name, par, inputs, _config,
user_bolt_classpath=bolt_classpath,
user_output_fields=user_outputs)
self.add_spec(test_spec)
self.bolts[name] = test_spec
return test_spec
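# A hypothetical builder session (component names and user classes are
# illustrative, not part of the framework):
# builder = TestTopologyBuilder('my_topo', 'http://localhost:8080')
# spout_spec = builder.add_spout('gen', WordSpout, 1, max_executions=100)
# builder.add_bolt('count', CountBolt, 2, inputs={spout_spec: Grouping.SHUFFLE})
# topo_cls = builder.create_topology()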
# pylint: disable=too-many-branches
def create_topology(self):
"""Creates an integration_test topology class"""
# first add the aggregation_bolt
# inputs will be updated later
aggregator_config = {integ_const.HTTP_POST_URL_KEY: self.output_location}
self.add_bolt(self.TERMINAL_BOLT_NAME, self.TERMINAL_BOLT_CLASS, 1,
inputs={}, config=aggregator_config)
# building a graph directed from children to parents, by looking only on bolts
# since spouts don't have parents
for name, bolt_spec in self.bolts.iteritems():
if name == self.TERMINAL_BOLT_NAME:
continue
bolt_protobuf = bolt_spec.get_protobuf()
for istream in bolt_protobuf.inputs:
parent = istream.stream.component_name
if name in self.prev:
self.prev[name].add(parent)
else:
parents = set()
parents.add(parent)
self.prev[name] = parents
# Find the terminal bolts defined by users and link them with "AggregatorBolt".
# set of terminal component names
terminals = set()
# set of non-terminal component names
non_terminals = set()
# 1. terminal bolts need upstream components, because we don't want isolated bolts
# 2. terminal bolts must not appear in prev.values(), meaning they have no downstream components
for parent_set in self.prev.values():
non_terminals.update(parent_set)
for bolt_name in self.prev.keys():
if bolt_name not in non_terminals:
terminals.add(bolt_name)
# will also consider the cases with spouts without children
for spout_name in self.spouts.keys():
if spout_name not in non_terminals:
terminals.add(spout_name)
# add all grouping to components
for child in self.prev.keys():
for parent in self.prev[child]:
self._add_all_grouping(child, parent, integ_const.INTEGRATION_TEST_CONTROL_STREAM_ID)
# then connect aggregator bolt with user's terminal components
# terminal_outputs are output fields for terminals, list of either str or Stream
for terminal in terminals:
if terminal in self.bolts:
terminal_outputs = self.bolts[terminal].outputs
else:
terminal_outputs = self.spouts[terminal].outputs
# now get a set of stream ids
stream_ids = ["default" if isinstance(out, str) else out.stream_id
for out in terminal_outputs]
for stream_id in set(stream_ids):
self._add_all_grouping(self.TERMINAL_BOLT_NAME, terminal, stream_id)
# create topology class
class_dict = self._construct_topo_class_dict()
return TopologyType(self.topology_name, (Topology,), class_dict)
def _add_all_grouping(self, child, parent, stream_id):
"""Adds all grouping between child component and parent component with a given stream id
:type child: str
:param child: child's component name
:type parent: str
:param parent: parent's component name
:type stream_id: str
:param stream_id: stream id
"""
# child has to be a bolt
child_component_spec = self.bolts[child]
# child_inputs is dict mapping from <HeronComponentSpec|GlobalStreamId -> grouping>
child_inputs = child_component_spec.inputs
if parent in self.bolts:
parent_component_spec = self.bolts[parent]
else:
parent_component_spec = self.spouts[parent]
if stream_id == Stream.DEFAULT_STREAM_ID:
child_inputs[parent_component_spec] = Grouping.ALL
else:
child_inputs[parent_component_spec[stream_id]] = Grouping.ALL
| apache-2.0 |
ISTweak/android_kernel_sony_msm8960 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
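# A typical invocation, assuming a perf build with Python scripting support
# (the event names below are the standard futex syscall tracepoints):
# perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex -a -- sleep 30
# perf script -s futex-contention.py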
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
| gpl-2.0 |
dims/nova | nova/objects/monitor_metric.py | 11 | 4724 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from oslo_utils import versionutils
from nova.objects import base
from nova.objects import fields
from nova import utils
# NOTE(jwcroppe): Used to determine which fields' values we need to adjust
# (read: divide by 100.0) before sending information to the RPC notifier since
# these values were expected to be within the range [0, 1].
FIELDS_REQUIRING_CONVERSION = [fields.MonitorMetricType.CPU_USER_PERCENT,
fields.MonitorMetricType.CPU_KERNEL_PERCENT,
fields.MonitorMetricType.CPU_IDLE_PERCENT,
fields.MonitorMetricType.CPU_IOWAIT_PERCENT,
fields.MonitorMetricType.CPU_PERCENT]
@base.NovaObjectRegistry.register
class MonitorMetric(base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added NUMA support
VERSION = '1.1'
fields = {
'name': fields.MonitorMetricTypeField(nullable=False),
'value': fields.IntegerField(nullable=False),
'numa_membw_values': fields.DictOfIntegersField(nullable=True),
'timestamp': fields.DateTimeField(nullable=False),
# This will be the stevedore extension full class name
# for the plugin from which the metric originates.
'source': fields.StringField(nullable=False),
}
def obj_make_compatible(self, primitive, target_version):
super(MonitorMetric, self).obj_make_compatible(primitive,
target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
if target_version < (1, 1) and 'numa_membw_values' in primitive:
del primitive['numa_membw_values']
# NOTE(jaypipes): This method exists to convert the object to the
# format expected by the RPC notifier for metrics events.
def to_dict(self):
dict_to_return = {
'name': self.name,
# NOTE(jaypipes): This is what jsonutils.dumps() does to
# datetime.datetime objects, which is what timestamp is in
# this object as well as the original simple dict metrics
'timestamp': utils.strtime(self.timestamp),
'source': self.source,
}
if self.obj_attr_is_set('value'):
if self.name in FIELDS_REQUIRING_CONVERSION:
dict_to_return['value'] = self.value / 100.0
else:
dict_to_return['value'] = self.value
elif self.obj_attr_is_set('numa_membw_values'):
dict_to_return['numa_membw_values'] = self.numa_membw_values
return dict_to_return
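# Illustrative conversion (timestamp/source values are placeholders): a
# CPU-percent sample stored as the integer 87 notifies as 0.87.
# m = MonitorMetric(name=fields.MonitorMetricType.CPU_PERCENT, value=87,
# timestamp=ts, source='libvirt.LibvirtDriver')
# m.to_dict()['value'] # -> 0.87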
@base.NovaObjectRegistry.register
class MonitorMetricList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: MonitorMetric version 1.1
VERSION = '1.1'
fields = {
'objects': fields.ListOfObjectsField('MonitorMetric'),
}
@classmethod
def from_json(cls, metrics):
"""Converts a legacy json object into a list of MonitorMetric objs
and finally returns a MonitorMetricList
:param metrics: a string of json serialized objects
:returns: a MonitorMetricList Object.
"""
metrics = jsonutils.loads(metrics) if metrics else []
# NOTE(suro-patz): While instantiating the MonitorMetric() from
# JSON-ified string, we need to re-convert the
# normalized metrics to avoid truncation to 0 by
# typecasting into an integer.
metric_list = []
for metric in metrics:
if ('value' in metric and metric['name'] in
FIELDS_REQUIRING_CONVERSION):
metric['value'] = metric['value'] * 100
metric_list.append(MonitorMetric(**metric))
return MonitorMetricList(objects=metric_list)
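# Sketch of the inverse path: for fields in FIELDS_REQUIRING_CONVERSION,
# from_json() multiplies the normalized value back by 100, so a serialized
# cpu.percent of 0.87 is stored internally as 87 and to_dict() can divide
# again without truncating to 0.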
# NOTE(jaypipes): This method exists to convert the object to the
# format expected by the RPC notifier for metrics events.
def to_list(self):
return [m.to_dict() for m in self.objects]
| apache-2.0 |
shoyer/xarray | xarray/backends/cfgrib_.py | 2 | 2208 | import numpy as np
from ..core import indexing
from ..core.utils import Frozen, FrozenDict
from ..core.variable import Variable
from .common import AbstractDataStore, BackendArray
from .locks import SerializableLock, ensure_lock
# FIXME: Add a dedicated lock, even if ecCodes is supposed to be thread-safe
# in most circumstances. See:
# https://confluence.ecmwf.int/display/ECC/Frequently+Asked+Questions
ECCODES_LOCK = SerializableLock()
class CfGribArrayWrapper(BackendArray):
def __init__(self, datastore, array):
self.datastore = datastore
self.shape = array.shape
self.dtype = array.dtype
self.array = array
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _getitem(self, key):
with self.datastore.lock:
return self.array[key]
class CfGribDataStore(AbstractDataStore):
"""
Implements the ``xr.AbstractDataStore`` read-only API for a GRIB file.
"""
def __init__(self, filename, lock=None, **backend_kwargs):
import cfgrib
if lock is None:
lock = ECCODES_LOCK
self.lock = ensure_lock(lock)
self.ds = cfgrib.open_file(filename, **backend_kwargs)
def open_store_variable(self, name, var):
if isinstance(var.data, np.ndarray):
data = var.data
else:
wrapped_array = CfGribArrayWrapper(self, var.data)
data = indexing.LazilyOuterIndexedArray(wrapped_array)
encoding = self.ds.encoding.copy()
encoding["original_shape"] = var.data.shape
return Variable(var.dimensions, data, var.attributes, encoding)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return Frozen(self.ds.attributes)
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
dims = self.get_dimensions()
encoding = {"unlimited_dims": {k for k, v in dims.items() if v is None}}
return encoding
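# Minimal usage sketch (the file name is hypothetical):
# store = CfGribDataStore('example.grib')
# variables = store.get_variables() # lazily indexed, guarded by ECCODES_LOCK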
| apache-2.0 |
ashahi1/docker-volume-vsphere | esx_service/tools/sqlite/sqlite3/__init__.py | 239 | 1037 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from dbapi2 import *
| apache-2.0 |
kotfic/girder | tests/cases/external_data_core_test.py | 10 | 1529 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import unittest
import hashlib
import os
class ExternalDataCoreTest(unittest.TestCase):
def testExternalDataFile(self):
"""Asserts that the external data file was correctly downloaded."""
filepath = os.path.join(
os.environ['GIRDER_TEST_DATA_PREFIX'],
'test_file.txt'
)
self.assertTrue(
os.path.exists(filepath),
'The test file does not exist.'
)
hash = hashlib.md5()
with open(filepath, 'r') as f:
hash.update(f.read().encode('utf-8'))
self.assertEqual(
hash.hexdigest(),
'169293f7c9138e4b50ebcab4358dc509',
'Invalid test file content.'
)
| apache-2.0 |
ogonbat/django-shorty | docs/conf.py | 1 | 7842 | # -*- coding: utf-8 -*-
#
# Django-Shorty documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 11 22:41:37 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django-Shorty'
copyright = u'2012, Andrea Mucci aKa cingusoft'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Django-Shortydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Django-Shorty.tex', u'Django-Shorty Documentation',
u'Andrea Mucci aKa cingusoft', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-shorty', u'Django-Shorty Documentation',
[u'Andrea Mucci aKa cingusoft'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Django-Shorty', u'Django-Shorty Documentation',
u'Andrea Mucci aKa cingusoft', 'Django-Shorty', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| lgpl-3.0 |
nvoron23/avos | openstack_dashboard/dashboards/project/data_processing/jobs/workflows/launch.py | 14 | 16820 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates.workflows.create as t_flows
import openstack_dashboard.dashboards.project.data_processing. \
clusters.workflows.create as c_flow
import openstack_dashboard.dashboards.project.data_processing. \
utils.workflow_helpers as whelpers
LOG = logging.getLogger(__name__)
DATA_SOURCE_CREATE_URL = ("horizon:project:data_processing.data_sources"
":create-data-source")
class JobExecutionGeneralConfigAction(workflows.Action):
job_input = forms.DynamicChoiceField(
label=_("Input"),
initial=(None, "None"),
add_item_link=DATA_SOURCE_CREATE_URL)
job_output = forms.DynamicChoiceField(
label=_("Output"),
initial=(None, "None"),
add_item_link=DATA_SOURCE_CREATE_URL)
def __init__(self, request, *args, **kwargs):
super(JobExecutionGeneralConfigAction, self).__init__(request,
*args,
**kwargs)
if request.REQUEST.get("job_id", None) is None:
self.fields["job"] = forms.ChoiceField(
label=_("Job"))
self.fields["job"].choices = self.populate_job_choices(request)
else:
self.fields["job"] = forms.CharField(
widget=forms.HiddenInput(),
initial=request.REQUEST.get("job_id", None))
def populate_job_input_choices(self, request, context):
return self.get_data_source_choices(request, context)
def populate_job_output_choices(self, request, context):
return self.get_data_source_choices(request, context)
def get_data_source_choices(self, request, context):
try:
data_sources = saharaclient.data_source_list(request)
except Exception:
data_sources = []
exceptions.handle(request,
_("Unable to fetch data sources."))
choices = [(data_source.id, data_source.name)
for data_source in data_sources]
choices.insert(0, (None, 'None'))
return choices
def populate_job_choices(self, request):
try:
jobs = saharaclient.job_list(request)
except Exception:
jobs = []
exceptions.handle(request,
_("Unable to fetch jobs."))
choices = [(job.id, job.name)
for job in jobs]
return choices
class Meta:
name = _("Job")
help_text_template = (
"project/data_processing.jobs/_launch_job_help.html")
class JobExecutionExistingGeneralConfigAction(JobExecutionGeneralConfigAction):
cluster = forms.ChoiceField(
label=_("Cluster"),
initial=(None, "None"),
widget=forms.Select(attrs={"class": "cluster_choice"}))
def populate_cluster_choices(self, request, context):
try:
clusters = saharaclient.cluster_list(request)
except Exception:
clusters = []
exceptions.handle(request,
_("Unable to fetch clusters."))
choices = [(cluster.id, cluster.name)
for cluster in clusters]
return choices
class Meta:
name = _("Job")
help_text_template = (
"project/data_processing.jobs/_launch_job_help.html")
class JobConfigAction(workflows.Action):
MAIN_CLASS = "edp.java.main_class"
JAVA_OPTS = "edp.java.java_opts"
EDP_MAPPER = "edp.streaming.mapper"
EDP_REDUCER = "edp.streaming.reducer"
EDP_PREFIX = "edp."
property_name = forms.ChoiceField(
required=False,
)
job_configs = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_params = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_args_array = forms.CharField(
required=False,
widget=forms.HiddenInput())
job_type = forms.CharField(
required=False,
widget=forms.HiddenInput())
main_class = forms.CharField(label=_("Main Class"),
required=False)
java_opts = forms.CharField(label=_("Java Opts"),
required=False)
streaming_mapper = forms.CharField(label=_("Mapper"))
streaming_reducer = forms.CharField(label=_("Reducer"))
def __init__(self, request, *args, **kwargs):
super(JobConfigAction, self).__init__(request, *args, **kwargs)
job_ex_id = request.REQUEST.get("job_execution_id")
if job_ex_id is not None:
job_ex_id = request.REQUEST.get("job_execution_id")
job_ex = saharaclient.job_execution_get(request, job_ex_id)
job_configs = job_ex.job_configs
edp_configs = {}
if 'configs' in job_configs:
configs, edp_configs = (
self.clean_edp_configs(job_configs['configs']))
self.fields['job_configs'].initial = (
json.dumps(configs))
if 'params' in job_configs:
self.fields['job_params'].initial = (
json.dumps(job_configs['params']))
job_args = json.dumps(job_configs['args'])
self.fields['job_args_array'].initial = job_args
if self.MAIN_CLASS in edp_configs:
self.fields['main_class'].initial = (
edp_configs[self.MAIN_CLASS])
if self.JAVA_OPTS in edp_configs:
self.fields['java_opts'].initial = (
edp_configs[self.JAVA_OPTS])
if self.EDP_MAPPER in edp_configs:
self.fields['streaming_mapper'].initial = (
edp_configs[self.EDP_MAPPER])
if self.EDP_REDUCER in edp_configs:
self.fields['streaming_reducer'].initial = (
edp_configs[self.EDP_REDUCER])
def clean(self):
cleaned_data = super(workflows.Action, self).clean()
job_type = cleaned_data.get("job_type", None)
if job_type != "MapReduce.Streaming":
if "streaming_mapper" in self._errors:
del self._errors["streaming_mapper"]
if "streaming_reducer" in self._errors:
del self._errors["streaming_reducer"]
return cleaned_data
def populate_property_name_choices(self, request, context):
job_id = request.REQUEST.get("job_id") or request.REQUEST.get("job")
job_type = saharaclient.job_get(request, job_id).type
job_configs = (
saharaclient.job_get_configs(request, job_type).job_config)
choices = [(param['value'], param['name'])
for param in job_configs['configs']]
return choices
def clean_edp_configs(self, configs):
edp_configs = {}
for key, value in configs.iteritems():
if key.startswith(self.EDP_PREFIX):
edp_configs[key] = value
for rmkey in edp_configs.keys():
del configs[rmkey]
return (configs, edp_configs)
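# For example (keys and values illustrative): given
# {'edp.java.main_class': 'Main', 'mapred.reduce.tasks': '2'}
# clean_edp_configs returns ({'mapred.reduce.tasks': '2'},
# {'edp.java.main_class': 'Main'}).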
class Meta:
name = _("Configure")
help_text_template = (
"project/data_processing.jobs/_launch_job_configure_help.html")
class JobExecutionGeneralConfig(workflows.Step):
action_class = JobExecutionGeneralConfigAction
def contribute(self, data, context):
for k, v in data.items():
if k in ["job_input", "job_output"]:
context["job_general_" + k] = None if v == "None" else v
else:
context["job_general_" + k] = v
return context
class JobExecutionExistingGeneralConfig(workflows.Step):
action_class = JobExecutionExistingGeneralConfigAction
def contribute(self, data, context):
for k, v in data.items():
if k in ["job_input", "job_output"]:
context["job_general_" + k] = None if v == "None" else v
else:
context["job_general_" + k] = v
return context
class JobConfig(workflows.Step):
action_class = JobConfigAction
template_name = 'project/data_processing.jobs/config_template.html'
def contribute(self, data, context):
job_config = self.clean_configs(
json.loads(data.get("job_configs", '{}')))
job_params = self.clean_configs(
json.loads(data.get("job_params", '{}')))
job_args_array = self.clean_configs(
json.loads(data.get("job_args_array", '[]')))
job_type = data.get("job_type", '')
context["job_type"] = job_type
context["job_config"] = {"configs": job_config}
context["job_config"]["args"] = job_args_array
if job_type in ["Java", "Spark"]:
context["job_config"]["configs"][JobConfigAction.MAIN_CLASS] = (
data.get("main_class", ""))
context["job_config"]["configs"][JobConfigAction.JAVA_OPTS] = (
data.get("java_opts", ""))
elif job_type == "MapReduce.Streaming":
context["job_config"]["configs"][JobConfigAction.EDP_MAPPER] = (
data.get("streaming_mapper", ""))
context["job_config"]["configs"][JobConfigAction.EDP_REDUCER] = (
data.get("streaming_reducer", ""))
else:
context["job_config"]["params"] = job_params
return context
@staticmethod
def clean_configs(configs):
cleaned_conf = None
if isinstance(configs, dict):
cleaned_conf = dict([(k.strip(), v.strip())
for k, v in configs.items()
if len(v.strip()) > 0 and len(k.strip()) > 0])
elif isinstance(configs, list):
cleaned_conf = list([v.strip() for v in configs
if len(v.strip()) > 0])
return cleaned_conf
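# Illustration: clean_configs drops blank entries and strips whitespace, so
# {' a ': ' 1 ', 'b': ''} -> {'a': '1'} and ['x ', ''] -> ['x'].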
class NewClusterConfigAction(c_flow.GeneralConfigAction):
persist_cluster = forms.BooleanField(
label=_("Persist cluster after job exit"),
required=False)
class Meta:
name = _("Configure Cluster")
help_text_template = (
"project/data_processing.clusters/_configure_general_help.html")
class ClusterGeneralConfig(workflows.Step):
action_class = NewClusterConfigAction
contributes = ("hidden_configure_field", )
def contribute(self, data, context):
for k, v in data.items():
context["cluster_general_" + k] = v
return context
class LaunchJob(workflows.Workflow):
slug = "launch_job"
name = _("Launch Job")
finalize_button_name = _("Launch")
success_message = _("Job launched")
failure_message = _("Could not launch job")
success_url = "horizon:project:data_processing.job_executions:index"
default_steps = (JobExecutionExistingGeneralConfig, JobConfig)
def handle(self, request, context):
saharaclient.job_execution_create(
request,
context["job_general_job"],
context["job_general_cluster"],
context["job_general_job_input"],
context["job_general_job_output"],
context["job_config"])
return True
class SelectHadoopPluginAction(t_flows.SelectPluginAction):
def __init__(self, request, *args, **kwargs):
super(SelectHadoopPluginAction, self).__init__(request,
*args,
**kwargs)
self.fields["job_id"] = forms.ChoiceField(
label=_("Plugin name"),
initial=request.GET.get("job_id") or request.POST.get("job_id"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_configs"] = forms.ChoiceField(
label=_("Job configs"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_args"] = forms.ChoiceField(
label=_("Job args"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
self.fields["job_params"] = forms.ChoiceField(
label=_("Job params"),
widget=forms.HiddenInput(attrs={"class": "hidden_create_field"}))
job_ex_id = request.REQUEST.get("job_execution_id")
if job_ex_id is not None:
self.fields["job_execution_id"] = forms.ChoiceField(
label=_("Job Execution ID"),
initial=request.REQUEST.get("job_execution_id"),
widget=forms.HiddenInput(
attrs={"class": "hidden_create_field"}))
job_ex_id = request.REQUEST.get("job_execution_id")
job_configs = (
saharaclient.job_execution_get(request,
job_ex_id).job_configs)
if "configs" in job_configs:
self.fields["job_configs"].initial = (
json.dumps(job_configs["configs"]))
if "params" in job_configs:
self.fields["job_params"].initial = (
json.dumps(job_configs["params"]))
if "args" in job_configs:
self.fields["job_args"].initial = (
json.dumps(job_configs["args"]))
class Meta:
name = _("Select plugin and hadoop version for cluster")
help_text_template = ("project/data_processing.clusters/"
"_create_general_help.html")
class SelectHadoopPlugin(workflows.Step):
action_class = SelectHadoopPluginAction
class ChosePluginVersion(workflows.Workflow):
slug = "lunch_job"
name = _("Launch Job")
finalize_button_name = _("Create")
success_message = _("Created")
failure_message = _("Could not create")
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (SelectHadoopPlugin,)
class LaunchJobNewCluster(workflows.Workflow):
slug = "launch_job"
name = _("Launch Job")
finalize_button_name = _("Launch")
success_message = _("Job launched")
failure_message = _("Could not launch job")
success_url = "horizon:project:data_processing.jobs:index"
default_steps = (ClusterGeneralConfig,
JobExecutionGeneralConfig,
JobConfig)
def handle(self, request, context):
node_groups = None
plugin, hadoop_version = (
whelpers.get_plugin_and_hadoop_version(request))
ct_id = context["cluster_general_cluster_template"] or None
user_keypair = context["cluster_general_keypair"] or None
try:
cluster = saharaclient.cluster_create(
request,
context["cluster_general_cluster_name"],
plugin, hadoop_version,
cluster_template_id=ct_id,
default_image_id=context["cluster_general_image"],
description=context["cluster_general_description"],
node_groups=node_groups,
user_keypair_id=user_keypair,
is_transient=not(context["cluster_general_persist_cluster"]),
net_id=context.get(
"cluster_general_neutron_management_network",
None))
except Exception:
exceptions.handle(request,
_("Unable to create new cluster for job."))
return False
try:
saharaclient.job_execution_create(
request,
context["job_general_job"],
cluster.id,
context["job_general_job_input"],
context["job_general_job_output"],
context["job_config"])
except Exception:
exceptions.handle(request,
_("Unable to launch job."))
return False
return True
| apache-2.0 |
stregoika/aislib | geodjango/ais_www/define_ais/models.py | 1 | 3795 | from django.db import models
class Dac(models.Model):
country = models.CharField(max_length=60)
code = models.PositiveIntegerField()
def __str__(self):
return str(self.code) + ' - ' + self.country
class Admin:
list_display = ('country','code')
ordering = ( 'code', )
# pass
class Type(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
class Admin:
pass
class Unit(models.Model):
name = models.CharField(max_length=10)
shortdescription = models.CharField(max_length=30)
description = models.TextField()
refURL = models.URLField()
def __str__(self):
name = self.name
if len(name)==0: name = 'None'
return name + ' - ' + self.shortdescription
class Admin:
pass
class Aismsg(models.Model):
name = models.CharField(max_length=20,help_text='SQL compatible name [a-zA-Z][a-zA-Z0-9_]*')
msgnum = models.PositiveIntegerField(help_text='6 for address; 8 for broadcast; or ...')
#dac = models.PositiveIntegerField(help_text='Country code')
dac = models.ForeignKey(Dac,help_text='Country code')
fi = models.PositiveIntegerField(help_text='Functional Identifier number in 1..63 for a particular DAC')
description = models.TextField(help_text='First sentence should be stand alone, short description. Then detailed field description')
# Recommendations
    #transmit_rec = models.XMLField(help_text='Description of how frequently to transmit and when to drop from the queue. How long to archive. How long to fall off the earth')
    transmit_rec = models.TextField(help_text='Description of how frequently to transmit and when to drop from the queue. How long to archive. How long to fall off the earth')
    #display_rec = models.XMLField(help_text='Similar to S100. How to display')
    #receive_rec How to handle received messages. Drop, keep, update, timeout etc.
    display_rec = models.TextField(help_text='Similar to S100. How to display')
note = models.TextField(help_text='Additional notes that are not as important as the description')
def __str__(self):
return self.name + ' ('+str(self.dac)+':'+str(self.fi)+')'
#return self.name + ' ( FIX: dac:'+str(self.fi)+')'
class Admin:
pass
class Field(models.Model):
aismsg = models.ForeignKey(Aismsg)
order = models.PositiveIntegerField(help_text='Where in the message should this field sit?')
name = models.CharField(max_length=20,help_text='SQL/XML compatible name. [a-zA-Z][a-zA-Z0-9_]*')
description = models.TextField(help_text='First line should stand alone')
numberofbits = models.PositiveIntegerField(help_text='total number of bits or if an array it is the size of each element (e.g. 6 for aisstr6)')
arraylength = models.PositiveIntegerField(help_text='Number of elements (e.g. string length for an aisstr6)')
type = models.ManyToManyField(Type,help_text='Type of each element')
unavailable = models.CharField(max_length=120,help_text='Value to use if the field value is unknown or unavailable')
units = models.ForeignKey(Unit,help_text='Remember that the display system can localize units')
#note = models.TextField(help_text='Additional notes that are not as important as the description')
#decimal_places = models.PositiveIntField(null=True,help_text='How many decimal places. Leave blank if necessary')
# decimalplaces
# scale
# range min and range max
# note
def __str__(self):
s = str(self.aismsg.dac.code)
s += ':' + str(self.aismsg.fi)
s += ' ' + str(self.aismsg.name)
s += ' - '
s += self.name
return s
class Admin:
pass
| gpl-3.0 |
viveksh13/gymkhana | venv/bin/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/__init__.py | 1778 | 1295 | ######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
(version_info >= (3, 0) and not isinstance(aBuf, bytes))):
raise ValueError('Expected a bytes object, not a unicode object')
from . import universaldetector
u = universaldetector.UniversalDetector()
u.reset()
u.feed(aBuf)
u.close()
return u.result
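# Illustrative use (assuming this package is importable as `chardet`):
#   result = detect(open('page.html', 'rb').read())
#   # -> a dict such as {'encoding': 'utf-8', 'confidence': 0.99}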
| apache-2.0 |
corvorepack/REPOULTRA | plugin.video.movie.ultra.7k/resources/lib/wiz.py | 2 | 1230 | # -*- coding: utf-8 -*-
#--------------------------------------------------------
# Unpack para PalcoTV
# Version 0.0.4 (29.11.2014)
#--------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#--------------------------------------------------------
from __main__ import *
import re  # __unpack() uses re.sub; import it explicitly rather than rely on the star import
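# unpack() below reverses JavaScript obfuscated with a Dean Edwards-style
# "p,a,c,k,e,d" packer: the exec() trick extracts the payload (p1), base (a1),
# word count (c1) and keyword table (k1) from the packed source, and
# __unpack() then substitutes every short identifier back with its keyword.
# (Description inferred from the code; added for clarity.)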
def unpack(sJavascript, iteration=1, totaliterations=1):
aSplit = sJavascript.split("rn p}('")
p1,a1,c1,k1=('','0','0','')
ss="p1,a1,c1,k1=(\'"+aSplit[1].split(".spli")[0]+')';exec(ss)
k1=k1.split('|')
aSplit = aSplit[1].split("))'")
    e = ''
    d = ''
sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
if iteration>=totaliterations: return sUnpacked1
else: return unpack(sUnpacked1,iteration+1)
def __unpack(p, a, c, k, e, d, iteration,v=1):
while (c >= 1):
c = c -1
if (k[c]):
aa=str(__itoaNew(c, a))
p=re.sub('\\b' + aa +'\\b', k[c], p)
return p
def __itoa(num, radix):
result = ""
if num==0: return '0'
    while num > 0:
        result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result
        num /= radix  # Python 2 integer division
return result
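# Worked example (follows from the code above): __itoa(255, 16) -> 'ff'.
# __itoaNew() yields the packer's short identifiers -- base-`a` digits and
# letters for values up to 35, falling back to chr(cc + 29) above that.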
def __itoaNew(cc, a):
aa="" if cc < a else __itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(__itoa(cc,36))
return aa+bb | gpl-2.0 |
kamalx/edx-platform | lms/djangoapps/instructor/tests/test_access.py | 46 | 6280 | """
Test instructor.access
"""
from nose.tools import raises
from nose.plugins.attrib import attr
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from student.roles import CourseBetaTesterRole, CourseStaffRole
from django_comment_common.models import (Role,
FORUM_ROLE_MODERATOR)
from instructor.access import (allow_access,
revoke_access,
list_with_level,
update_forum_role)
@attr('shard_1')
class TestInstructorAccessList(ModuleStoreTestCase):
""" Test access listings. """
def setUp(self):
super(TestInstructorAccessList, self).setUp()
self.course = CourseFactory.create()
self.instructors = [UserFactory.create() for _ in xrange(4)]
for user in self.instructors:
allow_access(self.course, user, 'instructor')
self.beta_testers = [UserFactory.create() for _ in xrange(4)]
for user in self.beta_testers:
allow_access(self.course, user, 'beta')
def test_list_instructors(self):
instructors = list_with_level(self.course, 'instructor')
self.assertEqual(set(instructors), set(self.instructors))
def test_list_beta(self):
beta_testers = list_with_level(self.course, 'beta')
self.assertEqual(set(beta_testers), set(self.beta_testers))
@attr('shard_1')
class TestInstructorAccessAllow(ModuleStoreTestCase):
""" Test access allow. """
def setUp(self):
super(TestInstructorAccessAllow, self).setUp()
self.course = CourseFactory.create()
def test_allow(self):
user = UserFactory()
allow_access(self.course, user, 'staff')
self.assertTrue(CourseStaffRole(self.course.id).has_user(user))
def test_allow_twice(self):
user = UserFactory()
allow_access(self.course, user, 'staff')
allow_access(self.course, user, 'staff')
self.assertTrue(CourseStaffRole(self.course.id).has_user(user))
def test_allow_beta(self):
""" Test allow beta against list beta. """
user = UserFactory()
allow_access(self.course, user, 'beta')
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(user))
@raises(ValueError)
def test_allow_badlevel(self):
user = UserFactory()
allow_access(self.course, user, 'robot-not-a-level')
@raises(Exception)
def test_allow_noneuser(self):
user = None
allow_access(self.course, user, 'staff')
@attr('shard_1')
class TestInstructorAccessRevoke(ModuleStoreTestCase):
""" Test access revoke. """
def setUp(self):
super(TestInstructorAccessRevoke, self).setUp()
self.course = CourseFactory.create()
self.staff = [UserFactory.create() for _ in xrange(4)]
for user in self.staff:
allow_access(self.course, user, 'staff')
self.beta_testers = [UserFactory.create() for _ in xrange(4)]
for user in self.beta_testers:
allow_access(self.course, user, 'beta')
def test_revoke(self):
user = self.staff[0]
revoke_access(self.course, user, 'staff')
self.assertFalse(CourseStaffRole(self.course.id).has_user(user))
def test_revoke_twice(self):
user = self.staff[0]
revoke_access(self.course, user, 'staff')
self.assertFalse(CourseStaffRole(self.course.id).has_user(user))
def test_revoke_beta(self):
user = self.beta_testers[0]
revoke_access(self.course, user, 'beta')
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(user))
@raises(ValueError)
def test_revoke_badrolename(self):
user = UserFactory()
revoke_access(self.course, user, 'robot-not-a-level')
@attr('shard_1')
class TestInstructorAccessForum(ModuleStoreTestCase):
"""
Test forum access control.
"""
def setUp(self):
super(TestInstructorAccessForum, self).setUp()
self.course = CourseFactory.create()
self.mod_role = Role.objects.create(
course_id=self.course.id,
name=FORUM_ROLE_MODERATOR
)
self.moderators = [UserFactory.create() for _ in xrange(4)]
for user in self.moderators:
self.mod_role.users.add(user)
def test_allow(self):
user = UserFactory.create()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow')
self.assertIn(user, self.mod_role.users.all())
def test_allow_twice(self):
user = UserFactory.create()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow')
self.assertIn(user, self.mod_role.users.all())
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'allow')
self.assertIn(user, self.mod_role.users.all())
@raises(Role.DoesNotExist)
def test_allow_badrole(self):
user = UserFactory.create()
update_forum_role(self.course.id, user, 'robot-not-a-real-role', 'allow')
def test_revoke(self):
user = self.moderators[0]
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
def test_revoke_twice(self):
user = self.moderators[0]
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
def test_revoke_notallowed(self):
user = UserFactory()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'revoke')
self.assertNotIn(user, self.mod_role.users.all())
@raises(Role.DoesNotExist)
def test_revoke_badrole(self):
user = self.moderators[0]
        update_forum_role(self.course.id, user, 'robot-not-a-real-role', 'revoke')
@raises(ValueError)
def test_bad_mode(self):
user = UserFactory()
update_forum_role(self.course.id, user, FORUM_ROLE_MODERATOR, 'robot-not-a-mode')
| agpl-3.0 |
harry-7/addons-server | src/olympia/amo/tests/test_messages.py | 10 | 3073 | # -*- coding: utf-8 -*-
import django.contrib.messages as django_messages
from django.contrib.messages.storage import default_storage
from django.http import HttpRequest
from django.template import loader
from django.utils.translation import ugettext
import pytest
from olympia.amo.messages import _make_message, info
pytestmark = pytest.mark.django_db
def test_xss():
title = "<script>alert(1)</script>"
message = "<script>alert(2)</script>"
r = _make_message(title)
assert "<script>alert(1)</script>" in r
r = _make_message(None, message)
assert "<script>alert(2)</script>" in r
r = _make_message(title, title_safe=True)
assert "<script>alert(1)</script>" in r
r = _make_message(None, message, message_safe=True)
assert "<script>alert(2)</script>" in r
# Make sure safe flags are independent
r = _make_message(title, message_safe=True)
assert "<script>alert(1)</script>" in r
r = _make_message(None, message, title_safe=True)
assert "<script>alert(2)</script>" in r
def test_no_dupes():
"""Test that duplicate messages aren't saved."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
info(request, 'Title', 'Body')
info(request, 'Title', 'Body')
info(request, 'Another Title', 'Another Body')
storage = django_messages.get_messages(request)
assert len(storage) == 2, 'Too few or too many messages recorded.'
def test_l10n_dups():
"""Test that L10n values are preserved."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
info(request, ugettext('Title'), ugettext('Body'))
info(request, ugettext('Title'), ugettext('Body'))
info(request, ugettext('Another Title'), ugettext('Another Body'))
storage = django_messages.get_messages(request)
assert len(storage) == 2, 'Too few or too many messages recorded.'
def test_unicode_dups():
"""Test that unicode values are preserved."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
info(request, u'Titlé', u'Body')
info(request, u'Titlé', u'Body')
info(request, u'Another Titlé', u'Another Body')
storage = django_messages.get_messages(request)
assert len(storage) == 2, 'Too few or too many messages recorded.'
def test_html_rendered_properly():
"""Html markup is properly displayed in final template."""
request = HttpRequest()
setattr(request, '_messages', default_storage(request))
# This will call _file_message, which in turn calls _make_message, which in
# turn renders the message_content.html template, which adds html markup.
# We want to make sure this markup reaches the final rendering unescaped.
info(request, 'Title', 'Body')
messages = django_messages.get_messages(request)
template = loader.get_template('messages.html')
html = template.render({'messages': messages})
assert "<h2>" in html # The html from _make_message is not escaped.
| bsd-3-clause |
maralla/vim-fixup | tests/test_rst.py | 3 | 1445 | import json
from lints.rst import RST2PseudoXMLLint
def test_rst2pseudoxml_severe_dot_regex():
msg = [
'Some Document.rst:355: (SEVERE/4) Unexpected section title or transition.', # noqa
]
res = RST2PseudoXMLLint().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "355",
"bufnr": 1,
"col": -1,
"enum": 1,
"text": u'[rst2pseudoxml.py]Unexpected section title or transition.',
"type": "E",
"error": "SEVERE/4",
"warning": None,
}
def test_rst2pseudoxml_severe_colon_regex():
msg = [
'Another.rst:123: (SEVERE/4) Unexpected section title or transition:',
]
res = RST2PseudoXMLLint().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "123",
"col": -1,
"bufnr": 1,
"enum": 1,
"text": u'[rst2pseudoxml.py]Unexpected section title or transition:',
"type": "E",
"error": "SEVERE/4",
"warning": None,
}
def test_rst2pseudoxml_warning_regex():
msg = [
'Inte.rst:251: (WARNING/2) Title level inconsistent.'
]
res = RST2PseudoXMLLint().parse_loclist(msg, 1)
assert json.loads(res)[0] == {
"lnum": "251",
"bufnr": 1,
"col": -1,
"enum": 1,
"text": u'[rst2pseudoxml.py]Title level inconsistent.',
"type": "W",
"error": None,
"warning": "WARNING/2",
}
| mit |
Pavaka/Pygorithms | tests/knapsack_problem_DP_test.py | 1 | 1103 | import unittest
import sys
import os
path = os.path.abspath("../pygorithms")
sys.path.append(path)
import knapsack_problem_DP as KPDP
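# Note inferred from the expected answers below: items are (value, weight)
# pairs, e.g. with capacity 6 the optimal choice in the first test is
# (4, 2) + (4, 3) -> total value 8 at total weight 5.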
class TestKnapsackDPoptimality(unittest.TestCase):
def test_knapsack_optimal_solution_exmp1(self):
items = [(3, 4), (2, 3), (4, 2), (4, 3)]
capacity = 6
answer = KPDP.knapsack_problem_DP(items, capacity)
self.assertEqual(answer, 8)
def test_knapsack_optimal_solution_exmp2(self):
items = [(1, 1), (6, 2), (18, 5), (22, 6), (28, 7)]
capacity = 11
answer = KPDP.knapsack_problem_DP(items, capacity)
self.assertEqual(answer, 40)
def test_knapsack_optimal_solution_exmp3(self):
items = [(1, 1), (6, 2), (18, 5), (22, 6), (28, 7)]
capacity = 0
answer = KPDP.knapsack_problem_DP(items, capacity)
self.assertEqual(answer, 0)
def test_knapsack_optimal_solution_exmp4(self):
items = []
capacity = 15
answer = KPDP.knapsack_problem_DP(items, capacity)
self.assertEqual(answer, 0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
jmackie4/activityio | activityio/srm/_reading.py | 1 | 7630 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from contextlib import contextmanager
from datetime import datetime, timedelta
from itertools import accumulate
from math import nan
from struct import unpack, calcsize
from activityio._types import ActivityData, special_columns
from activityio._util import drydoc, exceptions
DATETIME_1880 = datetime(year=1880, month=1, day=1)
COLUMN_SPEC = {
'alt': special_columns.Altitude,
'cad': special_columns.Cadence,
'hr': special_columns.HeartRate,
'kph': special_columns.Speed._from_kph,
'lap': special_columns.LapCounter,
'lat': special_columns.Latitude,
'lon': special_columns.Longitude,
'metres': special_columns.Distance._from_discrete,
'temp': special_columns.Temperature,
'watts': special_columns.Power,
}
class SRMHeader:
__slots__ = ('days_since_1880', 'wheel_circum', 'recording_interval',
'block_count', 'marker_count', 'comment_len', '_comment')
def __init__(self, srmfile):
fmt = '<2H2B2HxB' + '70s'
raw = srmfile.read(calcsize(fmt))
values = list(unpack(fmt, raw)) # need a list for pop()
values[2] /= values.pop(3) # recording_interval (seconds; 1 / Hz)
for name, value in zip(self.__slots__, values):
setattr(self, name, value)
@property
def comment(self):
return self._comment.decode('utf-8', 'replace').rstrip('\x00')
@property
def date(self):
# Training date (days since Jan 1, 1880)
return DATETIME_1880 + timedelta(days=self.days_since_1880)
class SRMMarker:
__slots__ = ('_comment', 'active', 'start', 'end', 'average_watts',
'average_hr', 'average_cadence', 'average_speed', 'pwc150')
def __init__(self, srmfile):
fmt = self.fmt(srmfile.version)
raw = srmfile.read(calcsize(fmt))
values = unpack(fmt, raw)
for name, value in zip(self.__slots__, values):
setattr(self, name, value)
self._fixup()
@staticmethod
def fmt(version):
comment_len = 3 if version < 6 else 255
fmt = '<%ds' % comment_len
fmt += 'B7H' if version < 9 else 'B2L5H'
return fmt
@property
def comment(self):
return self._comment.decode('utf-8', 'replace').rstrip('\x00')
def _fixup(self):
# Make sure markers are consistently one-indexed,
# then zero-index them.
self.start = max(self.start, 1) - 1
self.end = max(self.end, 1) - 1
# Some srmwin versions wrote markers with start > end.
self.start, self.end = sorted([self.start, self.end])
class SRMSummaryMarker(SRMMarker):
"""SRM Files always contain at least one marker that encompasses
the entire file."""
class SRMBlock:
__slots__ = ('sec_since_midnight', 'chunk_count', 'end')
def __init__(self, srmfile):
fmt = self.fmt(srmfile.version)
raw = srmfile.read(calcsize(fmt))
hsec_since_midnight, self.chunk_count = unpack(fmt, raw)
# hsec --> sec.
self.sec_since_midnight = timedelta(seconds=hsec_since_midnight / 100)
self.end = None # set later
@staticmethod
def fmt(version):
return '<L' + ('H' if version < 9 else 'L')
class SRMCalibrationData:
__slots__ = ('zero', 'slope', '_data_count')
def __init__(self, srmfile):
self.zero, self.slope = unpack('<2H', srmfile.read(4))
# We'll also consume the data count here, as it's safer
# to use the sum of block chunk counts.
fmt = '<%sx' % ('H' if srmfile.version < 9 else 'L')
self._data_count, = unpack(fmt, srmfile.read(calcsize(fmt)))
class SRMPreamble:
__slots__ = ('header', 'summary_marker', 'markers', 'blocks',
'calibration', 'data_count')
def __init__(self, srmfile):
self.header = SRMHeader(srmfile)
self.summary_marker = SRMSummaryMarker(srmfile)
self.markers = [SRMMarker(srmfile)
for _ in range(self.header.marker_count)]
blocks = [SRMBlock(srmfile)
for _ in range(self.header.block_count)]
block_ends = accumulate(block.chunk_count for block in blocks)
for block, end in zip(blocks, block_ends):
setattr(block, 'end', end)
self.blocks = blocks
self.calibration = SRMCalibrationData(srmfile)
self.data_count = sum(block.chunk_count for block in blocks)
class SRMChunk:
__slots__ = ('watts', 'cad', 'hr', 'kph', 'alt', 'temp',
'metres', 'lat', 'lon') # variable
def __init__(self, srmfile, recording_interval):
self.metres = nan
self.lat, self.lon = nan, nan
if srmfile.version < 7:
self.watts, self.kph = self.compact_power_speed(srmfile)
self.cad, self.hr = unpack('<BB', srmfile.read(2))
self.alt, self.temp = nan, nan
else:
values = unpack('<HBBllh', srmfile.read(14))
for name, value in zip(self.__slots__, values):
setattr(self, name, value)
if srmfile.version == 9:
latlon = unpack('<ll', srmfile.read(8))
self.lat, self.lon = (l * 180 / 0x7fffffff for l in latlon)
self.temp *= 0.1
self.kph = 0 if (self.kph < 0) else self.kph * 3.6 / 1000
self.metres = recording_interval * self.kph / 3.6
@staticmethod
def compact_power_speed(srmfile):
pwr_spd = unpack('<3B', srmfile.read(3))
        # Power and speed are bit-packed across the three bytes: byte 2 plus
        # the low nibble of byte 1 form the power; byte 0 (7 bits) plus the
        # high nibble of byte 1 form the raw speed, scaled by 3/26 to km/h.
watts = (pwr_spd[1] & 0x0f) | (pwr_spd[2] << 0x4)
kph = ((pwr_spd[1] & 0xf0) << 3 | (pwr_spd[0] & 0x7f)) * 3 / 26
return watts, kph
def __iter__(self):
for name in self.__slots__:
yield name, getattr(self, name)
@contextmanager
def open_srm(file_path):
    reader = open(file_path, 'rb')
    try:
        magic = reader.read(4).decode('utf-8')
        if magic[:3] != 'SRM':
            raise exceptions.InvalidFileError('srm')
        reader.version = int(magic[-1])
        yield reader
    finally:
        reader.close()  # close even if the caller raises inside the with-block
@drydoc.gen_records
def gen_records(file_path):
with open_srm(file_path) as srmfile:
preamble = SRMPreamble(srmfile)
header = preamble.header
markers, blocks = preamble.markers, preamble.blocks
        markers.reverse()  # reverse both lists so the current
        blocks.reverse()   # items can be pop()ed off the end
try: # there may only be a summary marker
current_marker = markers.pop()
except IndexError:
pass
current_block = blocks.pop()
timestamp = header.date + current_block.sec_since_midnight
rec_int = header.recording_interval
rec_int_td = timedelta(seconds=rec_int)
lap = 1
for i in range(preamble.data_count):
chunk = dict(SRMChunk(srmfile, rec_int))
if i == current_block.end:
current_block = blocks.pop()
timestamp = header.date + current_block.sec_since_midnight
else:
timestamp += rec_int_td
if markers and i == current_marker.end: # short-circuiting
lap += 1
current_marker = markers.pop()
chunk.update(timestamp=timestamp, lap=lap)
yield chunk
def read_and_format(file_path):
data = ActivityData.from_records(gen_records(file_path))
timestamps = data.pop('timestamp')
timeoffsets = timestamps - timestamps[0]
data._finish_up(column_spec=COLUMN_SPEC,
start=timestamps[0],
timeoffsets=timeoffsets)
return data
| mit |
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/django/core/files/locks.py | 725 | 3516 | """
Portable file locking utilities.
Based partially on an example by Jonathan Feignberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
... f.write('Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
if os.name == 'nt':
import msvcrt
from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
Structure, Union, POINTER, windll, byref)
from ctypes.wintypes import BOOL, DWORD, HANDLE
LOCK_SH = 0 # the default
LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY
LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK
# --- Adapted from the pyserial project ---
# detect size of ULONG_PTR
if sizeof(c_ulong) != sizeof(c_void_p):
ULONG_PTR = c_int64
else:
ULONG_PTR = c_ulong
PVOID = c_void_p
# --- Union inside Structure by stackoverflow:3480240 ---
class _OFFSET(Structure):
_fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD)]
class _OFFSET_UNION(Union):
_anonymous_ = ['_offset']
_fields_ = [
('_offset', _OFFSET),
('Pointer', PVOID)]
class OVERLAPPED(Structure):
_anonymous_ = ['_offset_union']
_fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_offset_union', _OFFSET_UNION),
('hEvent', HANDLE)]
LPOVERLAPPED = POINTER(OVERLAPPED)
# --- Define function prototypes for extra safety ---
LockFileEx = windll.kernel32.LockFileEx
LockFileEx.restype = BOOL
LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
UnlockFileEx = windll.kernel32.UnlockFileEx
UnlockFileEx.restype = BOOL
UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]
def lock(f, flags):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
def unlock(f):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
else:
try:
import fcntl
LOCK_SH = fcntl.LOCK_SH # shared lock
LOCK_NB = fcntl.LOCK_NB # non-blocking
LOCK_EX = fcntl.LOCK_EX
except (ImportError, AttributeError):
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = 0
# Dummy functions that don't do anything.
def lock(f, flags):
# File is not locked
return False
def unlock(f):
# File is unlocked
return True
else:
def lock(f, flags):
ret = fcntl.flock(_fd(f), flags)
return (ret == 0)
def unlock(f):
ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
return (ret == 0)
| apache-2.0 |
chronicwaffle/PokemonGo-DesktopMap | app/pywin/Lib/compileall.py | 144 | 7763 | """Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, it compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
import struct
import imp
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
force=0, rx=None, quiet=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
quiet: if 1, be quiet during compilation
"""
if not quiet:
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
if not compile_file(fullname, ddir, force, rx, quiet):
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
quiet):
success = 0
return success
def compile_file(fullname, ddir=None, force=0, rx=None, quiet=0):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
quiet: if 1, be quiet during compilation
"""
success = 1
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', imp.get_magic(), mtime)
cfile = fullname + (__debug__ and 'c' or 'o')
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except IOError:
pass
if not quiet:
print 'Compiling', fullname, '...'
try:
ok = py_compile.compile(fullname, None, dfile, True)
except py_compile.PyCompileError,err:
if quiet:
print 'Compiling', fullname, '...'
print err.msg
success = 0
except IOError, e:
print "Sorry", e
success = 0
else:
if ok == 0:
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
quiet: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet)
return success
def expand_args(args, flist):
"""read names in flist and append to args"""
expanded = args[:]
if flist:
try:
if flist == '-':
fd = sys.stdin
else:
fd = open(flist)
while 1:
line = fd.readline()
if not line:
break
expanded.append(line[:-1])
except IOError:
print "Error reading file list %s" % flist
raise
return expanded
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:i:')
except getopt.error, msg:
print msg
print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
"[-x regexp] [-i list] [directory|file ...]"
print
print "arguments: zero or more file and directory names to compile; " \
"if no arguments given, "
print " defaults to the equivalent of -l sys.path"
print
print "options:"
print "-l: don't recurse into subdirectories"
print "-f: force rebuild even if timestamps are up-to-date"
print "-q: output only error messages"
print "-d destdir: directory to prepend to file paths for use in " \
"compile-time tracebacks and in"
print " runtime tracebacks in cases where the source " \
"file is unavailable"
print "-x regexp: skip files matching the regular expression regexp; " \
"the regexp is searched for"
print " in the full path of each file considered for " \
"compilation"
print "-i file: add all the files and directories listed in file to " \
"the list considered for"
print ' compilation; if "-", names are read from stdin'
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
quiet = 0
rx = None
flist = None
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if o == '-q': quiet = 1
if o == '-x':
import re
rx = re.compile(a)
if o == '-i': flist = a
if ddir:
if len(args) != 1 and not os.path.isdir(args[0]):
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args or flist:
try:
if flist:
args = expand_args(args, flist)
except IOError:
success = 0
if success:
for arg in args:
if os.path.isdir(arg):
if not compile_dir(arg, maxlevels, ddir,
force, rx, quiet):
success = 0
else:
if not compile_file(arg, ddir, force, rx, quiet):
success = 0
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupted]"
success = 0
return success
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
| mit |
ltilve/ChromiumGStreamerBackend | tools/memory_inspector/memory_inspector/backends/android_backend.py | 48 | 18803 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android-specific implementation of the core backend interfaces.
See core/backends.py for more docs.
"""
import datetime
import glob
import hashlib
import json
import os
import posixpath
import tempfile
import time
from memory_inspector import constants
from memory_inspector.backends import adb_client
from memory_inspector.backends import memdump_parser
from memory_inspector.backends import native_heap_dump_parser
from memory_inspector.backends import prebuilts_fetcher
from memory_inspector.core import backends
from memory_inspector.core import exceptions
from memory_inspector.core import native_heap
from memory_inspector.core import symbol
_SUPPORTED_32BIT_ABIS = {'armeabi': 'arm', 'armeabi-v7a': 'arm', 'x86': 'x86'}
_SUPPORTED_64BIT_ABIS = {'arm64-v8a': 'arm64', 'x86_64': 'x86_64'}
_MEMDUMP_PREBUILT_PATH = os.path.join(constants.PREBUILTS_PATH,
'memdump-android-%(arch)s')
_MEMDUMP_PATH_ON_DEVICE = '/data/local/tmp/memdump'
_PSEXT_PREBUILT_PATH = os.path.join(constants.PREBUILTS_PATH,
'ps_ext-android-%(arch)s')
_PSEXT_PATH_ON_DEVICE = '/data/local/tmp/ps_ext'
_HEAP_DUMP_PREBUILT_PATH = os.path.join(constants.PREBUILTS_PATH,
'heap_dump-android-%(arch)s')
_HEAP_DUMP_PATH_ON_DEVICE = '/data/local/tmp/heap_dump'
_LIBHEAPPROF_PREBUILT_PATH = os.path.join(constants.PREBUILTS_PATH,
'libheap_profiler-android-%(arch)s')
_LIBHEAPPROF_FILE_NAME = 'libheap_profiler.so'
class AndroidBackend(backends.Backend):
"""Android-specific implementation of the core |Backend| interface."""
_SETTINGS_KEYS = {
'toolchain_path': 'Path of toolchain (for addr2line)'}
def __init__(self):
super(AndroidBackend, self).__init__(
settings=backends.Settings(AndroidBackend._SETTINGS_KEYS))
self._devices = {} # 'device id' -> |Device|.
def EnumerateDevices(self):
for adb_device in adb_client.ListDevices():
device = self._devices.get(adb_device.serial)
if not device:
device = AndroidDevice(self, adb_device)
self._devices[adb_device.serial] = device
yield device
def ExtractSymbols(self, native_heaps, sym_paths):
"""Performs symbolization. Returns a |symbol.Symbols| from |NativeHeap|s.
This method performs the symbolization but does NOT decorate (i.e. add
symbol/source info) to the stack frames of |native_heaps|. The heaps
can be decorated as needed using the native_heap.SymbolizeUsingSymbolDB()
method. Rationale: the most common use case in this application is:
symbolize-and-store-symbols and load-symbols-and-decorate-heaps (in two
different stages at two different times).
Args:
native_heaps: a collection of native_heap.NativeHeap instances.
sym_paths: either a list of or a string of semicolon-sep. symbol paths.
"""
assert(all(isinstance(x, native_heap.NativeHeap) for x in native_heaps))
symbols = symbol.Symbols()
# Find addr2line in toolchain_path.
if isinstance(sym_paths, basestring):
sym_paths = sym_paths.split(';')
matches = glob.glob(os.path.join(self.settings['toolchain_path'],
'*addr2line'))
if not matches:
raise exceptions.MemoryInspectorException('Cannot find addr2line')
addr2line_path = matches[0]
# First group all the stack frames together by lib path.
frames_by_lib = {}
for nheap in native_heaps:
for stack_frame in nheap.stack_frames.itervalues():
frames = frames_by_lib.setdefault(stack_frame.exec_file_rel_path, set())
frames.add(stack_frame)
    # The symbolization process is asynchronous (yet single-threaded). This
    # callback is invoked every time the symbol info for a stack frame is ready.
def SymbolizeAsyncCallback(sym_info, stack_frame):
if not sym_info.name:
return
sym = symbol.Symbol(name=sym_info.name,
source_file_path=sym_info.source_path,
line_number=sym_info.source_line)
symbols.Add(stack_frame.exec_file_rel_path, stack_frame.offset, sym)
# TODO(primiano): support inline sym info (i.e. |sym_info.inlined_by|).
# Perform the actual symbolization (ordered by lib).
for exec_file_rel_path, frames in frames_by_lib.iteritems():
# Look up the full path of the symbol in the sym paths.
exec_file_name = posixpath.basename(exec_file_rel_path)
if exec_file_rel_path.startswith('/'):
exec_file_rel_path = exec_file_rel_path[1:]
if not exec_file_rel_path:
continue
exec_file_abs_path = ''
for sym_path in sym_paths:
# First try to locate the symbol file following the full relative path
# e.g. /host/syms/ + /system/lib/foo.so => /host/syms/system/lib/foo.so.
exec_file_abs_path = os.path.join(sym_path, exec_file_rel_path)
if os.path.exists(exec_file_abs_path):
break
# If no luck, try looking just for the file name in the sym path,
# e.g. /host/syms/ + (/system/lib/)foo.so => /host/syms/foo.so.
exec_file_abs_path = os.path.join(sym_path, exec_file_name)
if os.path.exists(exec_file_abs_path):
break
# In the case of a Chrome component=shared_library build, the libs are
# renamed to .cr.so. Look for foo.so => foo.cr.so.
exec_file_abs_path = os.path.join(
sym_path, exec_file_name.replace('.so', '.cr.so'))
if os.path.exists(exec_file_abs_path):
break
if not os.path.isfile(exec_file_abs_path):
continue
# The memory_inspector/__init__ module will add the /src/build/android
# deps to the PYTHONPATH for pylib.
from pylib.symbols import elf_symbolizer
symbolizer = elf_symbolizer.ELFSymbolizer(
elf_file_path=exec_file_abs_path,
addr2line_path=addr2line_path,
callback=SymbolizeAsyncCallback,
inlines=False)
# Kick off the symbolizer and then wait that all callbacks are issued.
for stack_frame in sorted(frames, key=lambda x: x.offset):
symbolizer.SymbolizeAsync(stack_frame.offset, stack_frame)
symbolizer.Join()
return symbols
@property
def name(self):
return 'Android'
class AndroidDevice(backends.Device):
"""Android-specific implementation of the core |Device| interface."""
_SETTINGS_KEYS = {
'native_symbol_paths': 'Semicolon-sep. list of native libs search path'}
def __init__(self, backend, adb):
super(AndroidDevice, self).__init__(
backend=backend,
settings=backends.Settings(AndroidDevice._SETTINGS_KEYS))
self.adb = adb
self._name = '%s %s' % (adb.GetProp('ro.product.model', cached=True),
adb.GetProp('ro.build.id', cached=True))
self._id = adb.serial
self._sys_stats = None
self._last_device_stats = None
self._sys_stats_last_update = None
self._processes = {} # pid (int) -> |Process|
self._initialized = False
# Determine the available ABIs, |_arch| will contain the primary ABI.
# TODO(primiano): For the moment we support only one ABI per device (i.e. we
# assume that all processes are 64 bit on 64 bit device, failing to profile
# 32 bit ones). Dealing properly with multi-ABIs requires work on ps_ext and
# at the moment is not an interesting use case.
self._arch = None
self._arch32 = None
self._arch64 = None
abi = adb.GetProp('ro.product.cpu.abi', cached=True)
if abi in _SUPPORTED_64BIT_ABIS:
self._arch = self._arch64 = _SUPPORTED_64BIT_ABIS[abi]
elif abi in _SUPPORTED_32BIT_ABIS:
self._arch = self._arch32 = _SUPPORTED_32BIT_ABIS[abi]
else:
raise exceptions.MemoryInspectorException('ABI %s not supported' % abi)
def Initialize(self):
"""Starts adb root and deploys the prebuilt binaries on initialization."""
try:
self.adb.RestartShellAsRoot()
self.adb.WaitForDevice()
except adb_client.ADBClientError:
raise exceptions.MemoryInspectorException(
'The device must be adb root-able in order to use memory_inspector')
# Download (from GCS) and deploy prebuilt helper binaries on the device.
self._DeployPrebuiltOnDeviceIfNeeded(
_MEMDUMP_PREBUILT_PATH % {'arch': self._arch}, _MEMDUMP_PATH_ON_DEVICE)
self._DeployPrebuiltOnDeviceIfNeeded(
_PSEXT_PREBUILT_PATH % {'arch': self._arch}, _PSEXT_PATH_ON_DEVICE)
self._DeployPrebuiltOnDeviceIfNeeded(
_HEAP_DUMP_PREBUILT_PATH % {'arch': self._arch},
_HEAP_DUMP_PATH_ON_DEVICE)
self._initialized = True
def IsNativeTracingEnabled(self):
"""Checks whether the libheap_profiler is preloaded in the zygote."""
zygote_name = 'zygote64' if self._arch64 else 'zygote'
zygote_process = [p for p in self.ListProcesses() if p.name == zygote_name]
if not zygote_process:
raise exceptions.MemoryInspectorException('Zygote process not found')
zygote_pid = zygote_process[0].pid
zygote_maps = self.adb.Shell(['cat', '/proc/%d/maps' % zygote_pid])
return 'libheap_profiler' in zygote_maps
def EnableNativeTracing(self, enabled):
"""Installs libheap_profiler in and injects it in the Zygote."""
def WrapZygote(app_process):
self.adb.Shell(['mv', app_process, app_process + '.real'])
with tempfile.NamedTemporaryFile() as wrapper_file:
wrapper_file.write('#!/system/bin/sh\n'
'LD_PRELOAD="libheap_profiler.so:$LD_PRELOAD" '
'exec %s.real "$@"\n' % app_process)
wrapper_file.flush()
self.adb.Push(wrapper_file.name, app_process)
self.adb.Shell(['chown', 'root.shell', app_process])
self.adb.Shell(['chmod', '755', app_process])
def UnwrapZygote():
for suffix in ('', '32', '64'):
        # We don't really care if app_processX.real doesn't exist and mv fails.
        # If app_processX.real doesn't exist, either app_processX is already
        # unwrapped or it doesn't exist for the current arch.
app_process = '/system/bin/app_process' + suffix
self.adb.Shell(['mv', app_process + '.real', app_process])
assert(self._initialized)
self.adb.RemountSystemPartition()
# Start restoring the original state in any case.
UnwrapZygote()
if enabled:
# Temporarily disable SELinux (until next reboot).
self.adb.Shell(['setenforce', '0'])
# Wrap the Zygote startup binary (app_process) with a script which
# LD_PRELOADs libheap_profiler and invokes the original Zygote process.
if self._arch64:
app_process = '/system/bin/app_process64'
assert(self.adb.FileExists(app_process))
self._DeployPrebuiltOnDeviceIfNeeded(
_LIBHEAPPROF_PREBUILT_PATH % {'arch': self._arch64},
'/system/lib64/' + _LIBHEAPPROF_FILE_NAME)
WrapZygote(app_process)
if self._arch32:
# Path is app_process32 for Android >= L, app_process when < L.
app_process = '/system/bin/app_process32'
if not self.adb.FileExists(app_process):
app_process = '/system/bin/app_process'
assert(self.adb.FileExists(app_process))
self._DeployPrebuiltOnDeviceIfNeeded(
_LIBHEAPPROF_PREBUILT_PATH % {'arch': self._arch32},
'/system/lib/' + _LIBHEAPPROF_FILE_NAME)
WrapZygote(app_process)
# Respawn the zygote (the device will kind of reboot at this point).
self.adb.Shell('stop')
self.adb.Shell('start')
# Wait for the package manger to come back.
for _ in xrange(10):
found_pm = 'package:' in self.adb.Shell(['pm', 'path', 'android'])
if found_pm:
break
time.sleep(3)
if not found_pm:
raise exceptions.MemoryInspectorException('Device unresponsive (no pm)')
# Remove the wrapper. This won't have effect until the next reboot, when
# the profiler will be automatically disarmed.
UnwrapZygote()
# We can also unlink the lib files at this point. Once the Zygote has
# started it will keep the inodes refcounted anyways through its lifetime.
self.adb.Shell(['rm', '/system/lib*/' + _LIBHEAPPROF_FILE_NAME])
def ListProcesses(self):
"""Returns a sequence of |AndroidProcess|."""
self._RefreshProcessesList()
return self._processes.itervalues()
def GetProcess(self, pid):
"""Returns an instance of |AndroidProcess| (None if not found)."""
assert(isinstance(pid, int))
self._RefreshProcessesList()
return self._processes.get(pid)
def GetStats(self):
"""Returns an instance of |DeviceStats| with the OS CPU/Memory stats."""
cur = self.UpdateAndGetSystemStats()
old = self._last_device_stats or cur # Handle 1st call case.
uptime = cur['time']['ticks'] / cur['time']['rate']
ticks = max(1, cur['time']['ticks'] - old['time']['ticks'])
cpu_times = []
for i in xrange(len(cur['cpu'])):
cpu_time = {
'usr': 100 * (cur['cpu'][i]['usr'] - old['cpu'][i]['usr']) / ticks,
'sys': 100 * (cur['cpu'][i]['sys'] - old['cpu'][i]['sys']) / ticks,
'idle': 100 * (cur['cpu'][i]['idle'] - old['cpu'][i]['idle']) / ticks}
      # The idle tick count on many Linux kernels is frozen while a CPU is
      # offline, then jumps (making up for the whole offline period) when the
      # CPU comes back. It therefore needs to be saturated at [0, 100].
cpu_time['idle'] = max(0, min(cpu_time['idle'],
100 - cpu_time['usr'] - cpu_time['sys']))
cpu_times.append(cpu_time)
memory_stats = {'Free': cur['mem']['MemFree:'],
'Cache': cur['mem']['Buffers:'] + cur['mem']['Cached:'],
'Swap': cur['mem']['SwapCached:'],
'Anonymous': cur['mem']['AnonPages:'],
'Kernel': cur['mem']['VmallocUsed:']}
self._last_device_stats = cur
return backends.DeviceStats(uptime=uptime,
cpu_times=cpu_times,
memory_stats=memory_stats)
def UpdateAndGetSystemStats(self):
"""Grabs and caches system stats through ps_ext (max cache TTL = 0.5s).
Rationale of caching: avoid invoking adb too often, it is slow.
"""
assert(self._initialized)
max_ttl = datetime.timedelta(seconds=0.5)
if (self._sys_stats_last_update and
datetime.datetime.now() - self._sys_stats_last_update <= max_ttl):
return self._sys_stats
dump_out = self.adb.Shell(_PSEXT_PATH_ON_DEVICE)
stats = json.loads(dump_out)
assert(all([x in stats for x in ['cpu', 'processes', 'time', 'mem']])), (
'ps_ext returned a malformed JSON dictionary.')
self._sys_stats = stats
self._sys_stats_last_update = datetime.datetime.now()
return self._sys_stats
def _RefreshProcessesList(self):
sys_stats = self.UpdateAndGetSystemStats()
processes_to_delete = set(self._processes.keys())
for pid, proc in sys_stats['processes'].iteritems():
pid = int(pid)
process = self._processes.get(pid)
if not process or process.name != proc['name']:
process = AndroidProcess(self, int(pid), proc['name'])
self._processes[pid] = process
processes_to_delete.discard(pid)
for pid in processes_to_delete:
del self._processes[pid]
def _DeployPrebuiltOnDeviceIfNeeded(self, local_path, path_on_device):
# TODO(primiano): check that the md5 binary is built-in also on pre-KK.
# Alternatively add tools/android/md5sum to prebuilts and use that one.
prebuilts_fetcher.GetIfChanged(local_path)
with open(local_path, 'rb') as f:
local_hash = hashlib.md5(f.read()).hexdigest()
device_md5_out = self.adb.Shell(['md5', path_on_device])
if local_hash in device_md5_out:
return
self.adb.Push(local_path, path_on_device)
self.adb.Shell(['chmod', '755', path_on_device])
@property
def name(self):
"""Device name, as defined in the |backends.Device| interface."""
return self._name
@property
def id(self):
"""Device id, as defined in the |backends.Device| interface."""
return self._id
class AndroidProcess(backends.Process):
"""Android-specific implementation of the core |Process| interface."""
def __init__(self, device, pid, name):
super(AndroidProcess, self).__init__(device, pid, name)
self._last_sys_stats = None
def DumpMemoryMaps(self):
"""Grabs and parses memory maps through memdump."""
dump_out = self.device.adb.Shell([_MEMDUMP_PATH_ON_DEVICE, str(self.pid)])
return memdump_parser.Parse(dump_out)
def DumpNativeHeap(self):
"""Grabs and parses native heap traces using heap_dump."""
cmd = [_HEAP_DUMP_PATH_ON_DEVICE, '-n', '-x', str(self.pid)]
dump_out = self.device.adb.Shell(cmd)
return native_heap_dump_parser.Parse(dump_out)
def Freeze(self):
self.device.adb.Shell(['kill', '-STOP', str(self.pid)])
def Unfreeze(self):
self.device.adb.Shell(['kill', '-CONT', str(self.pid)])
def GetStats(self):
"""Calculate process CPU/VM stats (CPU stats are relative to last call)."""
# Process must retain its own copy of _last_sys_stats because CPU times
# are calculated relatively to the last GetStats() call (for the process).
cur_sys_stats = self.device.UpdateAndGetSystemStats()
old_sys_stats = self._last_sys_stats or cur_sys_stats
cur_proc_stats = cur_sys_stats['processes'].get(str(self.pid))
old_proc_stats = old_sys_stats['processes'].get(str(self.pid))
# The process might have gone in the meanwhile.
if (not cur_proc_stats or not old_proc_stats):
return None
run_time = (((cur_sys_stats['time']['ticks'] -
cur_proc_stats['start_time']) / cur_sys_stats['time']['rate']))
ticks = max(1, cur_sys_stats['time']['ticks'] -
old_sys_stats['time']['ticks'])
cpu_usage = (100 *
((cur_proc_stats['user_time'] + cur_proc_stats['sys_time']) -
(old_proc_stats['user_time'] + old_proc_stats['sys_time'])) /
ticks) / len(cur_sys_stats['cpu'])
proc_stats = backends.ProcessStats(
threads=cur_proc_stats['n_threads'],
run_time=run_time,
cpu_usage=cpu_usage,
vm_rss=cur_proc_stats['vm_rss'],
page_faults=(
(cur_proc_stats['maj_faults'] + cur_proc_stats['min_faults']) -
(old_proc_stats['maj_faults'] + old_proc_stats['min_faults'])))
self._last_sys_stats = cur_sys_stats
return proc_stats
| bsd-3-clause |
Solinea/horizon | openstack_dashboard/dashboards/admin/metadata_defs/tests.py | 29 | 11269 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
from openstack_dashboard.test import helpers as test
class MetadataDefinitionsView(test.BaseAdminViewTests):
def test_namespace_object(self):
mock = self.mox.CreateMockAnything()
mock.name = 'hello'
mock.description = 'world'
mock.visibility = 'public'
mock.resource_type_associations = [{'name': 'sample'}]
namespace = api.glance.Namespace(mock)
self.assertEqual('world', namespace.description)
self.assertTrue(namespace.public)
self.assertEqual('sample', namespace.resource_type_names[0])
@test.create_stubs({api.glance: ('metadefs_namespace_list',)})
def test_metadata_defs_list(self):
namespace_list = self.metadata_defs.list()
api.glance.metadefs_namespace_list(
IsA(http.HttpRequest),
sort_dir='asc',
marker=None,
paginate=True).AndReturn((namespace_list, False, False))
self.mox.ReplayAll()
res = self.client.get(reverse(constants.METADATA_INDEX_URL))
self.assertTemplateUsed(res, constants.METADATA_INDEX_TEMPLATE)
self.assertEqual(len(res.context['namespaces_table'].data),
len(namespace_list))
@test.create_stubs({api.glance: ('metadefs_namespace_list',)})
def test_metadata_defs_no_results(self):
api.glance.metadefs_namespace_list(
IsA(http.HttpRequest),
sort_dir='asc',
marker=None,
paginate=True).AndReturn(((), False, False))
self.mox.ReplayAll()
res = self.client.get(reverse(constants.METADATA_INDEX_URL))
self.assertTemplateUsed(res, constants.METADATA_INDEX_TEMPLATE)
self.assertEqual(len(res.context['namespaces_table'].data), 0)
@test.create_stubs({api.glance: ('metadefs_namespace_list',)})
def test_metadata_defs_error(self):
api.glance.metadefs_namespace_list(
IsA(http.HttpRequest),
sort_dir='asc',
marker=None,
paginate=True).AndRaise(self.exceptions.glance)
self.mox.ReplayAll()
res = self.client.get(reverse(constants.METADATA_INDEX_URL))
self.assertTemplateUsed(res, constants.METADATA_INDEX_TEMPLATE)
@test.create_stubs({api.glance: ('metadefs_namespace_list',)})
def test_delete_availability(self):
namespace_list = self.metadata_defs.list()
api.glance.metadefs_namespace_list(
IsA(http.HttpRequest),
sort_dir='asc',
marker=None,
paginate=True).AndReturn((namespace_list, False, False))
self.mox.ReplayAll()
res = self.client.get(reverse(constants.METADATA_INDEX_URL))
self.assertIn('namespaces_table', res.context)
ns_table = res.context['namespaces_table']
namespaces = ns_table.data
for i in [1, 2]:
row_actions = ns_table.get_row_actions(namespaces[i])
            self.assertEqual(len(row_actions), 2)
self.assertTrue('delete' in
[a.name for a in row_actions])
self.assertTrue('manage_resource_types' in
[a.name for a in row_actions])
@test.create_stubs({api.glance: ('metadefs_namespace_get',)})
def test_metadata_defs_get(self):
namespace = self.metadata_defs.first()
api.glance.metadefs_namespace_get(
IsA(http.HttpRequest),
'1',
wrap=True
).MultipleTimes().AndReturn(namespace)
self.mox.ReplayAll()
res = self.client.get(reverse(constants.METADATA_DETAIL_URL,
kwargs={'namespace_id': '1'}))
self.assertNoFormErrors(res)
self.assertTemplateUsed(res, constants.METADATA_DETAIL_TEMPLATE)
@test.create_stubs({api.glance: ('metadefs_namespace_get',)})
def test_metadata_defs_get_contents(self):
namespace = self.metadata_defs.first()
api.glance.metadefs_namespace_get(
IsA(http.HttpRequest),
'1',
wrap=True
).MultipleTimes().AndReturn(namespace)
self.mox.ReplayAll()
res = self.client.get(
'?'.join([reverse(constants.METADATA_DETAIL_URL,
kwargs={'namespace_id': '1'}),
'='.join(['tab', 'namespace_details__contents'])]))
self.assertNoFormErrors(res)
self.assertTemplateUsed(res, constants.METADATA_DETAIL_TEMPLATE)
@test.create_stubs({api.glance: ('metadefs_namespace_get',)})
def test_metadata_defs_get_overview(self):
namespace = self.metadata_defs.first()
api.glance.metadefs_namespace_get(
IsA(http.HttpRequest),
'1',
wrap=True
).MultipleTimes().AndReturn(namespace)
self.mox.ReplayAll()
res = self.client.get(
'?'.join([reverse(constants.METADATA_DETAIL_URL,
kwargs={'namespace_id': '1'}),
'='.join(['tab', 'namespace_details__overview'])]))
self.assertNoFormErrors(res)
self.assertTemplateUsed(res, constants.METADATA_DETAIL_TEMPLATE)
@test.create_stubs({api.glance: ('metadefs_resource_types_list',
'metadefs_namespace_resource_types')})
def test_metadata_defs_manage_resource_types(self):
namespace = self.metadata_defs.first()
api.glance.metadefs_namespace_resource_types(
IsA(http.HttpRequest),
'1'
).AndReturn(namespace.resource_type_associations)
api.glance.metadefs_resource_types_list(
IsA(http.HttpRequest)
).AndReturn(namespace.resource_type_associations)
self.mox.ReplayAll()
res = self.client.get(
reverse(constants.METADATA_MANAGE_RESOURCES_URL,
kwargs={'id': '1'}))
self.assertTemplateUsed(res,
constants.METADATA_MANAGE_RESOURCES_TEMPLATE)
self.assertContains(res, 'mock name')
@test.create_stubs({api.glance: ('metadefs_namespace_resource_types',
'metadefs_namespace_remove_resource_type',
'metadefs_namespace_add_resource_type')})
def test_metadata_defs_manage_resource_types_change(self):
resource_type_associations = [
{
'prefix': 'mock1_prefix',
'name': 'mock1'
},
{
'prefix': 'mock2_prefix',
'name': 'mock2',
'selected': True
}
]
api.glance.metadefs_namespace_resource_types(
IsA(http.HttpRequest),
'1'
).AndReturn(resource_type_associations)
api.glance.metadefs_namespace_remove_resource_type(
IsA(http.HttpRequest),
'1',
'mock1'
).AndReturn(resource_type_associations)
api.glance.metadefs_namespace_remove_resource_type(
IsA(http.HttpRequest),
'1',
'mock2'
).AndReturn(resource_type_associations)
api.glance.metadefs_namespace_add_resource_type(
IsA(http.HttpRequest),
'1',
{
'prefix': 'mock2_prefix',
'name': 'mock2'
}
).AndReturn(resource_type_associations)
self.mox.ReplayAll()
form_data = {'resource_types': json.dumps(resource_type_associations)}
res = self.client.post(
reverse(constants.METADATA_MANAGE_RESOURCES_URL,
kwargs={'id': '1'}),
form_data)
self.assertNoFormErrors(res)
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(
res, reverse(constants.METADATA_INDEX_URL)
)
class MetadataDefinitionsCreateViewTest(test.BaseAdminViewTests):
def test_admin_metadata_defs_create_namespace_get(self):
res = self.client.get(reverse(constants.METADATA_CREATE_URL))
self.assertTemplateUsed(res, constants.METADATA_CREATE_TEMPLATE)
@test.create_stubs({api.glance: ('metadefs_namespace_create',)})
def test_admin_metadata_defs_create_namespace_post(self):
metadata = {}
metadata["namespace"] = "test_namespace"
metadata["display_name"] = "display_name"
metadata["description"] = "description"
metadata["visibility"] = "private"
metadata["protected"] = False
api.glance.metadefs_namespace_create(
IsA(http.HttpRequest),
metadata
).AndReturn(metadata)
self.mox.ReplayAll()
form_data = {
'source_type': 'raw',
'direct_input': json.dumps(metadata)
}
res = self.client.post(reverse(constants.METADATA_CREATE_URL),
form_data)
self.assertNoFormErrors(res)
def test_admin_metadata_defs_create_namespace_invalid_json_post_raw(self):
form_data = {
'source_type': 'raw',
'direct_input': 'invalidjson'
}
res = self.client.post(reverse(constants.METADATA_CREATE_URL),
form_data)
self.assertFormError(res, "form", None, ['There was a problem loading '
'the namespace: No JSON '
'object could be decoded.'])
def test_admin_metadata_defs_create_namespace_empty_json_post_raw(self):
form_data = {
'source_type': 'raw',
'direct_input': ''
}
res = self.client.post(reverse(constants.METADATA_CREATE_URL),
form_data)
self.assertFormError(res, "form", None, ['No input was provided for '
'the namespace content.'])
def test_admin_metadata_defs_create_namespace_empty_json_post_file(self):
form_data = {
'source_type': 'raw',
'direct_input': ''
}
res = self.client.post(reverse(constants.METADATA_CREATE_URL),
form_data)
self.assertFormError(res, "form", None, ['No input was provided for '
'the namespace content.'])
| apache-2.0 |
kpu/joshua | test/scripts/run_bundler_test.py | 3 | 13721 | import unittest
from mock import Mock
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "scripts", "support"))
from run_bundler import ConfigLine
from run_bundler import CopyFileConfigLine
from run_bundler import JOSHUA_PATH
from run_bundler import abs_file_path
from run_bundler import clear_non_empty_dir
from run_bundler import config_line_factory
from run_bundler import filter_through_copy_config_script
from run_bundler import handle_args
from run_bundler import main
from run_bundler import make_dest_dir
from run_bundler import processed_config_line
class TestRunBundler_cli(unittest.TestCase):
def test_force(self):
args = handle_args(["--force",
"/dev/null",
"/dev/null",
"haitian5-bundle"])
self.assertIsInstance(args.config, file)
def test_no_force(self):
args = handle_args(["/dev/null",
"/dev/null",
"haitian5-bundle"])
self.assertIsInstance(args.config, file)
def test_copy_config_options(self):
"""
For --copy-config-options, space-separated options surrounded by a pair
of quotes should not be split.
"""
args = handle_args(["/dev/null",
"/dev/null",
"haitian5-bundle",
"--copy-config-options",
"-grammar grammar.gz"])
self.assertIsInstance(args.config, file)
self.assertEqual("-grammar grammar.gz", args.copy_config_options)
def test_copy_config_options__empty(self):
"""
An error should result from --copy-config-options with no options.
"""
with self.assertRaises(SystemExit):
handle_args(["/dev/null",
"/dev/null",
"haitian5-bundle",
"--copy-config-options"])
class TestRunBundler_bundle_dir(unittest.TestCase):
def setUp(self):
self.test_dest_dir = "newdir"
self.config_line_abs = 'tm = thrax pt 12 /home/hltcoe/lorland/expts/haitian-creole-sms/runs/5/data/test/grammar.filtered.gz'
self.config_line_rel = 'lm = berkeleylm 5 false false 100 lm.berkeleylm'
# Create the destination directory and put a file in it.
if not os.path.exists(self.test_dest_dir):
os.mkdir(self.test_dest_dir)
temp_file_path = os.path.join(self.test_dest_dir, 'temp')
open(temp_file_path, 'w').write('test text')
def tearDown(self):
if os.path.exists(self.test_dest_dir):
clear_non_empty_dir(self.test_dest_dir)
def test_clear_non_empty_dir(self):
clear_non_empty_dir(self.test_dest_dir)
self.assertFalse(os.path.exists(self.test_dest_dir))
def test_force_make_dest_dir__extant_not_empty(self):
# The existing directory should be removed and a new empty directory
# should be in its place.
make_dest_dir(self.test_dest_dir, True)
self.assertTrue(os.path.exists(self.test_dest_dir))
self.assertEqual([], os.listdir(self.test_dest_dir))
def test_make_dest_dir__non_extant(self):
# Set up by removing (existing) directory.
clear_non_empty_dir(self.test_dest_dir)
# A new empty directory should be created.
make_dest_dir(self.test_dest_dir, False)
self.assertTrue(os.path.exists(self.test_dest_dir))
class TestProcessedConfigLine_blank(unittest.TestCase):
def setUp(self):
self.args = handle_args(['/dev/null', '/dev/null', '/dev/null'])
def test_output_is_input(self):
"""
The resulting processed config line of a blank line is that same
blank line.
"""
cl_object = processed_config_line('', self.args)
expect = ''
actual = cl_object.result()
self.assertEqual(expect, actual)
class TestProcessedConfigLine_comment(unittest.TestCase):
def setUp(self):
self.line = '# This is the location of the file containing model weights.'
self.args = handle_args(['/dev/null', '/dev/null', '/dev/null'])
def test_line_type(self):
cl_object = processed_config_line(self.line, self.args)
self.assertIsInstance(cl_object, ConfigLine)
def test_output_is_input(self):
"""
The resulting processed config line of a comment line is that same
comment line.
"""
expect = '# This is the location of the file containing model weights.'
actual = processed_config_line(expect, self.args).result()
self.assertEqual(expect, actual)
class TestProcessedConfigLine_copy1(unittest.TestCase):
def setUp(self):
self.line = 'weights-file = test/parser/weights # foo bar'
self.args = Mock()
self.args.origdir = JOSHUA_PATH
self.args.destdir = '/tmp/testdestdir'
if os.path.exists(self.args.destdir):
clear_non_empty_dir(self.args.destdir)
os.mkdir(self.args.destdir)
def tearDown(self):
if os.path.exists(self.args.destdir):
clear_non_empty_dir(self.args.destdir)
def test_line_type(self):
cl_object = config_line_factory(self.line, self.args)
self.assertIsInstance(cl_object, ConfigLine)
def test_output_is_input(self):
"""
The resulting processed config line of a comment line is that same
comment line.
"""
expect = '# This is the location of the file containing model weights.'
actual = processed_config_line(expect, self.args).result()
self.assertEqual(expect, actual)
class TestProcessedConfigLine_copy2(unittest.TestCase):
def setUp(self):
self.line = 'weights-file = test/parser/weights # foo bar'
args = Mock()
self.args = args
args.origdir = JOSHUA_PATH
args.destdir = './testdestdir'
self.destdir = args.destdir
# Create the destination directory.
if not os.path.exists(args.destdir):
os.mkdir(args.destdir)
self.cl_object = processed_config_line(self.line, args)
self.expected_source_file_path = os.path.abspath(os.path.join(args.origdir,
'test', 'parser', 'weights'))
self.expected_dest_file_path = os.path.abspath(os.path.join(args.destdir, 'weights'))
CopyFileConfigLine.clear_file_name_counts()
def tearDown(self):
if not os.path.exists(self.destdir):
os.mkdir(self.destdir)
def test_line_source_path(self):
actual = self.cl_object.source_file_path
self.assertEqual(self.expected_source_file_path, actual)
def test_line_parts(self):
cl_object = processed_config_line(self.line, self.args)
expect = {"command": ['weights-file', '=', 'test/parser/weights'],
"comment": '# foo bar'}
actual = cl_object.line_parts
self.assertEqual(expect["command"], actual["command"])
def test_line_dest_path(self):
actual = self.cl_object.dest_file_path
self.assertEqual(self.expected_dest_file_path, actual)
def test_line_copy_file(self):
self.assertTrue(os.path.exists(self.cl_object.dest_file_path))
class TestProcessedConfigLine_copy_dirtree(unittest.TestCase):
def setUp(self):
# N.B. specify a path to copytree that is not inside your application.
# Otherwise it ends in infinite recursion.
self.line = 'tm = thrax pt 12 example # foo bar'
self.args = Mock()
self.args.origdir = os.path.join(JOSHUA_PATH, 'examples')
self.args.destdir = './testdestdir'
# Create the destination directory.
if os.path.exists(self.args.destdir):
clear_non_empty_dir(self.args.destdir)
os.mkdir(self.args.destdir)
CopyFileConfigLine.clear_file_name_counts()
def tearDown(self):
if os.path.exists(self.args.destdir):
clear_non_empty_dir(self.args.destdir)
def test_line_parts(self):
cl_object = processed_config_line(self.line, self.args)
expect = {"command": ['tm', '=', 'thrax', 'pt', '12', 'example'],
"comment": '# foo bar'}
actual = cl_object.line_parts
self.assertEqual(expect["command"], actual["command"])
def test_line_copy_dirtree(self):
processed_config_line(self.line, self.args)
expect = os.path.join(self.args.destdir, 'example', 'joshua.config')
self.assertTrue(os.path.exists(expect))
def test_line_copy_dirtree_result(self):
cl_object = processed_config_line(self.line, self.args)
expect = 'tm = thrax pt 12 example # foo bar'
actual = cl_object.result()
self.assertEqual(expect, actual)
class TestMain(unittest.TestCase):
def setUp(self):
CopyFileConfigLine.clear_file_name_counts()
self.line = 'weights-file = weights # foo bar\noutput-format = %1'
self.origdir = '/tmp/testorigdir'
self.destdir = '/tmp/testdestdir'
for d in [self.origdir, self.destdir]:
if os.path.exists(d):
clear_non_empty_dir(d)
# Create the destination directory.
os.mkdir(self.origdir)
os.mkdir(self.destdir)
# Write the files to be processed.
config_file = os.path.join(self.origdir, 'joshua.config')
with open(config_file, 'w') as fh:
fh.write(self.line)
with open(os.path.join(self.origdir, 'weights'), 'w') as fh:
fh.write("grammar data\n")
self.args = ['thisprogram', '-f', config_file, self.origdir,
self.destdir]
def tearDown(self):
for d in [self.origdir, self.destdir]:
if os.path.exists(d):
clear_non_empty_dir(d)
def test_main(self):
main(self.args)
actual = os.path.exists(os.path.join(self.destdir, 'weights'))
self.assertTrue(actual)
with open(os.path.join(self.destdir, 'joshua.config')) as fh:
actual = fh.read().splitlines()
expect = ['weights-file = weights # foo bar', 'output-format = %1']
self.assertEqual(expect, actual)
def test_main_with_copy_config_options(self):
"""
For --copy-config-options, space-separated options surrounded by a pair
of quotes should not be split.
"""
main(self.args + ["--copy-config-options", "-topn 1"])
with open(os.path.join(self.destdir, 'joshua.config')) as fh:
actual = fh.read().splitlines()
expect = ['weights-file = weights # foo bar', 'output-format = %1',
"topn = 1"]
self.assertEqual(expect, actual)
self.assertEqual(3, len(actual))
class TestFilterThroughCopyConfigScript(unittest.TestCase):
def test_method(self):
expect = ["# hello", "topn = 1"]
actual = filter_through_copy_config_script(["# hello"], "-topn 1")
self.assertEqual(expect, actual)
class TestAbsFilePath(unittest.TestCase):
def test_abs_file_path_path_in_file_token_1(self):
"""
A file token that is already an absolute path outside the origdir should not be changed.
"""
dir_path = '/foo'
file_token = '/bar/file.txt'
expect = file_token
actual = abs_file_path(dir_path, file_token)
self.assertEqual(expect, actual)
def test_abs_file_path_path_in_file_token_2(self):
"""
A file token that is already an absolute path inside the origdir should not be changed.
"""
dir_path = '/bar'
file_token = '/bar/file.txt'
expect = file_token
actual = abs_file_path(dir_path, file_token)
self.assertEqual(expect, actual)
def test_rel_file_path_path_in_file_token_2(self):
"""
Relative file path should get the dir_path prepended.
"""
dir_path = '/foo'
file_token = 'bar/file.txt'
expect = '/foo/bar/file.txt'
actual = abs_file_path(dir_path, file_token)
self.assertEqual(expect, actual)
class TestUniqueFileNames(unittest.TestCase):
def setUp(self):
self.args = Mock()
self.args.origdir = '/dev/null'
self.args.destdir = '/dev/null'
CopyFileConfigLine.clear_file_name_counts()
def test_2_files_same_name__without_filename_extension(self):
line = 'weights-file = weights'
cl = config_line_factory(line, self.args)
self.assertEqual('weights-file = weights', cl.result())
# Another file with the same name appears.
line = 'weights-file = otherdir/weights'
cl = config_line_factory(line, self.args)
self.assertEqual('weights-file = weights-1', cl.result())
def test_2_files_same_name__with_filename_extension(self):
line = 'tm = blah blah blah grammar.packed'
cl = config_line_factory(line, self.args)
self.assertEqual('tm = blah blah blah grammar.packed', cl.result())
# Another file with the same name appears.
line = 'tm = blah blah blah otherdir/grammar.packed'
cl = config_line_factory(line, self.args)
self.assertEqual('tm = blah blah blah grammar-1.packed', cl.result())
def test_clear_file_name_counts(self):
line = 'tm = blah blah blah grammar.packed'
cl = config_line_factory(line, self.args)
cl = config_line_factory(line, self.args)
CopyFileConfigLine.clear_file_name_counts()
cl = config_line_factory(line, self.args)
self.assertEqual('tm = blah blah blah grammar.packed', cl.result())
| lgpl-2.1 |
xuraylei/fresco_floodlight | src/main/python/PythonServer.py | 150 | 1529 | #!/usr/bin/env python
import sys
import logging
import logging.handlers
sys.path.append('../../../target/gen-py')
from packetstreamer import PacketStreamer
from packetstreamer.ttypes import *
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
class PacketStreamerHandler:
def __init__(self):
logging.handlers.codecs = None
self.log = logging.getLogger("packetstreamer")
self.log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler("/dev/log")
handler.setFormatter(logging.Formatter("%(name)s: %(levelname)s %(message)s"))
self.log.addHandler(handler)
def ping(self):
self.log.debug('ping()')
return True
def pushPacketSync(self, packet):
self.log.debug('receive a packet synchronously: %s' %(packet))
return 0
def pushPacketAsync(self, packet):
self.log.debug('receive a packet Asynchronously: %s' %(packet))
handler = PacketStreamerHandler()
processor = PacketStreamer.Processor(handler)
transport = TSocket.TServerSocket(9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
server = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
# You could do one of these for a multithreaded server
#server = TServer.TThreadedServer(processor, transport, tfactory, pfactory)
#server = TServer.TThreadPoolServer(processor, transport, tfactory, pfactory)
print 'Starting the server...'
server.serve()
print 'done.'
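# A minimal client-side sketch (an illustration, not part of this module;
# it assumes the generated PacketStreamer.Client from the same gen-py tree
# imported above):
#
#   transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
#   protocol = TBinaryProtocol.TBinaryProtocol(transport)
#   client = PacketStreamer.Client(protocol)
#   transport.open()
#   print client.ping()
#   transport.close()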
| apache-2.0 |
hsu1994/Terminator | Server/RelyON/boost_1_61_0/libs/python/test/vector_indexing_suite.py | 46 | 9555 | # Copyright Joel de Guzman 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
#####################################################################
# Check an object that we will use as container element
#####################################################################
>>> from vector_indexing_suite_ext import *
>>> x = X('hi')
>>> x
hi
>>> x.reset() # a member function that modifies X
>>> x
reset
>>> x.foo() # another member function that modifies X
>>> x
foo
# test that a string is implicitly convertible
# to an X
>>> x_value('bochi bochi')
'gotya bochi bochi'
#####################################################################
# Iteration
#####################################################################
>>> def print_xvec(xvec):
... s = '[ '
... for x in xvec:
... s += repr(x)
... s += ' '
... s += ']'
... print s
#####################################################################
# Replace all the contents using slice syntax
#####################################################################
>>> v = XVec()
>>> v[:] = [X('a'),X('b'),X('c'),X('d'),X('e')]
>>> print_xvec(v)
[ a b c d e ]
#####################################################################
# Indexing
#####################################################################
>>> len(v)
5
>>> v[0]
a
>>> v[1]
b
>>> v[2]
c
>>> v[3]
d
>>> v[4]
e
>>> v[-1]
e
>>> v[-2]
d
>>> v[-3]
c
>>> v[-4]
b
>>> v[-5]
a
#####################################################################
# Deleting an element
#####################################################################
>>> del v[0]
>>> v[0] = 'yaba' # must do implicit conversion
>>> print_xvec(v)
[ yaba c d e ]
#####################################################################
# Calling a mutating function of a container element
#####################################################################
>>> v[3].reset()
>>> v[3]
reset
#####################################################################
# Copying a container element
#####################################################################
>>> x = X(v[3])
>>> x
reset
>>> x.foo()
>>> x
foo
>>> v[3] # should not be changed to 'foo'
reset
#####################################################################
# Referencing a container element
#####################################################################
>>> x = v[3]
>>> x
reset
>>> x.foo()
>>> x
foo
>>> v[3] # should be changed to 'foo'
foo
#####################################################################
# Slice
#####################################################################
>>> sl = v[0:2]
>>> print_xvec(sl)
[ yaba c ]
>>> sl[0].reset()
>>> sl[0]
reset
#####################################################################
# Reset the container again
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # perform implicit conversion to X
>>> print_xvec(v)
[ a b c d e ]
#####################################################################
# Slice: replace [1:3] with an element
#####################################################################
>>> v[1:3] = X('z')
>>> print_xvec(v)
[ a z d e ]
#####################################################################
# Slice: replace [0:2] with a list
#####################################################################
>>> v[0:2] = ['1','2','3','4'] # perform implicit conversion to X
>>> print_xvec(v)
[ 1 2 3 4 d e ]
#####################################################################
# Slice: delete [3:4]
#####################################################################
>>> del v[3:4]
>>> print_xvec(v)
[ 1 2 3 d e ]
#####################################################################
# Slice: set [3:] to a list
#####################################################################
>>> v[3:] = [X('trailing'), X('stuff')] # a list
>>> print_xvec(v)
[ 1 2 3 trailing stuff ]
#####################################################################
# Slice: delete [:3]
#####################################################################
>>> del v[:3]
>>> print_xvec(v)
[ trailing stuff ]
#####################################################################
# Slice: insert a tuple to [0:0]
#####################################################################
>>> v[0:0] = ('leading','stuff') # can also be a tuple
>>> print_xvec(v)
[ leading stuff trailing stuff ]
#####################################################################
# Reset the container again
#####################################################################
>>> v[:] = ['a','b','c','d','e']
#####################################################################
# Some references to the container elements
#####################################################################
>>> z0 = v[0]
>>> z1 = v[1]
>>> z2 = v[2]
>>> z3 = v[3]
>>> z4 = v[4]
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy
c
>>> z3 # proxy
d
>>> z4 # proxy
e
#####################################################################
# Delete a container element
#####################################################################
>>> del v[2]
>>> print_xvec(v)
[ a b d e ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy detached
c
>>> z3 # proxy index adjusted
d
>>> z4 # proxy index adjusted
e
#####################################################################
# Delete all container elements
#####################################################################
>>> del v[:]
>>> print_xvec(v)
[ ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy detached
a
>>> z1 # proxy detached
b
>>> z2 # proxy detached
c
>>> z3 # proxy detached
d
>>> z4 # proxy detached
e
#####################################################################
# Reset the container again
#####################################################################
>>> v[:] = ['a','b','c','d','e']
#####################################################################
# renew the references to the container elements
#####################################################################
>>> z0 = v[0]
>>> z1 = v[1]
>>> z2 = v[2]
>>> z3 = v[3]
>>> z4 = v[4]
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy
c
>>> z3 # proxy
d
>>> z4 # proxy
e
#####################################################################
# Set [2:4] to a list such that there will be more elements
#####################################################################
>>> v[2:4] = ['x','y','v']
>>> print_xvec(v)
[ a b x y v e ]
#####################################################################
# Show that the references are still valid
#####################################################################
>>> z0 # proxy
a
>>> z1 # proxy
b
>>> z2 # proxy detached
c
>>> z3 # proxy detached
d
>>> z4 # proxy index adjusted
e
#####################################################################
# Contains
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> assert 'a' in v
>>> assert 'b' in v
>>> assert 'c' in v
>>> assert 'd' in v
>>> assert 'e' in v
>>> assert not 'X' in v
>>> assert not 12345 in v
#####################################################################
# Show that iteration allows mutable access to the elements
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> for x in v:
... x.reset()
>>> print_xvec(v)
[ reset reset reset reset reset ]
#####################################################################
# append
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> v.append('f')
>>> print_xvec(v)
[ a b c d e f ]
#####################################################################
# extend
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> v.extend(['f','g','h','i','j'])
>>> print_xvec(v)
[ a b c d e f g h i j ]
#####################################################################
# extend using a generator expression
#####################################################################
>>> v[:] = ['a','b','c','d','e'] # reset again
>>> def generator():
... addlist = ['f','g','h','i','j']
... for i in addlist:
... if i != 'g':
... yield i
>>> v.extend(generator())
>>> print_xvec(v)
[ a b c d e f h i j ]
#####################################################################
# vector of strings
#####################################################################
>>> sv = StringVec()
>>> sv.append('a')
>>> print sv[0]
a
#####################################################################
# END....
#####################################################################
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print 'running...'
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| apache-2.0 |
smx-smx/dsl-n55u-bender | release/src/router/samba-3.5.8/source4/lib/messaging/tests/bindings.py | 22 | 1964 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Unix SMB/CIFS implementation.
# Copyright © Jelmer Vernooij <jelmer@samba.org> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from samba.messaging import Messaging
from unittest import TestCase
class MessagingTests(TestCase):
def get_context(self, *args, **kwargs):
kwargs["messaging_path"] = "."
return Messaging(*args, **kwargs)
def test_register(self):
x = self.get_context()
def callback():
pass
msg_type = x.register(callback)
x.deregister(callback, msg_type)
def test_assign_server_id(self):
x = self.get_context()
self.assertTrue(isinstance(x.server_id, tuple))
self.assertEquals(3, len(x.server_id))
def test_ping_speed(self):
server_ctx = self.get_context((0, 1))
def ping_callback(src, data):
server_ctx.send(src, data)
def exit_callback():
print "received exit"
msg_ping = server_ctx.register(ping_callback)
msg_exit = server_ctx.register(exit_callback)
def pong_callback():
print "received pong"
client_ctx = self.get_context((0, 2))
msg_pong = client_ctx.register(pong_callback)
client_ctx.send((0,1), msg_ping, "testing")
client_ctx.send((0,1), msg_ping, "")
| gpl-2.0 |
bzennn/blog_flask | python/lib/python3.5/site-packages/sqlalchemy/orm/dependency.py | 32 | 46192 | # orm/dependency.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
"""
from .. import sql, util, exc as sa_exc
from . import attributes, exc, sync, unitofwork, \
util as mapperutil
from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
class DependencyProcessor(object):
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
if self.passive_deletes:
self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_delete_flag = attributes.PASSIVE_OFF
if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_update_flag = attributes.PASSIVE_OFF
self.key = prop.key
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError(
"Can't build a DependencyProcessor for relationship %s. "
"No target attributes to populate between parent and "
"child are present" %
self.prop)
@classmethod
def from_relationship(cls, prop):
return _direction_to_processor[prop.direction](prop)
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key).hasparent(state)
def per_property_preprocessors(self, uow):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states in
the aggregate.
"""
uow.register_preprocessor(self, True)
def per_property_flush_actions(self, uow):
after_save = unitofwork.ProcessAll(uow, self, False, True)
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
uow,
self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
uow,
self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
uow,
self.mapper.primary_base_mapper
)
self.per_property_dependencies(uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
"""
parent_base_mapper = self.parent.primary_base_mapper
child_base_mapper = self.mapper.primary_base_mapper
child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
# locate and disable the aggregate processors
# for this dependency
if isdelete:
before_delete = unitofwork.ProcessAll(uow, self, True, True)
before_delete.disabled = True
else:
after_save = unitofwork.ProcessAll(uow, self, False, True)
after_save.disabled = True
# check if the "child" side is part of the cycle
if child_saves not in uow.cycles:
# based on the current dependencies we use, the saves/
# deletes should always be in the 'cycles' collection
# together. if this changes, we will have to break up
# this method a bit more.
assert child_deletes not in uow.cycles
# child side is not part of the cycle, so we will link per-state
# actions to the aggregate "saves", "deletes" actions
child_actions = [
(child_saves, False), (child_deletes, True)
]
child_in_cycles = False
else:
child_in_cycles = True
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.base_mapper)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(
uow,
self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
parent_in_cycles = True
# now create actions / dependencies for each state.
for state in states:
# detect if there's anything changed or loaded
# by a preprocessor on this state/attribute. In the
# case of deletes we may try to load missing items here as well.
sum_ = state.manager[self.key].impl.get_all_pending(
state, state.dict,
self._passive_delete_flag
if isdelete
else attributes.PASSIVE_NO_INITIALIZE)
if not sum_:
continue
if isdelete:
before_delete = unitofwork.ProcessState(uow,
self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(
uow,
state,
parent_base_mapper)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(
uow,
state,
parent_base_mapper)
if child_in_cycles:
child_actions = []
for child_state, child in sum_:
if child_state not in uow.states:
child_action = (None, None)
else:
(deleted, listonly) = uow.states[child_state]
if deleted:
child_action = (
unitofwork.DeleteState(
uow, child_state,
child_base_mapper),
True)
else:
child_action = (
unitofwork.SaveUpdateState(
uow, child_state,
child_base_mapper),
False)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(uow, parent_saves,
parent_deletes,
child_action,
after_save, before_delete,
isdelete, childisdelete)
def presort_deletes(self, uowcommit, states):
return False
def presort_saves(self, uowcommit, states):
return False
def process_deletes(self, uowcommit, states):
pass
def process_saves(self, uowcommit, states):
pass
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
elif self.direction is MANYTOONE:
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
passive = attributes.PASSIVE_OFF
for s in states:
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(
s,
self.key,
passive)
if history and not history.empty():
return True
else:
return states and \
not self.prop._is_self_referential and \
self.mapper in uowcommit.mappers
def _verify_canload(self, state):
if self.prop.uselist and state is None:
raise exc.FlushError(
"Can't flush None value found in "
"collection %s" % (self.prop, ))
elif state is not None and \
not self.mapper._canload(
state, allow_subtypes=not self.enable_typechecks):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError('Attempting to flush an item of type '
'%(x)s as a member of collection '
'"%(y)s". Expected an object of type '
'%(z)s or a polymorphic subclass of '
'this type. If %(x)s is a subclass of '
'%(z)s, configure mapper "%(zm)s" to '
'load this subtype polymorphically, or '
'set enable_typechecks=False to allow '
'any subtype to be accepted for flush. '
% {
'x': state.class_,
'y': self.prop,
'z': self.mapper.class_,
'zm': self.mapper,
})
else:
raise exc.FlushError(
'Attempting to flush an item of type '
'%(x)s as a member of collection '
'"%(y)s". Expected an object of type '
'%(z)s or a polymorphic subclass of '
'this type.' % {
'x': state.class_,
'y': self.prop,
'z': self.mapper.class_,
})
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit):
raise NotImplementedError()
def _get_reversed_processed_set(self, uow):
if not self.prop._reverse_property:
return None
process_key = tuple(sorted(
[self.key] +
[p.key for p in self.prop._reverse_property]
))
return uow.memo(
('reverse_key', process_key),
set
)
def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
for x in related:
if not is_m2o_delete or x is not None:
uowcommit.issue_post_update(
state,
[r for l, r in self.prop.synchronize_pairs]
)
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
True)
uow.dependencies.update([
(child_saves, after_save),
(parent_saves, after_save),
(after_save, child_post_updates),
(before_delete, child_pre_updates),
(child_pre_updates, parent_deletes),
(child_pre_updates, child_deletes),
])
else:
uow.dependencies.update([
(parent_saves, after_save),
(after_save, child_saves),
(after_save, child_deletes),
(child_saves, parent_deletes),
(child_deletes, parent_deletes),
(before_delete, child_saves),
(before_delete, child_deletes),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
True)
# TODO: this whole block is not covered
# by any tests
if not isdelete:
if childisdelete:
uow.dependencies.update([
(child_action, after_save),
(after_save, child_post_updates),
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
(after_save, child_post_updates),
])
else:
if childisdelete:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
])
else:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
])
elif not isdelete:
uow.dependencies.update([
(save_parent, after_save),
(after_save, child_action),
(save_parent, child_action)
])
else:
uow.dependencies.update([
(before_delete, child_action),
(child_action, delete_parent)
])
def presort_deletes(self, uowcommit, states):
# head object is being deleted, and we manage its list of
# child objects the child objects have to have their
# foreign key to the parent set to NULL
should_null_fks = not self.cascade.delete and \
not self.passive_deletes == 'all'
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child, operation="delete", prop=self.prop)
def presort_saves(self, uowcommit, states):
children_added = uowcommit.memo(('children_added', self), set)
for state in states:
pks_changed = self._pks_changed(uowcommit, state)
if not pks_changed or self.passive_updates:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(
state,
self.key,
passive)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(child, cancel_delete=True,
operation="add",
prop=self.prop)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=False,
operation='delete',
prop=self.prop)
elif self.hasparent(child) is False:
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
uowcommit.register_object(
st_,
isdelete=True)
if pks_changed:
if history:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child,
False,
self.passive_updates,
operation="pk change",
prop=self.prop)
def process_deletes(self, uowcommit, states):
# head object is being deleted, and we manage its list of
# child objects the child objects have to have their foreign
# key to the parent set to NULL this phase can be called
# safely for any cascade but is unnecessary if delete cascade
# is on.
if self.post_update or not self.passive_deletes == 'all':
children_added = uowcommit.memo(('children_added', self), set)
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and \
self.hasparent(child) is False:
self._synchronize(
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
if self.post_update or not self.cascade.delete:
for child in set(history.unchanged).\
difference(children_added):
if child is not None:
self._synchronize(
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child,
uowcommit,
[state])
# technically, we can even remove each child from the
# collection here too. but this would be a somewhat
# inconsistent behavior since it wouldn't happen
# if the old parent wasn't deleted but child was moved.
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
self._synchronize(state, child, None,
False, uowcommit, False)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
for child in history.deleted:
if not self.cascade.delete_orphan and \
not self.hasparent(child):
self._synchronize(state, child, None, True,
uowcommit, False)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(state, child, None,
False, uowcommit, True)
def _synchronize(self, state, child,
associationrow, clearkeys, uowcommit,
pks_changed):
source = state
dest = child
self._verify_canload(child)
if dest is None or \
(not self.post_update and uowcommit.is_deleted(dest)):
return
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(source, self.parent, dest, self.mapper,
self.prop.synchronize_pairs, uowcommit,
self.passive_updates and pks_changed)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
class ManyToOneDP(DependencyProcessor):
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
def per_property_dependencies(self, uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete):
if self.post_update:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
False)
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(child_saves, after_save),
(parent_saves, after_save),
(after_save, parent_post_updates),
(after_save, parent_pre_updates),
(before_delete, parent_pre_updates),
(parent_pre_updates, child_deletes),
])
else:
uow.dependencies.update([
(child_saves, after_save),
(after_save, parent_saves),
(parent_saves, child_deletes),
(parent_deletes, child_deletes)
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
False)
if childisdelete:
uow.dependencies.update([
(after_save, parent_post_updates),
(parent_post_updates, child_action)
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
(after_save, parent_post_updates)
])
else:
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(before_delete, parent_pre_updates),
(parent_pre_updates, delete_parent),
(parent_pre_updates, child_action)
])
elif not isdelete:
if not childisdelete:
uow.dependencies.update([
(child_action, after_save),
(after_save, save_parent),
])
else:
uow.dependencies.update([
(after_save, save_parent),
])
else:
if childisdelete:
uow.dependencies.update([
(delete_parent, child_action)
])
def presort_deletes(self, uowcommit, states):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
t = self.mapper.cascade_iterator('delete', child)
for c, m, st_, dct_ in t:
uowcommit.register_object(
st_, isdelete=True)
def presort_saves(self, uowcommit, states):
for state in states:
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
t = self.mapper.cascade_iterator('delete', child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
if self.post_update and \
not self.cascade.delete_orphan and \
not self.passive_deletes == 'all':
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
self._post_update(
state, uowcommit, history.sum(),
is_m2o_delete=True)
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
if history.added:
for child in history.added:
self._synchronize(state, child, None, False,
uowcommit, "add")
if self.post_update:
self._post_update(state, uowcommit, history.sum())
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation=None):
if state is None or \
(not self.post_update and uowcommit.is_deleted(state)):
return
if operation is not None and \
child is not None and \
not uowcommit.session._contains_state(child):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(child, self.mapper, state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False)
class DetectKeySwitch(DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
Theoretically, this approach could be expanded to support transparent
deletion of objects referenced via many-to-one as well, although
the current attribute system doesn't do enough bookkeeping for this
to be efficient.
"""
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
if self.passive_updates:
return
else:
if False in (prop.passive_updates for
prop in self.prop._reverse_property):
return
uow.register_preprocessor(self, False)
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([
(parent_saves, after_save)
])
def per_state_flush_actions(self, uow, states, isdelete):
pass
def presort_deletes(self, uowcommit, states):
pass
def presort_saves(self, uow, states):
if not self.passive_updates:
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
self._process_key_switches(states, uow)
def prop_has_changes(self, uow, states, isdelete):
if not isdelete and self.passive_updates:
d = self._key_switchers(uow, states)
return bool(d)
return False
def process_deletes(self, uowcommit, states):
assert False
def process_saves(self, uowcommit, states):
# for passive updates, register objects in the process stage
# so that we avoid ManyToOneDP's registering the object without
# the listonly flag in its own preprocess stage (results in UPDATE
# statements being emitted)
assert self.passive_updates
self._process_key_switches(states, uowcommit)
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
('pk_switchers', self),
lambda: (set(), set())
)
allstates = switched.union(notswitched)
for s in states:
if s not in allstates:
if self._pks_changed(uow, s):
switched.add(s)
else:
notswitched.add(s)
return switched
def _process_key_switches(self, deplist, uowcommit):
switchers = self._key_switchers(uowcommit, deplist)
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
related = state.get_impl(self.key).get(
state, dict_, passive=self._passive_update_flag)
if related is not attributes.PASSIVE_NO_RESULT and \
related is not None:
related_state = attributes.instance_state(dict_[self.key])
if related_state in switchers:
uowcommit.register_object(state,
False,
self.passive_updates)
sync.populate(
related_state,
self.mapper, state,
self.parent, self.prop.synchronize_pairs,
uowcommit, self.passive_updates)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync.source_modified(
uowcommit, state, self.mapper, self.prop.synchronize_pairs)
class ManyToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
):
uow.dependencies.update([
(parent_saves, after_save),
(child_saves, after_save),
(after_save, child_deletes),
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
(before_delete, parent_deletes),
(before_delete, child_deletes),
(before_delete, child_saves),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if not isdelete:
if childisdelete:
uow.dependencies.update([
(save_parent, after_save),
(after_save, child_action),
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
])
else:
uow.dependencies.update([
(before_delete, child_action),
(before_delete, delete_parent)
])
def presort_deletes(self, uowcommit, states):
# TODO: no tests fail if this whole
# thing is removed !!!!
if not self.passive_deletes:
# if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
# if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_OFF)
if not self.cascade.delete_orphan:
return
# check for child items removed from the collection
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete',
child):
uowcommit.register_object(
st_, isdelete=True)
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
# this history should be cached already, as
# we loaded it in preprocess_deletes
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.non_added():
if child is None or \
(processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.non_added())
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def process_saves(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
need_cascade_pks = not self.passive_updates and \
self._pks_changed(uowcommit, state)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
passive = attributes.PASSIVE_NO_INITIALIZE
history = uowcommit.get_attribute_history(state, self.key,
passive)
if history:
for child in history.added:
if (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "add"):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state)
for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
sync.update(state,
self.parent,
associationrow,
"old_",
self.prop.synchronize_pairs)
sync.update(child,
self.mapper,
associationrow,
"old_",
self.prop.secondary_synchronize_pairs)
secondary_update.append(associationrow)
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_delete)
if result.supports_sane_multi_rowcount() and \
result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete "
"%d row(s); Only %d were matched." %
(self.secondary.description, len(secondary_delete),
result.rowcount)
)
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(sql.and_(*[
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_update)
if result.supports_sane_multi_rowcount() and \
result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update "
"%d row(s); Only %d were matched." %
(self.secondary.description, len(secondary_update),
result.rowcount)
)
if secondary_insert:
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation):
# this checks for None if uselist=True
self._verify_canload(child)
# but if uselist=False we get here. If child is None,
# no association row can be generated, so return.
if child is None:
return False
if child is not None and not uowcommit.session._contains_state(child):
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return False
sync.populate_dict(state, self.parent, associationrow,
self.prop.synchronize_pairs)
sync.populate_dict(child, self.mapper, associationrow,
self.prop.secondary_synchronize_pairs)
return True
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
_direction_to_processor = {
ONETOMANY: OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY: ManyToManyDP,
}
| gpl-3.0 |
marshall007/rethinkdb | drivers/python/rethinkdb/net_asyncio.py | 5 | 10153 | # Copyright 2015 RethinkDB, all rights reserved.
import asyncio
import contextlib
import socket
import struct
from . import ql2_pb2 as p
from .net import decodeUTF, Query, Response, Cursor, maybe_profile, convert_pseudo
from .net import Connection as ConnectionBase
from .errors import *
__all__ = ['Connection']
pResponse = p.Response.ResponseType
pQuery = p.Query.QueryType
@asyncio.coroutine
def _read_until(streamreader, delimiter):
"""Naive implementation of reading until a delimiter"""
buffer = bytearray()
while True:
c = yield from streamreader.read(1)
if c == b'':
break # EOF
buffer.append(c[0])
if c == delimiter:
break
return bytes(buffer)
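# Example (a sketch mirroring the handshake read in
# ConnectionInstance.connect below):
#
#   response = yield from _read_until(self._streamreader, b'\0')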
def reusable_waiter(loop, timeout):
"""Wait for something, with a timeout from when the waiter was created.
This can be used in loops::
waiter = reusable_waiter(event_loop, 10.0)
while some_condition:
yield from waiter(some_future)
"""
if timeout is not None:
deadline = loop.time() + timeout
else:
deadline = None
@asyncio.coroutine
def wait(future):
if deadline is not None:
new_timeout = max(deadline - loop.time(), 0)
else:
new_timeout = None
return (yield from asyncio.wait_for(future, new_timeout, loop=loop))
return wait
@contextlib.contextmanager
def translate_timeout_errors():
try:
yield
except asyncio.TimeoutError:
raise RqlTimeoutError
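# Typical use (as in connect() and the cursor methods below): wrap an
# asyncio.wait_for call so callers see the driver's RqlTimeoutError
# rather than asyncio.TimeoutError:
#
#   with translate_timeout_errors():
#       response = yield from asyncio.wait_for(fut, timeout)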
# The asyncio implementation of the Cursor object:
# The `new_response` Future notifies any waiting coroutines that they can attempt
# to grab the next result. In addition, the waiting coroutine will schedule a
# timeout at the given deadline (if provided), at which point the future will be
# errored.
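# A usage sketch (hedged: assumes an open asyncio connection `conn`, the
# driver's top-level `r` handle, and a hypothetical table name):
#
#   cursor = yield from r.table('items').run(conn)
#   while (yield from cursor.fetch_next()):
#       item = yield from cursor.next()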
class AsyncioCursor(Cursor):
def __init__(self, *args, **kwargs):
Cursor.__init__(self, *args, **kwargs)
self.new_response = asyncio.Future()
def _extend(self, res):
Cursor._extend(self, res)
self.new_response.set_result(True)
self.new_response = asyncio.Future()
# Convenience function so users know when they've hit the end of the cursor
# without having to catch an exception
@asyncio.coroutine
def fetch_next(self, wait=True):
timeout = Cursor._wait_to_timeout(wait)
waiter = reusable_waiter(self.conn._io_loop, timeout)
while len(self.items) == 0 and self.error is None:
self._maybe_fetch_batch()
with translate_timeout_errors():
yield from waiter(asyncio.shield(self.new_response))
# If there is a (non-empty) error to be received, we return True, so the
# user will receive it on the next `next` call.
return len(self.items) != 0 or not isinstance(self.error, RqlCursorEmpty)
def _empty_error(self):
# We do not have RqlCursorEmpty inherit from StopIteration as that interferes
# with mechanisms to return from a coroutine.
return RqlCursorEmpty(self.query.term)
@asyncio.coroutine
def _get_next(self, timeout):
waiter = reusable_waiter(self.conn._io_loop, timeout)
while len(self.items) == 0:
self._maybe_fetch_batch()
if self.error is not None:
raise self.error
with translate_timeout_errors():
yield from waiter(asyncio.shield(self.new_response))
return convert_pseudo(self.items.pop(0), self.query)
def _maybe_fetch_batch(self):
if self.error is None and \
len(self.items) <= self.threshold and \
self.outstanding_requests == 0:
self.outstanding_requests += 1
asyncio.async(self.conn._parent._continue(self))
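# A hedged consumption sketch (illustrative only; assumes the base Cursor
# class exposes a `next` coroutine, as the driver's other cursor
# implementations do):
#
#   @asyncio.coroutine
#   def drain(cursor):
#       while (yield from cursor.fetch_next()):
#           item = yield from cursor.next()
#           print(item)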
class ConnectionInstance(object):
_streamreader = None
_streamwriter = None
def __init__(self, parent, io_loop=None):
self._parent = parent
self._closing = False
self._user_queries = { }
self._cursor_cache = { }
self._ready = asyncio.Future()
self._io_loop = io_loop
if self._io_loop is None:
self._io_loop = asyncio.get_event_loop()
@asyncio.coroutine
def connect(self, timeout):
try:
self._streamreader, self._streamwriter = yield from \
asyncio.open_connection(self._parent.host, self._parent.port,
family=socket.AF_INET, loop=self._io_loop)
self._streamwriter.get_extra_info('socket').setsockopt(
socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception as err:
raise RqlDriverError('Could not connect to %s:%s. Error: %s' %
(self._parent.host, self._parent.port, str(err)))
try:
self._streamwriter.write(self._parent.handshake)
with translate_timeout_errors():
response = yield from asyncio.wait_for(
_read_until(self._streamreader, b'\0'),
timeout, loop=self._io_loop,
)
except Exception as err:
raise RqlDriverError(
'Connection interrupted during handshake with %s:%s. Error: %s' %
(self._parent.host, self._parent.port, str(err)))
message = decodeUTF(response[:-1]).split('\n')[0]
if message != 'SUCCESS':
self.close(False, None)
raise RqlDriverError('Server dropped connection with message: "%s"' %
message)
# Start a parallel function to perform reads
# store a reference to it so it doesn't get destroyed
self._reader_task = asyncio.async(self._reader(), loop=self._io_loop)
return self._parent
def is_open(self):
return not (self._closing or self._streamreader.at_eof())
@asyncio.coroutine
def close(self, noreply_wait, token, exception=None):
self._closing = True
if exception is not None:
err_message = "Connection is closed (%s)." + str(exception)
else:
err_message = "Connection is closed."
# Cursors may remove themselves when errored, so copy a list of them
for cursor in list(self._cursor_cache.values()):
cursor._error(err_message)
for query, future in iter(self._user_queries.values()):
future.set_exception(RqlDriverError(err_message))
self._user_queries = { }
self._cursor_cache = { }
if noreply_wait:
noreply = Query(pQuery.NOREPLY_WAIT, token, None, None)
yield from self.run_query(noreply, False)
self._streamwriter.close()
return None
@asyncio.coroutine
def run_query(self, query, noreply):
self._streamwriter.write(query.serialize())
if noreply:
return None
response_future = asyncio.Future()
self._user_queries[query.token] = (query, response_future)
return (yield from response_future)
# The _reader coroutine runs in parallel, reading responses
# off of the socket and forwarding them to the appropriate Future or Cursor.
# This is shut down as a consequence of closing the stream, or an error in the
# socket/protocol from the server. Unexpected errors in this coroutine will
# close the ConnectionInstance and be passed to any open Futures or Cursors.
@asyncio.coroutine
def _reader(self):
try:
while True:
buf = yield from self._streamreader.readexactly(12)
(token, length,) = struct.unpack("<qL", buf)
buf = yield from self._streamreader.readexactly(length)
res = Response(token, buf)
cursor = self._cursor_cache.get(token)
if cursor is not None:
cursor._extend(res)
elif token in self._user_queries:
# Do not pop the query from the dict until later, so
# we don't lose track of it in case of an exception
query, future = self._user_queries[token]
if res.type == pResponse.SUCCESS_ATOM:
value = convert_pseudo(res.data[0], query)
future.set_result(maybe_profile(value, res))
elif res.type in (pResponse.SUCCESS_SEQUENCE,
pResponse.SUCCESS_PARTIAL):
cursor = AsyncioCursor(self, query)
self._cursor_cache[token] = cursor
cursor._extend(res)
future.set_result(maybe_profile(cursor, res))
elif res.type == pResponse.WAIT_COMPLETE:
future.set_result(None)
else:
future.set_exception(res.make_error(query))
del self._user_queries[token]
elif not self._closing:
raise RqlDriverError("Unexpected response received.")
except Exception as ex:
if not self._closing:
yield from self.close(False, None, ex)
class Connection(ConnectionBase):
def __init__(self, *args, **kwargs):
ConnectionBase.__init__(self, ConnectionInstance, *args, **kwargs)
try:
self.port = int(self.port)
except ValueError:
raise RqlDriverError("Could not convert port %s to an integer." % self.port)
@asyncio.coroutine
def reconnect(self, noreply_wait=True, timeout=None):
# We close before reconnect so reconnect doesn't try to close us
# and then fail to return the Future (this is a little awkward).
yield from self.close(noreply_wait)
self._instance = self._conn_type(self, **self._child_kwargs)
return (yield from self._instance.connect(timeout))
@asyncio.coroutine
def close(self, *args, **kwargs):
if self._instance is None:
return None
return (yield from ConnectionBase.close(self, *args, **kwargs))
| agpl-3.0 |
Hurence/log-island | logisland-components/logisland-processors/logisland-processor-scripting/src/main/resources/nltk/help.py | 7 | 1649 | # Natural Language Toolkit (NLTK) Help
#
# Copyright (C) 2001-2016 NLTK Project
# Authors: Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Provide structured access to documentation.
"""
from __future__ import print_function
import re
from textwrap import wrap
from nltk.data import load
def brown_tagset(tagpattern=None):
_format_tagset("brown_tagset", tagpattern)
def claws5_tagset(tagpattern=None):
_format_tagset("claws5_tagset", tagpattern)
def upenn_tagset(tagpattern=None):
_format_tagset("upenn_tagset", tagpattern)
#####################################################################
# UTILITIES
#####################################################################
def _print_entries(tags, tagdict):
for tag in tags:
entry = tagdict[tag]
defn = [tag + ": " + entry[0]]
examples = wrap(entry[1], width=75, initial_indent=' ', subsequent_indent=' ')
print("\n".join(defn + examples))
def _format_tagset(tagset, tagpattern=None):
tagdict = load("help/tagsets/" + tagset + ".pickle")
if not tagpattern:
_print_entries(sorted(tagdict), tagdict)
elif tagpattern in tagdict:
_print_entries([tagpattern], tagdict)
else:
tagpattern = re.compile(tagpattern)
tags = [tag for tag in sorted(tagdict) if tagpattern.match(tag)]
if tags:
_print_entries(tags, tagdict)
else:
print("No matching tags found.")
if __name__ == '__main__':
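# Smoke-test the lookups: a regex pattern prints every matching tag, an
# exact tag name prints a single entry, and a pattern with no matches
# falls through to "No matching tags found."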
brown_tagset(r'NN.*')
upenn_tagset(r'.*\$')
claws5_tagset('UNDEFINED')
brown_tagset(r'NN')
| apache-2.0 |
followloda/PornGuys | FlaskServer/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually do not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified based on win1251BulgarianCharToOrderMap, so
# only numbers < 64 are known to be valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
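# Illustrative lookup sketch (not part of chardet): a raw byte value indexes
# straight into the order map to obtain its frequency-order category.
#
#   b = 0xE0                                    # a windows-1251 Cyrillic byte
#   order = win1251BulgarianCharToOrderMap[b]   # -> 1, a very frequent letter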
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences: 3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
# flake8: noqa
| gpl-3.0 |
abhattad4/Digi-Menu | digimenu2/build/lib.linux-x86_64-2.7/django/template/context.py | 44 | 8912 | import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango20Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
_current_app_undefined = object()
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
return ContextDict(self, *args, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
Compares two contexts by comparing theirs 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
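# A short usage sketch (illustrative, not Django code): lookups walk the dict
# stack from the most recently pushed dict downward.
#
#   c = BaseContext({'a': 1})
#   with c.push({'a': 2}):
#       assert c['a'] == 2    # found in the pushed dict first
#   assert c['a'] == 1        # ContextDict.__exit__ popped the inner dict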
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of Context is deprecated. Use "
"RequestContext and set the current_app attribute of its "
"request instead.", RemovedInDjango20Warning, stacklevel=2)
self.autoescape = autoescape
self._current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@property
def current_app(self):
return None if self._current_app is _current_app_undefined else self._current_app
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
self.dicts.append(other_dict)
return other_dict
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
resolution fails if a variable is not found at the top of the RenderContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
# current_app isn't passed here to avoid triggering the deprecation
# warning in Context.__init__.
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of RequestContext is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango20Warning, stacklevel=2)
self._current_app = current_app
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
self.update({}) # placeholder for context processors output
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request)
if original_context:
context.push(original_context)
return context
| bsd-3-clause |
JoeGlancy/linux | tools/perf/scripts/python/export-to-postgresql.py | 238 | 25591 | # export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
# To use this script you will need to have installed the python-pyside package,
# which provides LGPL-licensed Python bindings for Qt. You will also need the
# libqt4-sql-psql package for Qt postgresql support.
#
# The script assumes postgresql is running on the local machine and that the
# user has postgresql permissions to create databases. Examples of installing
# postgresql and adding such a user are:
#
# fedora:
#
# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
# $ sudo su - postgres -c initdb
# $ sudo service postgresql start
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# ubuntu:
#
# $ sudo apt-get install postgresql
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
# 2015-05-29 12:49:23.464364 Creating database...
# 2015-05-29 12:49:26.281717 Writing to intermediate files...
# 2015-05-29 12:49:27.190383 Copying to database...
# 2015-05-29 12:49:28.140451 Removing intermediate files...
# 2015-05-29 12:49:28.147451 Adding primary keys
# 2015-05-29 12:49:28.655683 Adding foreign keys
# 2015-05-29 12:49:29.365350 Done
#
# To browse the database, psql can be used e.g.
#
# $ psql pt_example
# pt_example=# select * from samples_view where id < 100;
# pt_example=# \d+
# pt_example=# \d+ samples_view
# pt_example=# \q
#
# An example of using the database is provided by the script
# call-graph-from-postgresql.py. Refer to that script for details.
#
# Tables:
#
# The tables largely correspond to perf tools' data structures and are mostly self-explanatory.
#
# samples
#
# 'samples' is the main table. It represents what instruction was executing at a point in time
# when something (a selected event) happened. The memory address is the instruction pointer or 'ip'.
#
# calls
#
# 'calls' represents function calls and is related to 'samples' by 'call_id' and 'return_id'.
# 'calls' is only created when the 'calls' option to this script is specified.
#
# call_paths
#
# 'call_paths' represents all the call stacks. Each 'call' has an associated record in 'call_paths'.
# 'calls_paths' is only created when the 'calls' option to this script is specified.
#
# branch_types
#
# 'branch_types' provides descriptions for each type of branch.
#
# comm_threads
#
# 'comm_threads' shows how 'comms' relates to 'threads'.
#
# comms
#
# 'comms' contains a record for each 'comm' - the name given to the executable that is running.
#
# dsos
#
# 'dsos' contains a record for each executable file or library.
#
# machines
#
# 'machines' can be used to distinguish virtual machines if virtualization is supported.
#
# selected_events
#
# 'selected_events' contains a record for each kind of event that has been sampled.
#
# symbols
#
# 'symbols' contains a record for each symbol. Only symbols that have samples are present.
#
# threads
#
# 'threads' contains a record for each thread.
#
# Views:
#
# Most of the tables have views for more friendly display. The views are:
#
# calls_view
# call_paths_view
# comm_threads_view
# dsos_view
# machines_view
# samples_view
# symbols_view
# threads_view
#
# More examples of browsing the database with psql:
# Note that some of the examples are not the most efficient SQL queries.
# Note that call information is only available if the script's 'calls' option has been used.
#
# Top 10 function calls (not aggregated by symbol):
#
# SELECT * FROM calls_view ORDER BY elapsed_time DESC LIMIT 10;
#
# Top 10 function calls (aggregated by symbol):
#
# SELECT symbol_id,(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,
# SUM(elapsed_time) AS tot_elapsed_time,SUM(branch_count) AS tot_branch_count
# FROM calls_view GROUP BY symbol_id ORDER BY tot_elapsed_time DESC LIMIT 10;
#
# Note that the branch count gives a rough estimation of cpu usage, so functions
# that took a long time but have a relatively low branch count must have spent time
# waiting.
#
# Find symbols by pattern matching on part of the name (e.g. names containing 'alloc'):
#
# SELECT * FROM symbols_view WHERE name LIKE '%alloc%';
#
# Top 10 function calls for a specific symbol (e.g. whose symbol_id is 187):
#
# SELECT * FROM calls_view WHERE symbol_id = 187 ORDER BY elapsed_time DESC LIMIT 10;
#
# Show function calls made by function in the same context (i.e. same call path) (e.g. one with call_path_id 254):
#
# SELECT * FROM calls_view WHERE parent_call_path_id = 254;
#
# Show branches made during a function call (e.g. where call_id is 29357 and return_id is 29370 and tid is 29670)
#
# SELECT * FROM samples_view WHERE id >= 29357 AND id <= 29370 AND tid = 29670 AND event LIKE 'branches%';
#
# Show transactions:
#
# SELECT * FROM samples_view WHERE event = 'transactions';
#
# Note transaction start has 'in_tx' true whereas, transaction end has 'in_tx' false.
# Transaction aborts have branch_type_name 'transaction abort'
#
# Show transaction aborts:
#
# SELECT * FROM samples_view WHERE event = 'transactions' AND branch_type_name = 'transaction abort';
#
# To print a call stack requires walking the call_paths table. For example this python script:
# #!/usr/bin/python2
#
# import sys
# from PySide.QtSql import *
#
# if __name__ == '__main__':
# if (len(sys.argv) < 3):
# print >> sys.stderr, "Usage is: printcallstack.py <database name> <call_path_id>"
# raise Exception("Too few arguments")
# dbname = sys.argv[1]
# call_path_id = sys.argv[2]
# db = QSqlDatabase.addDatabase('QPSQL')
# db.setDatabaseName(dbname)
# if not db.open():
# raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
# query = QSqlQuery(db)
# print " id ip symbol_id symbol dso_id dso_short_name"
# while call_path_id != 0 and call_path_id != 1:
# ret = query.exec_('SELECT * FROM call_paths_view WHERE id = ' + str(call_path_id))
# if not ret:
# raise Exception("Query failed: " + query.lastError().text())
# if not query.next():
# raise Exception("Query failed")
# print "{0:>6} {1:>10} {2:>9} {3:<30} {4:>6} {5:<30}".format(query.value(0), query.value(1), query.value(2), query.value(3), query.value(4), query.value(5))
# call_path_id = query.value(6)
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW machines_view AS '
'SELECT '
'id,'
'pid,'
'root_dir,'
'CASE WHEN id=0 THEN \'unknown\' WHEN pid=-1 THEN \'host\' ELSE \'guest\' END AS host_or_guest'
' FROM machines')
do_query(query, 'CREATE VIEW dsos_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'short_name,'
'long_name,'
'build_id'
' FROM dsos')
do_query(query, 'CREATE VIEW symbols_view AS '
'SELECT '
'id,'
'name,'
'(SELECT short_name FROM dsos WHERE id=dso_id) AS dso,'
'dso_id,'
'sym_start,'
'sym_end,'
'CASE WHEN binding=0 THEN \'local\' WHEN binding=1 THEN \'global\' ELSE \'weak\' END AS binding'
' FROM symbols')
do_query(query, 'CREATE VIEW threads_view AS '
'SELECT '
'id,'
'machine_id,'
'(SELECT host_or_guest FROM machines_view WHERE id = machine_id) AS host_or_guest,'
'process_id,'
'pid,'
'tid'
' FROM threads')
do_query(query, 'CREATE VIEW comm_threads_view AS '
'SELECT '
'comm_id,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid'
' FROM comm_threads')
if perf_db_export_calls:
do_query(query, 'CREATE VIEW call_paths_view AS '
'SELECT '
'c.id,'
'to_hex(c.ip) AS ip,'
'c.symbol_id,'
'(SELECT name FROM symbols WHERE id = c.symbol_id) AS symbol,'
'(SELECT dso_id FROM symbols WHERE id = c.symbol_id) AS dso_id,'
'(SELECT dso FROM symbols_view WHERE id = c.symbol_id) AS dso_short_name,'
'c.parent_id,'
'to_hex(p.ip) AS parent_ip,'
'p.symbol_id AS parent_symbol_id,'
'(SELECT name FROM symbols WHERE id = p.symbol_id) AS parent_symbol,'
'(SELECT dso_id FROM symbols WHERE id = p.symbol_id) AS parent_dso_id,'
'(SELECT dso FROM symbols_view WHERE id = p.symbol_id) AS parent_dso_short_name'
' FROM call_paths c INNER JOIN call_paths p ON p.id = c.parent_id')
do_query(query, 'CREATE VIEW calls_view AS '
'SELECT '
'calls.id,'
'thread_id,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'call_path_id,'
'to_hex(ip) AS ip,'
'symbol_id,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'call_time,'
'return_time,'
'return_time - call_time AS elapsed_time,'
'branch_count,'
'call_id,'
'return_id,'
'CASE WHEN flags=1 THEN \'no call\' WHEN flags=2 THEN \'no return\' WHEN flags=3 THEN \'no call/return\' ELSE \'\' END AS flags,'
'parent_call_path_id'
' FROM calls INNER JOIN call_paths ON call_paths.id = call_path_id')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
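# PostgreSQL binary COPY format: an 11-byte signature "PGCOPY\n\377\r\n\0",
# a 32-bit flags field (0) and a 32-bit header-extension length (0); the
# trailer is a 16-bit word of -1 ("\377\377") in place of a tuple's field count.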
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
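# PQresultStatus 4 == PGRES_COPY_IN: the server is ready to receive COPY data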
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
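# Each row in the binary COPY stream is a 16-bit field count followed, for
# every field, by a 32-bit byte length and the raw value bytes. That is what
# the struct.pack() format strings below spell out: "!h" for the count, then
# "i"/value pairs ("q" = 8-byte integer, "s" = a string of the given length).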
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
| gpl-2.0 |
amalakar/dd-agent | tests/core/test_wmi.py | 2 | 17580 | # stdlib
from functools import partial
import logging
import unittest
# 3rd
from mock import Mock
# project
from tests.checks.common import Fixtures
log = logging.getLogger(__name__)
WMISampler = None
# Thoughts
# Log WMI activity
# Mechanism to timeout
# Check when pywintypes.com_error are raised
# Check the role of the flags
def load_fixture(f, args=None):
"""
Build a WMI query result from a file and given parameters.
"""
properties = []
def extract_line(line):
"""
Extract a property name, value and the qualifiers from a fixture line.
Return (property name, property value, property qualifiers)
"""
property_counter_type = ""
try:
property_name, property_value, property_counter_type = line.split(" ")
except ValueError:
property_name, property_value = line.split(" ")
property_qualifiers = [Mock(Name='CounterType', Value=int(property_counter_type))] \
if property_counter_type else []
return property_name, property_value, property_qualifiers
# Build from file
data = Fixtures.read_file(f)
for l in data.splitlines():
property_name, property_value, property_qualifiers = extract_line(l)
properties.append(
Mock(Name=property_name, Value=property_value, Qualifiers_=property_qualifiers)
)
# Append extra information
if args:
property_name, property_value = args
properties.append(Mock(Name=property_name, Value=property_value, Qualifiers_=[]))
return [Mock(Properties_=properties)]
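# Fixture files are expected to hold one property per line, e.g.
# "AvgDiskBytesPerWrite 1536 5120" -- name, value and an optional raw
# CounterType qualifier (the values here are illustrative).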
class Counter(object):
def __init__(self):
self.value = 0
def __iadd__(self, other):
self.value += other
return self
def __eq__(self, other):
return self.value == other
def __str__(self):
return str(self.value)
def reset(self):
self.value = 0
class SWbemServices(object):
"""
SWbemServices a.k.a. (mocked) WMI connection.
Save connection parameters so it can be tested.
"""
_exec_query_call_count = Counter()
def __init__(self, wmi_conn_args):
super(SWbemServices, self).__init__()
self._wmi_conn_args = wmi_conn_args
self._last_wmi_query = None
self._last_wmi_flags = None
@classmethod
def reset(cls):
"""
FIXME - Dirty patch to reset `SWbemServices.ExecQuery` to 0.
"""
cls._exec_query_call_count.reset()
def get_conn_args(self):
"""
Return parameters used to set up the WMI connection.
"""
return self._wmi_conn_args
def get_last_wmi_query(self):
"""
Return the last WMI query submitted via the WMI connection.
"""
return self._last_wmi_query
def get_last_wmi_flags(self):
"""
Return the last WMI flags submitted via the WMI connection.
"""
return self._last_wmi_flags
def ExecQuery(self, query, query_language, flags):
SWbemServices._exec_query_call_count += 1
self._last_wmi_query = query
self._last_wmi_flags = flags
results = []
if query == "Select AvgDiskBytesPerWrite,FreeMegabytes from Win32_PerfFormattedData_PerfDisk_LogicalDisk": # noqa
results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "C:"))
results += load_fixture("win32_perfformatteddata_perfdisk_logicaldisk", ("Name", "D:"))
if query == "Select CounterRawCount,CounterCounter,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System": # noqa
# Mock a previous and a current sample
sample_file = "win32_perfrawdata_perfos_system_previous" if flags == 131120\
else "win32_perfrawdata_perfos_system_current"
results += load_fixture(sample_file, ("Name", "C:"))
results += load_fixture(sample_file, ("Name", "D:"))
if query == "Select UnknownCounter,MissingProperty,Timestamp_Sys100NS,Frequency_Sys100NS from Win32_PerfRawData_PerfOS_System": # noqa
results += load_fixture("win32_perfrawdata_perfos_system_unknown", ("Name", "C:"))
results += load_fixture("win32_perfrawdata_perfos_system_unknown", ("Name", "D:"))
if query == "Select IOReadBytesPerSec,IDProcess from Win32_PerfFormattedData_PerfProc_Process WHERE Name = 'chrome'" \
or query == "Select IOReadBytesPerSec,UnknownProperty from Win32_PerfFormattedData_PerfProc_Process WHERE Name = 'chrome'": # noqa
results += load_fixture("win32_perfformatteddata_perfproc_process")
if query == "Select IOReadBytesPerSec,ResultNotMatchingAnyTargetProperty from Win32_PerfFormattedData_PerfProc_Process WHERE Name = 'chrome'": # noqa
results += load_fixture("win32_perfformatteddata_perfproc_process_alt")
if query == "Select CommandLine from Win32_Process WHERE Handle = '4036'" \
or query == "Select UnknownProperty from Win32_Process WHERE Handle = '4036'":
results += load_fixture("win32_process")
return results
ExecQuery.call_count = _exec_query_call_count
class Dispatch(object):
"""
Mock for win32com.client Dispatch class.
"""
_connect_call_count = Counter()
def __init__(self, *args, **kwargs):
pass
@classmethod
def reset(cls):
"""
FIXME - Dirty patch to reset `ConnectServer.call_count` to 0.
"""
cls._connect_call_count.reset()
def ConnectServer(self, *args, **kwargs):
"""
Return a WMI connection, a.k.a. a SWbemServices object.
"""
Dispatch._connect_call_count += 1
wmi_conn_args = (args, kwargs)
return SWbemServices(wmi_conn_args)
ConnectServer.call_count = _connect_call_count
class TestCommonWMI(unittest.TestCase):
"""
Common toolbox for WMI unit testing.
"""
def setUp(self):
"""
Mock WMI-related Python packages so the sampler can be tested in any environment.
"""
import sys
global WMISampler
sys.modules['pywintypes'] = Mock()
sys.modules['win32com'] = Mock()
sys.modules['win32com.client'] = Mock(Dispatch=Dispatch)
from checks.libs.wmi import sampler
WMISampler = partial(sampler.WMISampler, log)
def tearDown(self):
"""
Reset mock counters and flush samplers and connections.
"""
# Reset counters
Dispatch.reset()
SWbemServices.reset()
# Flush cache
from checks.libs.wmi.sampler import WMISampler
WMISampler._wmi_locators = {}
WMISampler._wmi_connections = {}
def assertWMIConnWith(self, wmi_sampler, param):
"""
Helper, assert that the WMI connection was established with the right parameter and value.
"""
wmi_instance = wmi_sampler._get_connection()
wmi_conn_args, wmi_conn_kwargs = wmi_instance.get_conn_args()
if isinstance(param, tuple):
key, value = param
self.assertIn(key, wmi_conn_kwargs)
self.assertEquals(wmi_conn_kwargs[key], value)
else:
self.assertIn(param, wmi_conn_args)
def assertWMIQuery(self, wmi_sampler, query=None, flags=None):
"""
Helper, assert that the given WMI query and flags were submitted.
"""
wmi_instance = wmi_sampler._get_connection()
if query:
last_wmi_query = wmi_instance.get_last_wmi_query()
self.assertEquals(last_wmi_query, query)
if flags:
last_wmi_flags = wmi_instance.get_last_wmi_flags()
self.assertEquals(last_wmi_flags, flags)
def assertWMIObject(self, wmi_obj, property_names):
"""
Assert the WMI object integrity.
"""
for prop in property_names:
self.assertIn(prop, wmi_obj)
def assertIn(self, first, second):
"""
Assert `first` in `second`.
Note: needs to be defined for Python 2.6
"""
self.assertTrue(first in second, "{0} not in {1}".format(first, second))
class TestUnitWMISampler(TestCommonWMI):
"""
Unit tests for WMISampler.
"""
def test_wmi_connection(self):
"""
Establish a WMI connection to the specified host/namespace, with the right credentials.
"""
wmi_sampler = WMISampler(
"Win32_PerfRawData_PerfOS_System",
["ProcessorQueueLength"],
host="myhost",
namespace="some/namespace",
username="datadog",
password="password"
)
wmi_conn = wmi_sampler._get_connection()
# WMI connection is cached
self.assertIn('myhost:some/namespace:datadog', wmi_sampler._wmi_connections)
# Connection was established with the right parameters
self.assertWMIConnWith(wmi_sampler, "myhost")
self.assertWMIConnWith(wmi_sampler, "some/namespace")
def test_wmi_connection_pooling(self):
"""
Share WMI connections among WMISampler objects.
"""
from win32com.client import Dispatch
wmi_sampler_1 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"])
wmi_sampler_2 = WMISampler("Win32_OperatingSystem", ["TotalVisibleMemorySize"])
wmi_sampler_3 = WMISampler("Win32_PerfRawData_PerfOS_System", ["ProcessorQueueLength"], host="myhost") # noqa
wmi_sampler_1.sample()
wmi_sampler_2.sample()
self.assertEquals(Dispatch.ConnectServer.call_count, 1, Dispatch.ConnectServer.call_count)
wmi_sampler_3.sample()
self.assertEquals(Dispatch.ConnectServer.call_count, 2, Dispatch.ConnectServer.call_count)
def test_wql_filtering(self):
"""
Format the filters into a WQL `WHERE` clause.
"""
from checks.libs.wmi import sampler
format_filter = sampler.WMISampler._format_filter
# Check `_format_filter` logic
no_filters = []
filters = [{'Name': "SomeName"}, {'Id': "SomeId"}]
self.assertEquals("", format_filter(no_filters))
self.assertEquals(" WHERE Id = 'SomeId' AND Name = 'SomeName'",
format_filter(filters))
def test_wmi_query(self):
"""
Query WMI using WMI Query Language (WQL).
"""
# No filters
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
self.assertWMIQuery(
wmi_sampler,
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
)
# Single filter
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
filters=[{'Name': "C:"}])
wmi_sampler.sample()
self.assertWMIQuery(
wmi_sampler,
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
" WHERE Name = 'C:'"
)
# Multiple filters
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"],
filters=[{'Name': "C:"}, {'Id': "123"}])
wmi_sampler.sample()
self.assertWMIQuery(
wmi_sampler,
"Select AvgDiskBytesPerWrite,FreeMegabytes"
" from Win32_PerfFormattedData_PerfDisk_LogicalDisk"
" WHERE Id = '123' AND Name = 'C:'"
)
def test_wmi_parser(self):
"""
Parse WMI objects from WMI query results.
"""
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
# Assert `results`
expected_results = [
{
'freemegabytes': 19742.0,
'name': 'C:',
'avgdiskbytesperwrite': 1536.0
}, {
'freemegabytes': 19742.0,
'name': 'D:',
'avgdiskbytesperwrite': 1536.0
}
]
self.assertEquals(wmi_sampler, expected_results, wmi_sampler)
def test_wmi_sampler_iterator_getter(self):
"""
Iterating over, or indexing into, the WMISampler object iterates over, or indexes into, its current sample.
"""
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfDisk_LogicalDisk",
["AvgDiskBytesPerWrite", "FreeMegabytes"])
wmi_sampler.sample()
self.assertEquals(len(wmi_sampler), 2)
# Using an iterator
for wmi_obj in wmi_sampler:
self.assertWMIObject(wmi_obj, ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])
# Using an accessor
for index in xrange(0, 2):
self.assertWMIObject(wmi_sampler[index], ["AvgDiskBytesPerWrite", "FreeMegabytes", "name"])
def test_raw_perf_properties(self):
"""
Extend the list of properties to query for RAW Performance classes.
"""
# Formatted Performance class
wmi_sampler = WMISampler("Win32_PerfFormattedData_PerfOS_System", ["ProcessorQueueLength"])
self.assertEquals(len(wmi_sampler.property_names), 1)
# Raw Performance class
wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
self.assertEquals(len(wmi_sampler.property_names), 4)
def test_raw_initial_sampling(self):
"""
Query for initial sample for RAW Performance classes.
"""
wmi_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
wmi_sampler.sample()
# 2 queries should have been made: one for initialization, one for sampling
self.assertEquals(SWbemServices.ExecQuery.call_count, 2, SWbemServices.ExecQuery.call_count)
# Repeat
wmi_sampler.sample()
self.assertEquals(SWbemServices.ExecQuery.call_count, 3, SWbemServices.ExecQuery.call_count)
def test_raw_cache_qualifiers(self):
"""
Cache the qualifiers on the first query against RAW Performance classes.
"""
# Append `flag_use_amended_qualifiers` flag on the first query
wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
wmi_raw_sampler._query()
self.assertWMIQuery(wmi_raw_sampler, flags=131120)
wmi_raw_sampler._query()
self.assertWMIQuery(wmi_raw_sampler, flags=48)
# Qualifiers are cached
self.assertTrue(wmi_raw_sampler.property_counter_types)
self.assertIn('CounterRawCount', wmi_raw_sampler.property_counter_types)
self.assertIn('CounterCounter', wmi_raw_sampler.property_counter_types)
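# Hedged note (assumption: the standard WMI flag constants): 48 is
# wbemFlagReturnImmediately (16) | wbemFlagForwardOnly (32); OR-ing in
# wbemFlagUseAmendedQualifiers (131072) gives the first-query value asserted
# above.
assert 16 | 32 == 48 and 48 | 131072 == 131120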
def test_raw_properties_formatting(self):
"""
RAW data from WMI objects are returned formatted.
"""
wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["CounterRawCount", "CounterCounter"]) # noqa
wmi_raw_sampler.sample()
self.assertEquals(len(wmi_raw_sampler), 2)
# Using an iterator
for wmi_obj in wmi_raw_sampler:
self.assertWMIObject(wmi_obj, ["CounterRawCount", "CounterCounter", "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"]) # noqa
self.assertEquals(wmi_obj['CounterRawCount'], 500)
self.assertEquals(wmi_obj['CounterCounter'], 50)
# Using an accessor
for index in xrange(0, 2):
self.assertWMIObject(wmi_raw_sampler[index], ["CounterRawCount", "CounterCounter", "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"]) # noqa
self.assertEquals(wmi_raw_sampler[index]['CounterRawCount'], 500)
self.assertEquals(wmi_raw_sampler[index]['CounterCounter'], 50)
def test_raw_properties_fallback(self):
"""
Log a warning for RAW Performance classes when the counter-type calculator
is undefined, and return the original RAW value.
"""
from checks.libs.wmi.sampler import WMISampler
logger = Mock()
wmi_raw_sampler = WMISampler(logger, "Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"]) # noqa
wmi_raw_sampler.sample()
self.assertEquals(len(wmi_raw_sampler), 2)
for wmi_obj in wmi_raw_sampler:
self.assertWMIObject(wmi_obj, ["UnknownCounter", "Timestamp_Sys100NS", "Frequency_Sys100NS", "name"]) # noqa
self.assertEquals(wmi_obj['UnknownCounter'], 999)
self.assertTrue(logger.warning.called)
def test_missing_property(self):
"""
Do not raise on missing properties.
"""
wmi_raw_sampler = WMISampler("Win32_PerfRawData_PerfOS_System", ["UnknownCounter", "MissingProperty"]) # noqa
wmi_raw_sampler.sample()
self.assertEquals(len(wmi_raw_sampler), 2)
for wmi_obj in wmi_raw_sampler:
# Access a non existent property
self.assertFalse(wmi_obj['MissingProperty'])
class TestIntegrationWMI(unittest.TestCase):
"""
Integration tests for WMISampler.
"""
pass
| bsd-3-clause |
zackmdavis/swift | swift/proxy/server.py | 10 | 27924 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mimetypes
import os
import socket
from swift import gettext_ as _
from random import shuffle
from time import time
import itertools
import functools
import sys
from eventlet import Timeout
from swift import __canonical_version__ as swift_version
from swift.common import constraints
from swift.common.storage_policy import POLICIES
from swift.common.ring import Ring
from swift.common.utils import cache_from_env, get_logger, \
get_remote_client, split_path, config_true_value, generate_trans_id, \
affinity_key_function, affinity_locality_predicate, list_from_csv, \
register_swift_info
from swift.common.constraints import check_utf8, valid_api_version
from swift.proxy.controllers import AccountController, ContainerController, \
ObjectControllerRouter, InfoController
from swift.proxy.controllers.base import get_container_info
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \
HTTPServerError, HTTPException, Request, HTTPServiceUnavailable
from swift.common.exceptions import APIVersionError
# List of entry points for mandatory middlewares.
#
# Fields:
#
# "name" (required) is the entry point name from setup.py.
#
# "after_fn" (optional) a function that takes a PipelineWrapper object as its
# single argument and returns a list of middlewares that this middleware
# should come after. Any middlewares in the returned list that are not present
# in the pipeline will be ignored, so you can safely name optional middlewares
# to come after. For example, ["catch_errors", "bulk"] would install this
# middleware after catch_errors and bulk if both were present, but if bulk
# were absent, would just install it after catch_errors.
required_filters = [
{'name': 'catch_errors'},
{'name': 'gatekeeper',
'after_fn': lambda pipe: (['catch_errors']
if pipe.startswith('catch_errors')
else [])},
{'name': 'dlo', 'after_fn': lambda _junk: [
'staticweb', 'tempauth', 'keystoneauth',
'catch_errors', 'gatekeeper', 'proxy_logging']}]
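def _demo_after_fn():
    # Hedged sketch (standalone; a plain list stands in for the real
    # PipelineWrapper): how the gatekeeper 'after_fn' above resolves. The
    # returned list is non-empty only when the pipeline starts with
    # catch_errors, so gatekeeper is placed right after it in that case.
    example_pipe = ['catch_errors', 'proxy_logging', 'proxy-server']
    afters = ['catch_errors'] if example_pipe[0] == 'catch_errors' else []
    assert afters == ['catch_errors']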
class Application(object):
"""WSGI application for the proxy server."""
def __init__(self, conf, memcache=None, logger=None, account_ring=None,
container_ring=None):
if conf is None:
conf = {}
if logger is None:
self.logger = get_logger(conf, log_route='proxy-server')
else:
self.logger = logger
self._error_limiting = {}
swift_dir = conf.get('swift_dir', '/etc/swift')
self.swift_dir = swift_dir
self.node_timeout = int(conf.get('node_timeout', 10))
self.recoverable_node_timeout = int(
conf.get('recoverable_node_timeout', self.node_timeout))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.put_queue_depth = int(conf.get('put_queue_depth', 10))
self.object_chunk_size = int(conf.get('object_chunk_size', 65536))
self.client_chunk_size = int(conf.get('client_chunk_size', 65536))
self.trans_id_suffix = conf.get('trans_id_suffix', '')
self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5))
self.error_suppression_interval = \
int(conf.get('error_suppression_interval', 60))
self.error_suppression_limit = \
int(conf.get('error_suppression_limit', 10))
self.recheck_container_existence = \
int(conf.get('recheck_container_existence', 60))
self.recheck_account_existence = \
int(conf.get('recheck_account_existence', 60))
self.allow_account_management = \
config_true_value(conf.get('allow_account_management', 'no'))
self.object_post_as_copy = \
config_true_value(conf.get('object_post_as_copy', 'true'))
self.container_ring = container_ring or Ring(swift_dir,
ring_name='container')
self.account_ring = account_ring or Ring(swift_dir,
ring_name='account')
# ensure rings are loaded for all configured storage policies
for policy in POLICIES:
policy.load_ring(swift_dir)
self.obj_controller_router = ObjectControllerRouter()
self.memcache = memcache
mimetypes.init(mimetypes.knownfiles +
[os.path.join(swift_dir, 'mime.types')])
self.account_autocreate = \
config_true_value(conf.get('account_autocreate', 'no'))
self.auto_create_account_prefix = (
conf.get('auto_create_account_prefix') or '.')
self.expiring_objects_account = self.auto_create_account_prefix + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
self.max_containers_per_account = \
int(conf.get('max_containers_per_account') or 0)
self.max_containers_whitelist = [
a.strip()
for a in conf.get('max_containers_whitelist', '').split(',')
if a.strip()]
self.deny_host_headers = [
host.strip() for host in
conf.get('deny_host_headers', '').split(',') if host.strip()]
self.log_handoffs = config_true_value(conf.get('log_handoffs', 'true'))
self.cors_allow_origin = [
a.strip()
for a in conf.get('cors_allow_origin', '').split(',')
if a.strip()]
self.strict_cors_mode = config_true_value(
conf.get('strict_cors_mode', 't'))
self.node_timings = {}
self.timing_expiry = int(conf.get('timing_expiry', 300))
self.sorting_method = conf.get('sorting_method', 'shuffle').lower()
self.max_large_object_get_time = float(
conf.get('max_large_object_get_time', '86400'))
value = conf.get('request_node_count', '2 * replicas').lower().split()
if len(value) == 1:
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
rnc_value = int(value[0])
self.request_node_count = lambda replicas: rnc_value * replicas
else:
raise ValueError(
'Invalid request_node_count value: %r' % ''.join(value))
try:
self._read_affinity = read_affinity = conf.get('read_affinity', '')
self.read_affinity_sort_key = affinity_key_function(read_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid read_affinity value: %r (%s)" %
(read_affinity, err.message))
try:
write_affinity = conf.get('write_affinity', '')
self.write_affinity_is_local_fn \
= affinity_locality_predicate(write_affinity)
except ValueError as err:
# make the message a little more useful
raise ValueError("Invalid write_affinity value: %r (%s)" %
(write_affinity, err.message))
value = conf.get('write_affinity_node_count',
'2 * replicas').lower().split()
if len(value) == 1:
wanc_value = int(value[0])
self.write_affinity_node_count = lambda replicas: wanc_value
elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
wanc_value = int(value[0])
self.write_affinity_node_count = \
lambda replicas: wanc_value * replicas
else:
raise ValueError(
'Invalid write_affinity_node_count value: %r' % ''.join(value))
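# Hedged worked example (hypothetical config value): the parsing above turns
# 'request_node_count = 2 * replicas' into ['2', '*', 'replicas'], so with
# 3 replicas the resulting callable returns 2 * 3 = 6 nodes to request.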
# swift_owner_headers are stripped by the account and container
# controllers; we should extend header stripping to object controller
# when a privileged object header is implemented.
swift_owner_headers = conf.get(
'swift_owner_headers',
'x-container-read, x-container-write, '
'x-container-sync-key, x-container-sync-to, '
'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
'x-account-access-control')
self.swift_owner_headers = [
name.strip().title()
for name in swift_owner_headers.split(',') if name.strip()]
# Initialization was successful, so now apply the client chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we can set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because it affects the client as well, currently, we use the
# client chunk size as the governor and not the object chunk size.
socket._fileobject.default_bufsize = self.client_chunk_size
self.expose_info = config_true_value(
conf.get('expose_info', 'yes'))
self.disallowed_sections = list_from_csv(
conf.get('disallowed_sections', 'swift.valid_api_versions'))
self.admin_key = conf.get('admin_key', None)
register_swift_info(
version=swift_version,
strict_cors_mode=self.strict_cors_mode,
policies=POLICIES.get_policy_info(),
allow_account_management=self.allow_account_management,
account_autocreate=self.account_autocreate,
**constraints.EFFECTIVE_CONSTRAINTS)
def check_config(self):
"""
Check the configuration for possible errors
"""
if self._read_affinity and self.sorting_method != 'affinity':
self.logger.warn("sorting_method is set to '%s', not 'affinity'; "
"read_affinity setting will have no effect." %
self.sorting_method)
def get_object_ring(self, policy_idx):
"""
Get the ring object to use to handle a request based on its policy.
:param policy_idx: policy index as defined in swift.conf
:returns: appropriate ring object
"""
return POLICIES.get_object_ring(policy_idx, self.swift_dir)
def get_controller(self, req):
"""
Get the controller to handle a request.
:param req: the request
:returns: tuple of (controller class, path dictionary)
:raises: ValueError (thrown by split_path) if given invalid path
"""
if req.path == '/info':
d = dict(version=None,
expose_info=self.expose_info,
disallowed_sections=self.disallowed_sections,
admin_key=self.admin_key)
return InfoController, d
version, account, container, obj = split_path(req.path, 1, 4, True)
d = dict(version=version,
account_name=account,
container_name=container,
object_name=obj)
if account and not valid_api_version(version):
raise APIVersionError('Invalid path')
if obj and container and account:
info = get_container_info(req.environ, self)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
info['storage_policy'])
policy = POLICIES.get_by_index(policy_index)
if not policy:
# This indicates that a new policy has been created,
# with rings, deployed, released (i.e. deprecated =
# False), used by a client to create a container via
# another proxy that was restarted after the policy
# was released, and is now cached - all before this
# worker was HUPed to stop accepting new
# connections. There should never be an "unknown"
# index - but when there is - it's probably operator
# error and hopefully temporary.
raise HTTPServiceUnavailable('Unknown Storage Policy')
return self.obj_controller_router[policy], d
elif container and account:
return ContainerController, d
elif account and not container and not obj:
return AccountController, d
return None, d
def __call__(self, env, start_response):
"""
WSGI entry point.
Wraps env in swob.Request object and passes it down.
:param env: WSGI environment dictionary
:param start_response: WSGI callable
"""
try:
if self.memcache is None:
self.memcache = cache_from_env(env, True)
req = self.update_request(Request(env))
return self.handle_request(req)(env, start_response)
except UnicodeError:
err = HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
return err(env, start_response)
except (Exception, Timeout):
start_response('500 Server Error',
[('Content-Type', 'text/plain')])
return ['Internal server error.\n']
def update_request(self, req):
if 'x-storage-token' in req.headers and \
'x-auth-token' not in req.headers:
req.headers['x-auth-token'] = req.headers['x-storage-token']
return req
def handle_request(self, req):
"""
Entry point for proxy server.
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
try:
self.logger.set_statsd_prefix('proxy-server')
if req.content_length and req.content_length < 0:
self.logger.increment('errors')
return HTTPBadRequest(request=req,
body='Invalid Content-Length')
try:
if not check_utf8(req.path_info):
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
except UnicodeError:
self.logger.increment('errors')
return HTTPPreconditionFailed(
request=req, body='Invalid UTF8 or contains NULL')
try:
controller, path_parts = self.get_controller(req)
p = req.path_info
if isinstance(p, unicode):
p = p.encode('utf-8')
except APIVersionError:
self.logger.increment('errors')
return HTTPBadRequest(request=req)
except ValueError:
self.logger.increment('errors')
return HTTPNotFound(request=req)
if not controller:
self.logger.increment('errors')
return HTTPPreconditionFailed(request=req, body='Bad URL')
if self.deny_host_headers and \
req.host.split(':')[0] in self.deny_host_headers:
return HTTPForbidden(request=req, body='Invalid host header')
self.logger.set_statsd_prefix('proxy-server.' +
controller.server_type.lower())
controller = controller(self, **path_parts)
if 'swift.trans_id' not in req.environ:
# if this wasn't set by an earlier middleware, set it now
trans_id_suffix = self.trans_id_suffix
trans_id_extra = req.headers.get('x-trans-id-extra')
if trans_id_extra:
trans_id_suffix += '-' + trans_id_extra[:32]
trans_id = generate_trans_id(trans_id_suffix)
req.environ['swift.trans_id'] = trans_id
self.logger.txn_id = trans_id
req.headers['x-trans-id'] = req.environ['swift.trans_id']
controller.trans_id = req.environ['swift.trans_id']
self.logger.client_ip = get_remote_client(req)
try:
handler = getattr(controller, req.method)
getattr(handler, 'publicly_accessible')
except AttributeError:
allowed_methods = getattr(controller, 'allowed_methods', set())
return HTTPMethodNotAllowed(
request=req, headers={'Allow': ', '.join(allowed_methods)})
if 'swift.authorize' in req.environ:
# We call authorize before the handler, always. If authorized,
# we remove the swift.authorize hook so it isn't ever called
# again. If not authorized, we return the denial unless the
# controller's method indicates it'd like to gather more
# information and try again later.
resp = req.environ['swift.authorize'](req)
if not resp and not req.headers.get('X-Copy-From-Account') \
and not req.headers.get('Destination-Account'):
# No resp means authorized, no delayed recheck required.
del req.environ['swift.authorize']
else:
# Response indicates denial, but we might delay the denial
# and recheck later. If not delayed, return the error now.
if not getattr(handler, 'delay_denial', None):
return resp
# Save off original request method (GET, POST, etc.) in case it
# gets mutated during handling. This way logging can display the
# method the client actually sent.
req.environ['swift.orig_req_method'] = req.method
return handler(req)
except HTTPException as error_response:
return error_response
except (Exception, Timeout):
self.logger.exception(_('ERROR Unhandled exception in request'))
return HTTPServerError(request=req)
def sort_nodes(self, nodes):
'''
Sorts nodes in-place (and returns the sorted list) according to
the configured strategy. The default "sorting" is to randomly
shuffle the nodes. If the "timing" strategy is chosen, the nodes
are sorted according to the stored timing data.
'''
# In the case of timing sorting, shuffling ensures that close timings
# (i.e. within the rounding resolution) won't prefer one over another.
# Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
shuffle(nodes)
if self.sorting_method == 'timing':
now = time()
def key_func(node):
timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
return timing if expires > now else -1.0
nodes.sort(key=key_func)
elif self.sorting_method == 'affinity':
nodes.sort(key=self.read_affinity_sort_key)
return nodes
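# Hedged worked example (hypothetical timings): with node_timings of
# {'10.0.0.1': (0.005, now + 300), '10.0.0.2': (0.002, now - 1)}, the
# expired 10.0.0.2 entry falls back to the key -1.0 and sorts before the
# fresh 0.005 timing, while the initial shuffle breaks ties between nodes
# whose keys are equal.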
def set_node_timing(self, node, timing):
if self.sorting_method != 'timing':
return
now = time()
timing = round(timing, 3) # sort timings to the millisecond
self.node_timings[node['ip']] = (timing, now + self.timing_expiry)
def _error_limit_node_key(self, node):
return "{ip}:{port}/{device}".format(**node)
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time()
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.get(node_key)
if error_stats is None or 'errors' not in error_stats:
return False
if 'last_error' in error_stats and error_stats['last_error'] < \
now - self.error_suppression_interval:
self._error_limiting.pop(node_key, None)
return False
limited = error_stats['errors'] > self.error_suppression_limit
if limited:
self.logger.debug(
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node, msg):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`error_occurred`.
:param node: dictionary of node to error limit
:param msg: error message
"""
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.setdefault(node_key, {})
error_stats['errors'] = self.error_suppression_limit + 1
error_stats['last_error'] = time()
self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
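# Hedged worked example (hypothetical numbers): with
# error_suppression_limit = 10 and error_suppression_interval = 60,
# error_limit() stamps errors = 11, so error_limited() returns True until
# 60 seconds pass since last_error, after which the entry is dropped and
# the node becomes eligible again.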
def _incr_node_errors(self, node):
node_key = self._error_limit_node_key(node)
error_stats = self._error_limiting.setdefault(node_key, {})
error_stats['errors'] = error_stats.get('errors', 0) + 1
error_stats['last_error'] = time()
def error_occurred(self, node, msg):
"""
Handle logging and accounting of errors.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
self._incr_node_errors(node)
self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def iter_nodes(self, ring, partition, node_iter=None):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of nodes. If a
node yielded subsequently gets error limited, an extra node will be
yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
:param ring: ring to yield nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
node_iter = itertools.chain(part_nodes,
ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
primary_nodes = self.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
handoff_nodes = node_iter
nodes_left = self.request_node_count(len(primary_nodes))
log_handoffs_threshold = nodes_left - len(primary_nodes)
for node in primary_nodes:
if not self.error_limited(node):
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
handoffs = 0
for node in handoff_nodes:
if not self.error_limited(node):
handoffs += 1
if self.log_handoffs and handoffs > log_handoffs_threshold:
self.logger.increment('handoff_count')
self.logger.warning(
'Handoff requested (%d)' % handoffs)
if handoffs - log_handoffs_threshold == len(primary_nodes):
self.logger.increment('handoff_all_count')
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
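@staticmethod
def _demo_islice_split():
    # Hedged sketch (standalone): the eager primary/handoff split used in
    # iter_nodes() above. list(islice(...)) pulls the first N nodes out of
    # the iterator, leaving only handoffs for the second loop.
    node_iter = iter(['p1', 'p2', 'p3', 'h1', 'h2'])
    primaries = list(itertools.islice(node_iter, 3))
    handoffs = list(node_iter)
    assert primaries == ['p1', 'p2', 'p3'] and handoffs == ['h1', 'h2']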
def exception_occurred(self, node, typ, additional_info,
**kwargs):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
self._incr_node_errors(node)
if 'level' in kwargs:
log = functools.partial(self.logger.log, kwargs.pop('level'))
if 'exc_info' not in kwargs:
kwargs['exc_info'] = sys.exc_info()
else:
log = self.logger.exception
log(_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s'
' re: %(info)s'), {
'type': typ, 'ip': node['ip'], 'port':
node['port'], 'device': node['device'],
'info': additional_info
}, **kwargs)
def modify_wsgi_pipeline(self, pipe):
"""
Called during WSGI pipeline creation. Modifies the WSGI pipeline
context to ensure that mandatory middleware is present in the pipeline.
:param pipe: A PipelineWrapper object
"""
pipeline_was_modified = False
for filter_spec in reversed(required_filters):
filter_name = filter_spec['name']
if filter_name not in pipe:
afters = filter_spec.get('after_fn', lambda _junk: [])(pipe)
insert_at = 0
for after in afters:
try:
insert_at = max(insert_at, pipe.index(after) + 1)
except ValueError: # not in pipeline; ignore it
pass
self.logger.info(
'Adding required filter %s to pipeline at position %d' %
(filter_name, insert_at))
ctx = pipe.create_filter(filter_name)
pipe.insert_filter(ctx, index=insert_at)
pipeline_was_modified = True
if pipeline_was_modified:
self.logger.info("Pipeline was modified. New pipeline is \"%s\".",
pipe)
else:
self.logger.debug("Pipeline is \"%s\"", pipe)
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI proxy apps."""
conf = global_conf.copy()
conf.update(local_conf)
app = Application(conf)
app.check_config()
return app
| apache-2.0 |
tedelhourani/ansible | lib/ansible/galaxy/__init__.py | 93 | 2070 | ########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
''' This manages remote shared Ansible objects, mainly roles'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
# default_readme_template
# default_meta_template
class Galaxy(object):
''' Keeps global galaxy info '''
def __init__(self, options):
self.options = options
# self.options.roles_path needs to be a list and will be by default
roles_path = getattr(self.options, 'roles_path', [])
# cli option handling is responsible for making roles_path a list
self.roles_paths = roles_path
self.roles = {}
# load data path for resource usage
this_dir, this_filename = os.path.split(__file__)
type_path = 'container_enabled' if getattr(self.options, 'container_enabled', False) else 'default'
self.DATA_PATH = os.path.join(this_dir, 'data', type_path)
@property
def default_role_skeleton_path(self):
return self.DATA_PATH
def add_role(self, role):
self.roles[role.name] = role
def remove_role(self, role_name):
del self.roles[role_name]
| gpl-3.0 |
nprapps/factlist | gzip_assets.py | 17 | 1928 | #!/usr/bin/env python
"""
Given an input path and an output path, writes
gzipped versions of all files from the input path
to the output path.
If a file is not a gzippable type, it is copied
uncompressed.
"""
from fnmatch import fnmatch
import gzip
import os
import shutil
import sys
class FakeTime:
def time(self):
return 1261130520.0
# Hack to override gzip's time implementation
# See: http://stackoverflow.com/questions/264224/setting-the-gzip-timestamp-from-python
gzip.time = FakeTime()
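def _demo_deterministic_gzip():
    # Hedged sketch (standalone): with gzip.time patched above, the mtime
    # written into the gzip header is frozen, so compressing the same
    # payload twice yields byte-identical output.
    import io
    def _compress_once(payload):
        buf = io.BytesIO()
        f = gzip.GzipFile(fileobj=buf, mode='wb')
        f.write(payload)
        f.close()
        return buf.getvalue()
    assert _compress_once(b'hello') == _compress_once(b'hello')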
def is_compressable(filename, gzip_globs):
"""
Determine if a filename is a gzippable type
by comparing to a known list.
"""
return any([fnmatch(filename, glob) for glob in gzip_globs])
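def _demo_is_compressable():
    # Hedged sketch (hypothetical globs; the real patterns come from
    # gzip_types.txt): fnmatch-style matching as used above.
    globs = ['*.css', '*.js', '*.html']
    assert is_compressable('app.min.css', globs)
    assert not is_compressable('logo.png', globs)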
def compress(file_path):
"""
Gzip a single file in place.
"""
f_in = open(file_path, 'rb')
contents = f_in.readlines()
f_in.close()
f_out = gzip.open(file_path, 'wb')
f_out.writelines(contents)
f_out.close()
def main():
in_path = sys.argv[1]
out_path = sys.argv[2]
with open('gzip_types.txt') as f:
gzip_globs = [glob.strip() for glob in f]
# Folders
if os.path.isdir(in_path):
shutil.rmtree(out_path, ignore_errors=True)
shutil.copytree(in_path, out_path)
for path, dirs, files in os.walk(sys.argv[2]):
for filename in files:
# Is it a gzippable file type?
if not is_compressable(filename, gzip_globs):
continue
file_path = os.path.join(path, filename)
compress(file_path)
# Single files
else:
filename = os.path.split(in_path)[-1]
try:
os.remove(out_path)
except OSError:
pass
shutil.copy(in_path, out_path)
if not is_compressable(filename, gzip_globs):
return
compress(out_path)
if __name__ == '__main__':
main()
| mit |
bartveurink/bart.scripts | calcudoku.py | 1 | 2642 | #!/usr/bin/env python3
import itertools, operator
# n is the size of the calcudoku.
# total is the sum (target value) of the block.
# operator is the operation
# cel_string is a list of decimal numbers that encodes the shape of the block
# printitems is an array of the correct solutions.
class problem:
def Set(self,n,total,operator, cel_string):
self.n=n
self.total=total
self.operator=operator
self.cel_string=cel_string
self.printitems=[]
def printSet(self):
print("n=",self.n)
print("total=",self.total)
print("operator=",self.operator)
print("cel_string=",self.cel_string)
def checkduplicate(self,item):
if item in self.printitems:
return True
return False
def add(self,item):
item=sorted(item)
if not self.checkduplicate(item):
self.printitems.append(item)
def printoutput(self):
print(self.printitems)
def getsum(self,item):
result=0
if self.operator == '+':
result=sum(item)
elif self.operator == 'x':
result=1
for i in item:
result*=i
elif self.operator == '/':
itemcalc=sorted(item, reverse=True)
l=len(item)
i=0
while i < l:
if i == 0:
result=itemcalc[0]
else:
result/=itemcalc[i]
i+=1
elif self.operator == '-':
itemcalc=sorted(item, reverse=True)
l=len(item)
i=0
while i < l:
if i == 0:
result=itemcalc[0]
else:
result-=itemcalc[i]
i+=1
return result
def iscorrect(self,item):
if self.getsum(item) == self.total:
return True
return False
def Calculate(self):
cel_array=self.cel_string.split(',')
cel_array_bin= []
lines=0
vakjes=0
for i in cel_array:
cijfer=int(i)
countones=bin(cijfer).count("1")
vakjes+=countones
cel_array_bin.append(countones)
lines+=1
it=list(range(1,self.n+1))
#combinations=[]
#i=0
#while i < lines:
# combinations.append(itertools.combinations(it, cel_array_bin[i]))
# i += 1
if lines == 1:
comb1=itertools.combinations(it, cel_array_bin[0])
for item1 in comb1:
if self.iscorrect(item1):
self.add(item1)
elif lines == 2:
comb1=itertools.combinations(it, cel_array_bin[0])
for item1 in comb1:
comb2=itertools.combinations(it, cel_array_bin[1])
for item2 in comb2:
check=item1+item2
if self.iscorrect(check):
self.add(check)
elif lines == 3:
comb1=itertools.combinations(it, cel_array_bin[0])
for item1 in comb1:
comb2=itertools.combinations(it, cel_array_bin[1])
for item2 in comb2:
comb3=itertools.combinations(it, cel_array_bin[2])
for item3 in comb3:
check=item1+item2+item3
if self.iscorrect(check):
self.add(check)
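# Hedged usage sketch (hypothetical puzzle): a 4x4 grid with a cage spanning
# two rows whose cells sum to 7. cel_string holds one decimal bitmask per
# row; Calculate() only uses the number of set bits (cells) per row.
if __name__ == '__main__':
    p = problem()
    p.Set(4, 7, '+', '3,1')  # 0b11 -> two cells in row 1, 0b1 -> one in row 2
    p.Calculate()
    p.printoutput()          # prints the valid digit combinations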
| gpl-3.0 |
idovear/odoo | addons/account/account_analytic_line.py | 304 | 7914 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
from openerp.tools.translate import _
class account_analytic_line(osv.osv):
_inherit = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'general_account_id': fields.many2one('account.account', 'General Account', required=True, ondelete='restrict'),
'move_id': fields.many2one('account.move.line', 'Move Line', ondelete='cascade', select=True),
'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal', required=True, ondelete='restrict', select=True),
'code': fields.char('Code', size=8),
'ref': fields.char('Ref.'),
'currency_id': fields.related('move_id', 'currency_id', type='many2one', relation='res.currency', string='Account Currency', store=True, help="The related account currency if not equal to the company one.", readonly=True),
'amount_currency': fields.related('move_id', 'amount_currency', type='float', string='Amount Currency', store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
}
_defaults = {
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
}
_order = 'date desc'
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('from_date',False):
args.append(['date', '>=', context['from_date']])
if context.get('to_date',False):
args.append(['date','<=', context['to_date']])
return super(account_analytic_line, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
def _check_company(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.move_id and not l.account_id.company_id.id == l.move_id.account_id.company_id.id:
return False
return True
# Compute the cost based on the price type defined in the company's
# property_valuation_price_type property
def on_change_unit_amount(self, cr, uid, id, prod_id, quantity, company_id,
unit=False, journal_id=False, context=None):
if context==None:
context={}
if not journal_id:
j_ids = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=','purchase')])
journal_id = j_ids and j_ids[0] or False
if not journal_id or not prod_id:
return {}
product_obj = self.pool.get('product.product')
analytic_journal_obj =self.pool.get('account.analytic.journal')
product_price_type_obj = self.pool.get('product.price.type')
product_uom_obj = self.pool.get('product.uom')
j_id = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
prod = product_obj.browse(cr, uid, prod_id, context=context)
result = 0.0
if prod_id:
unit_obj = False
if unit:
unit_obj = product_uom_obj.browse(cr, uid, unit, context=context)
if not unit_obj or prod.uom_id.category_id.id != unit_obj.category_id.id:
unit = prod.uom_id.id
if j_id.type == 'purchase':
if not unit_obj or prod.uom_po_id.category_id.id != unit_obj.category_id.id:
unit = prod.uom_po_id.id
if j_id.type <> 'sale':
a = prod.property_account_expense.id
if not a:
a = prod.categ_id.property_account_expense_categ.id
if not a:
raise osv.except_osv(_('Error!'),
_('There is no expense account defined ' \
'for this product: "%s" (id:%d).') % \
(prod.name, prod.id,))
else:
a = prod.property_account_income.id
if not a:
a = prod.categ_id.property_account_income_categ.id
if not a:
raise osv.except_osv(_('Error!'),
_('There is no income account defined ' \
'for this product: "%s" (id:%d).') % \
(prod.name, prod_id,))
flag = False
# Compute based on pricetype
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','standard_price')], context=context)
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
if journal_id:
journal = analytic_journal_obj.browse(cr, uid, journal_id, context=context)
if journal.type == 'sale':
product_price_type_ids = product_price_type_obj.search(cr, uid, [('field','=','list_price')], context=context)
if product_price_type_ids:
pricetype = product_price_type_obj.browse(cr, uid, product_price_type_ids, context=context)[0]
# Take the company currency as the reference one
if pricetype.field == 'list_price':
flag = True
ctx = context.copy()
if unit:
# price_get() will respect a 'uom' in its context, in order
# to return a default price for those units
ctx['uom'] = unit
amount_unit = prod.price_get(pricetype.field, context=ctx)[prod.id]
prec = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
amount = amount_unit * quantity or 0.0
result = round(amount, prec)
if not flag:
result *= -1
return {'value': {
'amount': result,
'general_account_id': a,
'product_uom_id': unit
}
}
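# Hedged worked example (plain arithmetic, hypothetical numbers): with
# amount_unit = 12.345, quantity = 2 and prec = 2, amount rounds to 24.69;
# on a purchase journal (flag stays False) the stored result is -24.69,
# while a sale journal priced on list_price keeps +24.69.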
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
if context.get('account_id', False):
# account_id in context may also be pointing to an account.account.id
cr.execute('select name from account_analytic_account where id=%s', (context['account_id'],))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
return False
class res_partner(osv.osv):
""" Inherits partner and adds contract information in the partner form """
_inherit = 'res.partner'
_columns = {
'contract_ids': fields.one2many('account.analytic.account', \
'partner_id', 'Contracts', readonly=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
glenioborges/ibis | ibis/expr/tests/test_temporal.py | 9 | 5955 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ibis.common import IbisError
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.expr.temporal as T
from ibis.expr.tests.mocks import MockConnection
from ibis.compat import unittest
class TestFixedOffsets(unittest.TestCase):
def setUp(self):
self.con = MockConnection()
self.table = self.con.table('alltypes')
def test_upconvert(self):
cases = [
(T.day(14), 'w', T.week(2)),
(T.hour(72), 'd', T.day(3)),
(T.minute(240), 'h', T.hour(4)),
(T.second(360), 'm', T.minute(6)),
(T.second(3 * 86400), 'd', T.day(3)),
(T.millisecond(5000), 's', T.second(5)),
(T.microsecond(5000000), 's', T.second(5)),
(T.nanosecond(5000000000), 's', T.second(5)),
]
for offset, unit, expected in cases:
result = offset.to_unit(unit)
assert result.equals(expected)
def test_multiply(self):
offset = T.day(2)
assert (offset * 2).equals(T.day(4))
assert (offset * (-2)).equals(T.day(-4))
assert (3 * offset).equals(T.day(6))
assert ((-3) * offset).equals(T.day(-6))
def test_repr(self):
assert repr(T.day()) == '<Timedelta: 1 day>'
assert repr(T.day(2)) == '<Timedelta: 2 days>'
assert repr(T.year()) == '<Timedelta: 1 year>'
assert repr(T.month(2)) == '<Timedelta: 2 months>'
assert repr(T.second(40)) == '<Timedelta: 40 seconds>'
def test_cannot_upconvert(self):
cases = [
(T.day(), 'w'),
(T.hour(), 'd'),
(T.minute(), 'h'),
(T.second(), 'm'),
(T.second(), 'd'),
(T.millisecond(), 's'),
(T.microsecond(), 's'),
(T.nanosecond(), 's'),
]
for delta, target in cases:
self.assertRaises(IbisError, delta.to_unit, target)
def test_downconvert_second_parts(self):
K = 2
sec = T.second(K)
milli = T.millisecond(K)
micro = T.microsecond(K)
nano = T.nanosecond(K)
cases = [
(sec.to_unit('s'), T.second(K)),
(sec.to_unit('ms'), T.millisecond(K * 1000)),
(sec.to_unit('us'), T.microsecond(K * 1000000)),
(sec.to_unit('ns'), T.nanosecond(K * 1000000000)),
(milli.to_unit('ms'), T.millisecond(K)),
(milli.to_unit('us'), T.microsecond(K * 1000)),
(milli.to_unit('ns'), T.nanosecond(K * 1000000)),
(micro.to_unit('us'), T.microsecond(K)),
(micro.to_unit('ns'), T.nanosecond(K * 1000)),
(nano.to_unit('ns'), T.nanosecond(K))
]
self._check_cases(cases)
def test_downconvert_hours(self):
K = 2
offset = T.hour(K)
cases = [
(offset.to_unit('h'), T.hour(K)),
(offset.to_unit('m'), T.minute(K * 60)),
(offset.to_unit('s'), T.second(K * 3600)),
(offset.to_unit('ms'), T.millisecond(K * 3600000)),
(offset.to_unit('us'), T.microsecond(K * 3600000000)),
(offset.to_unit('ns'), T.nanosecond(K * 3600000000000))
]
self._check_cases(cases)
def test_downconvert_day(self):
K = 2
week = T.week(K)
day = T.day(K)
cases = [
(week.to_unit('d'), T.day(K * 7)),
(week.to_unit('h'), T.hour(K * 7 * 24)),
(day.to_unit('d'), T.day(K)),
(day.to_unit('h'), T.hour(K * 24)),
(day.to_unit('m'), T.minute(K * 1440)),
(day.to_unit('s'), T.second(K * 86400)),
(day.to_unit('ms'), T.millisecond(K * 86400000)),
(day.to_unit('us'), T.microsecond(K * 86400000000)),
(day.to_unit('ns'), T.nanosecond(K * 86400000000000))
]
self._check_cases(cases)
def test_combine_with_different_kinds(self):
cases = [
(T.day() + T.minute(), T.minute(1441)),
(T.second() + T.millisecond(10), T.millisecond(1010)),
(T.hour() + T.minute(5) + T.second(10), T.second(3910))
]
self._check_cases(cases)
def test_timedelta_generic_api(self):
cases = [
(T.timedelta(weeks=2), T.week(2)),
(T.timedelta(days=3), T.day(3)),
(T.timedelta(hours=4), T.hour(4)),
(T.timedelta(minutes=5), T.minute(5)),
(T.timedelta(seconds=6), T.second(6)),
(T.timedelta(milliseconds=7), T.millisecond(7)),
(T.timedelta(microseconds=8), T.microsecond(8)),
(T.timedelta(nanoseconds=9), T.nanosecond(9)),
]
self._check_cases(cases)
def _check_cases(self, cases):
for x, y in cases:
assert x.equals(y)
def test_offset_timestamp_expr(self):
c = self.table.i
x = T.timedelta(days=1)
expr = x + c
assert isinstance(expr, ir.TimestampArray)
assert isinstance(expr.op(), ops.TimestampDelta)
# test radd
expr = c + x
assert isinstance(expr, ir.TimestampArray)
assert isinstance(expr.op(), ops.TimestampDelta)
class TestTimedelta(unittest.TestCase):
def test_compound_offset(self):
# These are not yet allowed (e.g. 1 month + 1 hour)
pass
def test_offset_months(self):
pass
| apache-2.0 |
luceatnobis/youtube-dl | youtube_dl/extractor/nextmedia.py | 49 | 8975 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
clean_html,
get_element_by_class,
int_or_none,
parse_iso8601,
remove_start,
unified_timestamp,
)
class NextMediaIE(InfoExtractor):
IE_DESC = '蘋果日報'
_VALID_URL = r'https?://hk\.apple\.nextmedia\.com/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)'
_TESTS = [{
'url': 'http://hk.apple.nextmedia.com/realtime/news/20141108/53109199',
'md5': 'dff9fad7009311c421176d1ac90bfe4f',
'info_dict': {
'id': '53109199',
'ext': 'mp4',
'title': '【佔領金鐘】50外國領事議員撐場 讚學生勇敢香港有希望',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:28222b9912b6665a21011b034c70fcc7',
'timestamp': 1415456273,
'upload_date': '20141108',
}
}]
_URL_PATTERN = r'\{ url: \'(.+)\' \}'
def _real_extract(self, url):
news_id = self._match_id(url)
page = self._download_webpage(url, news_id)
return self._extract_from_nextmedia_page(news_id, url, page)
def _extract_from_nextmedia_page(self, news_id, url, page):
redirection_url = self._search_regex(
r'window\.location\.href\s*=\s*([\'"])(?P<url>(?!\1).+)\1',
page, 'redirection URL', default=None, group='url')
if redirection_url:
return self.url_result(compat_urlparse.urljoin(url, redirection_url))
title = self._fetch_title(page)
video_url = self._search_regex(self._URL_PATTERN, page, 'video url')
attrs = {
'id': news_id,
'title': title,
'url': video_url, # ext can be inferred from url
'thumbnail': self._fetch_thumbnail(page),
'description': self._fetch_description(page),
}
timestamp = self._fetch_timestamp(page)
if timestamp:
attrs['timestamp'] = timestamp
else:
attrs['upload_date'] = self._fetch_upload_date(url)
return attrs
def _fetch_title(self, page):
return self._og_search_title(page)
def _fetch_thumbnail(self, page):
return self._og_search_thumbnail(page)
def _fetch_timestamp(self, page):
dateCreated = self._search_regex('"dateCreated":"([^"]+)"', page, 'created time')
return parse_iso8601(dateCreated)
def _fetch_upload_date(self, url):
return self._search_regex(self._VALID_URL, url, 'upload date', group='date')
def _fetch_description(self, page):
return self._og_search_property('description', page)
class NextMediaActionNewsIE(NextMediaIE):
IE_DESC = '蘋果日報 - 動新聞'
_VALID_URL = r'https?://hk\.dv\.nextmedia\.com/actionnews/[^/]+/(?P<date>\d+)/(?P<id>\d+)/\d+'
_TESTS = [{
'url': 'http://hk.dv.nextmedia.com/actionnews/hit/20150121/19009428/20061460',
'md5': '05fce8ffeed7a5e00665d4b7cf0f9201',
'info_dict': {
'id': '19009428',
'ext': 'mp4',
'title': '【壹週刊】細10年男友偷食 50歲邵美琪再失戀',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:cd802fad1f40fd9ea178c1e2af02d659',
'timestamp': 1421791200,
'upload_date': '20150120',
}
}]
def _real_extract(self, url):
news_id = self._match_id(url)
actionnews_page = self._download_webpage(url, news_id)
article_url = self._og_search_url(actionnews_page)
article_page = self._download_webpage(article_url, news_id)
return self._extract_from_nextmedia_page(news_id, url, article_page)
class AppleDailyIE(NextMediaIE):
IE_DESC = '臺灣蘋果日報'
_VALID_URL = r'https?://(www|ent)\.appledaily\.com\.tw/[^/]+/[^/]+/[^/]+/(?P<date>\d+)/(?P<id>\d+)(/.*)?'
_TESTS = [{
'url': 'http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694',
'md5': 'a843ab23d150977cc55ef94f1e2c1e4d',
'info_dict': {
'id': '36354694',
'ext': 'mp4',
'title': '周亭羽走過摩鐵陰霾2男陪吃 九把刀孤寒看醫生',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:2acd430e59956dc47cd7f67cb3c003f4',
'upload_date': '20150128',
}
}, {
'url': 'http://www.appledaily.com.tw/realtimenews/article/strange/20150128/550549/%E4%B8%8D%E6%BB%BF%E8%A2%AB%E8%B8%A9%E8%85%B3%E3%80%80%E5%B1%B1%E6%9D%B1%E5%85%A9%E5%A4%A7%E5%AA%BD%E4%B8%80%E8%B7%AF%E6%89%93%E4%B8%8B%E8%BB%8A',
'md5': '86b4e9132d158279c7883822d94ccc49',
'info_dict': {
'id': '550549',
'ext': 'mp4',
'title': '不滿被踩腳 山東兩大媽一路打下車',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:175b4260c1d7c085993474217e4ab1b4',
'upload_date': '20150128',
}
}, {
'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003671',
'md5': '03df296d95dedc2d5886debbb80cb43f',
'info_dict': {
'id': '5003671',
'ext': 'mp4',
'title': '20正妹熱舞 《刀龍傳說Online》火辣上市',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:23c0aac567dc08c9c16a3161a2c2e3cd',
'upload_date': '20150128',
},
'skip': 'redirect to http://www.appledaily.com.tw/animation/',
}, {
# No thumbnail
'url': 'http://www.appledaily.com.tw/animation/realtimenews/new/20150128/5003673/',
'md5': 'b06182cd386ea7bc6115ec7ff0f72aeb',
'info_dict': {
'id': '5003673',
'ext': 'mp4',
'title': '半夜尿尿 好像會看到___',
'description': 'md5:61d2da7fe117fede148706cdb85ac066',
'upload_date': '20150128',
},
'expected_warnings': [
'video thumbnail',
],
'skip': 'redirect to http://www.appledaily.com.tw/animation/',
}, {
'url': 'http://www.appledaily.com.tw/appledaily/article/supplement/20140417/35770334/',
'md5': 'eaa20e6b9df418c912d7f5dec2ba734d',
'info_dict': {
'id': '35770334',
'ext': 'mp4',
'title': '咖啡占卜測 XU裝熟指數',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'md5:7b859991a6a4fedbdf3dd3b66545c748',
'upload_date': '20140417',
},
}, {
'url': 'http://www.appledaily.com.tw/actionnews/appledaily/7/20161003/960588/',
'only_matching': True,
}, {
# Redirected from http://ent.appledaily.com.tw/enews/article/entertainment/20150128/36354694
'url': 'http://ent.appledaily.com.tw/section/article/headline/20150128/36354694',
'only_matching': True,
}]
_URL_PATTERN = r'\{url: \'(.+)\'\}'
def _fetch_title(self, page):
return (self._html_search_regex(r'<h1 id="h1">([^<>]+)</h1>', page, 'news title', default=None) or
self._html_search_meta('description', page, 'news title'))
def _fetch_thumbnail(self, page):
return self._html_search_regex(r"setInitialImage\(\'([^']+)'\)", page, 'video thumbnail', fatal=False)
def _fetch_timestamp(self, page):
return None
def _fetch_description(self, page):
return self._html_search_meta('description', page, 'news description')
class NextTVIE(InfoExtractor):
IE_DESC = '壹電視'
_VALID_URL = r'https?://(?:www\.)?nexttv\.com\.tw/(?:[^/]+/)+(?P<id>\d+)'
_TEST = {
'url': 'http://www.nexttv.com.tw/news/realtime/politics/11779671',
'info_dict': {
'id': '11779671',
'ext': 'mp4',
'title': '「超收稅」近4千億! 藍議員籲發消費券',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 1484825400,
'upload_date': '20170119',
'view_count': int,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h1[^>]*>([^<]+)</h1>', webpage, 'title')
data = self._hidden_inputs(webpage)
video_url = data['ntt-vod-src-detailview']
date_str = get_element_by_class('date', webpage)
timestamp = unified_timestamp(date_str + '+0800') if date_str else None
view_count = int_or_none(remove_start(
clean_html(get_element_by_class('click', webpage)), '點閱:'))
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': data.get('ntt-vod-img-src'),
'timestamp': timestamp,
'view_count': view_count,
}
| unlicense |
glwagner/py2Periodic | tests/twoLayerQG/testBathymetryTwoLayerQG.py | 1 | 1572 | import time, sys
import numpy as np
import matplotlib.pyplot as plt
sys.path.append('../../')
from py2Periodic.physics import twoLayerQG
from numpy import pi
params = {
'f0' : 1.0e-4,
'Lx' : 1.0e6,
'beta' : 1.5e-11,
'defRadius' : 1.5e4,
'H1' : 500.0,
'H2' : 2000.0,
'U1' : 2.5e-2,
'U2' : 0.0,
'bottomDrag' : 1.0e-7,
'nx' : 128,
'ny' : 128,
'dt' : 1.0e3,
'visc' : 0.0e8,
'viscOrder' : 4.0,
'timeStepper': 'RK4',
'nThreads' : 4,
'useFilter' : True,
'flatBottom' : True,
}
# Create the two-layer model
qg = twoLayerQG.model(**params)
qg.describe_model()
# Initial condition:
Ro = 1.0e-3
f0 = 1.0e-4
q1 = Ro*f0*np.random.standard_normal(qg.physVarShape)
q2 = Ro*f0*np.random.standard_normal(qg.physVarShape)
qg.set_q1_and_q2(q1, q2)
# Bathymetry
R = qg.Lx/20
(x0, y0) = (qg.Lx/2.0, qg.Ly/2.0)
h = 0.1*qg.H2*np.exp( (-(qg.x-x0)**2.0 - (qg.y-y0)**2.0)/(2.0*R**2.0) )
qg.set_bathymetry(h)
# Run a loop
nt = 1e3
for ii in np.arange(0, 1e3):
qg.step_nSteps(nSteps=nt, dnLog=nt)
qg.update_state_variables()
fig = plt.figure('Perturbation vorticity', figsize=(8, 8)); plt.clf()
plt.subplot(221); plt.imshow(qg.q1)
plt.subplot(222); plt.imshow(qg.q2)
plt.subplot(223); plt.imshow(np.abs(qg.soln[0:qg.ny//2-1, :, 0]))
plt.subplot(224); plt.imshow(np.abs(qg.soln[0:qg.ny//2-1, :, 1]))
plt.pause(0.01), plt.draw()
print("Close the plot to end the program")
plt.show()
| mit |
Neozaru/depot_tools | third_party/fancy_urllib/__init__.py | 155 | 14277 | #!/usr/bin/env python
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software
# Foundation; All Rights Reserved
"""A HTTPSConnection/Handler with additional proxy and cert validation features.
In particular, monkey patches in Python r74203 to provide support for CONNECT
proxies and adds SSL cert validation if the ssl module is present.
"""
__author__ = "{frew,nick.johnson}@google.com (Fred Wulff and Nick Johnson)"
import base64
import httplib
import logging
import re
import socket
import urllib2
from urllib import splittype
from urllib import splituser
from urllib import splitpasswd
class InvalidCertificateException(httplib.HTTPException):
"""Raised when a certificate is provided with an invalid hostname."""
def __init__(self, host, cert, reason):
"""Constructor.
Args:
host: The hostname the connection was made to.
      cert: The SSL certificate (as a dictionary) the host returned.
      reason: A human-readable explanation of why the certificate is invalid.
    """
httplib.HTTPException.__init__(self)
self.host = host
self.cert = cert
self.reason = reason
def __str__(self):
return ('Host %s returned an invalid certificate (%s): %s\n'
'To learn more, see '
'http://code.google.com/appengine/kb/general.html#rpcssl' %
(self.host, self.reason, self.cert))
def can_validate_certs():
"""Return True if we have the SSL package and can validate certificates."""
try:
import ssl
return True
except ImportError:
return False
def _create_fancy_connection(tunnel_host=None, key_file=None,
cert_file=None, ca_certs=None):
# This abomination brought to you by the fact that
# the HTTPHandler creates the connection instance in the middle
# of do_open so we need to add the tunnel host to the class.
class PresetProxyHTTPSConnection(httplib.HTTPSConnection):
"""An HTTPS connection that uses a proxy defined by the enclosing scope."""
def __init__(self, *args, **kwargs):
httplib.HTTPSConnection.__init__(self, *args, **kwargs)
self._tunnel_host = tunnel_host
if tunnel_host:
logging.debug("Creating preset proxy https conn: %s", tunnel_host)
self.key_file = key_file
self.cert_file = cert_file
self.ca_certs = ca_certs
try:
import ssl
if self.ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
except ImportError:
pass
def _tunnel(self):
self._set_hostport(self._tunnel_host, None)
logging.info("Connecting through tunnel to: %s:%d",
self.host, self.port)
self.send("CONNECT %s:%d HTTP/1.0\r\n\r\n" % (self.host, self.port))
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
(_, code, message) = response._read_status()
if code != 200:
self.close()
raise socket.error, "Tunnel connection failed: %d %s" % (
code, message.strip())
while True:
line = response.fp.readline()
if line == "\r\n":
break
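    # Wire-level sketch of the exchange _tunnel() performs (illustrative
    # only; host and port are placeholders):
    #   >> CONNECT www.example.com:443 HTTP/1.0
    #   >>
    #   << HTTP/1.0 200 Connection established
    #   << (any further headers, then a blank line)
    # Once the terminating blank line is consumed, the raw socket carries
    # the tunneled bytes and connect() hands it to the SSL wrapper.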
def _get_valid_hosts_for_cert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName'] if x[0].lower() == 'dns']
else:
# Return a list of commonName fields
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _validate_certificate_hostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._get_valid_hosts_for_cert(cert)
for host in hosts:
# Convert the glob-style hostname expression (eg, '*.google.com') into a
# valid regular expression.
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
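    # Example of the glob-to-regex conversion above (illustrative):
    #   '*.google.com' -> r'[^.]*\.google\.com', which matches
    #   'www.google.com' (case-insensitively) but neither 'www.evil.com'
    #   nor the bare 'google.com'.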
def connect(self):
# TODO(frew): When we drop support for <2.6 (in the far distant future),
# change this to socket.create_connection.
self.sock = _create_connection((self.host, self.port))
if self._tunnel_host:
self._tunnel()
# ssl and FakeSocket got deprecated. Try for the new hotness of wrap_ssl,
# with fallback.
try:
import ssl
self.sock = ssl.wrap_socket(self.sock,
keyfile=self.key_file,
certfile=self.cert_file,
ca_certs=self.ca_certs,
cert_reqs=self.cert_reqs)
if self.cert_reqs & ssl.CERT_REQUIRED:
cert = self.sock.getpeercert()
          hostname = self.host.split(':', 1)[0]  # strip any :port suffix
if not self._validate_certificate_hostname(cert, hostname):
raise InvalidCertificateException(hostname, cert,
'hostname mismatch')
except ImportError:
ssl = socket.ssl(self.sock,
keyfile=self.key_file,
certfile=self.cert_file)
self.sock = httplib.FakeSocket(self.sock, ssl)
return PresetProxyHTTPSConnection
# Here to end of _create_connection copied wholesale from Python 2.6's socket.py
_GLOBAL_DEFAULT_TIMEOUT = object()
def _create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used.
"""
msg = "getaddrinfo returns an empty list"
host, port = address
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.connect(sa)
return sock
except socket.error, msg:
if sock is not None:
sock.close()
raise socket.error, msg
class FancyRequest(urllib2.Request):
"""A request that allows the use of a CONNECT proxy."""
def __init__(self, *args, **kwargs):
urllib2.Request.__init__(self, *args, **kwargs)
self._tunnel_host = None
self._key_file = None
self._cert_file = None
self._ca_certs = None
def set_proxy(self, host, type):
saved_type = None
if self.get_type() == "https" and not self._tunnel_host:
self._tunnel_host = self.get_host()
saved_type = self.get_type()
urllib2.Request.set_proxy(self, host, type)
if saved_type:
# Don't set self.type, we want to preserve the
# type for tunneling.
self.type = saved_type
def set_ssl_info(self, key_file=None, cert_file=None, ca_certs=None):
self._key_file = key_file
self._cert_file = cert_file
self._ca_certs = ca_certs
class FancyProxyHandler(urllib2.ProxyHandler):
"""A ProxyHandler that works with CONNECT-enabled proxies."""
# Taken verbatim from /usr/lib/python2.5/urllib2.py
def _parse_proxy(self, proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
      # We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.
      # and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
def proxy_open(self, req, proxy, type):
# This block is copied wholesale from Python2.6 urllib2.
# It is idempotent, so the superclass method call executes as normal
# if invoked.
orig_type = req.get_type()
proxy_type, user, password, hostport = self._parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if user and password:
user_pass = "%s:%s" % (urllib2.unquote(user), urllib2.unquote(password))
creds = base64.b64encode(user_pass).strip()
# Later calls overwrite earlier calls for the same header
req.add_header("Proxy-authorization", "Basic " + creds)
hostport = urllib2.unquote(hostport)
req.set_proxy(hostport, proxy_type)
# This condition is the change
if orig_type == "https":
return None
return urllib2.ProxyHandler.proxy_open(self, req, proxy, type)
class FancyHTTPSHandler(urllib2.HTTPSHandler):
"""An HTTPSHandler that works with CONNECT-enabled proxies."""
def do_open(self, http_class, req):
# Intentionally very specific so as to opt for false negatives
# rather than false positives.
try:
return urllib2.HTTPSHandler.do_open(
self,
_create_fancy_connection(req._tunnel_host,
req._key_file,
req._cert_file,
req._ca_certs),
req)
except urllib2.URLError, url_error:
try:
import ssl
if (type(url_error.reason) == ssl.SSLError and
url_error.reason.args[0] == 1):
# Display the reason to the user. Need to use args for python2.5
# compat.
raise InvalidCertificateException(req.host, '',
url_error.reason.args[1])
except ImportError:
pass
raise url_error
# We have to implement this so that we persist the tunneling behavior
# through redirects.
class FancyRedirectHandler(urllib2.HTTPRedirectHandler):
"""A redirect handler that persists CONNECT-enabled proxy information."""
def redirect_request(self, req, *args, **kwargs):
new_req = urllib2.HTTPRedirectHandler.redirect_request(
self, req, *args, **kwargs)
# Same thing as in our set_proxy implementation, but in this case
# we"ve only got a Request to work with, so it was this or copy
# everything over piecemeal.
#
# Note that we do not persist tunneling behavior from an http request
# to an https request, because an http request does not set _tunnel_host.
#
# Also note that in Python < 2.6, you will get an error in
# FancyHTTPSHandler.do_open() on an https urllib2.Request that uses an http
# proxy, since the proxy type will be set to http instead of https.
# (FancyRequest, and urllib2.Request in Python >= 2.6 set the proxy type to
# https.) Such an urllib2.Request could result from this redirect
    # if you are redirecting from an http request (since an http request
# does not have _tunnel_host set, and thus you will not set the proxy
# in the code below), and if you have defined a proxy for https in, say,
# FancyProxyHandler, and that proxy has type http.
if hasattr(req, "_tunnel_host") and isinstance(new_req, urllib2.Request):
if new_req.get_type() == "https":
if req._tunnel_host:
# req is proxied, so copy the proxy info.
new_req._tunnel_host = new_req.get_host()
new_req.set_proxy(req.host, "https")
else:
# req is not proxied, so just make sure _tunnel_host is defined.
new_req._tunnel_host = None
new_req.type = "https"
if hasattr(req, "_key_file") and isinstance(new_req, urllib2.Request):
# Copy the auxiliary data in case this or any further redirect is https
new_req._key_file = req._key_file
new_req._cert_file = req._cert_file
new_req._ca_certs = req._ca_certs
return new_req
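# Illustrative wiring of the pieces above (an editorial sketch; the URL and
# the cert-bundle path are placeholders, not part of this module):
#   import urllib2
#   req = FancyRequest("https://www.example.com/")
#   req.set_ssl_info(ca_certs="/path/to/cacerts.txt")
#   opener = urllib2.build_opener(FancyProxyHandler(), FancyHTTPSHandler(),
#                                 FancyRedirectHandler())
#   opener.open(req)  # tunnels via CONNECT when an https proxy is set and
#                     # validates the server certificate against ca_certs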
| bsd-3-clause |
kontais/EFI-MIPS | ToolKit/cmds/python/Lib/test/skipped/test_cookielib.py | 6 | 68006 | # -*- coding: utf-8 -*-
"""Tests for cookielib.py."""
import re, os, time
from unittest import TestCase
from test import test_support
class DateTimeTests(TestCase):
def test_time2isoz(self):
from cookielib import time2isoz
base = 1019227000
day = 24*3600
self.assertEquals(time2isoz(base), "2002-04-19 14:36:40Z")
self.assertEquals(time2isoz(base+day), "2002-04-20 14:36:40Z")
self.assertEquals(time2isoz(base+2*day), "2002-04-21 14:36:40Z")
self.assertEquals(time2isoz(base+3*day), "2002-04-22 14:36:40Z")
az = time2isoz()
bz = time2isoz(500000)
for text in (az, bz):
self.assert_(re.search(r"^\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ$", text),
"bad time2isoz format: %s %s" % (az, bz))
def test_http2time(self):
from cookielib import http2time
def parse_date(text):
return time.gmtime(http2time(text))[:6]
self.assertEquals(parse_date("01 Jan 2001"), (2001, 1, 1, 0, 0, 0.0))
# this test will break around year 2070
self.assertEquals(parse_date("03-Feb-20"), (2020, 2, 3, 0, 0, 0.0))
# this test will break around year 2048
self.assertEquals(parse_date("03-Feb-98"), (1998, 2, 3, 0, 0, 0.0))
def test_http2time_formats(self):
from cookielib import http2time, time2isoz
# test http2time for supported dates. Test cases with 2 digit year
# will probably break in year 2044.
tests = [
'Thu, 03 Feb 1994 00:00:00 GMT', # proposed new HTTP format
'Thursday, 03-Feb-94 00:00:00 GMT', # old rfc850 HTTP format
'Thursday, 03-Feb-1994 00:00:00 GMT', # broken rfc850 HTTP format
'03 Feb 1994 00:00:00 GMT', # HTTP format (no weekday)
'03-Feb-94 00:00:00 GMT', # old rfc850 (no weekday)
'03-Feb-1994 00:00:00 GMT', # broken rfc850 (no weekday)
'03-Feb-1994 00:00 GMT', # broken rfc850 (no weekday, no seconds)
'03-Feb-1994 00:00', # broken rfc850 (no weekday, no seconds, no tz)
'03-Feb-94', # old rfc850 HTTP format (no weekday, no time)
'03-Feb-1994', # broken rfc850 HTTP format (no weekday, no time)
'03 Feb 1994', # proposed new HTTP format (no weekday, no time)
# A few tests with extra space at various places
' 03 Feb 1994 0:00 ',
' 03-Feb-1994 ',
]
test_t = 760233600 # assume broken POSIX counting of seconds
result = time2isoz(test_t)
expected = "1994-02-03 00:00:00Z"
self.assertEquals(result, expected,
"%s => '%s' (%s)" % (test_t, result, expected))
for s in tests:
t = http2time(s)
t2 = http2time(s.lower())
t3 = http2time(s.upper())
self.assert_(t == t2 == t3 == test_t,
"'%s' => %s, %s, %s (%s)" % (s, t, t2, t3, test_t))
def test_http2time_garbage(self):
from cookielib import http2time
for test in [
'',
'Garbage',
'Mandag 16. September 1996',
'01-00-1980',
'01-13-1980',
'00-01-1980',
'32-01-1980',
'01-01-1980 25:00:00',
'01-01-1980 00:61:00',
'01-01-1980 00:00:62',
]:
self.assert_(http2time(test) is None,
"http2time(%s) is not None\n"
"http2time(test) %s" % (test, http2time(test))
)
class HeaderTests(TestCase):
def test_parse_ns_headers(self):
from cookielib import parse_ns_headers
# quotes should be stripped
expected = [[('foo', 'bar'), ('expires', 2209069412L), ('version', '0')]]
for hdr in [
'foo=bar; expires=01 Jan 2040 22:23:32 GMT',
'foo=bar; expires="01 Jan 2040 22:23:32 GMT"',
]:
self.assertEquals(parse_ns_headers([hdr]), expected)
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from cookielib import parse_ns_headers
# Cookie with name 'expires'
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEquals(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
from cookielib import join_header_words
joined = join_header_words([[("foo", None), ("bar", "baz")]])
self.assertEquals(joined, "foo; bar=baz")
self.assertEquals(join_header_words([[]]), "")
def test_split_header_words(self):
from cookielib import split_header_words
tests = [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
(" foo= ", [[("foo", "")]]),
(" foo=", [[("foo", "")]]),
(" foo= ; ", [[("foo", "")]]),
(" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]),
("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
# doesn't really matter if this next fails, but it works ATM
("foo= bar=baz", [[("foo", "bar=baz")]]),
("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]),
("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]),
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
]
for arg, expect in tests:
try:
result = split_header_words([arg])
except:
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
result = "(error -- traceback follows)\n\n%s" % f.getvalue()
self.assertEquals(result, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result))
def test_roundtrip(self):
from cookielib import split_header_words, join_header_words
tests = [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
("foo=", 'foo=""'),
("foo=bar bar=baz", "foo=bar; bar=baz"),
("foo=bar;bar=baz", "foo=bar; bar=baz"),
('foo bar baz', "foo; bar; baz"),
(r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'),
('foo,,,bar', 'foo, bar'),
('foo=bar,bar=baz', 'foo=bar, bar=baz'),
('text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'),
('foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'),
(r'Basic realm="\"foo\\\\bar\""',
r'Basic; realm="\"foo\\\\bar\""')
]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
self.assertEquals(res, expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'
""" % (arg, expect, res, input))
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
import mimetools, StringIO
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
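# Illustrative use (editorial): FakeResponse(["Set-Cookie: foo=bar"],
# "http://www.example.com/") quacks enough like urllib2's response object
# for CookieJar.extract_cookies(), which only needs the .info() headers.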
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from urllib2 import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
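# Illustrative round trip (an editorial sketch, not one of the tests;
# assumes cookielib is imported): the return value is the Cookie: header
# built from cookies stored by *earlier* interactions, captured before the
# new Set-Cookie headers are absorbed:
#   jar = cookielib.CookieJar()
#   interact_netscape(jar, "http://www.acme.com/", "spam=eggs")  # -> ""
#   interact_netscape(jar, "http://www.acme.com/")               # -> "spam=eggs"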
class FileCookieJarTests(TestCase):
def test_lwp_valueless_cookie(self):
# cookies with no value should be saved and loaded consistently
from cookielib import LWPCookieJar
filename = test_support.TESTFN
c = LWPCookieJar()
interact_netscape(c, "http://www.acme.com/", 'boo')
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
try:
c.save(filename, ignore_discard=True)
c = LWPCookieJar()
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEqual(c._cookies["www.acme.com"]["/"]["boo"].value, None)
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third-party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
    ## - Default path: if no path parameter is given, the path defaults to the
    ##   path in the request-uri up to, but not including, the last '/'. Note
    ##   that this is entirely different from what the spec says (see the
    ##   example after this list).
    ## - Commas and other delimiters: Netscape just parses until the next ';'.
    ##   This means it will allow commas etc. inside values (and yes, both
    ##   commas and equals commonly appear in the cookie value). This also
    ##   means that if you fold multiple Set-Cookie header fields into one
    ##   comma-separated list, it'll be a headache to parse (at least my head
    ##   starts hurting every time I think of that code).
    ## - Expires: You'll get all sorts of date formats in the expires,
    ##   including empty expires attributes ("expires="). Be as flexible as you
    ##   can, and certainly don't expect the weekday to be there; if you can't
    ##   parse it, just ignore it and pretend it's a session cookie.
    ## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
    ##   just the 7 special TLDs listed in their spec. And folks rely on
    ##   that...
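    ## Example of the default-path rule above (editorial illustration): a
    ## Netscape cookie set with no path attribute in a response for
    ## http://example.com/a/b/c gets path "/a/b", i.e. everything up to,
    ## but not including, the last '/'; test_default_path below covers this.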
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import cookielib, urllib2
pol = cookielib.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = urllib2.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_value(self):
from cookielib import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# name, and by cookielib as a missing value
filename = test_support.TESTFN
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]["eggs"]
self.assert_(cookie.value is None)
self.assertEquals(cookie.name, "eggs")
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
self.assert_(cookie.value is None)
self.assertEquals(cookie.name, '"spam"')
self.assertEquals(lwp_cookie_str(cookie), (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0'))
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
self.assertEquals(
repr(c),
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
)
self.assertEquals(interact_netscape(c, "http://www.acme.com/foo/"),
'"spam"; eggs')
def test_ns_parser(self):
from cookielib import CookieJar, DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
self.assertEquals(cookie.domain, ".acme.com")
self.assert_(cookie.domain_specified)
self.assertEquals(cookie.port, DEFAULT_HTTP_PORT)
self.assert_(not cookie.port_specified)
# case is preserved
self.assert_(cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
self.assertEquals(cookie.domain, "www.acme.com")
self.assert_(not cookie.domain_specified)
self.assertEquals(cookie.port, "80,8080")
self.assert_(cookie.port_specified)
cookie = c._cookies["www.acme.com"]["/"]["nini"]
self.assert_(cookie.port is None)
self.assert_(not cookie.port_specified)
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
        spam = c._cookies["www.acme.com"]["/"]["spam"]
self.assert_(foo.expires is None)
self.assert_(spam.expires is None)
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from cookielib import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_('expires' in cookies)
self.assert_('version' in cookies)
def test_expires(self):
from cookielib import time2netscape, CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
self.assertEquals(len(c), 1)
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEquals(len(c), 1)
self.assert_('spam="bar"' in h and "foo" not in h)
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
self.assertEquals(len(c), 3)
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
self.assertEquals(len(c), 1)
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
self.assertEquals(len(c), 2)
c.clear_session_cookies()
self.assertEquals(len(c), 1)
self.assert_('spam="bar"' in h)
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from cookielib import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
self.assert_("/blah/" in c._cookies["www.acme.com"])
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
self.assert_("/blah/rhubarb/" in c._cookies["www.acme.com"])
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
self.assert_("/" in c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
self.assert_("/blah" in c._cookies["www.acme.com"])
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
self.assert_("/blah/rhubarb" in c._cookies["www.acme.com"])
def test_escape_path(self):
from cookielib import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assertEquals(escape_path(arg), result)
def test_request_path(self):
from urllib2 import Request
from cookielib import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assertEquals(request_path(req), "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assertEquals(request_path(req), "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assertEquals(request_path(req), "/")
def test_request_port(self):
from urllib2 import Request
from cookielib import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
self.assertEquals(request_port(req), "1234")
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
self.assertEquals(request_port(req), DEFAULT_HTTP_PORT)
def test_request_host(self):
from urllib2 import Request
from cookielib import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#self.assertEquals(request_host(req), "www.acme.com")
self.assertEquals(request_host(req), "1.1.1.1")
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
self.assertEquals(request_host(req), "www.acme.com")
# not actually sure this one is valid Request object, so maybe should
# remove test for no host in url in request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
self.assertEquals(request_host(req), "www.acme.com")
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
self.assertEquals(request_host(req), "www.acme.com")
def test_is_HDN(self):
from cookielib import is_HDN
self.assert_(is_HDN("foo.bar.com"))
self.assert_(is_HDN("1foo2.3bar4.5com"))
self.assert_(not is_HDN("192.168.1.1"))
self.assert_(not is_HDN(""))
self.assert_(not is_HDN("."))
self.assert_(not is_HDN(".foo.bar.com"))
self.assert_(not is_HDN("..foo"))
self.assert_(not is_HDN("foo."))
def test_reach(self):
from cookielib import reach
self.assertEquals(reach("www.acme.com"), ".acme.com")
self.assertEquals(reach("acme.com"), "acme.com")
self.assertEquals(reach("acme.local"), ".local")
self.assertEquals(reach(".local"), ".local")
self.assertEquals(reach(".com"), ".com")
self.assertEquals(reach("."), ".")
self.assertEquals(reach(""), "")
self.assertEquals(reach("192.168.0.1"), "192.168.0.1")
def test_domain_match(self):
from cookielib import domain_match, user_domain_match
self.assert_(domain_match("192.168.1.1", "192.168.1.1"))
self.assert_(not domain_match("192.168.1.1", ".168.1.1"))
self.assert_(domain_match("x.y.com", "x.Y.com"))
self.assert_(domain_match("x.y.com", ".Y.com"))
self.assert_(not domain_match("x.y.com", "Y.com"))
self.assert_(domain_match("a.b.c.com", ".c.com"))
self.assert_(not domain_match(".c.com", "a.b.c.com"))
self.assert_(domain_match("example.local", ".local"))
self.assert_(not domain_match("blah.blah", ""))
self.assert_(not domain_match("", ".rhubarb.rhubarb"))
self.assert_(domain_match("", ""))
self.assert_(user_domain_match("acme.com", "acme.com"))
self.assert_(not user_domain_match("acme.com", ".acme.com"))
self.assert_(user_domain_match("rhubarb.acme.com", ".acme.com"))
self.assert_(user_domain_match("www.rhubarb.acme.com", ".acme.com"))
self.assert_(user_domain_match("x.y.com", "x.Y.com"))
self.assert_(user_domain_match("x.y.com", ".Y.com"))
self.assert_(not user_domain_match("x.y.com", "Y.com"))
self.assert_(user_domain_match("y.com", "Y.com"))
self.assert_(not user_domain_match(".y.com", "Y.com"))
self.assert_(user_domain_match(".y.com", ".Y.com"))
self.assert_(user_domain_match("x.y.com", ".com"))
self.assert_(not user_domain_match("x.y.com", "com"))
self.assert_(not user_domain_match("x.y.com", "m"))
self.assert_(not user_domain_match("x.y.com", ".m"))
self.assert_(not user_domain_match("x.y.com", ""))
self.assert_(not user_domain_match("x.y.com", "."))
self.assert_(user_domain_match("192.168.1.1", "192.168.1.1"))
# not both HDNs, so must string-compare equal to match
self.assert_(not user_domain_match("192.168.1.1", ".168.1.1"))
self.assert_(not user_domain_match("192.168.1.1", "."))
# empty string is a special case
self.assert_(not user_domain_match("192.168.1.1", ""))
def test_wrong_domain(self):
# Cookies whose effective request-host name does not domain-match the
# domain are rejected.
# XXX far from complete
from cookielib import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/",
'foo=bar; domain=friendly.org; Version="1"')
self.assertEquals(len(c), 0)
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain
# should all get accepted, as should .acme.com, acme.com and no domain
# for 2-component domains like acme.com.
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
self.assertEquals(len(c), 1)
self.assertEquals(c._cookies["foo.net"]["/"]["ns"].value, "bar")
self.assertEquals(interact_netscape(c, "http://foo.net/"), "ns=bar")
# *will* be returned to any other domain (unlike RFC 2965)...
self.assertEquals(interact_netscape(c, "http://www.foo.net/"),
"ns=bar")
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
self.assertEquals(interact_netscape(c, "http://www.foo.net/"), "")
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
self.assertEquals(len(c), 3)
self.assertEquals(c._cookies[".foo.net"]["/foo"]["spam1"].value,
"eggs")
self.assertEquals(c._cookies[".foo.net"]["/foo/bar"]["spam2"].value,
"eggs")
self.assertEquals(interact_netscape(c, "http://foo.net/foo/bar/"),
"spam2=eggs; spam1=eggs; ns=bar")
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
self.assertEquals(len(c), 3)
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## self.assertEquals(len(c), 2)
self.assertEquals(len(c), 4)
def test_two_component_domain_rfc2965(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
self.assertEquals(len(c), 1)
self.assertEquals(c._cookies["foo.net"]["/"]["foo"].value, "bar")
self.assertEquals(interact_2965(c, "http://foo.net/"),
"$Version=1; foo=bar")
# won't be returned to any other domain (because domain was implied)
self.assertEquals(interact_2965(c, "http://www.foo.net/"), "")
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
self.assertEquals(len(c), 1)
self.assertEquals(interact_2965(c, "http://foo.net/foo"),
"$Version=1; foo=bar")
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
self.assertEquals(c._cookies[".foo.net"]["/foo/"]["spam"].value,
"eggs")
self.assertEquals(len(c), 2)
self.assertEquals(interact_2965(c, "http://foo.net/foo/"),
"$Version=1; foo=bar")
self.assertEquals(interact_2965(c, "http://www.foo.net/foo/"),
'$Version=1; spam=eggs; $Domain="foo.net"')
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
self.assertEquals(len(c), 2)
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
self.assertEquals(len(c), 3)
def test_domain_allow(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 0)
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEquals(len(c), 2)
        # ... and check it doesn't get returned
c.add_cookie_header(req)
self.assert_(not req.has_header("Cookie"))
def test_domain_block(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 0)
p = pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
self.assert_((req.has_header("Cookie") and
req.has_header("Cookie2")))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
self.assertEquals(len(c), 1)
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
self.assertEquals(len(c), 2)
        # ... and check it doesn't get returned
c.add_cookie_header(req)
self.assert_(not req.has_header("Cookie"))
def test_secure(self):
from cookielib import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
self.assert_(
not c._cookies["www.acme.com"]["/"]["foo1"].secure,
"non-secure cookie registered secure")
self.assert_(
c._cookies["www.acme.com"]["/"]["foo2"].secure,
"secure cookie registered non-secure")
def test_quote_cookie_value(self):
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
self.assertEquals(h, r'$Version=1; foo=\\b\"a\"r')
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
self.assertEquals(len(c), 1)
c.add_cookie_header(req)
self.assert_(req.has_header("Cookie"))
def test_domain_mirror(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assert_("Domain" not in h,
"absent domain returned with domain present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
self.assert_('$Domain=".bar.com"' in h, "domain not returned")
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
self.assert_('$Domain="bar.com"' in h, "domain not returned")
def test_path_mirror(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assert_("Path" not in h,
"absent path returned with path present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
self.assert_('$Path="/"' in h, "path not returned")
def test_port_mirror(self):
from cookielib import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
self.assert_("Port" not in h,
"absent port returned with port present")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
self.assert_(re.search("\$Port([^=]|$)", h),
"port with no value not returned with no value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
self.assert_('$Port="80"' in h,
"port with single value not returned with single value")
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
self.assert_('$Port="80,8080"' in h,
"port with multiple values not returned with multiple "
"values")
def test_no_return_comment(self):
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
self.assert_(
"Comment" not in h,
"Comment or CommentURL cookie-attributes returned to server")
def test_Cookie_iterator(self):
from cookielib import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/",
"foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/",
"foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
for i in range(4):
i = 0
for c in cs:
self.assert_(isinstance(c, Cookie))
self.assertEquals(c.version, versions[i])
self.assertEquals(c.name, names[i])
self.assertEquals(c.domain, domains[i])
self.assertEquals(c.path, paths[i])
i = i + 1
def test_parse_ns_headers(self):
from cookielib import parse_ns_headers
# missing domain value (invalid cookie)
self.assertEquals(
parse_ns_headers(["foo=bar; path=/; domain"]),
[[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
)
# invalid expires value
self.assertEquals(
parse_ns_headers(["foo=bar; expires=Foo Bar 12 33:22:11 2000"]),
[[("foo", "bar"), ("expires", None), ("version", "0")]]
)
# missing cookie value (valid cookie)
self.assertEquals(
parse_ns_headers(["foo"]),
[[("foo", None), ("version", "0")]]
)
# shouldn't add version if header is empty
self.assertEquals(parse_ns_headers([""]), [])
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from cookielib import CookieJar
from urllib2 import Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
self.assertEquals(len(c), 0)
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
self.assert_(cookie.expires is None)
class LWPCookieTests(TestCase):
# Tests taken from libwww-perl, with a few modifications and additions.
def test_netscape_example_1(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = time.localtime()[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
self.assertEqual(req.get_header("Cookie"), "CUSTOMER=WILE_E_COYOTE")
self.assertEqual(req.get_header("Cookie2"), '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assert_("PART_NUMBER=ROCKET_LAUNCHER_0001" in h and
"CUSTOMER=WILE_E_COYOTE" in h)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assert_("PART_NUMBER=ROCKET_LAUNCHER_0001" in h and
"CUSTOMER=WILE_E_COYOTE" in h and
"SHIPPING=FEDEX" not in h)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
self.assert_(("PART_NUMBER=ROCKET_LAUNCHER_0001" in h and
"CUSTOMER=WILE_E_COYOTE" in h and
h.startswith("SHIPPING=FEDEX;")))
def test_netscape_example_2(self):
from cookielib import CookieJar
from urllib2 import Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
self.assertEquals(req.get_header("Cookie"),
"PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
self.assert_(re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie")))
def test_ietf_example_1(self):
from cookielib import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
self.assert_(not cookie)
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
self.assert_(re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie))
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
self.assert_(re.search(r'^\$Version="?1"?;', cookie))
self.assert_(re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
'\s*\$Path="\/acme"', cookie))
self.assert_(re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
self.assert_(
re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
"WILE_E_COYOTE" in cookie)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from cookielib import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
self.assert_(
re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie))
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
self.assert_("Rocket_Launcher_0001" in cookie and
"Riding_Rocket_0023" not in cookie)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
from cookielib import DefaultCookiePolicy, LWPCookieJar
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
self.assert_(not c)
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
self.assertEquals(len(c), 1)
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
self.assertEquals(len(c), 1)
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
self.assertEquals(len(c), 2)
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
self.assertEquals(len(c), 2)
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
self.assertEquals(len(c), 2)
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
self.assertEquals(len(c), 3)
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
self.assertEquals(len(c), 3)
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
self.assertEquals(len(c), 4)
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
self.assertEquals(len(c), 5)
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
self.assertEquals(len(c), 6)
# save and restore
filename = test_support.TESTFN
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
self.assertEquals(old, repr(c))
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anewå/æøå",
'bar=baz; path="/foo/"; version=1')
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
self.assert_("foo=bar" in cookie and version_re.search(cookie))
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anewå/æøå")
self.assert_(not cookie)
# unicode URL doesn't raise exception
cookie = interact_2965(c, u"http://www.acme.com/\xfc")
def test_mozilla(self):
# Save / load Mozilla/Netscape cookie file format.
from cookielib import MozillaCookieJar, DefaultCookiePolicy
year_plus_one = time.localtime()[0] + 1
filename = test_support.TESTFN
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(c, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(c, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def save_and_restore(cj, ignore_discard):
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
self.assertEquals(len(new_c), 6) # none discarded
self.assert_("name='foo1', value='bar'" in repr(new_c))
new_c = save_and_restore(c, False)
self.assertEquals(len(new_c), 4) # 2 of them discarded on save
self.assert_("name='foo1', value='bar'" in repr(new_c))
def test_netscape_misc(self):
# Some additional Netscape cookies tests.
from cookielib import CookieJar
from urllib2 import Request
c = CookieJar()
headers = []
req = Request("http://foo.bar.acme.com/foo")
# Netscape allows a host part that contains dots
headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
# It also allows the domain to be the same as the host, without adding a
# leading dot to the domain. Values should not be quoted even if strange
# characters are used in the cookie value.
headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
req = Request("http://foo.bar.acme.com/foo")
c.add_cookie_header(req)
self.assert_(
"PART_NUMBER=3,4" in req.get_header("Cookie") and
"Customer=WILE_E_COYOTE" in req.get_header("Cookie"))
def test_intranet_domains_2965(self):
# Test handling of local intranet hostnames without a dot.
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://example/",
"foo1=bar; PORT; Discard; Version=1;")
cookie = interact_2965(c, "http://example/",
'foo2=bar; domain=".local"; Version=1')
self.assert_("foo1=bar" in cookie)
interact_2965(c, "http://example/", 'foo3=bar; Version=1')
cookie = interact_2965(c, "http://example/")
self.assert_("foo2=bar" in cookie and len(c) == 3)
def test_intranet_domains_ns(self):
from cookielib import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965 = False))
interact_netscape(c, "http://example/", "foo1=bar")
cookie = interact_netscape(c, "http://example/",
'foo2=bar; domain=.local')
self.assertEquals(len(c), 2)
self.assert_("foo1=bar" in cookie)
cookie = interact_netscape(c, "http://example/")
self.assert_("foo2=bar" in cookie)
self.assertEquals(len(c), 2)
def test_empty_path(self):
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request
# Test for empty path
# Broken web-server ORION/1.3.38 returns to the client a response like
#
# Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=
#
# i.e. with Path set to nothing.
# In this case, extract_cookies() must default the cookie's path to / (root)
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
headers = []
req = Request("http://www.ants.com/")
headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=")
res = FakeResponse(headers, "http://www.ants.com/")
c.extract_cookies(res, req)
req = Request("http://www.ants.com/")
c.add_cookie_header(req)
self.assertEquals(req.get_header("Cookie"),
"JSESSIONID=ABCDERANDOM123")
self.assertEquals(req.get_header("Cookie2"), '$Version="1"')
# missing path in the request URI
req = Request("http://www.ants.com:8080")
c.add_cookie_header(req)
self.assertEquals(req.get_header("Cookie"),
"JSESSIONID=ABCDERANDOM123")
self.assertEquals(req.get_header("Cookie2"), '$Version="1"')
def test_session_cookies(self):
from cookielib import CookieJar
from urllib2 import Request
year_plus_one = time.localtime()[0] + 1
# Check session cookies are deleted properly by
# CookieJar.clear_session_cookies method
req = Request('http://www.perlmeister.com/scripts')
headers = []
headers.append("Set-Cookie: s1=session;Path=/scripts")
headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
"Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" %
year_plus_one)
headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, "
"02-Feb-%d 23:24:20 GMT" % year_plus_one)
headers.append("Set-Cookie: s2=session;Path=/scripts;"
"Domain=.perlmeister.com")
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
# How many session/permanent cookies do we have?
counter = {"session_after": 0,
"perm_after": 0,
"session_before": 0,
"perm_before": 0}
for cookie in c:
key = "%s_before" % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
# How many now?
for cookie in c:
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
self.assert_(not (
# a permanent cookie got lost accidently
counter["perm_after"] != counter["perm_before"] or
# a session cookie hasn't been cleared
counter["session_after"] != 0 or
# we didn't have session cookies in the first place
counter["session_before"] == 0))
def test_main(verbose=None):
from test import test_sets
test_support.run_unittest(
DateTimeTests,
HeaderTests,
CookieTests,
FileCookieJarTests,
LWPCookieTests,
)
if __name__ == "__main__":
test_main(verbose=True)
| bsd-3-clause |
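As a standalone illustration of the Path-prefix rule these tests exercise (a cookie is only returned for request URLs that its Path attribute prefixes), here is a hedged sketch; the SimpleResponse helper is hypothetical, standing in for the suite's FakeResponse:

import mimetools, StringIO
from cookielib import CookieJar, DefaultCookiePolicy
from urllib2 import Request

class SimpleResponse:
    """Minimal stand-in for a urllib2 response object (hypothetical helper)."""
    def __init__(self, url, set_cookie2):
        self._url = url
        self._msg = mimetools.Message(
            StringIO.StringIO("Set-Cookie2: %s\r\n" % set_cookie2))
    def info(self):
        return self._msg
    def geturl(self):
        return self._url

jar = CookieJar(DefaultCookiePolicy(rfc2965=True))
req = Request("http://www.example.com/acme/")
jar.extract_cookies(
    SimpleResponse("http://www.example.com/acme/",
                   'Part="p1"; Version="1"; Path="/acme"'), req)
probe = Request("http://www.example.com/other/")
jar.add_cookie_header(probe)
assert not probe.has_header("Cookie")  # "/acme" is not a prefix of "/other/"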
sburnett/seattle | deploymentscripts/attic/old/reinstall.py | 1 | 5481 | """
<Program Name>
reinstall.py
<Started>
January 2009
<Author>
Carter Butaud
<Purpose>
Removes an old installation of seattle from
a Linux computer and installs a fresh version.
"""
import os
import sys
import shutil
import uninstall
class ReinstallError(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
def error(text):
"""
<Purpose>
Prints an error message, formatting it according to the
agreed upon standard.
<Arguments>
text:
The string to be printed.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
None.
"""
print "ERROR: " + text
def warn(text):
"""
<Purpose>
Prints a warning message in the agreed upon format.
<Arguments>
text:
The text to be printed.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
None.
"""
print "WARNING: " + text
def remove_old(seattle_dir):
"""
<Purpose>
Runs the uninstaller to delete the seattle starter line
from the crontab, then deletes the seattle install
directory.
<Arguments>
seattle_dir:
The directory that seattle is installed in, which
will be deleted.
<Exceptions>
None, but prints error messages if seattle_dir doesn't exist.
<Side Effects>
None.
<Returns>
None.
"""
try:
uninstall.uninstall(1)
if not os.path.exists(seattle_dir):
raise ReinstallError("Couldn't find old installation directory.")
shutil.rmtree(seattle_dir)
except ReinstallError, e:
error(e.parameter)
def check_cleaned(seattle_dir):
"""
<Purpose>
Checks to make sure that the crontab doesn't
have the seattle starter line in it and that
seattle_dir is indeed gone. If so, produces
no output. Else, prints error messages.
<Arguments>
seattle_dir:
The directory that originally contained seattle on
the system.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
None.
"""
# First, check that the crontab line is gone
crontab_f = os.popen("crontab -l")
found = False
for line in crontab_f:
if uninstall.STARTER_SCRIPT_NAME in line:
found = True
if found:
error("Failed to remove starter line from crontab.")
# Next, check that the installation directory is gone
if os.path.exists(seattle_dir):
error("Failed to delete seattle directory.")
def install_new(parent_dir):
"""
<Purpose>
Copies the installer package from the current directory to
the parent of the intended install directory, unzips it,
and runs the installer.
<Arguments>
parent_dir:
The directory where the install directory should be located.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
None.
"""
installer_name = "seattle_linux.tgz"
install_dir = "seattle_repy"
try:
if not os.path.exists(parent_dir):
raise ReinstallError("Intended parent directory could not be found.")
# Copy the installer package to the parent directory and unzip it
shutil.copyfile(installer_name, parent_dir + "/" + installer_name)
orig_dir = os.getcwd()
# Navigate to the parent directory so that the package is untarred
# in the right place.
os.chdir(parent_dir)
os.popen("tar -xzf " + installer_name)
os.chdir(orig_dir)
# Run the installer script, passing it the install directory
# as its argument and telling it to run silently.
os.popen("python " + parent_dir + "/" + install_dir + "/seattleinstaller.py " +
parent_dir + "/" + install_dir + " -s")
except ReinstallError, e:
# Have ReinstallErrors printed to stdout
error(e.parameter)
def check_installed():
"""
<Purpose>
Checks that the program was installed correctly, printing
errors if it isn't.
<Arguments>
None.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
None.
"""
crontab_f = os.popen("crontab -l")
found = False
for line in crontab_f:
if uninstall.STARTER_SCRIPT_NAME in line:
found = True
if not found:
error("Could not find starter line in crontab.")
def reinstall(parent_dir):
"""
<Purpose>
Removes an old installation of seattle located in parent_dir,
and installs a fresh copy in the same directory from an
installer package. Prints errors if any of the above steps fail.
<Arguments>
parent_dir:
The parent directory of the old seattle installation directory
(so it should contain a directory called seattle_repy).
<Exceptions>
None.
<Side Effects>
None.
<Returns>
None.
"""
install_dir = "seattle_repy"
remove_old(parent_dir + "/" + install_dir)
check_cleaned(parent_dir + "/" + install_dir)
install_new(parent_dir)
check_installed()
def main():
if len(sys.argv) < 2:
print "Usage: python reinstall.py parent_dir"
else:
reinstall(sys.argv[1])
if __name__ == "__main__":
main()
| mit |
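The crontab scan above appears twice (in check_cleaned and check_installed); a hedged refactoring sketch, using subprocess instead of the deprecated os.popen, could factor it into one helper (STARTER_SCRIPT_NAME is the constant the original imports from the uninstall module):

import subprocess
import uninstall

def starter_line_present():
    """Return True if the seattle starter line is in the current crontab."""
    proc = subprocess.Popen(["crontab", "-l"], stdout=subprocess.PIPE)
    output = proc.communicate()[0]
    return uninstall.STARTER_SCRIPT_NAME in output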
gregswift/ansible | lib/ansible/parsing/yaml/constructor.py | 27 | 4933 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.constructor import Constructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode
from ansible.vars.unsafe_proxy import wrap_var
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class AnsibleConstructor(Constructor):
def __init__(self, file_name=None):
self._ansible_file_name = file_name
super(AnsibleConstructor, self).__init__()
def construct_yaml_map(self, node):
data = AnsibleMapping()
yield data
value = self.construct_mapping(node)
data.update(value)
data.ansible_pos = self._node_position_info(node)
def construct_mapping(self, node, deep=False):
# Most of this is from yaml.constructor.SafeConstructor. We replicate
# it here so that we can warn users when they have duplicate dict keys
# (pyyaml silently allows overwriting keys)
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
self.flatten_mapping(node)
mapping = AnsibleMapping()
# Add our extra information to the returned value
mapping.ansible_pos = self._node_position_info(node)
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise ConstructorError("while constructing a mapping", node.start_mark,
"found unacceptable key (%s)" % exc, key_node.start_mark)
if key in mapping:
display.warning(u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}). Using last defined value only.'.format(key, *mapping.ansible_pos))
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
def construct_yaml_str(self, node, unsafe=False):
# Override the default string handling function
# to always return unicode objects
value = self.construct_scalar(node)
ret = AnsibleUnicode(value)
ret.ansible_pos = self._node_position_info(node)
if unsafe:
ret = wrap_var(ret)
return ret
def construct_yaml_seq(self, node):
data = AnsibleSequence()
yield data
data.extend(self.construct_sequence(node))
data.ansible_pos = self._node_position_info(node)
def construct_yaml_unsafe(self, node):
return self.construct_yaml_str(node, unsafe=True)
def _node_position_info(self, node):
# the line number where the previous token has ended (plus empty lines)
# Add one so that the first line is line 1 rather than line 0
column = node.start_mark.column + 1
line = node.start_mark.line + 1
# in some cases, we may have pre-read the data and then
# passed it to the load() call for YAML, in which case we
# want to override the default datasource (which would be
# '<string>') to the actual filename we read in
datasource = self._ansible_file_name or node.start_mark.name
return (datasource, line, column)
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:map',
AnsibleConstructor.construct_yaml_map)
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:python/dict',
AnsibleConstructor.construct_yaml_map)
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:str',
AnsibleConstructor.construct_yaml_str)
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:python/unicode',
AnsibleConstructor.construct_yaml_str)
AnsibleConstructor.add_constructor(
u'tag:yaml.org,2002:seq',
AnsibleConstructor.construct_yaml_seq)
AnsibleConstructor.add_constructor(
u'!unsafe',
AnsibleConstructor.construct_yaml_unsafe)
| gpl-3.0 |
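A short usage sketch (not part of the source) of what AnsibleConstructor adds over the stock SafeConstructor: position metadata on every parsed object, and a warning on duplicate mapping keys. AnsibleLoader, which wires this constructor in, is assumed here to live in the sibling loader module:

from ansible.parsing.yaml.loader import AnsibleLoader

data = AnsibleLoader("foo: 1\nbar: {a: 1, a: 2}\n",
                     file_name="play.yml").get_single_data()
print(data.ansible_pos)    # e.g. ('play.yml', 1, 1)
print(data['bar']['a'])    # 2 -- last defined value wins, after a warning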
GeographicaGS/Elcano-iepg | www-srv/src/varengine/example2.py | 1 | 2042 | import common.blockfunctions
reload(common.blockfunctions)
# import common.helpers as h
# reload(h)
import varengine.varengine as e
reload(e)
import maplex.maplex as m
reload(m)
import maplex.maplexmodel
reload(maplex.maplexmodel)
# import varengine.varenginemodel
# reload(varengine.varenginemodel)
import common.arrayops as arrayops
reload(arrayops)
import common.const
reload(common.const)
import common.config as config
reload(config)
from collections import OrderedDict
# import common.datacache as dc
# # reload(dc)
"""
Classes:
DataStore: storage of data in symbolic form (drivers: PostgreSQL)
DataCache: storage of postprocessed data (drivers: Numpy)
DataSet: set of variables
Variable: variable
DataInterface: interface to origin data sources (drivers: PostgreSQL)
"""
iepg = e.DataSet("iepg")
energy = e.Variable("energy", True, "float", dataSet=iepg)
information = e.Variable("information", True, "float", dataSet=iepg)
dataInterface = e.DataInterfacePostgreSql()
dataInterface.readAll("iepg_data_redux.iepg_data", "code", "date_in", "date_out")
iepg.loadVariableDataFromDataInterface(dataInterface)
energy.addValue("XBAP", 1990, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 1995, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 2000, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 2005, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 2010, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 2011, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 2012, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
energy.addValue("XBAP", 2013, "blockfunc::common.blockfunctions.blockFunctionLumpSum", None)
dataStore = e.DataStorePostgreSql("store","store")
dataStore.remove()
dataStore.setup()
dataStore.registerDataSet(iepg)
energy.setupCache(e.DataCacheNumpy)
energy.cacheData()
| gpl-3.0 |
SeanXP/Nao-Robot | python/motion/moveToward.py | 1 | 3534 | #-*- coding: utf-8 -*-
from naoqi import ALProxy
import time
import argparse # command-line argument parsing
""" moveToward()函数用法示例
void ALMotionProxy::moveToward(const float& x, const float& y, const float& theta)
控制机器人以给定的速率移动(归一化的单位,并非m/s,与moveToward()的区别),参考系w为FRAME_ROBOT;非阻塞函数。
参数x, X轴的速率(+1表示正方向的最大速率, -1为反方向)。向X轴负方向则使用负速率。
参数y, Y轴的速率。
参数theta, 绕Z轴旋转的速率。(单位:弧度radians/s)逆时针为正,顺时针为负。
重载函数:
void ALMotionProxy::moveToward(const float& x, const float& y, const float& theta, const AL::ALValue moveConfig)
本程序中,先以正常频率(frequency = 1.0)行走,然后切换为慢频率(frequency = 0.5)行走;
"""
def main(robotIP, PORT=9559):
motionProxy = ALProxy("ALMotion", robotIP, PORT)
# Wake up robot
motionProxy.wakeUp()
# motionProxy.moveInit()
# Example showing the use of moveToward
# The parameters are fractions of the maximums
# Here we are asking for full speed forwards, i.e. x = 1.0
x = 1.0
y = 0.0
theta = 0.0
frequency = 1.0
motionProxy.moveToward(x, y, theta, [["Frequency", frequency]])
# If we don't send another command, he will move forever
# Lets make him slow down(step length) and turn after 3 seconds
time.sleep(3) # let it move for 3 seconds
x = 0.5
theta = 0.6
motionProxy.moveToward(x, y, theta, [["Frequency", frequency]])
# Lets make him slow down(frequency) after 3 seconds
time.sleep(3)
frequency = 0.5 # frequency 1.0 is the fastest; 0.5 means walking at 50% of the maximum frequency
motionProxy.moveToward(x, y, theta, [["Frequency", frequency]])
# Lets make him stop after 3 seconds
time.sleep(3)
motionProxy.stopMove()
# Detailed move config options
##################################
#
# TARGET VELOCITY
X = 1.0
Y = 0.0
Theta = 0.0
Frequency = 1.0
# Defined a limp walk
try:
motionProxy.moveToward(X, Y, Theta,[["Frequency", Frequency],
# LEFT FOOT
["LeftStepHeight", 0.02],
["LeftTorsoWy", 5.0*almath.TO_RAD],
# RIGHT FOOT
["RightStepHeight", 0.005],
["RightMaxStepX", 0.001],
["RightMaxStepFrequency", 0.0],
["RightTorsoWx", -7.0*almath.TO_RAD],
["RightTorsoWy", 5.0*almath.TO_RAD]] )
except Exception, errorMsg:
print str(errorMsg)
print "This example is not allowed on this robot."
exit()
motionProxy.stopMove()
# Note that at any time, you can use a moveTo command
# to run a precise distance. The last command received,
# of velocity or position always wins
# Go to rest position
motionProxy.rest()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="192.168.1.100",
help="Robot ip address")
parser.add_argument("--port", type=int, default=9559,
help="Robot port number")
args = parser.parse_args()
main(args.ip, args.port)
| gpl-2.0 |
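The closing comment above notes that a moveTo (position control) command can interrupt moveToward (velocity control) at any time, with the last command winning. A hedged companion sketch (robot IP is the script's default and assumed here):

from naoqi import ALProxy

motion = ALProxy("ALMotion", "192.168.1.100", 9559)
motion.wakeUp()
motion.moveToward(1.0, 0.0, 0.0)  # velocity control: walk forward at full speed
motion.moveTo(0.5, 0.0, 0.0)      # position control takes over: walk exactly 0.5 m (blocking)
motion.rest()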
gustavo-momente/word2vec_variations | run_dics.py | 1 | 3464 | #!/usr/bin/python
__author__ = 'vilelag'
import os
import argparse
import subprocess
from multiprocessing import Pool
import sys
def create_parsers():
#parser for the main program
parser = argparse.ArgumentParser(description='Run the compute-accuracy test over dictionaries created from word2vec.'
'\nThis file is outdated; using generate_logs.py is preferable.')
parser.add_argument('-ca', metavar='<compute-accuracy_exec>', default='./word2vec/compute-accuracy',
help='Path to the compute-accuracy executable')
parser.add_argument('-test', metavar='<train_file>', default='./word2vec/questions-words.txt',
help='Use text data from <file> to test the model')
parser.add_argument('-folder', metavar='<folder>', default='./OutFolder',
help='Folder where all the dictionaries to be tested are saved')
parser.add_argument('-threads', metavar='<int>', nargs=1, default=[8], type=int,
help='Use <int> threads (default 8)')
parser.add_argument('-nc', nargs='?', const='1', default=None,
help='If present, will calculate the number of cases and exit')
parser.add_argument('-lf', metavar='<folder>', default='./LogFolder',
help='Folder where the logs will be saved')
parser.add_argument('-p', nargs='?', default=None, const=1,
help='If present the process will be persistent, will keep checking "<folder>" until stopped')
parser.add_argument('-t', metavar='<int>', nargs=1, default=[30000], type=int,
help='Threshold is used to reduce the vocabulary of the model for fast approximate evaluation '
'(0 = off, otherwise typical value is 30000, default=30000)')
return parser
def create_output_folder(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def get_file_list(dir):
fl = []
for f in os.listdir(dir):
if f.endswith(".bin"):
fl.append(dir+'/'+f)
return fl
def run_ca(file):
try:
flag = data[file]
except KeyError:
flag = 0
if flag == 0:
out = subprocess.check_output([ca, file, threshold], stdin=open(test, 'r'))
log_f = os.path.splitext(os.path.basename(file))[0]
log_f = log_folder+'/'+log_f+'.log'
with open(log_f, "w") as text_file:
text_file.write(os.path.basename(file)+'\n')
text_file.write(out)
print file
def main():
global data
global n_dics
fl = get_file_list(folder)
n_dics = len(fl)
if args['nc'] is not None:
print '{0} dictionaries to test\n'.format(n_dics)
sys.exit(0)
pool = Pool(threads)
pool.map(run_ca, fl)
for file in fl:
data[file] = 1
parser = create_parsers()
args = vars(parser.parse_args())
data = dict()
max_run = 10000
cur_run = 0
ca = args['ca']
test = args['test']
folder = args['folder']
threads = args['threads'][0]
threshold = str(args['t'][0])
log_folder = args['lf']
create_output_folder(log_folder)
n_dics = int()
if args['p'] is not None:
while True:
main()
cur_run += 1
if (cur_run%1000) == 0:
print('Persistent mode: on\n')
if cur_run >= max_run:
print 'Max iterations arrived, process exit\n'
exit(0)
else:
main() | mit |
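For reference, each run_ca() call above amounts to the following one-off invocation outside the pool (a sketch using the script's default paths; the model filename is illustrative):

import subprocess
out = subprocess.check_output(
    ["./word2vec/compute-accuracy", "./OutFolder/model.bin", "30000"],
    stdin=open("./word2vec/questions-words.txt", "r"))
open("./LogFolder/model.log", "w").write("model.bin\n" + out)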
dronefly/dronefly.github.io | flask/lib/python2.7/site-packages/sqlalchemy/dialects/postgresql/ranges.py | 80 | 4814 | # Copyright (C) 2013-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .base import ischema_names
from ... import types as sqltypes
__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE')
class RangeOperators(object):
"""
This mixin provides functionality for the Range Operators
listed in Table 9-44 of the `postgres documentation`__ for Range
Functions and Operators. It is used by all the range types
provided in the ``postgres`` dialect and can likely be used for
any range types you create yourself.
__ http://www.postgresql.org/docs/devel/static/functions-range.html
No extra support is provided for the Range Functions listed in
Table 9-45 of the postgres documentation. For these, the normal
:func:`~sqlalchemy.sql.expression.func` object should be used.
.. versionadded:: 0.8.2 Support for Postgresql RANGE operations.
"""
class comparator_factory(sqltypes.Concatenable.Comparator):
"""Define comparison operations for range types."""
def __ne__(self, other):
"Boolean expression. Returns true if two ranges are not equal"
return self.expr.op('<>')(other)
def contains(self, other, **kw):
"""Boolean expression. Returns true if the right hand operand,
which can be an element or a range, is contained within the
column.
"""
return self.expr.op('@>')(other)
def contained_by(self, other):
"""Boolean expression. Returns true if the column is contained
within the right hand operand.
"""
return self.expr.op('<@')(other)
def overlaps(self, other):
"""Boolean expression. Returns true if the column overlaps
(has points in common with) the right hand operand.
"""
return self.expr.op('&&')(other)
def strictly_left_of(self, other):
"""Boolean expression. Returns true if the column is strictly
left of the right hand operand.
"""
return self.expr.op('<<')(other)
__lshift__ = strictly_left_of
def strictly_right_of(self, other):
"""Boolean expression. Returns true if the column is strictly
right of the right hand operand.
"""
return self.expr.op('>>')(other)
__rshift__ = strictly_right_of
def not_extend_right_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend right of the range in the operand.
"""
return self.expr.op('&<')(other)
def not_extend_left_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend left of the range in the operand.
"""
return self.expr.op('&>')(other)
def adjacent_to(self, other):
"""Boolean expression. Returns true if the range in the column
is adjacent to the range in the operand.
"""
return self.expr.op('-|-')(other)
def __add__(self, other):
"""Range expression. Returns the union of the two ranges.
Will raise an exception if the resulting range is not
contiguous.
"""
return self.expr.op('+')(other)
class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql INT4RANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'INT4RANGE'
ischema_names['int4range'] = INT4RANGE
class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql INT8RANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'INT8RANGE'
ischema_names['int8range'] = INT8RANGE
class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql NUMRANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'NUMRANGE'
ischema_names['numrange'] = NUMRANGE
class DATERANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql DATERANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'DATERANGE'
ischema_names['daterange'] = DATERANGE
class TSRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql TSRANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'TSRANGE'
ischema_names['tsrange'] = TSRANGE
class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
"""Represent the Postgresql TSTZRANGE type.
.. versionadded:: 0.8.2
"""
__visit_name__ = 'TSTZRANGE'
ischema_names['tstzrange'] = TSTZRANGE
| apache-2.0 |
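A usage sketch (against a hypothetical bookings table) of the comparator methods RangeOperators provides once a range column is declared:

from sqlalchemy import Column, Integer, MetaData, Table, select
from sqlalchemy.dialects.postgresql import INT4RANGE

metadata = MetaData()
bookings = Table('bookings', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('during', INT4RANGE))

# Emits: SELECT ... WHERE bookings.during @> %(during_1)s
stmt = select([bookings]).where(bookings.c.during.contains(5))
# Emits: SELECT ... WHERE bookings.during && bookings.during
stmt2 = select([bookings]).where(bookings.c.during.overlaps(bookings.c.during))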
yoer/hue | desktop/core/src/desktop/log/formatter.py | 32 | 1620 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from pytz import UnknownTimeZoneError, datetime, timezone
class Formatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
try:
tz = timezone(os.environ['TZ'])
except (KeyError, UnknownTimeZoneError):
tz = None
try:
ct = datetime.datetime.fromtimestamp(record.created, tz=tz)
except (OverflowError, TypeError, ValueError):
# Fallback to original.
return super(Formatter, self).formatTime(record, datefmt=datefmt)
if datefmt:
s = ct.strftime(datefmt)
else:
t = ct.strftime("%Y-%m-%d %H:%M:%S")
s = "%s,%03d" % (t, record.msecs)
return s
class MessageOnlyFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
return ''
def format(self, record):
return record.getMessage()
| apache-2.0 |
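A hedged usage sketch attaching the TZ-aware Formatter above to a handler; the import path is assumed from this file's location, and setting TZ in the environment is what drives the timezone lookup in formatTime:

import logging
import os
from desktop.log.formatter import Formatter  # import path assumed

os.environ['TZ'] = 'America/Los_Angeles'     # read by formatTime() above
handler = logging.StreamHandler()
handler.setFormatter(Formatter('%(asctime)s %(levelname)s %(message)s'))
logging.getLogger('hue.example').addHandler(handler)
logging.getLogger('hue.example').warning('timestamps now render in TZ')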
402231466/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/importlib/basehook.py | 608 | 1396 | from javascript import JSObject
from browser import window
import urllib.request
class TempMod:
def __init__(self, name):
self.name=name
#define my custom import hook (just to see if it get called etc).
class BaseHook:
def __init__(self, fullname=None, path=None):
self._fullname=fullname
self._path=path # we don't are about this...
self._modpath=''
self._module=''
def find_module(self, name=None, path=None):
if name is None:
name=self._fullname
for _i in ('libs/%s.js' % name, 'Lib/%s.py' % name,
'Lib/%s/__init__.py' % name):
_path="%s%s" % (__BRYTHON__.brython_path, _i)
try:
_fp,_,_headers=urllib.request.urlopen(_path)
if _headers['status'] != 200:
continue
self._module=_fp.read()
self._modpath=_path
return self
except urllib.error.HTTPError as e:
print(str(e))
self._modpath=''
self._module=''
raise ImportError
def is_package(self):
return '.' in self._fullname
def load_module(self, name):
if name is None:
name=self._fullname
window.eval('__BRYTHON__.imported["%s"]={}' % name)
return JSObject(__BRYTHON__.run_py)(TempMod(name),
self._modpath, self._module)
| gpl-3.0 |
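BaseHook implements the legacy PEP 302 finder/loader pair (find_module / load_module), so inside a Brython page registering it is a matter of placing an instance on sys.meta_path. A sketch (the module path is assumed from this file's location, and some_pure_python_module is hypothetical):

import sys
from importlib.basehook import BaseHook  # module path assumed

sys.meta_path.append(BaseHook())
import some_pure_python_module           # now fetched over HTTP from brython_path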
Pawamoy/archan | src/archan/logging.py | 1 | 2373 | # -*- coding: utf-8 -*-
"""Logging module."""
from __future__ import absolute_import
import logging
from colorama import Back, Fore, Style
class Logger(object):
"""Static class to store loggers."""
loggers = {}
level = None
@staticmethod
def set_level(level):
"""
Set level of logging for all loggers.
Args:
level (int): level of logging.
"""
Logger.level = level
for logger in Logger.loggers.values():
logger.setLevel(level)
@staticmethod
def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'):
"""
Return a logger.
Args:
name (str): name to pass to the logging module.
level (int): level of logging.
fmt (str): format string.
Returns:
logging.Logger: logger from ``logging.getLogger``.
"""
if name not in Logger.loggers:
if Logger.level is None and level is None:
Logger.level = level = logging.ERROR
elif Logger.level is None:
Logger.level = level
elif level is None:
level = Logger.level
logger = logging.getLogger(name)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(LoggingFormatter(fmt=name + fmt))
logger.addHandler(logger_handler)
logger.setLevel(level)
Logger.loggers[name] = logger
return Logger.loggers[name]
class LoggingFormatter(logging.Formatter):
"""Custom logging formatter."""
def format(self, record):
"""Override default format method."""
if record.levelno == logging.DEBUG:
string = Back.WHITE + Fore.BLACK + ' debug '
elif record.levelno == logging.INFO:
string = Back.BLUE + Fore.WHITE + ' info '
elif record.levelno == logging.WARNING:
string = Back.YELLOW + Fore.BLACK + ' warning '
elif record.levelno == logging.ERROR:
string = Back.RED + Fore.WHITE + ' error '
elif record.levelno == logging.CRITICAL:
string = Back.BLACK + Fore.WHITE + ' critical '
else:
string = ''
return '{none}{string}{none} {super}'.format(
none=Style.RESET_ALL, string=string, super=super().format(record))
| isc |
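A usage sketch for the static Logger registry above; the import path is assumed from src/archan/logging.py:

import logging
from archan.logging import Logger  # import path assumed

log = Logger.get_logger('archan.example', level=logging.DEBUG)
log.warning('found %d dependency violations', 3)  # colorized " warning " badge
Logger.set_level(logging.ERROR)  # tightens every logger handed out so far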
wfxiang08/django178 | tests/admin_changelist/admin.py | 57 | 3552 | from django.contrib import admin
from django.core.paginator import Paginator
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from .models import Event, Child, Parent, Swallow
site = admin.AdminSite(name="admin")
site.register(User, UserAdmin)
class CustomPaginator(Paginator):
def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
super(CustomPaginator, self).__init__(queryset, 5, orphans=2,
allow_empty_first_page=allow_empty_first_page)
class EventAdmin(admin.ModelAdmin):
list_display = ['event_date_func']
def event_date_func(self, event):
return event.date
site.register(Event, EventAdmin)
class ParentAdmin(admin.ModelAdmin):
list_filter = ['child__name']
search_fields = ['child__name']
class ChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
list_filter = ['parent', 'age']
def get_queryset(self, request):
return super(ChildAdmin, self).get_queryset(request).select_related("parent__name")
class CustomPaginationAdmin(ChildAdmin):
paginator = CustomPaginator
class FilteredChildAdmin(admin.ModelAdmin):
list_display = ['name', 'parent']
list_per_page = 10
def get_queryset(self, request):
return super(FilteredChildAdmin, self).get_queryset(request).filter(
name__contains='filtered')
class BandAdmin(admin.ModelAdmin):
list_filter = ['genres']
class GroupAdmin(admin.ModelAdmin):
list_filter = ['members']
class QuartetAdmin(admin.ModelAdmin):
list_filter = ['members']
class ChordsBandAdmin(admin.ModelAdmin):
list_filter = ['members']
class InvitationAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
list_select_related = ('player',)
class DynamicListDisplayChildAdmin(admin.ModelAdmin):
list_display = ('parent', 'name', 'age')
def get_list_display(self, request):
my_list_display = super(DynamicListDisplayChildAdmin, self).get_list_display(request)
if request.user.username == 'noparents':
my_list_display = list(my_list_display)
my_list_display.remove('parent')
return my_list_display
class DynamicListDisplayLinksChildAdmin(admin.ModelAdmin):
list_display = ('parent', 'name', 'age')
list_display_links = ['parent', 'name']
def get_list_display_links(self, request, list_display):
return ['age']
site.register(Child, DynamicListDisplayChildAdmin)
class NoListDisplayLinksParentAdmin(admin.ModelAdmin):
list_display_links = None
site.register(Parent, NoListDisplayLinksParentAdmin)
class SwallowAdmin(admin.ModelAdmin):
actions = None # prevent ['action_checkbox'] + list(list_display)
list_display = ('origin', 'load', 'speed')
site.register(Swallow, SwallowAdmin)
class DynamicListFilterChildAdmin(admin.ModelAdmin):
list_filter = ('parent', 'name', 'age')
def get_list_filter(self, request):
my_list_filter = super(DynamicListFilterChildAdmin, self).get_list_filter(request)
if request.user.username == 'noparents':
my_list_filter = list(my_list_filter)
my_list_filter.remove('parent')
return my_list_filter
class DynamicSearchFieldsChildAdmin(admin.ModelAdmin):
search_fields = ('name',)
def get_search_fields(self, request):
search_fields = super(DynamicSearchFieldsChildAdmin, self).get_search_fields(request)
search_fields += ('age',)
return search_fields
| bsd-3-clause |
akozumpl/anaconda | pyanaconda/ui/tui/__init__.py | 4 | 10917 | # The main file for anaconda TUI interface
#
# Copyright (C) (2012) Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Martin Sivak <msivak@redhat.com>
#
from pyanaconda import ui
from pyanaconda.ui.communication import hubQ
from pyanaconda.flags import flags
from pyanaconda.threads import threadMgr
from pyanaconda.ui.tui import simpleline as tui
from pyanaconda.ui.tui.hubs.summary import SummaryHub
from pyanaconda.ui.tui.spokes import StandaloneSpoke
from pyanaconda.ui.tui.tuiobject import YesNoDialog, ErrorDialog
import os
import sys
import site
import Queue
import meh.ui.text
def exception_msg_handler(event, data):
"""
Handler for the HUB_CODE_EXCEPTION message in the hubQ.
:param event: event data
:type event: (event_type, message_data)
:param data: additional data
:type data: any
"""
# get data from the event data structure
msg_data = event[1]
# msg_data is a list
sys.excepthook(*msg_data[0])
class TextUserInterface(ui.UserInterface):
"""This is the main class for Text user interface."""
ENVIRONMENT = "anaconda"
def __init__(self, storage, payload, instclass,
productTitle = u"Anaconda", isFinal = True,
quitMessage = None):
"""
For detailed description of the arguments see
the parent class.
:param storage: storage backend reference
:type storage: instance of pyanaconda.Storage
:param payload: payload (usually yum) reference
:type payload: instance of payload handler
:param instclass: install class reference
:type instclass: instance of install class
:param productTitle: the name of the product
:type productTitle: unicode string
:param isFinal: Boolean that marks the release
as final (True) or development
(False) version.
:type isFinal: bool
:param quitMessage: The text to be used in quit
dialog question. It should not
be translated to allow for change
of language.
:type quitMessage: unicode string
"""
ui.UserInterface.__init__(self, storage, payload, instclass)
self._app = None
self._meh_interface = meh.ui.text.TextIntf()
self.productTitle = productTitle
self.isFinal = isFinal
self.quitMessage = quitMessage
basemask = "pyanaconda.ui.tui"
basepath = os.path.dirname(__file__)
updatepath = "/tmp/updates/pyanaconda/ui/tui"
sitepackages = [os.path.join(dir, "pyanaconda", "ui", "tui")
for dir in site.getsitepackages()]
pathlist = set([updatepath, basepath] + sitepackages)
paths = ui.UserInterface.paths + {
"spokes": [(basemask + ".spokes.%s",
os.path.join(path, "spokes"))
for path in pathlist],
"hubs": [(basemask + ".hubs.%s",
os.path.join(path, "hubs"))
for path in pathlist]
}
@property
def tty_num(self):
return 1
@property
def meh_interface(self):
return self._meh_interface
def _list_hubs(self):
"""returns the list of hubs to use"""
return [SummaryHub]
def _is_standalone(self, spoke):
"""checks if the passed spoke is standalone"""
return isinstance(spoke, StandaloneSpoke)
def setup(self, data):
"""Construct all the objects required to implement this interface.
This method must be provided by all subclasses.
"""
self._app = tui.App(self.productTitle, yes_or_no_question = YesNoDialog,
quit_message = self.quitMessage, queue = hubQ.q)
# tell python-meh it should use our raw_input
self._meh_interface.set_io_handler(meh.ui.text.IOHandler(in_func=self._app.raw_input))
# register handlers for various messages
self._app.register_event_handler(hubQ.HUB_CODE_EXCEPTION, exception_msg_handler)
self._app.register_event_handler(hubQ.HUB_CODE_SHOW_MESSAGE, self._handle_show_message)
_hubs = self._list_hubs()
# First, grab a list of all the standalone spokes.
path = os.path.join(os.path.dirname(__file__), "spokes")
spokes = self._collectActionClasses(self.paths["spokes"], StandaloneSpoke)
actionClasses = self._orderActionClasses(spokes, _hubs)
for klass in actionClasses:
obj = klass(self._app, data, self.storage, self.payload, self.instclass)
# If we are doing a kickstart install, some standalone spokes
# could already be filled out. In taht case, we do not want
# to display them.
if self._is_standalone(obj) and obj.completed:
del(obj)
continue
if hasattr(obj, "set_path"):
obj.set_path("spokes", self.paths["spokes"])
should_schedule = obj.setup(self.ENVIRONMENT)
if should_schedule:
self._app.schedule_screen(obj)
def run(self):
"""Run the interface. This should do little more than just pass
through to something else's run method, but is provided here in
case more is needed. This method must be provided by all subclasses.
"""
return self._app.run()
###
### MESSAGE HANDLING METHODS
###
def _send_show_message(self, msg_fn, args, ret_queue):
"""
Send message requesting to show some message dialog specified by the
message function.
:param msg_fn: message dialog function requested to be called
:type msg_fn: a function taking the same number of arguments as is the
length of the args param
:param args: arguments to be passed to the message dialog function
:type args: any
:param ret_queue: the queue which the return value of the message dialog
function should be put
:type ret_queue: a Queue.Queue instance
"""
self._app.queue.put((hubQ.HUB_CODE_SHOW_MESSAGE,
[msg_fn, args, ret_queue]))
def _handle_show_message(self, event, data):
"""
Handler for the HUB_CODE_SHOW_MESSAGE message in the hubQ.
:param event: event data
:type event: (event_type, message_data)
:param data: additional data
:type data: any
"""
# event_type, message_data
msg_data = event[1]
msg_fn, args, ret_queue = msg_data
ret_queue.put(msg_fn(*args))
def _show_message_in_main_thread(self, msg_fn, args):
"""
If running in the main thread, run the message dialog function and
return its return value. If running in a non-main thread, request the
message function to be called in the main thread.
:param msg_fn: message dialog function to be run
:type msg_fn: a function taking the same number of arguments as is the
length of the args param
:param args: arguments to be passed to the message dialog function
:type args: any
"""
if threadMgr.in_main_thread():
# call the function directly
return msg_fn(*args)
else:
# create a queue for the result returned by the function
ret_queue = Queue.Queue()
# request the function to be called in the main thread
self._send_show_message(msg_fn, args, ret_queue)
# wait and return the result from the queue
return ret_queue.get()
def showError(self, message):
"""Display an error dialog with the given message. After this dialog
is displayed, anaconda will quit. There is no return value. This
method must be implemented by all UserInterface subclasses.
In the code, this method should be used sparingly and only for
critical errors that anaconda cannot figure out how to recover from.
"""
return self._show_message_in_main_thread(self._showError, (message,))
def _showError(self, message):
"""Internal helper function that MUST BE CALLED FROM THE MAIN THREAD"""
error_window = ErrorDialog(self._app, message)
self._app.switch_screen_modal(error_window)
def showDetailedError(self, message, details):
return self._show_message_in_main_thread(self._showDetailedError, (message, details))
def _showDetailedError(self, message, details):
"""Internal helper function that MUST BE CALLED FROM THE MAIN THREAD"""
return self.showError(message + "\n\n" + details)
def showYesNoQuestion(self, message):
"""Display a dialog with the given message that presents the user a yes
or no choice. This method returns True if the yes choice is selected,
and False if the no choice is selected. From here, anaconda can
figure out what to do next. This method must be implemented by all
UserInterface subclasses.
In the code, this method should be used sparingly and only for those
times where anaconda cannot make a reasonable decision. We don't
want to overwhelm the user with choices.
When cmdline mode is active, the default will be to answer no.
"""
return self._show_message_in_main_thread(self._showYesNoQuestion, (message,))
def _showYesNoQuestion(self, message):
"""Internal helper function that MUST BE CALLED FROM THE MAIN THREAD"""
if flags.automatedInstall and not flags.ksprompt:
# If we're in cmdline mode, just say no.
return False
question_window = YesNoDialog(self._app, message)
self._app.switch_screen_modal(question_window)
return question_window.answer
| gpl-2.0 |
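The main-thread dispatch in _show_message_in_main_thread / _handle_show_message above is a general pattern: worker threads put (function, args, reply_queue) tuples on a queue that the UI thread drains, then block on the reply queue. A minimal standalone sketch of just that mechanism (Python 2, as in the source):

import Queue

ui_queue = Queue.Queue()

def call_in_ui_thread(fn, *args):
    """Worker-thread side: enqueue the call and block on the reply."""
    reply = Queue.Queue()
    ui_queue.put((fn, args, reply))
    return reply.get()

def ui_loop():
    """Main-thread side: drain requests and post results back."""
    while True:
        fn, args, reply = ui_queue.get()
        reply.put(fn(*args))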
Foxugly/medagenda | patient/models.py | 1 | 1459 | # -*- coding: utf-8 -*-
#
# Copyright 2015, Foxugly. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
from django.db import models
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
class Patient(models.Model):
email = models.EmailField(verbose_name=_(u'Email'), blank=True, null=True)
first_name = models.CharField(verbose_name=_(u'First name'), max_length=100, blank=True, null=True)
last_name = models.CharField(verbose_name=_(u'Last_name'), max_length=100, blank=True, null=True)
telephone = models.CharField(verbose_name=_(u'Telephone'), help_text=_(u'format : +32475123456'), max_length=20, blank=True, null=True)
active = models.BooleanField(verbose_name=_(u'Confirmed'), default=False)
confirm = models.TextField(verbose_name=_(u'Confirm key'), blank=True, null=True)
def __str__(self):
return u'%d : %s' % (self.id, self.email)
def as_json(self):
return dict(id=self.id, email=self.email, first_name=self.first_name, last_name=self.last_name,
telephone=self.telephone)
class PatientForm(ModelForm):
class Meta:
model = Patient
fields = ['email', 'first_name', 'last_name', 'telephone']
| gpl-3.0 |
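A hedged usage sketch for the ModelForm above (assumes a configured Django environment; the field values are illustrative):

form = PatientForm({'email': 'ann@example.be', 'first_name': 'Ann',
                    'last_name': 'Peeters', 'telephone': '+32475123456'})
if form.is_valid():
    patient = form.save(commit=False)
    patient.active = False           # stays inactive until the confirm key is used
    patient.save()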
mlaitinen/odoo | addons/knowledge/__init__.py | 436 | 1064 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Triv90/Nova | nova/cells/messaging.py | 4 | 56137 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cell messaging module.
This module defines the different message types that are passed between
cells and the methods that they can call when the target cell has been
reached.
The interface into this module is the MessageRunner class.
"""
import sys
from eventlet import queue
from oslo.config import cfg
from nova.cells import state as cells_state
from nova.cells import utils as cells_utils
from nova import compute
from nova.compute import rpcapi as compute_rpcapi
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import utils
cell_messaging_opts = [
cfg.IntOpt('max_hop_count',
default=10,
help='Maximum number of hops for cells routing.'),
cfg.StrOpt('scheduler',
default='nova.cells.scheduler.CellsScheduler',
help='Cells scheduler to use')]
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_messaging_opts, group='cells')
LOG = logging.getLogger(__name__)
# Separator used between cell names for the 'full cell name' and routing
# path.
_PATH_CELL_SEP = cells_utils._PATH_CELL_SEP
def _reverse_path(path):
"""Reverse a path. Used for sending responses upstream."""
path_parts = path.split(_PATH_CELL_SEP)
path_parts.reverse()
return _PATH_CELL_SEP.join(path_parts)
def _response_cell_name_from_path(routing_path, neighbor_only=False):
"""Reverse the routing_path. If we only want to send to our parent,
set neighbor_only to True.
"""
path = _reverse_path(routing_path)
if not neighbor_only or len(path) == 1:
return path
return _PATH_CELL_SEP.join(path.split(_PATH_CELL_SEP)[:2])
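# Illustration: with routing_path 'api!region!child', _reverse_path() gives
# 'child!region!api'; with neighbor_only=True, _response_cell_name_from_path()
# trims that to 'child!region' -- the next hop back toward the source.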
#
# Message classes.
#
class _BaseMessage(object):
"""Base message class. It defines data that is passed with every
single message through every cell.
Messages are JSON-ified before sending and turned back into a
class instance when being received.
Every message has a unique ID. This is used to route responses
back to callers. In the future, this might be used to detect
receiving the same message more than once.
routing_path is updated on every hop through a cell. The current
cell name is appended to it (cells are separated by
_PATH_CELL_SEP ('!')). This is used to tell if we've reached the
target cell and also to determine the source of a message for
responses by reversing it.
hop_count is incremented and compared against max_hop_count. The
only current usefulness of this is to break out of a routing loop
if someone has a broken config.
fanout means to send to all nova-cells services running in a cell.
This is useful for capacity and capability broadcasting as well
as making sure responses get back to the nova-cells service that
is waiting.
"""
# Override message_type in a subclass
message_type = None
base_attrs_to_json = ['message_type',
'ctxt',
'method_name',
'method_kwargs',
'direction',
'need_response',
'fanout',
'uuid',
'routing_path',
'hop_count',
'max_hop_count']
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, need_response=False, fanout=False, uuid=None,
routing_path=None, hop_count=0, max_hop_count=None,
**kwargs):
self.ctxt = ctxt
self.resp_queue = None
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
# Copy these.
self.base_attrs_to_json = self.base_attrs_to_json[:]
# Normally this would just be CONF.cells.name, but going through
# the msg_runner allows us to stub it more easily.
self.our_path_part = self.msg_runner.our_name
self.uuid = uuid
if self.uuid is None:
self.uuid = uuidutils.generate_uuid()
self.method_name = method_name
self.method_kwargs = method_kwargs
self.direction = direction
self.need_response = need_response
self.fanout = fanout
self.routing_path = routing_path
self.hop_count = hop_count
if max_hop_count is None:
max_hop_count = CONF.cells.max_hop_count
self.max_hop_count = max_hop_count
self.is_broadcast = False
self._append_hop()
# Each sub-class should set this when the message is inited
self.next_hops = []
self.resp_queue = None
def __repr__(self):
_dict = self._to_dict()
_dict.pop('method_kwargs')
return "<%s: %s>" % (self.__class__.__name__, _dict)
def _append_hop(self):
"""Add our hop to the routing_path."""
routing_path = (self.routing_path and
self.routing_path + _PATH_CELL_SEP or '')
self.routing_path = routing_path + self.our_path_part
self.hop_count += 1
def _at_max_hop_count(self, do_raise=True):
"""Check if we're at the max hop count. If we are and do_raise is
True, raise CellMaxHopCountReached. If we are at the max and
do_raise is False... return True, else False.
"""
if self.hop_count >= self.max_hop_count:
if do_raise:
raise exception.CellMaxHopCountReached(
hop_count=self.hop_count)
return True
return False
def _process_locally(self):
"""Its been determined that we should process this message in this
cell. Go through the MessageRunner to call the appropriate
method for this message. Catch the response and/or exception and
encode it within a Response instance. Return it so the caller
can potentially return it to another cell... or return it to
a caller waiting in this cell.
"""
try:
resp_value = self.msg_runner._process_message_locally(self)
failure = False
except Exception as exc:
resp_value = sys.exc_info()
failure = True
LOG.exception(_("Error processing message locally: %(exc)s"),
locals())
return Response(self.routing_path, resp_value, failure)
def _setup_response_queue(self):
"""Shortcut to creating a response queue in the MessageRunner."""
self.resp_queue = self.msg_runner._setup_response_queue(self)
def _cleanup_response_queue(self):
"""Shortcut to deleting a response queue in the MessageRunner."""
if self.resp_queue:
self.msg_runner._cleanup_response_queue(self)
self.resp_queue = None
def _wait_for_json_responses(self, num_responses=1):
"""Wait for response(s) to be put into the eventlet queue. Since
each queue entry actually contains a list of JSON-ified responses,
combine them all into a single list to return.
Destroy the eventlet queue when done.
"""
if not self.resp_queue:
# Source is not actually expecting a response
return
responses = []
wait_time = CONF.cells.call_timeout
try:
for x in xrange(num_responses):
json_responses = self.resp_queue.get(timeout=wait_time)
responses.extend(json_responses)
except queue.Empty:
raise exception.CellTimeout()
finally:
self._cleanup_response_queue()
return responses
def _send_json_responses(self, json_responses, neighbor_only=False,
fanout=False):
"""Send list of responses to this message. Responses passed here
are JSON-ified. Targeted messages have a single response while
Broadcast messages may have multiple responses.
If this cell was the source of the message, these responses will
be returned from self.process().
Otherwise, we will route the response to the source of the
request. If 'neighbor_only' is True, the response will be sent
to the neighbor cell, not the original requester. Broadcast
messages get aggregated at each hop, so neighbor_only will be
True for those messages.
"""
if not self.need_response:
return
if self.source_is_us():
responses = []
for json_response in json_responses:
responses.append(Response.from_json(json_response))
return responses
direction = self.direction == 'up' and 'down' or 'up'
response_kwargs = {'orig_message': self.to_json(),
'responses': json_responses}
target_cell = _response_cell_name_from_path(self.routing_path,
neighbor_only=neighbor_only)
response = self.msg_runner._create_response_message(self.ctxt,
direction, target_cell, self.uuid, response_kwargs,
fanout=fanout)
response.process()
def _send_response(self, response, neighbor_only=False):
"""Send a response to this message. If the source of the
request was ourselves, just return the response. It'll be
passed back to the caller of self.process(). See DocString for
_send_json_responses() as it handles most of the real work for
this method.
'response' is an instance of Response class.
"""
if not self.need_response:
return
if self.source_is_us():
return response
self._send_json_responses([response.to_json()],
neighbor_only=neighbor_only)
def _send_response_from_exception(self, exc_info):
"""Take an exception as returned from sys.exc_info(), encode
it in a Response, and send it.
"""
response = Response(self.routing_path, exc_info, True)
return self._send_response(response)
def _to_dict(self):
"""Convert a message to a dictionary. Only used internally."""
_dict = {}
for key in self.base_attrs_to_json:
_dict[key] = getattr(self, key)
return _dict
def to_json(self):
"""Convert a message into JSON for sending to a sibling cell."""
_dict = self._to_dict()
# Convert context to dict.
_dict['ctxt'] = _dict['ctxt'].to_dict()
return jsonutils.dumps(_dict)
def source_is_us(self):
"""Did this cell create this message?"""
return self.routing_path == self.our_path_part
def process(self):
"""Process a message. Deal with it locally and/or forward it to a
sibling cell.
Override in a subclass.
"""
raise NotImplementedError()
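# Editor's sketch, not part of the original module: a standalone
# illustration of how _append_hop() grows routing_path hop by hop,
# assuming _PATH_CELL_SEP is the '!' separator this module uses
# between cell names. Each cell appends its own name as the message
# passes through it.
def _example_routing_path(cell_names, sep='!'):
    path = ''
    for name in cell_names:
        path = (path and path + sep or '') + name
    return path
# _example_routing_path(['api', 'child']) == 'api!child'; source_is_us()
# is True only in the cell whose own name equals the entire routing_path.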
class _TargetedMessage(_BaseMessage):
"""A targeted message is a message that is destined for a specific
single cell.
'target_cell' can be a full cell name like 'api!child-cell' or it can
be an instance of the CellState class if the target is a neighbor cell.
"""
message_type = 'targeted'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, target_cell, **kwargs):
super(_TargetedMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, **kwargs)
if isinstance(target_cell, cells_state.CellState):
# Neighbor cell or ourselves. Convert it to a 'full path'.
if target_cell.is_me:
target_cell = self.our_path_part
else:
target_cell = '%s%s%s' % (self.our_path_part,
_PATH_CELL_SEP,
target_cell.name)
self.target_cell = target_cell
self.base_attrs_to_json.append('target_cell')
def _get_next_hop(self):
"""Return the cell name for the next hop. If the next hop is
the current cell, return None.
"""
if self.target_cell == self.routing_path:
return self.state_manager.my_cell_state
target_cell = self.target_cell
routing_path = self.routing_path
current_hops = routing_path.count(_PATH_CELL_SEP)
next_hop_num = current_hops + 1
dest_hops = target_cell.count(_PATH_CELL_SEP)
if dest_hops < current_hops:
reason = _("destination is %(target_cell)s but routing_path "
"is %(routing_path)s") % locals()
raise exception.CellRoutingInconsistency(reason=reason)
dest_name_parts = target_cell.split(_PATH_CELL_SEP)
if (_PATH_CELL_SEP.join(dest_name_parts[:next_hop_num]) !=
routing_path):
reason = _("destination is %(target_cell)s but routing_path "
"is %(routing_path)s") % locals()
raise exception.CellRoutingInconsistency(reason=reason)
next_hop_name = dest_name_parts[next_hop_num]
if self.direction == 'up':
next_hop = self.state_manager.get_parent_cell(next_hop_name)
else:
next_hop = self.state_manager.get_child_cell(next_hop_name)
if not next_hop:
cell_type = 'parent' if self.direction == 'up' else 'child'
reason = _("Unknown %(cell_type)s when routing to "
"%(target_cell)s") % locals()
raise exception.CellRoutingInconsistency(reason=reason)
return next_hop
def process(self):
"""Process a targeted message. This is called for all cells
that touch this message. If the local cell is the one that
created this message, we reply directly with a Response instance.
If the local cell is not the target, an eventlet queue is created
and we wait for the response to show up via another thread
receiving the Response back.
Responses to targeted messages are routed directly back to the
source. No eventlet queues are created in intermediate hops.
All exceptions for processing the message across the whole
routing path are caught and encoded within the Response and
returned to the caller.
"""
try:
next_hop = self._get_next_hop()
except Exception as exc:
exc_info = sys.exc_info()
LOG.exception(_("Error locating next hop for message: %(exc)s"),
locals())
return self._send_response_from_exception(exc_info)
if next_hop.is_me:
# Final destination.
response = self._process_locally()
return self._send_response(response)
# Need to forward via neighbor cell.
if self.need_response and self.source_is_us():
# A response is needed and the source of the message is
# this cell. Create the eventlet queue.
self._setup_response_queue()
wait_for_response = True
else:
wait_for_response = False
try:
# This is inside the try block, so we can encode the
# exception and return it to the caller.
if self.hop_count >= self.max_hop_count:
raise exception.CellMaxHopCountReached(
hop_count=self.hop_count)
next_hop.send_message(self)
except Exception as exc:
exc_info = sys.exc_info()
err_str = _("Failed to send message to cell: %(next_hop)s: "
"%(exc)s")
LOG.exception(err_str, locals())
self._cleanup_response_queue()
return self._send_response_from_exception(exc_info)
if wait_for_response:
# Targeted messages only have 1 response.
remote_response = self._wait_for_json_responses()[0]
return Response.from_json(remote_response)
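# Editor's sketch, not part of the original module: the separator-counting
# arithmetic behind _get_next_hop(), assuming '!'-separated cell paths.
# The path walked so far must be a prefix of the target path; the next
# hop is simply the path component immediately after that prefix.
def _example_next_hop_name(target_cell, routing_path, sep='!'):
    current_hops = routing_path.count(sep)
    dest_name_parts = target_cell.split(sep)
    if sep.join(dest_name_parts[:current_hops + 1]) != routing_path:
        raise ValueError('routing_path is not a prefix of target_cell')
    return dest_name_parts[current_hops + 1]
# _example_next_hop_name('api!child!grandchild', 'api') returns 'child'.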
class _BroadcastMessage(_BaseMessage):
"""A broadcast message. This means to call a method in every single
cell going in a certain direction.
"""
message_type = 'broadcast'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, run_locally=True, **kwargs):
super(_BroadcastMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, **kwargs)
# The local cell creating this message has the option
# to be able to process the message locally or not.
self.run_locally = run_locally
self.is_broadcast = True
def _get_next_hops(self):
"""Set the next hops and return the number of hops. The next
hops may include ourself.
"""
if self.hop_count >= self.max_hop_count:
return []
if self.direction == 'down':
return self.state_manager.get_child_cells()
else:
return self.state_manager.get_parent_cells()
def _send_to_cells(self, target_cells):
"""Send a message to multiple cells."""
for cell in target_cells:
cell.send_message(self)
def _send_json_responses(self, json_responses):
"""Responses to broadcast messages always need to go to the
neighbor cell from which we received this message. That
cell aggregates the responses and makes sure to forward them
to the correct source.
"""
return super(_BroadcastMessage, self)._send_json_responses(
json_responses, neighbor_only=True, fanout=True)
def process(self):
"""Process a broadcast message. This is called for all cells
that touch this message.
The message is sent to all cells in the given direction, and
the creator of this message has the option of whether or not
to process it locally as well.
If responses from all cells are required, each hop creates an
eventlet queue and waits for responses from its immediate
neighbor cells. All responses are then aggregated into a
single list and are returned to the neighbor cell until the
source is reached.
When the source is reached, a list of Response instances is
returned to the caller.
All exceptions for processing the message across the whole
routing path are caught and encoded within the Response and
returned to the caller. It is possible to get a mix of
successful responses and failure responses. The caller is
responsible for dealing with this.
"""
try:
next_hops = self._get_next_hops()
except Exception as exc:
exc_info = sys.exc_info()
LOG.exception(_("Error locating next hops for message: %(exc)s"),
locals())
return self._send_response_from_exception(exc_info)
# Short circuit if we don't need to respond
if not self.need_response:
if self.run_locally:
self._process_locally()
self._send_to_cells(next_hops)
return
# We'll need to aggregate all of the responses (from ourself
# and our sibling cells) into 1 response
try:
self._setup_response_queue()
self._send_to_cells(next_hops)
except Exception as exc:
# Error just trying to send to cells. Send a single response
# with the failure.
exc_info = sys.exc_info()
LOG.exception(_("Error sending message to next hops: %(exc)s"),
locals())
self._cleanup_response_queue()
return self._send_response_from_exception(exc_info)
if self.run_locally:
# Run locally and store the Response.
local_response = self._process_locally()
else:
local_response = None
try:
remote_responses = self._wait_for_json_responses(
num_responses=len(next_hops))
except Exception as exc:
# Error waiting for responses, most likely a timeout.
# Send a single response back with the failure.
exc_info = sys.exc_info()
err_str = _("Error waiting for responses from neighbor cells: "
"%(exc)s")
LOG.exception(err_str, locals())
return self._send_response_from_exception(exc_info)
if local_response:
remote_responses.append(local_response.to_json())
return self._send_json_responses(remote_responses)
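# Editor's sketch, not part of the original module: how a hop aggregates
# broadcast responses. Each eventlet queue entry is a JSON-ified list of
# responses from one neighbor; the entries are flattened into a single
# list and, if this hop also ran the method locally, the local response
# is appended before everything is passed back toward the source.
def _example_aggregate_responses(queue_entries, local_response_json=None):
    responses = []
    for json_responses in queue_entries:
        responses.extend(json_responses)
    if local_response_json is not None:
        responses.append(local_response_json)
    return responses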
class _ResponseMessage(_TargetedMessage):
"""A response message is really just a special targeted message,
saying to call 'parse_responses' when we reach the source of a 'call'.
The 'fanout' attribute on this message may be true if we're responding
to a broadcast or if we're about to respond to the source of an
original target message. Because multiple nova-cells services may
be running within a cell, we need to make sure the response gets
back to the correct one, so we have to fanout.
"""
message_type = 'response'
def __init__(self, msg_runner, ctxt, method_name, method_kwargs,
direction, target_cell, response_uuid, **kwargs):
super(_ResponseMessage, self).__init__(msg_runner, ctxt,
method_name, method_kwargs, direction, target_cell, **kwargs)
self.response_uuid = response_uuid
self.base_attrs_to_json.append('response_uuid')
def process(self):
"""Process a response. If the target is the local cell, process
the response here. Otherwise, forward it to where it needs to
go.
"""
next_hop = self._get_next_hop()
if next_hop.is_me:
self._process_locally()
return
if self.fanout is False:
# Really there's 1 more hop on each of these below, but
# it doesn't matter for this logic.
target_hops = self.target_cell.count(_PATH_CELL_SEP)
current_hops = self.routing_path.count(_PATH_CELL_SEP)
if current_hops + 1 == target_hops:
# Next hop is the target, so we must fanout. See the
# class docstring above.
self.fanout = True
next_hop.send_message(self)
#
# Methods that may be called when processing messages after reaching
# a target cell.
#
class _BaseMessageMethods(base.Base):
"""Base class for defining methods by message types."""
def __init__(self, msg_runner):
super(_BaseMessageMethods, self).__init__()
self.msg_runner = msg_runner
self.state_manager = msg_runner.state_manager
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
def task_log_get_all(self, message, task_name, period_beginning,
period_ending, host, state):
"""Get task logs from the DB. The message could have
directly targeted this cell, or it could have been a broadcast
message.
If 'host' is not None, filter by host.
If 'state' is not None, filter by state.
"""
task_logs = self.db.task_log_get_all(message.ctxt, task_name,
period_beginning,
period_ending,
host=host,
state=state)
return jsonutils.to_primitive(task_logs)
class _ResponseMessageMethods(_BaseMessageMethods):
"""Methods that are called from a ResponseMessage. There's only
1 method (parse_responses) and it is called when the message reaches
the source of a 'call'. All we do is stuff the response into the
eventlet queue to signal the caller that's waiting.
"""
def parse_responses(self, message, orig_message, responses):
self.msg_runner._put_response(message.response_uuid,
responses)
class _TargetedMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called when routing a message
to a specific cell.
"""
def __init__(self, *args, **kwargs):
super(_TargetedMessageMethods, self).__init__(*args, **kwargs)
def schedule_run_instance(self, message, host_sched_kwargs):
"""Parent cell told us to schedule new instance creation."""
self.msg_runner.scheduler.run_instance(message, host_sched_kwargs)
def run_compute_api_method(self, message, method_info):
"""Run a method in the compute api class."""
method = method_info['method']
fn = getattr(self.compute_api, method, None)
if not fn:
detail = _("Unknown method '%(method)s' in compute API")
raise exception.CellServiceAPIMethodNotFound(
detail=detail % locals())
args = list(method_info['method_args'])
# 1st arg is instance_uuid that we need to turn into the
# instance object.
instance_uuid = args[0]
try:
instance = self.db.instance_get_by_uuid(message.ctxt,
instance_uuid)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance_uuid}
self.msg_runner.instance_destroy_at_top(message.ctxt,
instance)
args[0] = instance
return fn(message.ctxt, *args, **method_info['method_kwargs'])
def update_capabilities(self, message, cell_name, capabilities):
"""A child cell told us about their capabilities."""
LOG.debug(_("Received capabilities from child cell "
"%(cell_name)s: %(capabilities)s"), locals())
self.state_manager.update_cell_capabilities(cell_name,
capabilities)
# Go ahead and update our parents now that a child updated us
self.msg_runner.tell_parents_our_capabilities(message.ctxt)
def update_capacities(self, message, cell_name, capacities):
"""A child cell told us about their capacity."""
LOG.debug(_("Received capacities from child cell "
"%(cell_name)s: %(capacities)s"), locals())
self.state_manager.update_cell_capacities(cell_name,
capacities)
# Go ahead and update our parents now that a child updated us
self.msg_runner.tell_parents_our_capacities(message.ctxt)
def announce_capabilities(self, message):
"""A parent cell has told us to send our capabilities, so let's
do so.
"""
self.msg_runner.tell_parents_our_capabilities(message.ctxt)
def announce_capacities(self, message):
"""A parent cell has told us to send our capacity, so let's
do so.
"""
self.msg_runner.tell_parents_our_capacities(message.ctxt)
def service_get_by_compute_host(self, message, host_name):
"""Return the service entry for a compute host."""
service = self.db.service_get_by_compute_host(message.ctxt,
host_name)
return jsonutils.to_primitive(service)
def proxy_rpc_to_manager(self, message, host_name, rpc_message,
topic, timeout):
"""Proxy RPC to the given compute topic."""
# Check that the host exists.
self.db.service_get_by_compute_host(message.ctxt, host_name)
if message.need_response:
return rpc.call(message.ctxt, topic, rpc_message,
timeout=timeout)
rpc.cast(message.ctxt, topic, rpc_message)
def compute_node_get(self, message, compute_id):
"""Get compute node by ID."""
compute_node = self.db.compute_node_get(message.ctxt,
compute_id)
return jsonutils.to_primitive(compute_node)
def actions_get(self, message, instance_uuid):
actions = self.db.actions_get(message.ctxt, instance_uuid)
return jsonutils.to_primitive(actions)
def action_get_by_request_id(self, message, instance_uuid, request_id):
action = self.db.action_get_by_request_id(message.ctxt, instance_uuid,
request_id)
return jsonutils.to_primitive(action)
def action_events_get(self, message, action_id):
action_events = self.db.action_events_get(message.ctxt, action_id)
return jsonutils.to_primitive(action_events)
def validate_console_port(self, message, instance_uuid, console_port,
console_type):
"""Validate console port with child cell compute node."""
# 1st arg is instance_uuid that we need to turn into the
# instance object.
try:
instance = self.db.instance_get_by_uuid(message.ctxt,
instance_uuid)
except exception.InstanceNotFound:
with excutils.save_and_reraise_exception():
# Must be a race condition. Let's try to resolve it by
# telling the top level cells that this instance doesn't
# exist.
instance = {'uuid': instance_uuid}
self.msg_runner.instance_destroy_at_top(message.ctxt,
instance)
return self.compute_rpcapi.validate_console_port(message.ctxt,
instance, console_port, console_type)
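# Editor's note, not part of the original module: the method_info dict
# that run_compute_api_method() above expects looks like the following
# (the 'pause' method name is only an illustration):
#
#     method_info = {'method': 'pause',
#                    'method_args': [instance_uuid],
#                    'method_kwargs': {}}
#
# The first positional argument must be the instance uuid; the handler
# replaces it with the loaded instance before invoking the compute API.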
class _BroadcastMessageMethods(_BaseMessageMethods):
"""These are the methods that can be called as a part of a broadcast
message.
"""
def _at_the_top(self):
"""Are we the API level?"""
return not self.state_manager.get_parent_cells()
def instance_update_at_top(self, message, instance, **kwargs):
"""Update an instance in the DB if we're a top level cell."""
if not self._at_the_top():
return
instance_uuid = instance['uuid']
# Remove things that we can't update in the top level cells.
# 'metadata' is only updated in the API cell, so don't overwrite
# it based on what child cells say. Make sure to update
# 'cell_name' based on the routing path.
items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name',
'name', 'metadata']
for key in items_to_remove:
instance.pop(key, None)
instance['cell_name'] = _reverse_path(message.routing_path)
# Fixup info_cache. We'll have to update this separately if
# it exists.
info_cache = instance.pop('info_cache', None)
if info_cache is not None:
info_cache.pop('id', None)
info_cache.pop('instance', None)
# Fixup system_metadata (should be a dict for update, not a list)
if ('system_metadata' in instance and
isinstance(instance['system_metadata'], list)):
sys_metadata = dict([(md['key'], md['value'])
for md in instance['system_metadata']])
instance['system_metadata'] = sys_metadata
LOG.debug(_("Got update for instance %(instance_uuid)s: "
"%(instance)s") % locals())
# It's possible due to some weird condition that the instance
# was already set as deleted... so we'll attempt to update
# it with permissions that allow us to read deleted.
with utils.temporary_mutation(message.ctxt, read_deleted="yes"):
try:
self.db.instance_update(message.ctxt, instance_uuid,
instance, update_cells=False)
except exception.NotFound:
# FIXME(comstud): Strange. Need to handle quotas here,
# if we actually want this code to remain..
self.db.instance_create(message.ctxt, instance)
if info_cache:
try:
self.db.instance_info_cache_update(message.ctxt,
instance_uuid, info_cache, update_cells=False)
except exception.InstanceInfoCacheNotFound:
# Can happen if we try to update a deleted instance's
# network information.
pass
def instance_destroy_at_top(self, message, instance, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
if not self._at_the_top():
return
instance_uuid = instance['uuid']
LOG.debug(_("Got update to delete instance %(instance_uuid)s") %
locals())
try:
self.db.instance_destroy(message.ctxt, instance_uuid,
update_cells=False)
except exception.InstanceNotFound:
pass
def instance_delete_everywhere(self, message, instance, delete_type,
**kwargs):
"""Call compute API delete() or soft_delete() in every cell.
This is used when the API cell doesn't know what cell an instance
belongs to but the instance was requested to be deleted or
soft-deleted. So, we'll run it everywhere.
"""
LOG.debug(_("Got broadcast to %(delete_type)s delete instance"),
locals(), instance=instance)
if delete_type == 'soft':
self.compute_api.soft_delete(message.ctxt, instance)
else:
self.compute_api.delete(message.ctxt, instance)
def instance_fault_create_at_top(self, message, instance_fault, **kwargs):
"""Destroy an instance from the DB if we're a top level cell."""
if not self._at_the_top():
return
items_to_remove = ['id']
for key in items_to_remove:
instance_fault.pop(key, None)
log_str = _("Got message to create instance fault: "
"%(instance_fault)s")
LOG.debug(log_str, locals())
self.db.instance_fault_create(message.ctxt, instance_fault)
def bw_usage_update_at_top(self, message, bw_update_info, **kwargs):
"""Update Bandwidth usage in the DB if we're a top level cell."""
if not self._at_the_top():
return
self.db.bw_usage_update(message.ctxt, **bw_update_info)
def _sync_instance(self, ctxt, instance):
if instance['deleted']:
self.msg_runner.instance_destroy_at_top(ctxt, instance)
else:
self.msg_runner.instance_update_at_top(ctxt, instance)
def sync_instances(self, message, project_id, updated_since, deleted,
**kwargs):
projid_str = project_id is None and "<all>" or project_id
since_str = updated_since is None and "<all>" or updated_since
LOG.info(_("Forcing a sync of instances, project_id="
"%(projid_str)s, updated_since=%(since_str)s"), locals())
if updated_since is not None:
updated_since = timeutils.parse_isotime(updated_since)
instances = cells_utils.get_instances_to_sync(message.ctxt,
updated_since=updated_since, project_id=project_id,
deleted=deleted)
for instance in instances:
self._sync_instance(message.ctxt, instance)
def service_get_all(self, message, filters):
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
services = self.db.service_get_all(message.ctxt, disabled=disabled)
ret_services = []
for service in services:
service = jsonutils.to_primitive(service)
for key, val in filters.iteritems():
if service[key] != val:
break
else:
ret_services.append(service)
return ret_services
def compute_node_get_all(self, message, hypervisor_match):
"""Return compute nodes in this cell."""
if hypervisor_match is not None:
nodes = self.db.compute_node_search_by_hypervisor(message.ctxt,
hypervisor_match)
else:
nodes = self.db.compute_node_get_all(message.ctxt)
return jsonutils.to_primitive(nodes)
def compute_node_stats(self, message):
"""Return compute node stats from this cell."""
return self.db.compute_node_statistics(message.ctxt)
def consoleauth_delete_tokens(self, message, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
if not self._at_the_top():
return
self.consoleauth_rpcapi.delete_tokens_for_instance(message.ctxt,
instance_uuid)
_CELL_MESSAGE_TYPE_TO_MESSAGE_CLS = {'targeted': _TargetedMessage,
'broadcast': _BroadcastMessage,
'response': _ResponseMessage}
_CELL_MESSAGE_TYPE_TO_METHODS_CLS = {'targeted': _TargetedMessageMethods,
'broadcast': _BroadcastMessageMethods,
'response': _ResponseMessageMethods}
#
# Below are the public interfaces into this module.
#
class MessageRunner(object):
"""This class is the main interface into creating messages and
processing them.
Public methods in this class are typically called by the CellsManager
to create a new message and process it, with the exception of
'message_from_json' which should be used by CellsDrivers to convert
a JSONified message it has received back into the appropriate Message
class.
Private methods are used internally when we need to keep some
'global' state. For instance, eventlet queues used for responses are
held in this class. Also, when a Message is process()ed above and
it's determined we should take action locally,
_process_message_locally() will be called.
When needing to add a new method to call in a Cell2Cell message,
define the new method below and also add it to the appropriate
MessageMethods class where the real work will be done.
"""
def __init__(self, state_manager):
self.state_manager = state_manager
cells_scheduler_cls = importutils.import_class(
CONF.cells.scheduler)
self.scheduler = cells_scheduler_cls(self)
self.response_queues = {}
self.methods_by_type = {}
self.our_name = CONF.cells.name
for msg_type, cls in _CELL_MESSAGE_TYPE_TO_METHODS_CLS.iteritems():
self.methods_by_type[msg_type] = cls(self)
def _process_message_locally(self, message):
"""Message processing will call this when its determined that
the message should be processed within this cell. Find the
method to call based on the message type, and call it. The
caller is responsible for catching exceptions and returning
results to cells, if needed.
"""
methods = self.methods_by_type[message.message_type]
fn = getattr(methods, message.method_name)
return fn(message, **message.method_kwargs)
def _put_response(self, response_uuid, response):
"""Put a response into a response queue. This is called when
a _ResponseMessage is processed in the cell that initiated a
'call' to another cell.
"""
resp_queue = self.response_queues.get(response_uuid)
if not resp_queue:
# Response queue is gone. We must have restarted or we
# received a response after our timeout period.
return
resp_queue.put(response)
def _setup_response_queue(self, message):
"""Set up an eventlet queue to use to wait for replies.
Replies come back from the target cell as a _ResponseMessage
being sent back to the source.
"""
resp_queue = queue.Queue()
self.response_queues[message.uuid] = resp_queue
return resp_queue
def _cleanup_response_queue(self, message):
"""Stop tracking the response queue either because we're
done receiving responses, or we've timed out.
"""
try:
del self.response_queues[message.uuid]
except KeyError:
# Ignore if queue is gone already somehow.
pass
def _create_response_message(self, ctxt, direction, target_cell,
response_uuid, response_kwargs, **kwargs):
"""Create a ResponseMessage. This is used internally within
the messaging module.
"""
return _ResponseMessage(self, ctxt, 'parse_responses',
response_kwargs, direction, target_cell,
response_uuid, **kwargs)
def message_from_json(self, json_message):
"""Turns a message in JSON format into an appropriate Message
instance. This is called when cells receive a message from
another cell.
"""
message_dict = jsonutils.loads(json_message)
message_type = message_dict.pop('message_type')
# Need to convert context back.
ctxt = message_dict['ctxt']
message_dict['ctxt'] = context.RequestContext.from_dict(ctxt)
message_cls = _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS[message_type]
return message_cls(self, **message_dict)
def ask_children_for_capabilities(self, ctxt):
"""Tell child cells to send us capabilities. This is typically
called on startup of the nova-cells service.
"""
child_cells = self.state_manager.get_child_cells()
for child_cell in child_cells:
message = _TargetedMessage(self, ctxt,
'announce_capabilities',
dict(), 'down', child_cell)
message.process()
def ask_children_for_capacities(self, ctxt):
"""Tell child cells to send us capacities. This is typically
called on startup of the nova-cells service.
"""
child_cells = self.state_manager.get_child_cells()
for child_cell in child_cells:
message = _TargetedMessage(self, ctxt, 'announce_capacities',
dict(), 'down', child_cell)
message.process()
def tell_parents_our_capabilities(self, ctxt):
"""Send our capabilities to parent cells."""
parent_cells = self.state_manager.get_parent_cells()
if not parent_cells:
return
my_cell_info = self.state_manager.get_my_state()
capabs = self.state_manager.get_our_capabilities()
LOG.debug(_("Updating parents with our capabilities: %(capabs)s"),
locals())
# We have to turn the sets into lists so they can potentially
# be json encoded when the raw message is sent.
for key, values in capabs.items():
capabs[key] = list(values)
method_kwargs = {'cell_name': my_cell_info.name,
'capabilities': capabs}
for cell in parent_cells:
message = _TargetedMessage(self, ctxt, 'update_capabilities',
method_kwargs, 'up', cell, fanout=True)
message.process()
def tell_parents_our_capacities(self, ctxt):
"""Send our capacities to parent cells."""
parent_cells = self.state_manager.get_parent_cells()
if not parent_cells:
return
my_cell_info = self.state_manager.get_my_state()
capacities = self.state_manager.get_our_capacities()
LOG.debug(_("Updating parents with our capacities: %(capacities)s"),
locals())
method_kwargs = {'cell_name': my_cell_info.name,
'capacities': capacities}
for cell in parent_cells:
message = _TargetedMessage(self, ctxt, 'update_capacities',
method_kwargs, 'up', cell, fanout=True)
message.process()
def schedule_run_instance(self, ctxt, target_cell, host_sched_kwargs):
"""Called by the scheduler to tell a child cell to schedule
a new instance for build.
"""
method_kwargs = dict(host_sched_kwargs=host_sched_kwargs)
message = _TargetedMessage(self, ctxt, 'schedule_run_instance',
method_kwargs, 'down',
target_cell)
message.process()
def run_compute_api_method(self, ctxt, cell_name, method_info, call):
"""Call a compute API method in a specific cell."""
message = _TargetedMessage(self, ctxt, 'run_compute_api_method',
dict(method_info=method_info), 'down',
cell_name, need_response=call)
return message.process()
def instance_update_at_top(self, ctxt, instance):
"""Update an instance at the top level cell."""
message = _BroadcastMessage(self, ctxt, 'instance_update_at_top',
dict(instance=instance), 'up',
run_locally=False)
message.process()
def instance_destroy_at_top(self, ctxt, instance):
"""Destroy an instance at the top level cell."""
message = _BroadcastMessage(self, ctxt, 'instance_destroy_at_top',
dict(instance=instance), 'up',
run_locally=False)
message.process()
def instance_delete_everywhere(self, ctxt, instance, delete_type):
"""This is used by API cell when it didn't know what cell
an instance was in, but the instance was requested to be
deleted or soft_deleted. So, we'll broadcast this everywhere.
"""
method_kwargs = dict(instance=instance, delete_type=delete_type)
message = _BroadcastMessage(self, ctxt,
'instance_delete_everywhere',
method_kwargs, 'down',
run_locally=False)
message.process()
def instance_fault_create_at_top(self, ctxt, instance_fault):
"""Create an instance fault at the top level cell."""
message = _BroadcastMessage(self, ctxt,
'instance_fault_create_at_top',
dict(instance_fault=instance_fault),
'up', run_locally=False)
message.process()
def bw_usage_update_at_top(self, ctxt, bw_update_info):
"""Update bandwidth usage at top level cell."""
message = _BroadcastMessage(self, ctxt, 'bw_usage_update_at_top',
dict(bw_update_info=bw_update_info),
'up', run_locally=False)
message.process()
def sync_instances(self, ctxt, project_id, updated_since, deleted):
"""Force a sync of all instances, potentially by project_id,
and potentially since a certain date/time.
"""
method_kwargs = dict(project_id=project_id,
updated_since=updated_since,
deleted=deleted)
message = _BroadcastMessage(self, ctxt, 'sync_instances',
method_kwargs, 'down',
run_locally=False)
message.process()
def service_get_all(self, ctxt, filters=None):
method_kwargs = dict(filters=filters)
message = _BroadcastMessage(self, ctxt, 'service_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def service_get_by_compute_host(self, ctxt, cell_name, host_name):
method_kwargs = dict(host_name=host_name)
message = _TargetedMessage(self, ctxt,
'service_get_by_compute_host',
method_kwargs, 'down', cell_name,
need_response=True)
return message.process()
def proxy_rpc_to_manager(self, ctxt, cell_name, host_name, topic,
rpc_message, call, timeout):
method_kwargs = {'host_name': host_name,
'topic': topic,
'rpc_message': rpc_message,
'timeout': timeout}
message = _TargetedMessage(self, ctxt,
'proxy_rpc_to_manager',
method_kwargs, 'down', cell_name,
need_response=call)
return message.process()
def task_log_get_all(self, ctxt, cell_name, task_name,
period_beginning, period_ending,
host=None, state=None):
"""Get task logs from the DB from all cells or a particular
cell.
If 'cell_name' is None or '', get responses from all cells.
If 'host' is not None, filter by host.
If 'state' is not None, filter by state.
Return a list of Response objects.
"""
method_kwargs = dict(task_name=task_name,
period_beginning=period_beginning,
period_ending=period_ending,
host=host, state=state)
if cell_name:
message = _TargetedMessage(self, ctxt, 'task_log_get_all',
method_kwargs, 'down',
cell_name, need_response=True)
# Caller should get a list of Responses.
return [message.process()]
message = _BroadcastMessage(self, ctxt, 'task_log_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_get_all(self, ctxt, hypervisor_match=None):
"""Return list of compute nodes in all child cells."""
method_kwargs = dict(hypervisor_match=hypervisor_match)
message = _BroadcastMessage(self, ctxt, 'compute_node_get_all',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_stats(self, ctxt):
"""Return compute node stats from all child cells."""
method_kwargs = dict()
message = _BroadcastMessage(self, ctxt, 'compute_node_stats',
method_kwargs, 'down',
run_locally=True, need_response=True)
return message.process()
def compute_node_get(self, ctxt, cell_name, compute_id):
"""Return compute node entry from a specific cell by ID."""
method_kwargs = dict(compute_id=compute_id)
message = _TargetedMessage(self, ctxt, 'compute_node_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def actions_get(self, ctxt, cell_name, instance_uuid):
method_kwargs = dict(instance_uuid=instance_uuid)
message = _TargetedMessage(self, ctxt, 'actions_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def action_get_by_request_id(self, ctxt, cell_name, instance_uuid,
request_id):
method_kwargs = dict(instance_uuid=instance_uuid,
request_id=request_id)
message = _TargetedMessage(self, ctxt, 'action_get_by_request_id',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def action_events_get(self, ctxt, cell_name, action_id):
method_kwargs = dict(action_id=action_id)
message = _TargetedMessage(self, ctxt, 'action_events_get',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
def consoleauth_delete_tokens(self, ctxt, instance_uuid):
"""Delete consoleauth tokens for an instance in API cells."""
message = _BroadcastMessage(self, ctxt, 'consoleauth_delete_tokens',
dict(instance_uuid=instance_uuid),
'up', run_locally=False)
message.process()
def validate_console_port(self, ctxt, cell_name, instance_uuid,
console_port, console_type):
"""Validate console port with child cell compute node."""
method_kwargs = {'instance_uuid': instance_uuid,
'console_port': console_port,
'console_type': console_type}
message = _TargetedMessage(self, ctxt, 'validate_console_port',
method_kwargs, 'down',
cell_name, need_response=True)
return message.process()
@staticmethod
def get_message_types():
return _CELL_MESSAGE_TYPE_TO_MESSAGE_CLS.keys()
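# Editor's sketch, not part of the original module: typical caller-side
# use of the MessageRunner 'call' interface. 'runner' and 'ctxt' are
# placeholders for an initialized MessageRunner and a request context.
def _example_collect_services(runner, ctxt):
    cell_services = []
    # service_get_all() broadcasts down and returns a list of Response
    # objects, one per cell (including our own, since run_locally=True).
    for response in runner.service_get_all(ctxt, filters={}):
        if not response.failure:
            cell_services.append((response.cell_name, response.value))
    return cell_services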
class Response(object):
"""Holds a response from a cell. If there was a failure, 'failure'
will be True and 'response' will contain an encoded Exception.
"""
def __init__(self, cell_name, value, failure):
self.failure = failure
self.cell_name = cell_name
self.value = value
def to_json(self):
resp_value = self.value
if self.failure:
resp_value = rpc_common.serialize_remote_exception(resp_value,
log_failure=False)
_dict = {'cell_name': self.cell_name,
'value': resp_value,
'failure': self.failure}
return jsonutils.dumps(_dict)
@classmethod
def from_json(cls, json_message):
_dict = jsonutils.loads(json_message)
if _dict['failure']:
resp_value = rpc_common.deserialize_remote_exception(
CONF, _dict['value'])
_dict['value'] = resp_value
return cls(**_dict)
def value_or_raise(self):
if self.failure:
if isinstance(self.value, (tuple, list)):
raise self.value[0], self.value[1], self.value[2]
else:
raise self.value
return self.value
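# Editor's sketch, not part of the original module: Response instances
# round-trip through JSON between cells, and value_or_raise() re-raises
# a remote failure locally.
#
#     resp = Response('api!child', {'id': 1}, False)
#     copy = Response.from_json(resp.to_json())
#     assert copy.cell_name == 'api!child'
#     assert copy.value_or_raise() == {'id': 1}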
| apache-2.0 |
inetprocess/docker-lamp | tests/configreader_test.py | 2 | 2846 | #!/usr/bin/env python
import io
import os
import re
import sys
import unittest
import pytest
from stakkr.configreader import Config
base_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, base_dir + '/../')
# https://docs.python.org/3/library/unittest.html#assert-methods
class ConfigReaderTest(unittest.TestCase):
def test_bad_config(self):
"""Test a non existing configuration file"""
c = Config('/does/not/exists.yml')
with self.assertRaisesRegex(IOError, "No such file or directory: '/does/not/exists.yml'"):
c.read()
def test_invalid_config(self):
"""Test an existing configuration file but invalid"""
c = Config(base_dir + '/static/config_invalid.yml')
self.assertFalse(c.read())
self.assertGreater(len(c.error), 0)
self.assertRegex(c.error, '.*Additional properties are not allowed.*')
# The rest doesn't work, for an unknown reason
pytest.skip('Error trying to capture stderr')
return
# Don't go further with python < 3.5
try:
from contextlib import redirect_stderr
except Exception:
return
f = io.StringIO()
with redirect_stderr(f):
c.display_errors()
err = f.getvalue()
regex = re.compile('.*config_invalid.yml.*', re.MULTILINE)
self.assertRegex(err, regex)
regex = re.compile('.*Failed validating main config or plugin configs.*', re.MULTILINE)
self.assertRegex(err, regex)
regex = re.compile('Additional properties are not allowed.*', re.MULTILINE)
self.assertRegex(err, regex)
def test_valid_config(self):
"""Test an existing and valid configuration file"""
c = Config(base_dir + '/static/stakkr.yml')
config = c.read()
self.assertIs(dict, type(config))
self.assertTrue('services' in config)
self.assertTrue('php' in config['services'])
self.assertTrue('version' in config['services']['php'])
self.assertTrue('enabled' in config['services']['php'])
self.assertEqual(7.2, config['services']['php']['version'])
self.assertTrue(config['services']['php']['enabled'])
self.assertTrue('apache' not in config['services'])
self.assertTrue('project_name' in config)
self.assertEqual('static', config['project_name'])
def test_valid_config_no_project(self):
"""Test an existing and valid configuration file"""
c = Config(base_dir + '/static/config_valid_network.yml')
config = c.read()
self.assertIs(dict, type(config))
self.assertTrue('services' in config)
self.assertTrue('project_name' in config)
self.assertEqual('testnet', config['project_name'])
if __name__ == "__main__":
unittest.main()
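# Editor's sketch, not part of the original test module: the happy path
# these tests exercise, assuming a schema-valid stakkr.yml. read()
# returns the parsed dict on success and a falsy value on failure, in
# which case display_errors() reports what went wrong.
#
#     config = Config('stakkr.yml').read()
#     if not config:
#         Config('stakkr.yml').display_errors()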
| apache-2.0 |
svost/bitcoin | qa/rpc-tests/invalidateblock.py | 104 | 3077 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class InvalidateTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
def run_test(self):
print("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
print("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
print("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
print("\nMake sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
print("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
print("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
print("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
print("..and then mine a block")
self.nodes[2].generate(1)
print("Verify all nodes are at the right height")
time.sleep(5)
for i in range(3):
print(i,self.nodes[i].getblockcount())
assert(self.nodes[2].getblockcount() == 3)
assert(self.nodes[0].getblockcount() == 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
| mit |
chuckchen/spark | python/pyspark/mllib/stat/KernelDensity.py | 22 | 1969 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from pyspark.mllib.common import callMLlibFunc
from pyspark.rdd import RDD
class KernelDensity(object):
"""
Estimate probability density at required points given an RDD of samples
from the population.
Examples
--------
>>> kd = KernelDensity()
>>> sample = sc.parallelize([0.0, 1.0])
>>> kd.setSample(sample)
>>> kd.estimate([0.0, 1.0])
array([ 0.12938758, 0.12938758])
"""
def __init__(self):
self._bandwidth = 1.0
self._sample = None
def setBandwidth(self, bandwidth):
"""Set bandwidth of each sample. Defaults to 1.0"""
self._bandwidth = bandwidth
def setSample(self, sample):
"""Set sample points from the population. Should be a RDD"""
if not isinstance(sample, RDD):
raise TypeError("samples should be a RDD, received %s" % type(sample))
self._sample = sample
def estimate(self, points):
"""Estimate the probability density at points"""
points = list(points)
densities = callMLlibFunc(
"estimateKernelDensity", self._sample, self._bandwidth, points)
return np.asarray(densities)
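# Editor's sketch, not part of the original module: end-to-end usage with
# an explicit bandwidth. 'sc' is assumed to be an existing SparkContext.
#
#     kd = KernelDensity()
#     kd.setSample(sc.parallelize([1.0, 2.0, 2.5, 3.0]))
#     kd.setBandwidth(0.5)
#     densities = kd.estimate([2.0, 3.0])  # numpy array of estimates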
| apache-2.0 |
neilhan/tensorflow | tensorflow/contrib/distributions/python/ops/distribution.py | 3 | 29570 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import numpy as np
import six
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
"sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "std", "mode"]
@six.add_metaclass(abc.ABCMeta)
class BaseDistribution(object):
"""Simple abstract base class for probability distributions.
Implementations of core distributions to be included in the `distributions`
module should subclass `Distribution`. This base class may be useful to users
that want to fulfill a simpler distribution contract.
"""
@abc.abstractmethod
def sample_n(self, n, seed=None, name="sample"):
# See `Distribution.sample_n` for docstring.
pass
@abc.abstractmethod
def log_prob(self, value, name="log_prob"):
# See `Distribution.log_prob` for docstring.
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create
# a non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
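# Editor's sketch, not part of the original module: _copy_fn() returns an
# independent function object, so mutating the copy's attributes (as the
# metaclass below does with __doc__) leaves the original untouched.
#
#     def f():
#       """Original docstring."""
#     g = _copy_fn(f)
#     g.__doc__ = "Patched docstring."
#     assert f.__doc__ == "Original docstring."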
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
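# Editor's sketch, not part of the original module: _update_docstring()
# splices the appended text just before the last "Args:" section, after
# indenting it for markdown rendering.
#
#     doc = "Summary line.\n\nArgs:\n  value: input tensor."
#     new_doc = _update_docstring(doc, "Extra subclass notes.")
#     # new_doc now reads: the summary, then the indented extra notes,
#     # then the original "Args:" block.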
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass BaseDistribution?")
base = baseclasses[0]
if base == BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
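# Editor's sketch, not part of the original module: what the metaclass
# buys a subclass. A subclass documents only its private specialization,
# yet the public wrapper it inherits gains that text automatically:
#
#     class MyDist(Distribution):
#       def _log_prob(self, value):
#         """Uses a closed form specific to MyDist."""
#         ...
#
#     # MyDist.log_prob.__doc__ now contains the base class docstring
#     # plus "Additional documentation from `MyDist`: ...".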
@six.add_metaclass(_DistributionMeta)
class Distribution(BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g., Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
`log_prob` reflects this broadcasting, as does the return value of `sample` and
`sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
# (5, 2, 2), which is (n,) + batch_shape + event_shape, where n=5,
# batch_shape=(2, 2), and event_shape=().
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape (2, 2) as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is (2, 2), one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `a` and `b`, and does not have a well-defined mode if
`a < 1` or `b < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
parameters,
is_continuous,
is_reparameterized,
validate_args,
allow_nan_stats,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
parameters: Python dictionary of parameters used by this `Distribution`.
is_continuous: Python boolean. If `True` this
`Distribution` is continuous over its supported domain.
is_reparameterized: Python boolean. If `True` this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python boolean. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
name: A name for this distribution (optional).
"""
self._name = name
if self._name is None:
with ops.name_scope(type(self).__name__) as ns:
self._name = ns
self._dtype = dtype
self._parameters = parameters or {}
self._is_continuous = is_continuous
self._is_reparameterized = is_reparameterized
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
Subclasses should override static method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. TensorShape) shapes.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used by this `Distribution`."""
return self._parameters
@property
def is_continuous(self):
return self._is_continuous
@property
def is_reparameterized(self):
return self._is_reparameterized
@property
def allow_nan_stats(self):
"""Python boolean describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance of a
Cauchy distribution is infinity. However, sometimes the statistic is
undefined, e.g., if a distribution's pdf does not achieve a maximum within
the support of the distribution, the mode is undefined. If the mean is
undefined, then by definition the variance is undefined. E.g., the mean of
Student's t with df = 1 is undefined (there is no clear way to say it is
either + or - infinity), so the variance = E[(X - mean)^2] is also
undefined.
Returns:
allow_nan_stats: Python boolean.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python boolean indicated possibly expensive checks are enabled."""
return self._validate_args
def _batch_shape(self):
raise NotImplementedError("batch_shape is not implemented")
def batch_shape(self, name="batch_shape"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
return self._batch_shape()
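# Example of the semantics above (illustrative): a Normal-like distribution
# constructed with a [3, 2]-shaped location parameter and a scalar scale has
# batch_shape [3, 2], i.e. it represents 3 * 2 = 6 independent distributions
# of the same family evaluated within one set of ops.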
def _get_batch_shape(self):
raise NotImplementedError("get_batch_shape is not implemented")
def get_batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._get_batch_shape()
def _event_shape(self):
raise NotImplementedError("event_shape is not implemented")
def event_shape(self, name="event_shape"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
return self._event_shape()
def _get_event_shape(self):
raise NotImplementedError("get_event_shape is not implemented")
def get_event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._get_event_shape()
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def sample(self, sample_shape=(), seed=None, name="sample"):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
if sample_shape.get_shape().ndims == 0:
return self.sample_n(sample_shape, seed)
sample_shape, total = self._expand_sample_shape(sample_shape)
samples = self.sample_n(total, seed)
output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
array_ops.shape(samples), [1], [-1])])
output = array_ops.reshape(samples, output_shape)
output.set_shape(tensor_util.constant_value_as_shape(
sample_shape).concatenate(samples.get_shape()[1:]))
return output
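# Shape sketch for `sample` (assuming `dist` is any concrete subclass with
# batch shape [B] and scalar events): dist.sample() -> shape [B],
# dist.sample(5) -> [5, B], dist.sample([4, 5]) -> [4, 5, B]; i.e. the
# requested sample_shape is prepended to the batch/event dimensions.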
def sample_n(self, n, seed=None, name="sample_n"):
"""Generate `n` samples.
Args:
n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Python integer seed for RNG
name: name to give to the op.
Returns:
samples: a `Tensor` with a prepended dimension (n,).
Raises:
TypeError: if `n` is not an integer type.
"""
with self._name_scope(name, values=[n]):
n = ops.convert_to_tensor(n, name="n")
if not n.dtype.is_integer:
raise TypeError("n.dtype=%s is not an integer type" % n.dtype)
x = self._sample_n(n, seed)
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(n))
batch_ndims = self.get_batch_shape().ndims
event_ndims = self.get_event_shape().ndims
if batch_ndims is not None and event_ndims is not None:
inferred_shape = sample_shape.concatenate(
self.get_batch_shape().concatenate(
self.get_event_shape()))
x.set_shape(inferred_shape)
elif x.get_shape().ndims is not None and x.get_shape().ndims > 0:
x.get_shape()[0].merge_with(sample_shape)
if batch_ndims is not None and batch_ndims > 0:
x.get_shape()[1:1+batch_ndims].merge_with(self.get_batch_shape())
if event_ndims is not None and event_ndims > 0:
x.get_shape()[-event_ndims:].merge_with(self.get_event_shape())
return x
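# Note on the static shape hints set above (illustrative): for a subclass
# with known batch shape [3] and scalar events, x = dist.sample_n(7) should
# report x.get_shape() == [7, 3] without running the graph, provided `n` is
# a constant; otherwise only the batch/event portion can be merged in.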
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def log_prob(self, value, name="log_prob"):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._log_prob(value)
def _prob(self, value):
raise NotImplementedError("prob is not implemented")
def prob(self, value, name="prob"):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._prob(value)
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def log_cdf(self, value, name="log_cdf"):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._log_cdf(value)
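# Why a dedicated log_cdf pays off (illustrative sketch, float32): far in
# the left tail cdf(x) can underflow to exactly 0.0, so taking
# math_ops.log(dist.cdf(x)) evaluates to -inf, while a direct _log_cdf
# implementation can still return a finite, accurate log-probability.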
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def cdf(self, value, name="cdf"):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._cdf(value)
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def log_survival_function(self, value, name="log_survival_function"):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._log_survival_function(value)
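# Mirror-image sketch for the right tail: when P[X > x] is tiny,
# 1. - dist.cdf(x) rounds to 0.0 and its log to -inf, whereas a dedicated
# _log_survival_function can compute Log[ P[X > x] ] accurately for x >> 1.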
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def survival_function(self, value, name="survival_function"):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
return self._survival_function(value)
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shanon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance."""
with self._name_scope(name):
return self._variance()
def _std(self):
raise NotImplementedError("std is not implemented")
def std(self, name="std"):
"""Standard deviation."""
with self._name_scope(name):
return self._std()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def log_pdf(self, value, name="log_pdf"):
"""Log probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
if not self.is_continuous:
raise TypeError("log_pdf is undefined for non-continuous distributions.")
return self.log_prob(value, name=name)
def pdf(self, value, name="pdf"):
"""Probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
if not self.is_continuous:
raise TypeError("pdf is undefined for non-continuous distributions.")
return self.prob(value, name=name)
def log_pmf(self, value, name="log_pmf"):
"""Log probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
if self.is_continuous:
raise TypeError("log_pmf is undefined for continuous distributions.")
return self.log_prob(value, name=name)
def pmf(self, value, name="pmf"):
"""Probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
Returns:
pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
if self.is_continuous:
raise TypeError("pmf is undefined for continuous distributions.")
return self.prob(value, name=name)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + list(self.parameters.values()))) as scope:
yield scope
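# Effect sketch (assuming self.name == "Normal"): ops created under
# `with self._name_scope("sample"):` receive names like "Normal/sample/...",
# and the distribution's parameter tensors are passed as `values` so the
# scope is attached to the correct graph context.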
def _expand_sample_shape(self, sample_shape):
"""Helper to `sample` which ensures sample_shape is 1D."""
sample_shape_static_val = tensor_util.constant_value(sample_shape)
ndims = sample_shape.get_shape().ndims
if sample_shape_static_val is None:
if ndims is None or not sample_shape.get_shape().is_fully_defined():
ndims = array_ops.rank(sample_shape)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array((1,), dtype=dtypes.int32.as_numpy_dtype()),
array_ops.shape(sample_shape))
sample_shape = array_ops.reshape(sample_shape, expanded_shape)
total = math_ops.reduce_prod(sample_shape) # reduce_prod([]) == 1
else:
if ndims is None:
raise ValueError(
"Shouldn't be here; ndims cannot be none when we have a "
"tf.constant shape.")
if ndims == 0:
sample_shape_static_val = np.reshape(sample_shape_static_val, [1])
sample_shape = ops.convert_to_tensor(
sample_shape_static_val,
dtype=dtypes.int32,
name="sample_shape")
total = np.prod(sample_shape_static_val,
dtype=dtypes.int32.as_numpy_dtype())
return sample_shape, total
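# Behavior sketch: a scalar sample_shape of 5 is reshaped to [5] with
# total == 5; a vector [4, 5] stays 1-D with total == 20; an empty shape []
# yields total == 1 (reduce_prod([]) == 1), i.e. a single sample.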
distribution_util.override_docstring_if_empty(
BaseDistribution.sample_n, doc_str=Distribution.sample_n.__doc__)
distribution_util.override_docstring_if_empty(
BaseDistribution.log_prob, doc_str=Distribution.log_prob.__doc__)
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_geographic_view_service.py | 1 | 30577 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v8.enums.types import geo_targeting_type
from google.ads.googleads.v8.resources.types import geographic_view
from google.ads.googleads.v8.services.services.geographic_view_service import GeographicViewServiceClient
from google.ads.googleads.v8.services.services.geographic_view_service import transports
from google.ads.googleads.v8.services.types import geographic_view_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert GeographicViewServiceClient._get_default_mtls_endpoint(None) is None
assert GeographicViewServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert GeographicViewServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert GeographicViewServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert GeographicViewServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert GeographicViewServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_geographic_view_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = GeographicViewServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_geographic_view_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = GeographicViewServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = GeographicViewServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_geographic_view_service_client_get_transport_class():
transport = GeographicViewServiceClient.get_transport_class()
assert transport == transports.GeographicViewServiceGrpcTransport
transport = GeographicViewServiceClient.get_transport_class("grpc")
assert transport == transports.GeographicViewServiceGrpcTransport
@mock.patch.object(GeographicViewServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GeographicViewServiceClient))
def test_geographic_view_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.GeographicViewServiceClient.get_transport_class') as gtc:
transport = transports.GeographicViewServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = GeographicViewServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.GeographicViewServiceClient.get_transport_class') as gtc:
client = GeographicViewServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = GeographicViewServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = GeographicViewServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = GeographicViewServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = GeographicViewServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = GeographicViewServiceClient()
@mock.patch.object(GeographicViewServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GeographicViewServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_geographic_view_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = GeographicViewServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = GeographicViewServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = GeographicViewServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_geographic_view_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = GeographicViewServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_geographic_view(transport: str = 'grpc', request_type=geographic_view_service.GetGeographicViewRequest):
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_geographic_view),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = geographic_view.GeographicView(
resource_name='resource_name_value',
location_type=geo_targeting_type.GeoTargetingTypeEnum.GeoTargetingType.UNKNOWN,
country_criterion_id=2158,
)
response = client.get_geographic_view(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == geographic_view_service.GetGeographicViewRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, geographic_view.GeographicView)
assert response.resource_name == 'resource_name_value'
assert response.location_type == geo_targeting_type.GeoTargetingTypeEnum.GeoTargetingType.UNKNOWN
assert response.country_criterion_id == 2158
def test_get_geographic_view_from_dict():
test_get_geographic_view(request_type=dict)
def test_get_geographic_view_field_headers():
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = geographic_view_service.GetGeographicViewRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_geographic_view),
'__call__') as call:
call.return_value = geographic_view.GeographicView()
client.get_geographic_view(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_geographic_view_flattened():
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_geographic_view),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = geographic_view.GeographicView()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_geographic_view(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_geographic_view_flattened_error():
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_geographic_view(
geographic_view_service.GetGeographicViewRequest(),
resource_name='resource_name_value',
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.GeographicViewServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.GeographicViewServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = GeographicViewServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.GeographicViewServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.GeographicViewServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.GeographicViewServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_geographic_view_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.GeographicViewServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_geographic_view',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_geographic_view_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v8.services.services.geographic_view_service.transports.GeographicViewServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.GeographicViewServiceTransport()
adc.assert_called_once()
def test_geographic_view_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
GeographicViewServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_geographic_view_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.GeographicViewServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_geographic_view_service_host_no_port():
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_geographic_view_service_host_with_port():
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_geographic_view_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.GeographicViewServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.GeographicViewServiceGrpcTransport])
def test_geographic_view_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.GeographicViewServiceGrpcTransport,])
def test_geographic_view_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_geographic_view_path():
customer_id = "squid"
country_criterion_id = "clam"
location_type = "whelk"
expected = "customers/{customer_id}/geographicViews/{country_criterion_id}~{location_type}".format(customer_id=customer_id, country_criterion_id=country_criterion_id, location_type=location_type, )
actual = GeographicViewServiceClient.geographic_view_path(customer_id, country_criterion_id, location_type)
assert expected == actual
def test_parse_geographic_view_path():
expected = {
"customer_id": "octopus",
"country_criterion_id": "oyster",
"location_type": "nudibranch",
}
path = GeographicViewServiceClient.geographic_view_path(**expected)
# Check that the path construction is reversible.
actual = GeographicViewServiceClient.parse_geographic_view_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "cuttlefish"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = GeographicViewServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "mussel",
}
path = GeographicViewServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = GeographicViewServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "winkle"
expected = "folders/{folder}".format(folder=folder, )
actual = GeographicViewServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "nautilus",
}
path = GeographicViewServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = GeographicViewServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "scallop"
expected = "organizations/{organization}".format(organization=organization, )
actual = GeographicViewServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "abalone",
}
path = GeographicViewServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = GeographicViewServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "squid"
expected = "projects/{project}".format(project=project, )
actual = GeographicViewServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "clam",
}
path = GeographicViewServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = GeographicViewServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "whelk"
location = "octopus"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = GeographicViewServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "oyster",
"location": "nudibranch",
}
path = GeographicViewServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = GeographicViewServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.GeographicViewServiceTransport, '_prep_wrapped_messages') as prep:
client = GeographicViewServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.GeographicViewServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = GeographicViewServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 |
sposs/DIRAC | Core/scripts/dirac-admin-bdii-info.py | 13 | 4779 | #! /usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-admin-bdii-info
# Author : Aresh Vedaee
########################################################################
"""
Check info on BDII for a given CE or site
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
def registerSwitches():
'''
Registers all switches that can be used while calling the script from the
command line interface.
'''
Script.registerSwitch( "H:", "host=", "BDII host" )
Script.registerSwitch( "V:", "vo=", "vo" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... <info> <Site|CE>' % Script.scriptName,
'Arguments:',
' Site: Name of the Site (i.e. CERN-PROD)',
' CE: Name of the CE (i.e. cccreamceli05.in2p3.fr)',
' info: Accepted values (ce|ce-state|ce-cluster|ce-vo|site|site-se)' ] ) )
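# Example invocations matching the usage message above (host and resource
# names are illustrative):
#   dirac-admin-bdii-info ce cccreamceli05.in2p3.fr
#   dirac-admin-bdii-info site CERN-PROD -H lcg-bdii.cern.ch:2170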
def parseSwitches():
'''
Parses the arguments passed by the user
'''
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if not len( args ) == 2:
Script.showHelp()
params = {}
params['ce'] = None
params['site'] = None
params['host'] = None
params['vo'] = None
params['info'] = args[0]
ret = getProxyInfo( disableVOMS = True )
if ret['OK'] and 'group' in ret['Value']:
params['vo'] = getVOForGroup( ret['Value']['group'] )
else:
Script.gLogger.error( 'Could not determine VO' )
Script.showHelp()
if params['info'] in ['ce', 'ce-state', 'ce-cluster', 'ce-vo']:
params['ce'] = args[1]
elif params['info']in ['site', 'site-se']:
params['site'] = args[1]
else:
Script.gLogger.error( 'Wrong argument value' )
Script.showHelp()
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ( "H", "host" ):
params['host'] = unprocSw[1]
if unprocSw[0] in ( "V", "vo" ):
params['vo'] = unprocSw[1]
return params
def getInfo( params ):
'''
Retrieve information from BDII
'''
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
if params['info'] == 'ce':
result = diracAdmin.getBDIICE( params['ce'], host = params['host'] )
if params['info'] == 'ce-state':
result = diracAdmin.getBDIICEState( params['ce'], useVO = params['vo'], host = params['host'] )
if params['info'] == 'ce-cluster':
result = diracAdmin.getBDIICluster( params['ce'], host = params['host'] )
if params['info'] == 'ce-vo':
result = diracAdmin.getBDIICEVOView( params['ce'], useVO = params['vo'], host = params['host'] )
if params['info'] == 'site':
result = diracAdmin.getBDIISite( params['site'], host = params['host'] )
if params['info'] == 'site-se':
result = diracAdmin.getBDIISE( params['site'], useVO = params['vo'], host = params['host'] )
if not result['OK']:
print result['Message']
DIRAC.exit( 2 )
return result
def showInfo( result, info ):
'''
Display information
'''
elements = result['Value']
for element in elements:
if info == 'ce' or info == 'all':
print "CE: %s \n{" % element.get( 'GlueSubClusterName', 'Unknown' )
if info == 'ce-state' or info == 'all':
print "CE: %s \n{" % element.get( 'GlueCEUniqueID', 'Unknown' )
if info == 'ce-cluster' or info == 'all':
print "Cluster: %s \n{" % element.get( 'GlueClusterName', 'Unknown' )
if info == 'ce-vo' or info == 'all':
print "CEVOView: %s \n{" % element.get( 'GlueChunkKey', 'Unknown' )
if info == 'site' or info == 'all':
print "Site: %s \n{" % element.get( 'GlueSiteName', 'Unknown' )
if info == 'site-se' or info == 'all':
print "SE: %s \n{" % element.get( 'GlueSEUniqueID', 'Unknown' )
for item in element.iteritems():
print " %s: %s" % item
print "}"
#...............................................................................
if __name__ == "__main__":
#Script initialization
registerSwitches()
#registerUsageMessage()
params = parseSwitches()
result = getInfo( params )
showInfo( result, params['info'] )
DIRAC.exit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
jendrikseipp/rednotebook | win/fetch-dictionary.py | 1 | 1177 | #! /usr/bin/env python3
import argparse
import os.path
import shutil
import utils
DIR = os.path.dirname(os.path.abspath(__file__))
DICT_DIR = os.path.join(DIR, "dicts")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"url", help="URL from ftp://ftp.gnu.org/gnu/aspell/dict/0index.html"
)
parser.add_argument("dest", help="Destination directory")
return parser.parse_args()
def fetch_dict(url, dest):
assert url.endswith(".tar.bz2"), url
filename = os.path.basename(url)
utils.fetch(url, os.path.join(DICT_DIR, filename))
utils.run(["tar", "xjvf", filename], cwd=DICT_DIR)
name = filename[: -len(".tar.bz2")]
path = os.path.join(DICT_DIR, name)
utils.run(["./configure", "--vars", "DESTDIR=tmp"], cwd=path)
utils.run(["make"], cwd=path)
utils.run(["make", "install"], cwd=path)
result_dir = os.path.join(path, "tmp/usr/lib/aspell")
utils.ensure_path(dest)
for dict_file in os.listdir(result_dir):
shutil.copy2(os.path.join(result_dir, dict_file), os.path.join(dest, dict_file))
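# Example run (URL and destination are illustrative; pick any dictionary
# from ftp://ftp.gnu.org/gnu/aspell/dict/0index.html):
#   ./fetch-dictionary.py \
#       ftp://ftp.gnu.org/gnu/aspell/dict/de/aspell6-de-20030222-1.tar.bz2 \
#       build/dicts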
def main():
args = parse_args()
fetch_dict(args.url, args.dest)
main()
| gpl-2.0 |
fernandog/Medusa | ext/html5lib/_tokenizer.py | 64 | 76568 | from __future__ import absolute_import, division, unicode_literals
from six import unichr as chr
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from ._inputstream import HTMLInputStream
from ._trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, parser=None, **kwargs):
self.stream = HTMLInputStream(stream, **kwargs)
self.parser = parser
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
We do our usually processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
If it is not present, a ParseError token is appended to self.tokenQueue.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
# Try/except needed as UCS-2 Python builds' unichar only works
# within the BMP.
char = chr(charAsInt)
except ValueError:
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
# Discard the ; if present. Otherwise, put it back on the queue and
# invoke parseError on parser.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
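# Worked sketch: with the stream positioned at "41;" after "&#x" has been
# consumed, consumeNumberEntity(isHex=True) gathers "41", computes
# int("41", 16) == 0x41, discards the ";" and returns "A"; without the
# ";" it also queues a "numeric-entity-without-semicolon" ParseError.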
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&") or
(allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
# At this point in the process might have named entity. Entities
# are stored in the global variable "entities".
#
# Consume characters and compare to these to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
# Try to find the longest entity the string will match to take care
# of &noti for instance.
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
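    # Sketch of the fast path above (illustrative; input "<div>" assumed):
    #   tagOpenState consumes "d" and seeds currentToken["name"] = "d";
    #   tagNameState then appends "i" and "v" one character at a time, and
    #   ">" triggers emitCurrentToken() -- no charsUntil buffering involved.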
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
# XXX If we emit here the attributes are converted to a dict
# without being checked and when the code below runs we error
# because data is a dict not a list
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, _ in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
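    # Illustrative example of the duplicate check above: for the input
    # <a href="x" HREF="y">, both names fold to "href" via asciiUpper2Lower,
    # so a "duplicate-attribute" ParseError is queued here; per the XXX
    # notes above, the duplicate itself is only dropped later, when
    # emitCurrentToken converts the attribute list to a dict.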
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
# Eat the character directly after the bogus comment which is either a
# ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
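    # Illustration (not in the original source): in this state the input
    # "<?xml version='1.0'?>" -- reached via tagOpenState's "?" branch --
    # produces a single token
    #   {"type": tokenTypes["Comment"], "data": "?xml version='1.0'?"}
    # and the terminating ">" is consumed by the self.stream.char() call.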
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
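    # Summary sketch of the dispatch above (illustrative): "<!--" opens a
    # comment, a case-insensitive "<!doctype" opens a doctype, and
    # "<![CDATA[" is honoured only while the current node sits in a foreign
    # (non-HTML) namespace; anything else is rewound character by character
    # and handed to bogusCommentState.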
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data) # pylint:disable=redefined-variable-type
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for _ in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
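    # Illustrative trace (assumption: foreign-content context, so CDATA is
    # recognised): for the input "x]]ok]]>" the loop above gathers "x" then
    # "]]ok]]", strips the final "]]" terminator, and emits
    #   {"type": tokenTypes["Characters"], "data": "x]]ok"}
    # with any U+0000 replaced by U+FFFD plus an invalid-codepoint error.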
| gpl-3.0 |
nanditav/15712-TensorFlow | tensorflow/contrib/learn/__init__.py | 1 | 1719 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@DNNClassifier
@@DNNRegressor
@@LinearClassifier
@@LinearRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
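# Hedged usage sketch (assumptions: a TensorFlow build where contrib.learn is
# available and DNNClassifier takes feature_columns, hidden_units and
# n_classes -- check the installed version; train_x/train_y are assumed
# numpy arrays):
#
#   from tensorflow.contrib import layers, learn
#   feature_columns = [layers.real_valued_column("x", dimension=4)]
#   clf = learn.DNNClassifier(feature_columns=feature_columns,
#                             hidden_units=[10, 20, 10], n_classes=3)
#   clf.fit(x=train_x, y=train_y, steps=200)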
| apache-2.0 |
pgrazaitis/BCDS | Model/scripts/ear/python/aggregateContention.py | 1 | 9445 | import os
import re
import cx_Oracle
import collections
import datetime
earContentionCode = [2200,2210,2220,3140,3150,4130,4210,4700,4920,5000,5010,5710,6850]
#Primary query: look for all claims/contentions where the participant has at least one contention with an ear-related contention code.
#Order the rows first by participant id (descending), then claim id, then profile date ascending, so the last row seen for a claim carries its most recent profile date.
SQL="select rcc.ptcpnt_vet_id, \
bnft_claim_id, \
date_of_claim, \
prfil_dt, \
claim_ro_number, \
cntntn_id, \
cntntn_clsfcn_id, \
cntntn_clmant_txt, \
p.dob, \
end_prdct_type_cd \
from ah4929_rating_corp_claim rcc \
left join ah4929_person p on p.ptcpnt_vet_id = rcc.ptcpnt_vet_id \
inner join ear_claim_source cs on cs.vet_id = rcc.ptcpnt_vet_id and cs.claim_id = rcc.bnft_claim_id \
where prfil_dt >= date_of_claim \
order by rcc.ptcpnt_vet_id desc,bnft_claim_id,prfil_dt"
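#Illustration of why the ORDER BY matters (hedged sketch; not part of the
#original script): the loop below is a classic control-break scan that
#detects group changes by comparing the current row's ids against the
#previous ones, roughly:
#  prev = None
#  for row in rows_sorted_by_vet_then_claim_then_profile_date:
#      if prev is not None and row[1] != prev[1]:
#          flush_aggregate_for(prev)   #flush_aggregate_for is a hypothetical helper
#      prev = row
#so unsorted input would silently split one claim into several output rows.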
class AggregateContention:
def __init__(self):
self.VET_ID = None
self.CLAIM_ID = None
self.DOB = 0
self.END_PRODUCT_CODE = None
self.RO_NUMBER = 0
self.CLAIM_DATE = None
self.MAX_PROFILE_DATE = None
self.CONTENTION_COUNT = 0
self.EAR_CONTENTION_COUNT = 0
self.C2200 = 0
self.C2210 = 0
self.C2220 = 0
self.C3140 = 0
self.C3150 = 0
self.C4130 = 0
self.C4210 = 0
self.C4700 = 0
self.C4920 = 0
self.C5000 = 0
self.C5010 = 0
self.C5710 = 0
self.C6850 = 0
self.TXT_LOSS = 0
self.TXT_TINITU = 0
def __str__(self):
from pprint import pprint
return str(vars(self))
class Contention:
def __init__(self, ptcpnt_vet_id, bnft_claim_id, claim_date, prfil_dt, claim_ro_number, cntntn_id, cntntn_clsfcn_id, cntntn_clmant_txt, dob, end_prdct_type_cd):
self.ptcpnt_vet_id = ptcpnt_vet_id
self.bnft_claim_id = bnft_claim_id
self.claim_date = claim_date
self.prfil_dt = prfil_dt
self.claim_ro_number = claim_ro_number
self.cntntn_id = cntntn_id
self.cntntn_clsfcn_id = cntntn_clsfcn_id
self.cntntn_clmant_txt = cntntn_clmant_txt
if not dob is None:
self.dob = int(dob)
else:
self.dob = None
self.end_prdct_type_cd = end_prdct_type_cd
def __str__(self):
from pprint import pprint
return str(vars(self))
connection = cx_Oracle.connect('developer/D3vVV0Rd@127.0.0.1:1521/DEV.BCDSS')
writeCursor = connection.cursor()
writeCursor.prepare('INSERT INTO DEVELOPER.EAR_AGGREGATE_CONTENTION (VET_ID, CLAIM_ID, END_PRODUCT_CODE, CLAIM_DATE, CONTENTION_COUNT, EAR_CONTENTION_COUNT, C2200,C2210, C2220,C3140,C3150,C4130,C4210,C4700,C4920,C5000,C5010,C5710, C6850, TXT_LOSS, TXT_TINITU, DOB, RO_NUMBER, MAX_PROFILE_DATE) \
VALUES (:VET_ID, :CLAIM_ID, :END_PRODUCT_CODE, :CLAIM_DATE, :CONTENTION_COUNT, :EAR_CONTENTION_COUNT, \
:C2200, :C2210, :C2220, :C3140, :C3150, :C4130 , :C4210, :C4700, :C4920, :C5000, :C5010, :C5710, :C6850, \
:TXT_LOSS, :TXT_TINITU, \
:DOB, :RO_NUMBER, :MAX_PROFILE_DATE)')
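#Note on the pattern above (hedged sketch of cx_Oracle's documented API):
#cursor.prepare() parses the statement once, and each later
#writeCursor.execute(None, {...}) reuses that prepared statement with a
#fresh dictionary of named binds (:VET_ID, :CLAIM_ID, ...), e.g.
#  writeCursor.prepare('INSERT INTO t (a, b) VALUES (:A, :B)')
#  writeCursor.execute(None, {'A': 1, 'B': 2})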
print(str(datetime.datetime.now()))
cursor = connection.cursor()
cursor.execute(SQL)
aggregateContention = None
counterAggregateContention = None
totalContentions = None
totalEarContentions = None
maxProfileDate = None
currBenefitClaim = -1
currParticipant = -1
counter = 0
for row in cursor:
if counter == 1000: #Commit every 1000 records. Improvement would be to look into aggregate inserts
connection.commit()
counter = 0
contention = Contention(row[0],row[1],row[2],row[3],row[4],row[5],row[6], row[7], row[8], row[9]) #Map loose fields into a Contention object. (Contention is a convenience object)
if currBenefitClaim != contention.bnft_claim_id: #Process insert statement and reset aggregation variables when claim id changes
if currBenefitClaim != -1: #Skip if first time through
#Perform all aggregation calculations before inserting and resetting
aggregateContention.CONTENTION_COUNT = sum(totalContentions.values())
aggregateContention.EAR_CONTENTION_COUNT = sum(totalEarContentions.values())
aggregateContention.MAX_PROFILE_DATE = maxProfileDate[currBenefitClaim]
writeCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'CLAIM_ID' :aggregateContention.CLAIM_ID, 'END_PRODUCT_CODE' :aggregateContention.END_PRODUCT_CODE, 'CLAIM_DATE' :aggregateContention.CLAIM_DATE, 'CONTENTION_COUNT' :aggregateContention.CONTENTION_COUNT, 'EAR_CONTENTION_COUNT' :aggregateContention.EAR_CONTENTION_COUNT,
'C2200' :counterAggregateContention.C2200, 'C2210' :counterAggregateContention.C2210, 'C2220' :counterAggregateContention.C2220, 'C3140' :counterAggregateContention.C3140, 'C3150' :counterAggregateContention.C3150, 'C4130' :counterAggregateContention.C4130, 'C4210' :counterAggregateContention.C4210, 'C4700' :counterAggregateContention.C4700, 'C4920' :counterAggregateContention.C4920, 'C5000' :counterAggregateContention.C5000, 'C5010' :counterAggregateContention.C5010, 'C5710' :counterAggregateContention.C5710, 'C6850' :counterAggregateContention.C6850,
'TXT_LOSS' :counterAggregateContention.TXT_LOSS, 'TXT_TINITU' :counterAggregateContention.TXT_TINITU,
'DOB' :aggregateContention.DOB, 'RO_NUMBER' :aggregateContention.RO_NUMBER, 'MAX_PROFILE_DATE' :aggregateContention.MAX_PROFILE_DATE})
counter += 1
currBenefitClaim = contention.bnft_claim_id #Reset claim id
if currParticipant != contention.ptcpnt_vet_id :
currParticipant = contention.ptcpnt_vet_id #Reset participant id
counterAggregateContention = AggregateContention()
#Capture all claim/person level items that do not change per contention
aggregateContention = AggregateContention()
aggregateContention.VET_ID = contention.ptcpnt_vet_id
aggregateContention.CLAIM_ID = currBenefitClaim
aggregateContention.RO_NUMBER = contention.claim_ro_number
aggregateContention.DOB = contention.dob
aggregateContention.CLAIM_DATE = contention.claim_date
aggregateContention.END_PRODUCT_CODE = contention.end_prdct_type_cd
#Reset the counters
totalContentions = collections.Counter()
totalEarContentions = collections.Counter()
maxProfileDate = collections.Counter()
maxProfileDate[currBenefitClaim] = contention.prfil_dt #If a claim has multiple profile dates, because of the sorting, we always end up with the most recent profile date
totalContentions[currBenefitClaim] += 1 #For every contention add one
if contention.cntntn_clsfcn_id in earContentionCode:
totalEarContentions[currBenefitClaim] +=1 #For any contention that is ear-related, add one
#Use case-insensitive regex matching on the claimant text; the counters below tally how many contentions in the claim mention each keyword
if re.search("Loss",contention.cntntn_clmant_txt,re.IGNORECASE):
counterAggregateContention.TXT_LOSS += 1
if re.search("Tinnitus",contention.cntntn_clmant_txt,re.IGNORECASE):
counterAggregateContention.TXT_TINITU += 1
#Tally each ear-related classification code separately so the output row carries a per-code count
if contention.cntntn_clsfcn_id == 2200:
counterAggregateContention.C2200 += 1
if contention.cntntn_clsfcn_id == 2210:
counterAggregateContention.C2210 += 1
if contention.cntntn_clsfcn_id == 2220:
counterAggregateContention.C2220 += 1
if contention.cntntn_clsfcn_id == 3140:
counterAggregateContention.C3140 += 1
if contention.cntntn_clsfcn_id == 3150:
counterAggregateContention.C3150 += 1
if contention.cntntn_clsfcn_id == 4130:
counterAggregateContention.C4130 += 1
if contention.cntntn_clsfcn_id == 4210:
counterAggregateContention.C4210 += 1
if contention.cntntn_clsfcn_id == 4700:
counterAggregateContention.C4700 += 1
if contention.cntntn_clsfcn_id == 4920:
counterAggregateContention.C4920 += 1
if contention.cntntn_clsfcn_id == 5000:
counterAggregateContention.C5000 += 1
if contention.cntntn_clsfcn_id == 5010:
counterAggregateContention.C5010 += 1
if contention.cntntn_clsfcn_id == 5710:
counterAggregateContention.C5710 += 1
if contention.cntntn_clsfcn_id == 6850:
counterAggregateContention.C6850 += 1
#This block repeats the aggregation and insert after the for loop (note the indentation) so the final claim, which never triggers a group change, is still written out.
aggregateContention.CONTENTION_COUNT = sum(totalContentions.values())
aggregateContention.EAR_CONTENTION_COUNT = sum(totalEarContentions.values())
aggregateContention.MAX_PROFILE_DATE = maxProfileDate[currBenefitClaim]
writeCursor.execute(None, {'VET_ID' :aggregateContention.VET_ID, 'CLAIM_ID' :aggregateContention.CLAIM_ID, 'END_PRODUCT_CODE' :aggregateContention.END_PRODUCT_CODE, 'CLAIM_DATE' :aggregateContention.CLAIM_DATE, 'CONTENTION_COUNT' :aggregateContention.CONTENTION_COUNT, 'EAR_CONTENTION_COUNT' :aggregateContention.EAR_CONTENTION_COUNT,
'C2200' :counterAggregateContention.C2200, 'C2210' :counterAggregateContention.C2210, 'C2220' :counterAggregateContention.C2220, 'C3140' :counterAggregateContention.C3140, 'C3150' :counterAggregateContention.C3150, 'C4130' :counterAggregateContention.C4130, 'C4210' :counterAggregateContention.C4210, 'C4700' :counterAggregateContention.C4700, 'C4920' :counterAggregateContention.C4920, 'C5000' :counterAggregateContention.C5000, 'C5010' :counterAggregateContention.C5010, 'C5710' :counterAggregateContention.C5710, 'C6850' :counterAggregateContention.C6850,
'TXT_LOSS' :counterAggregateContention.TXT_LOSS, 'TXT_TINITU' :counterAggregateContention.TXT_TINITU,
'DOB' :aggregateContention.DOB, 'RO_NUMBER' :aggregateContention.RO_NUMBER, 'MAX_PROFILE_DATE' :aggregateContention.MAX_PROFILE_DATE})
connection.commit()
print(str(datetime.datetime.now()))
writeCursor.close()
cursor.close()
connection.close()
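#Follow-up to the "aggregate inserts" note above (hedged sketch, not part of
#this script): cx_Oracle can batch the per-claim rows with executemany, e.g.
#  rows = []                        #collect bind dicts instead of executing
#  rows.append({'VET_ID': 1})      #one dict per aggregated claim (other binds elided)
#  writeCursor.executemany(None, rows)
#  connection.commit()
#executemany(None, ...) reuses the statement from cursor.prepare() and sends
#one round trip per batch instead of one per row.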
| apache-2.0 |