after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def get_resource_type(resource_id):
parsed = parse_resource_id(resource_id)
# parse_resource_id returns dictionary with "child_type_#" to represent
# types sequence. "type" stores root type.
child_type_keys = [k for k in parsed.keys() if k.find("child_type_") != -1]
types = [parsed.get(k) for k in sorted(child_type_keys)]
types.insert(0, parsed.get("type"))
return "/".join(types)
|
def get_resource_type(resource_id):
parsed = parse_resource_id(resource_id)
# parse_resource_id returns dictionary with "child_type_#" to represent
# types sequence. "type" stores root type.
child_type_keys = [k for k in parsed.keys() if k.find("child_type_") != -1]
types = [parsed.get(k) for k in sorted(child_type_keys)]
if not types:
types.insert(0, parsed.get("type"))
return "/".join(types)
|
https://github.com/cloud-custodian/cloud-custodian/issues/4937
|
====================================================================== FAILURES =======================================================================
________________________________________________________ ActionsMarkForOpTest.test_mark_for_op ________________________________________________________
[gw4] darwin -- Python 3.7.3 /Users/kapilt/projects/release-custodian/.tox/py37/bin/python
Traceback (most recent call last):
File "/Users/kapilt/projects/release-custodian/tools/c7n_azure/tests_azure/test_actions_mark-for-op.py", line 58, in test_mark_for_op
self.assertEqual(tags, expected_tags)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 839, in assertEqual
assertion_func(first, second, msg=msg)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 1138, in assertDictEqual
self.fail(self._formatMessage(msg, standardMsg))
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 680, in fail
raise self.failureException(msg)
AssertionError: {'pre[66 chars]dian_status': 'Resource does not meet policy: stop@2019/10/21'} != {'pre[66 chars]dian_status': 'Resource does not meet policy: stop@2019/10/20'}
- {'custodian_status': 'Resource does not meet policy: stop@2019/10/21',
? ^
+ {'custodian_status': 'Resource does not meet policy: stop@2019/10/20',
? ^
|
AssertionError
|
def get_sts_client(session, region):
"""Get the AWS STS endpoint specific for the given region.
Returns the global endpoint if region is not specified.
For the list of regional endpoints, see https://amzn.to/2ohJgtR
"""
if region and USE_STS_REGIONAL:
endpoint_url = "https://sts.{}.amazonaws.com".format(region)
region_name = region
else:
endpoint_url = "https://sts.amazonaws.com"
region_name = None
return session.client("sts", endpoint_url=endpoint_url, region_name=region_name)
|
def get_sts_client(session, region):
"""Get the AWS STS endpoint specific for the given region.
Returns the global endpoint if region is not specified.
For the list of regional endpoints, see https://amzn.to/2ohJgtR
"""
if region and not USE_STS_GLOBAL:
endpoint_url = "https://sts.{}.amazonaws.com".format(region)
region_name = region
else:
endpoint_url = "https://sts.amazonaws.com"
region_name = None
return session.client("sts", endpoint_url=endpoint_url, region_name=region_name)
|
https://github.com/cloud-custodian/cloud-custodian/issues/5023
|
2019-10-31 22:12:24,498: custodian.commands:DEBUG Loaded file policy.yml
Traceback (most recent call last):
File "/usr/local/bin/custodian", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/c7n/cli.py", line 374, in main
command(config)
File "/usr/local/lib/python3.7/site-packages/c7n/commands.py", line 141, in _load_policies
return f(options, list(policies))
File "/usr/local/lib/python3.7/site-packages/c7n/commands.py", line 275, in run
local_session(clouds['aws']().get_session_factory(options))
File "/usr/local/lib/python3.7/site-packages/c7n/utils.py", line 274, in local_session
s = factory()
File "/usr/local/lib/python3.7/site-packages/c7n/credentials.py", line 57, in __call__
region or self.region, self.external_id)
File "/usr/local/lib/python3.7/site-packages/c7n/credentials.py", line 115, in assumed_session
metadata=refresh(),
File "/usr/local/lib/python3.7/site-packages/c7n/credentials.py", line 106, in refresh
session, region).assume_role, **parameters)['Credentials']
File "/usr/local/lib/python3.7/site-packages/c7n/utils.py", line 390, in _retry
return func(*args, **kw)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 648, in _make_api_call
operation_model, request_dict, request_context)
File "/usr/local/lib/python3.7/site-packages/botocore/client.py", line 667, in _make_request
return self._endpoint.make_request(operation_model, request_dict)
File "/usr/local/lib/python3.7/site-packages/botocore/endpoint.py", line 102, in make_request
return self._send_request(request_dict, operation_model)
File "/usr/local/lib/python3.7/site-packages/botocore/endpoint.py", line 132, in _send_request
request = self.create_request(request_dict, operation_model)
File "/usr/local/lib/python3.7/site-packages/botocore/endpoint.py", line 116, in create_request
operation_name=operation_model.name)
File "/usr/local/lib/python3.7/site-packages/botocore/hooks.py", line 356, in emit
return self._emitter.emit(aliased_event_name, **kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/hooks.py", line 228, in emit
return self._emit(event_name, kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/hooks.py", line 211, in _emit
response = handler(**kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/signers.py", line 90, in handler
return self.sign(operation_name, request)
File "/usr/local/lib/python3.7/site-packages/botocore/signers.py", line 149, in sign
auth = self.get_auth_instance(**kwargs)
File "/usr/local/lib/python3.7/site-packages/botocore/signers.py", line 233, in get_auth_instance
raise botocore.exceptions.NoRegionError()
botocore.exceptions.NoRegionError: You must specify a region.
end of run
|
botocore.exceptions.NoRegionError
|
def load_resource(self, item):
resource = super(ConfigS3, self).load_resource(item)
cfg = item["supplementaryConfiguration"]
# aka standard
if "awsRegion" in item and item["awsRegion"] != "us-east-1":
resource["Location"] = {"LocationConstraint": item["awsRegion"]}
else:
resource["Location"] = {}
# owner is under acl per describe
resource.pop("Owner", None)
resource["CreationDate"] = parse_date(resource["CreationDate"])
for k, null_value in S3_CONFIG_SUPPLEMENT_NULL_MAP.items():
if cfg.get(k) == null_value:
continue
method = getattr(self, "handle_%s" % k, None)
if method is None:
raise ValueError("unhandled supplementary config %s", k)
continue
v = cfg[k]
if isinstance(cfg[k], six.string_types):
v = json.loads(cfg[k])
method(resource, v)
for el in S3_AUGMENT_TABLE:
if el[1] not in resource:
resource[el[1]] = el[2]
return resource
|
def load_resource(self, item):
resource = super(ConfigS3, self).load_resource(item)
cfg = item["supplementaryConfiguration"]
# aka standard
if "awsRegion" in item and item["awsRegion"] != "us-east-1":
resource["Location"] = {"LocationConstraint": item["awsRegion"]}
# owner is under acl per describe
resource.pop("Owner", None)
resource["CreationDate"] = parse_date(resource["CreationDate"])
for k, null_value in S3_CONFIG_SUPPLEMENT_NULL_MAP.items():
if cfg.get(k) == null_value:
continue
method = getattr(self, "handle_%s" % k, None)
if method is None:
raise ValueError("unhandled supplementary config %s", k)
continue
v = cfg[k]
if isinstance(cfg[k], six.string_types):
v = json.loads(cfg[k])
method(resource, v)
for el in S3_AUGMENT_TABLE:
if el[1] not in resource:
resource[el[1]] = el[2]
return resource
|
https://github.com/cloud-custodian/cloud-custodian/issues/4885
|
[ERROR] AttributeError: 'NoneType' object has no attribute 'get'
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 168, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 932, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 729, in run
resources = super(ConfigRuleMode, self).run(event, lambda_context)
File "/var/task/c7n/policy.py", line 484, in run
results = action.process(resources)
File "/var/task/c7n/tags.py", line 474, in process
self.process_resource_set, self.id_key, resources, tags, self.log)
File "/var/task/c7n/tags.py", line 136, in _common_tag_processer
raise error
File "/var/lang/lib/python3.7/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/c7n/resources/s3.py", line 2274, in process_resource_set
self.manager.session_factory, resource_set, remove_tags=tags)
File "/var/task/c7n/resources/s3.py", line 505, in modify_bucket_tags
client = bucket_client(local_session(session_factory), bucket)
File "/var/task/c7n/resources/s3.py", line 490, in bucket_client
region = get_region(b)
File "/var/task/c7n/resources/s3.py", line 558, in get_region
region = b.get('Location',
{}
).get('LocationConstraint')
|
AttributeError
|
def initialize_policies(self, policy_collection, options):
"""Return a set of policies targetted to the given regions.
Supports symbolic regions like 'all'. This will automatically
filter out policies if their being targetted to a region that
does not support the service. Global services will target a
single region (us-east-1 if only all specified, else first
region in the list).
Note for region partitions (govcloud and china) an explicit
region from the partition must be passed in.
"""
from c7n.policy import Policy, PolicyCollection
policies = []
service_region_map, resource_service_map = get_service_region_map(
options.regions, policy_collection.resource_types
)
if "all" in options.regions:
enabled_regions = set(
[
r["RegionName"]
for r in get_profile_session(options)
.client("ec2")
.describe_regions(
Filters=[
{
"Name": "opt-in-status",
"Values": ["opt-in-not-required", "opted-in"],
}
]
)
.get("Regions")
]
)
for p in policy_collection:
if "aws." in p.resource_type:
_, resource_type = p.resource_type.split(".", 1)
else:
resource_type = p.resource_type
available_regions = service_region_map.get(
resource_service_map.get(resource_type), ()
)
# its a global service/endpoint, use user provided region
# or us-east-1.
if not available_regions and options.regions:
candidates = [r for r in options.regions if r != "all"]
candidate = candidates and candidates[0] or "us-east-1"
svc_regions = [candidate]
elif "all" in options.regions:
svc_regions = list(set(available_regions).intersection(enabled_regions))
else:
svc_regions = options.regions
for region in svc_regions:
if available_regions and region not in available_regions:
level = "all" in options.regions and logging.DEBUG or logging.WARNING
# TODO: fixme
policy_collection.log.log(
level,
"policy:%s resources:%s not available in region:%s",
p.name,
p.resource_type,
region,
)
continue
options_copy = copy.copy(options)
options_copy.region = str(region)
if (
len(options.regions) > 1
or "all" in options.regions
and getattr(options, "output_dir", None)
):
options_copy.output_dir = (
options.output_dir.rstrip("/") + "/%s" % region
)
policies.append(
Policy(
p.data,
options_copy,
session_factory=policy_collection.session_factory(),
)
)
return PolicyCollection(
# order policies by region to minimize local session invalidation.
# note relative ordering of policies must be preserved, python sort
# is stable.
sorted(policies, key=operator.attrgetter("options.region")),
options,
)
|
def initialize_policies(self, policy_collection, options):
"""Return a set of policies targetted to the given regions.
Supports symbolic regions like 'all'. This will automatically
filter out policies if their being targetted to a region that
does not support the service. Global services will target a
single region (us-east-1 if only all specified, else first
region in the list).
Note for region partitions (govcloud and china) an explicit
region from the partition must be passed in.
"""
from c7n.policy import Policy, PolicyCollection
policies = []
service_region_map, resource_service_map = get_service_region_map(
options.regions, policy_collection.resource_types
)
for p in policy_collection:
if "aws." in p.resource_type:
_, resource_type = p.resource_type.split(".", 1)
else:
resource_type = p.resource_type
available_regions = service_region_map.get(
resource_service_map.get(resource_type), ()
)
# its a global service/endpoint, use user provided region
# or us-east-1.
if not available_regions and options.regions:
candidates = [r for r in options.regions if r != "all"]
candidate = candidates and candidates[0] or "us-east-1"
svc_regions = [candidate]
elif "all" in options.regions:
svc_regions = available_regions
else:
svc_regions = options.regions
for region in svc_regions:
if available_regions and region not in available_regions:
level = "all" in options.regions and logging.DEBUG or logging.WARNING
# TODO: fixme
policy_collection.log.log(
level,
"policy:%s resources:%s not available in region:%s",
p.name,
p.resource_type,
region,
)
continue
options_copy = copy.copy(options)
options_copy.region = str(region)
if (
len(options.regions) > 1
or "all" in options.regions
and getattr(options, "output_dir", None)
):
options_copy.output_dir = (
options.output_dir.rstrip("/") + "/%s" % region
)
policies.append(
Policy(
p.data,
options_copy,
session_factory=policy_collection.session_factory(),
)
)
return PolicyCollection(
# order policies by region to minimize local session invalidation.
# note relative ordering of policies must be preserved, python sort
# is stable.
sorted(policies, key=operator.attrgetter("options.region")),
options,
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/4290
|
$ python --version
Python 3.7.3
$ pip --version
pip 19.1.1 from /home/ec2-user/custodian/lib/python3.7/site-packages/pip (python 3.7)
$ pip list|grep c7n
c7n 0.8.44.2
c7n-org 0.5.3
$ custodian run -s ~/output -r all list-ec2-instances.yaml --cache-period 0
2019-06-29 02:53:26,186: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/policy.py", line 229, in run
resources = self.policy.resource_manager.resources()
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/resources/ec2.py", line 92, in resources
return super(EC2, self).resources(query=query)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 453, in resources
resources = self.source.resources(query)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 230, in resources
return self.query.filter(self.manager, **query)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 90, in filter
getattr(resource_manager, 'retry', None)) or []
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 69, in _invoke_client_enum
data = results.build_full_result()
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/paginate.py", line 449, in build_full_result
for response in self:
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 679, in _make_request
return self.retry(self._method, **current_kwargs)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/utils.py", line 383, in _retry
return func(*args, **kw)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (AuthFailure) when calling the DescribeInstances operation: AWS was not able to validate the provided access credentials
2019-06-29 02:53:26,187: custodian.commands:ERROR Error while executing policy list-ec2-instances, continuing
Traceback (most recent call last):
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/commands.py", line 269, in run
policy()
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/policy.py", line 945, in __call__
resources = mode.run()
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/policy.py", line 229, in run
resources = self.policy.resource_manager.resources()
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/resources/ec2.py", line 92, in resources
return super(EC2, self).resources(query=query)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 453, in resources
resources = self.source.resources(query)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 230, in resources
return self.query.filter(self.manager, **query)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 90, in filter
getattr(resource_manager, 'retry', None)) or []
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 69, in _invoke_client_enum
data = results.build_full_result()
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/paginate.py", line 449, in build_full_result
for response in self:
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/paginate.py", line 255, in __iter__
response = self._make_request(current_kwargs)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/query.py", line 679, in _make_request
return self.retry(self._method, **current_kwargs)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/utils.py", line 383, in _retry
return func(*args, **kw)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (AuthFailure) when calling the DescribeInstances operation: AWS was not able to validate the provided access credentials
2019-06-29 02:53:26,834: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:ap-northeast-1 count:0 time:0.63
2019-06-29 02:53:27,456: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:ap-northeast-2 count:0 time:0.60
2019-06-29 02:53:28,463: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:ap-south-1 count:0 time:0.99
2019-06-29 02:53:29,238: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:ap-southeast-1 count:0 time:0.76
2019-06-29 02:53:30,093: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:ap-southeast-2 count:0 time:0.84
2019-06-29 02:53:30,505: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:ca-central-1 count:0 time:0.39
2019-06-29 02:53:31,297: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:eu-central-1 count:0 time:0.77
2019-06-29 02:53:32,292: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:eu-north-1 count:0 time:0.97
2019-06-29 02:53:32,955: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:eu-west-1 count:0 time:0.64
2019-06-29 02:53:33,677: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:eu-west-2 count:0 time:0.70
2019-06-29 02:53:34,443: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:eu-west-3 count:0 time:0.75
2019-06-29 02:53:35,446: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:sa-east-1 count:0 time:0.98
2019-06-29 02:53:35,984: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:us-east-1 count:0 time:0.52
2019-06-29 02:53:36,393: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:us-east-2 count:0 time:0.39
2019-06-29 02:53:36,611: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:us-west-1 count:0 time:0.20
2019-06-29 02:53:36,756: custodian.policy:INFO policy:list-ec2-instances resource:ec2 region:us-west-2 count:2 time:0.13
|
botocore.exceptions.ClientError
|
def add(self, func):
rule = self.get(func.name)
params = self.get_rule_params(func)
if rule and self.delta(rule, params):
log.debug("Updating config rule for %s" % self)
rule.update(params)
return LambdaRetry(self.client.put_config_rule, ConfigRule=rule)
elif rule:
log.debug("Config rule up to date")
return
client = self.session.client("lambda")
try:
client.add_permission(
FunctionName=func.name,
StatementId=func.name,
SourceAccount=func.arn.split(":")[4],
Action="lambda:InvokeFunction",
Principal="config.amazonaws.com",
)
except client.exceptions.ResourceConflictException:
pass
log.debug("Adding config rule for %s" % func.name)
return LambdaRetry(self.client.put_config_rule, ConfigRule=params)
|
def add(self, func):
rule = self.get(func.name)
params = self.get_rule_params(func)
if rule and self.delta(rule, params):
log.debug("Updating config rule for %s" % self)
rule.update(params)
return self.client.put_config_rule(ConfigRule=rule)
elif rule:
log.debug("Config rule up to date")
return
client = self.session.client("lambda")
try:
client.add_permission(
FunctionName=func.name,
StatementId=func.name,
SourceAccount=func.arn.split(":")[4],
Action="lambda:InvokeFunction",
Principal="config.amazonaws.com",
)
except client.exceptions.ResourceConflictException:
pass
log.debug("Adding config rule for %s" % func.name)
return self.client.put_config_rule(ConfigRule=params)
|
https://github.com/cloud-custodian/cloud-custodian/issues/4559
|
2019-08-08 16:03:56,496: custodian.policy:INFO Provisioning policy lambda cross-account-rds-snapshot
2019-08-08 16:03:58,918: custodian.policy:INFO Provisioning policy lambda cross-account-lambda
2019-08-08 16:04:02,008: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/Library/Python/2.7/site-packages/c7n/policy.py", line 497, in provision
role=self.policy.options.assume_role)
File "/Library/Python/2.7/site-packages/c7n/mu.py", line 309, in publish
if e.add(func):
File "/Library/Python/2.7/site-packages/c7n/mu.py", line 1543, in add
return self.client.put_config_rule(ConfigRule=params)
File "/Users/varun.tomar/Library/Python/2.7/lib/python/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/varun.tomar/Library/Python/2.7/lib/python/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
InsufficientPermissionsException: An error occurred (InsufficientPermissionsException) when calling the PutConfigRule operation: The AWS Lambda function arn:aws:lambda:us-east-2:xxxxxxxxx:function:custodian-cross-account-lambda cannot be invoked. Check the specified function ARN, and check the function's permissions.
Traceback (most recent call last):
File "/Library/Python/2.7/site-packages/c7n/cli.py", line 368, in main
command(config)
File "/Library/Python/2.7/site-packages/c7n/commands.py", line 136, in _load_policies
return f(options, list(policies))
File "/Library/Python/2.7/site-packages/c7n/commands.py", line 268, in run
policy()
File "/Library/Python/2.7/site-packages/c7n/policy.py", line 945, in __call__
resources = mode.provision()
File "/Library/Python/2.7/site-packages/c7n/policy.py", line 497, in provision
role=self.policy.options.assume_role)
File "/Library/Python/2.7/site-packages/c7n/mu.py", line 309, in publish
if e.add(func):
File "/Library/Python/2.7/site-packages/c7n/mu.py", line 1543, in add
return self.client.put_config_rule(ConfigRule=params)
File "/Users/varun.tomar/Library/Python/2.7/lib/python/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/varun.tomar/Library/Python/2.7/lib/python/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
InsufficientPermissionsException: An error occurred (InsufficientPermissionsException) when calling the PutConfigRule operation: The AWS Lambda function arn:aws:lambda:us-east-2:xxxxxxxxxxx:function:custodian-cross-account-lambda cannot be invoked. Check the specified function ARN, and check the function's permissions.
/Users/varun.tomar/Library/Python/2.7/lib/python/site-packages/botocore/client.py(661)_make_api_call()
-> raise error_class(parsed_response, operation_name)
(Pdb) Traceback (most recent call last):
File "/usr/local/bin/custodian", line 9, in <module>
load_entry_point('c7n==0.8.43.1', 'console_scripts', 'custodian')()
File "/Library/Python/2.7/site-packages/c7n/cli.py", line 373, in main
pdb.post_mortem(sys.exc_info()[-1])
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pdb.py", line 1267, in post_mortem
p.interaction(None, t)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/pdb.py", line 210, in interaction
self.cmdloop()
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/cmd.py", line 130, in cmdloop
line = raw_input(self.prompt)
KeyboardInterrupt
|
InsufficientPermissionsException
|
def get_resources(self, rids, cache=True):
# Launch template versions have a compound primary key
#
# Support one of four forms of resource ids:
#
# - array of launch template ids
# - array of tuples (launch template id, version id)
# - array of dicts (with LaunchTemplateId and VersionNumber)
# - array of dicts (with LaunchTemplateId and LatestVersionNumber)
#
# If an alias version is given $Latest, $Default, the alias will be
# preserved as an annotation on the returned object 'c7n:VersionAlias'
if not rids:
return []
t_versions = {}
if isinstance(rids[0], tuple):
for tid, tversion in rids:
t_versions.setdefault(tid, []).append(tversion)
elif isinstance(rids[0], dict):
for tinfo in rids:
t_versions.setdefault(tinfo["LaunchTemplateId"], []).append(
tinfo.get("VersionNumber", tinfo.get("LatestVersionNumber"))
)
elif isinstance(rids[0], six.string_types):
for tid in rids:
t_versions[tid] = []
client = utils.local_session(self.session_factory).client("ec2")
results = []
# We may end up fetching duplicates on $Latest and $Version
for tid, tversions in t_versions.items():
try:
ltv = client.describe_launch_template_versions(
LaunchTemplateId=tid, Versions=tversions
).get("LaunchTemplateVersions")
except ClientError as e:
if e.response["Error"]["Code"] == "InvalidLaunchTemplateId.NotFound":
continue
if e.response["Error"]["Code"] == "InvalidLaunchTemplateId.VersionNotFound":
continue
raise
if not tversions:
tversions = [str(t["VersionNumber"]) for t in ltv]
for tversion, t in zip(tversions, ltv):
if not tversion.isdigit():
t["c7n:VersionAlias"] = tversion
results.append(t)
return results
|
def get_resources(self, rids, cache=True):
# Launch template versions have a compound primary key
#
# Support one of four forms of resource ids:
#
# - array of launch template ids
# - array of tuples (launch template id, version id)
# - array of dicts (with LaunchTemplateId and VersionNumber)
# - array of dicts (with LaunchTemplateId and LatestVersionNumber)
#
# If an alias version is given $Latest, $Default, the alias will be
# preserved as an annotation on the returned object 'c7n:VersionAlias'
if not rids:
return []
t_versions = {}
if isinstance(rids[0], tuple):
for tid, tversion in rids:
t_versions.setdefault(tid, []).append(tversion)
elif isinstance(rids[0], dict):
for tinfo in rids:
t_versions.setdefault(tinfo["LaunchTemplateId"], []).append(
tinfo.get("VersionNumber", tinfo.get("LatestVersionNumber"))
)
elif isinstance(rids[0], six.string_types):
for tid in rids:
t_versions[tid] = []
client = utils.local_session(self.session_factory).client("ec2")
results = []
# We may end up fetching duplicates on $Latest and $Version
for tid, tversions in t_versions.items():
ltv = client.describe_launch_template_versions(
LaunchTemplateId=tid, Versions=tversions
).get("LaunchTemplateVersions")
if not tversions:
tversions = [str(t["VersionNumber"]) for t in ltv]
for tversion, t in zip(tversions, ltv):
if not tversion.isdigit():
t["c7n:VersionAlias"] = tversion
results.append(t)
return results
|
https://github.com/cloud-custodian/cloud-custodian/issues/4568
|
2019-08-11 07:22:23,823: custodian.commands:ERROR Error while executing policy remove-outdated-amis, continuing
Traceback (most recent call last):
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/commands.py", line 269, in run
policy()
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/policy.py", line 941, in __call__
resources = PullMode(self).run()
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/policy.py", line 229, in run
resources = self.policy.resource_manager.resources()
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/resources/ami.py", line 53, in resources
return super(AMI, self).resources(query=query)
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/query.py", line 460, in resources
resources = self.filter_resources(resources)
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/manager.py", line 108, in filter_resources
resources = f.process(resources, event)
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/resources/ami.py", line 305, in process
images = self._pull_ec2_images().union(self._pull_asg_images())
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/resources/ami.py", line 296, in _pull_asg_images
list(tmpl_mgr.get_asg_templates(asgs).keys())):
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/c7n/resources/ec2.py", line 1870, in get_resources
LaunchTemplateId=tid, Versions=tversions).get(
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/Users/user/.local/share/virtualenvs/cloudcustodian-ymoVWLAP/lib/python2.7/site-packages/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
ClientError: An error occurred (InvalidLaunchTemplateId.NotFound) when calling the DescribeLaunchTemplateVersions operation: The specified launch template, with template ID lt-0634c8a44afa8b953, does not exist.
|
ClientError
|
def send_to_azure_queue(self, queue_uri, message, session):
try:
queue_service, queue_name = StorageUtilities.get_queue_client_by_uri(
queue_uri, session
)
return StorageUtilities.put_queue_message(
queue_service, queue_name, self.pack(message)
).id
except AzureHttpError as e:
if e.status_code == 403:
self.log.error(
"Access Error - Storage Queue Data Contributor Role is required "
"to enqueue messages to the Azure Queue Storage."
)
else:
self.log.error("Error putting message to the queue.\n" + str(e))
|
def send_to_azure_queue(self, queue_uri, message, session):
queue_service, queue_name = StorageUtilities.get_queue_client_by_uri(
queue_uri, session
)
return StorageUtilities.put_queue_message(
queue_service, queue_name, self.pack(message)
).id
|
https://github.com/cloud-custodian/cloud-custodian/issues/4341
|
(cloud-custodian) ➜ cloud-custodian git:(master) ✗ custodian-cask run -s=. policies/policy.yml
Custodian Cask (cloudcustodian/c7n:latest)
Skipped image pull - Last checked 23 minutes ago.
2019-07-08 20:57:46,374: custodian.azure.session:INFO Creating session with Azure CLI Authentication
2019-07-08 20:57:46,374: custodian.azure.session:INFO Session using Subscription ID: <my sub id>
2019-07-08 20:57:46,505: custodian.cache:WARNING Could not save cache /home/custodian/.cache/cloud-custodian.cache err: [Errno 2] No such file or directory: '/home/custodian/.cache/cloud-custodian.cache'
2019-07-08 20:57:46,506: custodian.cache:INFO Generating Cache directory: /home/custodian/.cache.
2019-07-08 20:57:49,283: custodian.policy:INFO policy:delete-empty-resource-groups resource:azure.resourcegroup region: count:1 time:2.90
2019-07-08 20:57:49,290: custodian.resources.resourcegroup:INFO Removing resource group test1
2019-07-08 20:57:49,383: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 261, in run
results = a.process(resources)
File "/usr/local/lib/python3.7/site-packages/c7n_azure/resources/resourcegroup.py", line 93, in process
self.manager.get_client().resource_groups.delete(group['name'])
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 231, in delete
**operation_config
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 199, in _delete_initial
raise exp
msrestazure.azure_exceptions.CloudError: Azure Error: ScopeLocked
Message: The scope '/subscriptions/<my sub id>/resourcegroups/test1' cannot perform delete operation because following scope(s) are locked: '/subscriptions/<my sub id>/resourceGroups/test1'. Please remove the lock and try again.
2019-07-08 20:57:49,384: custodian.commands:ERROR Error while executing policy delete-empty-resource-groups, continuing
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/c7n/commands.py", line 269, in run
policy()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 945, in __call__
resources = mode.run()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 261, in run
results = a.process(resources)
File "/usr/local/lib/python3.7/site-packages/c7n_azure/resources/resourcegroup.py", line 93, in process
self.manager.get_client().resource_groups.delete(group['name'])
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 231, in delete
**operation_config
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 199, in _delete_initial
raise exp
msrestazure.azure_exceptions.CloudError: Azure Error: ScopeLocked
Message: The scope '/subscriptions/<my sub id>/resourcegroups/test1' cannot perform delete operation because following scope(s) are locked: '/subscriptions/<my sub id>/resourceGroups/test1'. Please remove the lock and try again.
|
msrestazure.azure_exceptions.CloudError
|
def upload(self):
for root, dirs, files in os.walk(self.root_dir):
for f in files:
blob_name = self.join(self.file_prefix, root[len(self.root_dir) :], f)
blob_name.strip("/")
try:
self.blob_service.create_blob_from_path(
self.container, blob_name, os.path.join(root, f)
)
except AzureHttpError as e:
if e.status_code == 403:
self.log.error(
"Access Error: Storage Blob Data Contributor Role "
"is required to write to Azure Blob Storage."
)
else:
self.log.error(
"Error writing output. "
"Confirm output storage URL is correct. \n" + str(e)
)
self.log.debug("%s uploaded" % blob_name)
|
def upload(self):
for root, dirs, files in os.walk(self.root_dir):
for f in files:
blob_name = self.join(self.file_prefix, root[len(self.root_dir) :], f)
blob_name.strip("/")
try:
self.blob_service.create_blob_from_path(
self.container, blob_name, os.path.join(root, f)
)
except AzureHttpError as e:
self.log.error(
"Error writing output. Confirm output storage URL is correct "
"and that 'Storage Blob Contributor' role is assigned. \n" + str(e)
)
self.log.debug("%s uploaded" % blob_name)
|
https://github.com/cloud-custodian/cloud-custodian/issues/4341
|
(cloud-custodian) ➜ cloud-custodian git:(master) ✗ custodian-cask run -s=. policies/policy.yml
Custodian Cask (cloudcustodian/c7n:latest)
Skipped image pull - Last checked 23 minutes ago.
2019-07-08 20:57:46,374: custodian.azure.session:INFO Creating session with Azure CLI Authentication
2019-07-08 20:57:46,374: custodian.azure.session:INFO Session using Subscription ID: <my sub id>
2019-07-08 20:57:46,505: custodian.cache:WARNING Could not save cache /home/custodian/.cache/cloud-custodian.cache err: [Errno 2] No such file or directory: '/home/custodian/.cache/cloud-custodian.cache'
2019-07-08 20:57:46,506: custodian.cache:INFO Generating Cache directory: /home/custodian/.cache.
2019-07-08 20:57:49,283: custodian.policy:INFO policy:delete-empty-resource-groups resource:azure.resourcegroup region: count:1 time:2.90
2019-07-08 20:57:49,290: custodian.resources.resourcegroup:INFO Removing resource group test1
2019-07-08 20:57:49,383: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 261, in run
results = a.process(resources)
File "/usr/local/lib/python3.7/site-packages/c7n_azure/resources/resourcegroup.py", line 93, in process
self.manager.get_client().resource_groups.delete(group['name'])
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 231, in delete
**operation_config
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 199, in _delete_initial
raise exp
msrestazure.azure_exceptions.CloudError: Azure Error: ScopeLocked
Message: The scope '/subscriptions/<my sub id>/resourcegroups/test1' cannot perform delete operation because following scope(s) are locked: '/subscriptions/<my sub id>/resourceGroups/test1'. Please remove the lock and try again.
2019-07-08 20:57:49,384: custodian.commands:ERROR Error while executing policy delete-empty-resource-groups, continuing
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/c7n/commands.py", line 269, in run
policy()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 945, in __call__
resources = mode.run()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 261, in run
results = a.process(resources)
File "/usr/local/lib/python3.7/site-packages/c7n_azure/resources/resourcegroup.py", line 93, in process
self.manager.get_client().resource_groups.delete(group['name'])
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 231, in delete
**operation_config
File "/usr/local/lib/python3.7/site-packages/azure/mgmt/resource/resources/v2018_05_01/operations/resource_groups_operations.py", line 199, in _delete_initial
raise exp
msrestazure.azure_exceptions.CloudError: Azure Error: ScopeLocked
Message: The scope '/subscriptions/<my sub id>/resourcegroups/test1' cannot perform delete operation because following scope(s) are locked: '/subscriptions/<my sub id>/resourceGroups/test1'. Please remove the lock and try again.
|
msrestazure.azure_exceptions.CloudError
|
def get_finding(self, resources, existing_finding_id, created_at, updated_at):
policy = self.manager.ctx.policy
model = self.manager.resource_type
region = self.data.get("region", self.manager.config.region)
if existing_finding_id:
finding_id = existing_finding_id
else:
finding_id = "{}/{}/{}/{}".format(
self.manager.config.region,
self.manager.config.account_id,
hashlib.md5(json.dumps(policy.data).encode("utf8")).hexdigest(),
hashlib.md5(
json.dumps(list(sorted([r[model.id] for r in resources]))).encode(
"utf8"
)
).hexdigest(),
)
finding = {
"SchemaVersion": self.FindingVersion,
"ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format(
region,
self.manager.config.account_id,
self.manager.config.account_id,
self.ProductName,
),
"AwsAccountId": self.manager.config.account_id,
"Description": self.data.get(
"description", policy.data.get("description", "")
).strip(),
"Title": self.data.get("title", policy.name),
"Id": finding_id,
"GeneratorId": policy.name,
"CreatedAt": created_at,
"UpdatedAt": updated_at,
"RecordState": "ACTIVE",
}
severity = {"Product": 0, "Normalized": 0}
if self.data.get("severity") is not None:
severity["Product"] = self.data["severity"]
if self.data.get("severity_normalized") is not None:
severity["Normalized"] = self.data["severity_normalized"]
if severity:
finding["Severity"] = severity
recommendation = {}
if self.data.get("recommendation"):
recommendation["Text"] = self.data["recommendation"]
if self.data.get("recommendation_url"):
recommendation["Url"] = self.data["recommendation_url"]
if recommendation:
finding["Remediation"] = {"Recommendation": recommendation}
if "confidence" in self.data:
finding["Confidence"] = self.data["confidence"]
if "criticality" in self.data:
finding["Criticality"] = self.data["criticality"]
if "compliance_status" in self.data:
finding["Compliance"] = {"Status": self.data["compliance_status"]}
fields = {
"resource": policy.resource_type,
"ProviderName": "CloudCustodian",
"ProviderVersion": version,
}
if "fields" in self.data:
fields.update(self.data["fields"])
else:
tags = {}
for t in policy.tags:
if ":" in t:
k, v = t.split(":", 1)
else:
k, v = t, ""
tags[k] = v
fields.update(tags)
if fields:
finding["ProductFields"] = fields
finding_resources = []
for r in resources:
finding_resources.append(self.format_resource(r))
finding["Resources"] = finding_resources
finding["Types"] = list(self.data["types"])
return filter_empty(finding)
|
def get_finding(self, resources, existing_finding_id, created_at, updated_at):
policy = self.manager.ctx.policy
model = self.manager.resource_type
if existing_finding_id:
finding_id = existing_finding_id
else:
finding_id = "{}/{}/{}/{}".format(
self.manager.config.region,
self.manager.config.account_id,
hashlib.md5(json.dumps(policy.data).encode("utf8")).hexdigest(),
hashlib.md5(
json.dumps(list(sorted([r[model.id] for r in resources]))).encode(
"utf8"
)
).hexdigest(),
)
finding = {
"SchemaVersion": self.FindingVersion,
"ProductArn": "arn:aws:securityhub:{}:{}:product/{}/{}".format(
self.manager.config.region,
self.manager.config.account_id,
self.manager.config.account_id,
self.ProductName,
),
"AwsAccountId": self.manager.config.account_id,
"Description": self.data.get(
"description", policy.data.get("description", "")
).strip(),
"Title": self.data.get("title", policy.name),
"Id": finding_id,
"GeneratorId": policy.name,
"CreatedAt": created_at,
"UpdatedAt": updated_at,
"RecordState": "ACTIVE",
}
severity = {"Product": 0, "Normalized": 0}
if self.data.get("severity") is not None:
severity["Product"] = self.data["severity"]
if self.data.get("severity_normalized") is not None:
severity["Normalized"] = self.data["severity_normalized"]
if severity:
finding["Severity"] = severity
recommendation = {}
if self.data.get("recommendation"):
recommendation["Text"] = self.data["recommendation"]
if self.data.get("recommendation_url"):
recommendation["Url"] = self.data["recommendation_url"]
if recommendation:
finding["Remediation"] = {"Recommendation": recommendation}
if "confidence" in self.data:
finding["Confidence"] = self.data["confidence"]
if "criticality" in self.data:
finding["Criticality"] = self.data["criticality"]
if "compliance_status" in self.data:
finding["Compliance"] = {"Status": self.data["compliance_status"]}
fields = {
"resource": policy.resource_type,
"ProviderName": "CloudCustodian",
"ProviderVersion": version,
}
if "fields" in self.data:
fields.update(self.data["fields"])
else:
tags = {}
for t in policy.tags:
if ":" in t:
k, v = t.split(":", 1)
else:
k, v = t, ""
tags[k] = v
fields.update(tags)
if fields:
finding["ProductFields"] = fields
finding_resources = []
for r in resources:
finding_resources.append(self.format_resource(r))
finding["Resources"] = finding_resources
finding["Types"] = list(self.data["types"])
return filter_empty(finding)
|
https://github.com/cloud-custodian/cloud-custodian/issues/3975
|
botocore.errorfactory.AccessDeniedException: An error occurred (AccessDeniedException) when calling the BatchImportFindings operation: User: arn:aws:sts::<account>:assumed-role/CloudCustodianAdminRole/CloudCustodian is not authorized to perform: securityhub:BatchImportFindings
[ERROR] 2019-05-08T08:46:16.357Z error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 168, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 909, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 478, in run
results = action.process(resources)
File "/var/task/c7n/actions/securityhub.py", line 213, in process
Findings=[finding])
File "/var/task/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/var/task/botocore/client.py", line 661, in _make_api_call
raise error_class(parsed_response, operation_name)
|
botocore.error
|
def dispatch_event(event, context):
error = event.get("detail", {}).get("errorCode")
if error and C7N_SKIP_EVTERR:
log.debug("Skipping failed operation: %s" % error)
return
if C7N_DEBUG_EVENT:
event["debug"] = True
log.info("Processing event\n %s", format_event(event))
# Policies file should always be valid in lambda so do loading naively
global policy_config
if policy_config is None:
with open("config.json") as f:
policy_config = json.load(f)
if not policy_config or not policy_config.get("policies"):
return False
options = init_config(policy_config)
policies = PolicyCollection.from_data(policy_config, options)
if policies:
for p in policies:
try:
# validation provides for an initialization point for
# some filters/actions.
p.validate()
p.push(event, context)
except Exception:
log.exception("error during policy execution")
if C7N_CATCH_ERR:
continue
raise
return True
|
def dispatch_event(event, context):
error = event.get("detail", {}).get("errorCode")
if error and C7N_SKIP_EVTERR:
log.debug("Skipping failed operation: %s" % error)
return
if C7N_DEBUG_EVENT:
event["debug"] = True
log.info("Processing event\n %s", format_event(event))
# Policies file should always be valid in lambda so do loading naively
global policy_config
if policy_config is None:
with open("config.json") as f:
policy_config = json.load(f)
if not policy_config or not policy_config.get("policies"):
return False
options = init_config(policy_config)
policies = PolicyCollection.from_data(policy_config, options)
if policies:
for p in policies:
try:
p.push(event, context)
except Exception:
log.exception("error during policy execution")
if C7N_CATCH_ERR:
continue
raise
return True
|
https://github.com/cloud-custodian/cloud-custodian/issues/3022
|
START RequestId: bd7b329c-d677-11e8-8925-01840da49e11 Version: $LATEST
[INFO] 2018-10-23T03:57:20.426Z bd7b329c-d677-11e8-8925-01840da49e11 Processing event
{
"account": "[sanitized]",
"region": "us-east-1",
"detail": {},
"detail-type": "Scheduled Event",
"source": "aws.events",
"version": "0",
"time": "2018-10-23T03:56:45Z",
"debug": true,
"id": "a83eb09d-de5e-2155-62ef-24490c335d43",
"resources": [
"arn:aws:events:us-east-1:8[sanitized]:rule/custodian-enable-vpc-flow-logs"
]
}
[DEBUG] 2018-10-23T03:57:20.427Z bd7b329c-d677-11e8-8925-01840da49e11 Storing output with <CloudWatchLogOutput to group:/aws/enable-vpc-flow-logs/resources stream:enable-vpc-flow-logs>
[DEBUG] 2018-10-23T03:57:20.899Z bd7b329c-d677-11e8-8925-01840da49e11 Running policy enable-vpc-flow-logs resource: vpc region:us-east-1 c7n:0.8.31.2
[DEBUG] 2018-10-23T03:57:21.312Z bd7b329c-d677-11e8-8925-01840da49e11 Filtered from 1 to 1 vpc
[INFO] 2018-10-23T03:57:21.312Z bd7b329c-d677-11e8-8925-01840da49e11 policy: enable-vpc-flow-logs resource:vpc region:us-east-1 count:1 time:0.25
[DEBUG] 2018-10-23T03:57:21.321Z bd7b329c-d677-11e8-8925-01840da49e11 metric:ResourceCount Count:1 policy:enable-vpc-flow-logs restype:vpc scope:policy
[DEBUG] 2018-10-23T03:57:21.321Z bd7b329c-d677-11e8-8925-01840da49e11 metric:PolicyException Count:1 policy:enable-vpc-flow-logs restype:vpc
[DEBUG] 2018-10-23T03:57:21.321Z bd7b329c-d677-11e8-8925-01840da49e11 metric:ApiCalls Count:0 policy:enable-vpc-flow-logs restype:vpc
[ERROR] 2018-10-23T03:57:21.522Z bd7b329c-d677-11e8-8925-01840da49e11 Error while executing policy
Traceback (most recent call last):
File "/var/task/c7n/policy.py", line 273, in run
results = a.process(resources)
File "/var/task/c7n/resources/vpc.py", line 1810, in process
if not self.state:
AttributeError: 'CreateFlowLogs' object has no attribute 'state'
'CreateFlowLogs' object has no attribute 'state': AttributeError
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 91, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 834, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 516, in run
return PullMode.run(self)
File "/var/task/c7n/policy.py", line 273, in run
results = a.process(resources)
File "/var/task/c7n/resources/vpc.py", line 1810, in process
if not self.state:
AttributeError: 'CreateFlowLogs' object has no attribute 'state'
END RequestId: bd7b329c-d677-11e8-8925-01840da49e11
|
AttributeError
|
def register_resource(klass, registry, event):
for rtype, resource_manager in registry.items():
if not resource_manager.has_arn():
continue
if "post-finding" in resource_manager.action_registry:
continue
resource_manager.action_registry.register("post-finding", klass)
|
def register_resource(klass, registry, event):
for rtype, resource_manager in registry.items():
if "post-finding" in resource_manager.action_registry:
continue
resource_manager.action_registry.register("post-finding", klass)
|
https://github.com/cloud-custodian/cloud-custodian/issues/3388
|
2019-01-18 02:34:48,434: custodian.policy:INFO policy: support-tag-compliance-import-findings resource:aws.support-case region:us-east-1 count:2 time:0.47
2019-01-18 02:34:48,440: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/policy.py", line 264, in run
results = a.process(resources)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 202, in process
resource_set, finding_id, created_at, updated_at)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 312, in get_finding
finding_resources.append(self.format_resource(r))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 353, in format_resource
'Id': self.manager.get_arns([r])[0],
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 500, in get_arns
arns.append(self.generate_arn(_id))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 513, in generate_arn
resource_type=self.get_model().type,
AttributeError: type object 'resource_type' has no attribute 'type'
|
AttributeError
|
def register_resources(klass, registry, resource_class):
"""meta model subscriber on resource registration.
SecurityHub Findings Filter
"""
for rtype, resource_manager in registry.items():
if not resource_manager.has_arn():
continue
if "post-finding" in resource_manager.action_registry:
continue
resource_class.filter_registry.register("finding", klass)
|
def register_resources(klass, registry, resource_class):
"""meta model subscriber on resource registration.
SecurityHub Findings Filter
"""
for rtype, resource_manager in registry.items():
if "post-finding" in resource_manager.action_registry:
continue
resource_class.filter_registry.register("finding", klass)
|
https://github.com/cloud-custodian/cloud-custodian/issues/3388
|
2019-01-18 02:34:48,434: custodian.policy:INFO policy: support-tag-compliance-import-findings resource:aws.support-case region:us-east-1 count:2 time:0.47
2019-01-18 02:34:48,440: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/policy.py", line 264, in run
results = a.process(resources)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 202, in process
resource_set, finding_id, created_at, updated_at)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 312, in get_finding
finding_resources.append(self.format_resource(r))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 353, in format_resource
'Id': self.manager.get_arns([r])[0],
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 500, in get_arns
arns.append(self.generate_arn(_id))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 513, in generate_arn
resource_type=self.get_model().type,
AttributeError: type object 'resource_type' has no attribute 'type'
|
AttributeError
|
def get_arns(self, resources):
arns = []
m = self.get_model()
arn_key = getattr(m, "arn", None)
if arn_key is False:
raise ValueError("%s do not have arns" % self.type)
id_key = m.id
for r in resources:
_id = r[id_key]
if arn_key:
arns.append(r[arn_key])
elif "arn" in _id[:3]:
arns.append(_id)
else:
arns.append(self.generate_arn(_id))
return arns
|
def get_arns(self, resources):
arns = []
m = self.get_model()
arn_key = getattr(m, "arn", None)
id_key = m.id
for r in resources:
_id = r[id_key]
if arn_key:
arns.append(r[arn_key])
elif "arn" in _id[:3]:
arns.append(_id)
else:
arns.append(self.generate_arn(_id))
return arns
|
https://github.com/cloud-custodian/cloud-custodian/issues/3388
|
2019-01-18 02:34:48,434: custodian.policy:INFO policy: support-tag-compliance-import-findings resource:aws.support-case region:us-east-1 count:2 time:0.47
2019-01-18 02:34:48,440: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/policy.py", line 264, in run
results = a.process(resources)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 202, in process
resource_set, finding_id, created_at, updated_at)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 312, in get_finding
finding_resources.append(self.format_resource(r))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 353, in format_resource
'Id': self.manager.get_arns([r])[0],
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 500, in get_arns
arns.append(self.generate_arn(_id))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 513, in generate_arn
resource_type=self.get_model().type,
AttributeError: type object 'resource_type' has no attribute 'type'
|
AttributeError
|
def process_attached(self, client, associated_addrs):
for aa in list(associated_addrs):
try:
client.disassociate_address(AssociationId=aa["AssociationId"])
except ClientError as e:
# If its already been diassociated ignore, else raise.
if not (
e.response["Error"]["Code"] == "InvalidAssocationID.NotFound"
and aa["AssocationId"] in e.response["Error"]["Message"]
):
raise e
associated_addrs.remove(aa)
return associated_addrs
|
def process_attached(self, client, associated_addrs):
for aa in list(associated_addrs):
try:
client.disassociate_address(AssociationId=aa["AssociationId"])
except BotoClientError as e:
# If its already been diassociated ignore, else raise.
if not (
e.response["Error"]["Code"] == "InvalidAssocationID.NotFound"
and aa["AssocationId"] in e.response["Error"]["Message"]
):
raise e
associated_addrs.remove(aa)
return associated_addrs
|
https://github.com/cloud-custodian/cloud-custodian/issues/3388
|
2019-01-18 02:34:48,434: custodian.policy:INFO policy: support-tag-compliance-import-findings resource:aws.support-case region:us-east-1 count:2 time:0.47
2019-01-18 02:34:48,440: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/policy.py", line 264, in run
results = a.process(resources)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 202, in process
resource_set, finding_id, created_at, updated_at)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 312, in get_finding
finding_resources.append(self.format_resource(r))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 353, in format_resource
'Id': self.manager.get_arns([r])[0],
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 500, in get_arns
arns.append(self.generate_arn(_id))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 513, in generate_arn
resource_type=self.get_model().type,
AttributeError: type object 'resource_type' has no attribute 'type'
|
AttributeError
|
def process(self, network_addrs):
client = local_session(self.manager.session_factory).client("ec2")
force = self.data.get("force")
assoc_addrs = [addr for addr in network_addrs if "AssociationId" in addr]
unassoc_addrs = [addr for addr in network_addrs if "AssociationId" not in addr]
if len(assoc_addrs) and not force:
self.log.warning(
"Filtered %d attached eips of %d eips. Use 'force: true' to release them.",
len(assoc_addrs),
len(network_addrs),
)
elif len(assoc_addrs) and force:
unassoc_addrs = itertools.chain(
unassoc_addrs, self.process_attached(client, assoc_addrs)
)
for r in unassoc_addrs:
try:
client.release_address(AllocationId=r["AllocationId"])
except ClientError as e:
# If its already been released, ignore, else raise.
if e.response["Error"]["Code"] == "InvalidAllocationID.NotFound":
raise
|
def process(self, network_addrs):
client = local_session(self.manager.session_factory).client("ec2")
force = self.data.get("force")
assoc_addrs = [addr for addr in network_addrs if "AssociationId" in addr]
unassoc_addrs = [addr for addr in network_addrs if "AssociationId" not in addr]
if len(assoc_addrs) and not force:
self.log.warning(
"Filtered %d attached eips of %d eips. Use 'force: true' to release them.",
len(assoc_addrs),
len(network_addrs),
)
elif len(assoc_addrs) and force:
unassoc_addrs = itertools.chain(
unassoc_addrs, self.process_attached(client, assoc_addrs)
)
for r in unassoc_addrs:
try:
client.release_address(AllocationId=r["AllocationId"])
except BotoClientError as e:
# If its already been released, ignore, else raise.
if e.response["Error"]["Code"] == "InvalidAllocationID.NotFound":
raise
|
https://github.com/cloud-custodian/cloud-custodian/issues/3388
|
2019-01-18 02:34:48,434: custodian.policy:INFO policy: support-tag-compliance-import-findings resource:aws.support-case region:us-east-1 count:2 time:0.47
2019-01-18 02:34:48,440: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/policy.py", line 264, in run
results = a.process(resources)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 202, in process
resource_set, finding_id, created_at, updated_at)
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 312, in get_finding
finding_resources.append(self.format_resource(r))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/actions/securityhub.py", line 353, in format_resource
'Id': self.manager.get_arns([r])[0],
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 500, in get_arns
arns.append(self.generate_arn(_id))
File "/home/ec2-user/environment/cloud-custodian/.tox/py37/lib/python3.7/site-packages/c7n/query.py", line 513, in generate_arn
resource_type=self.get_model().type,
AttributeError: type object 'resource_type' has no attribute 'type'
|
AttributeError
|
def metrics_cmd(options, policies):
log.warning("metrics command is deprecated, and will be removed in future")
policies = [p for p in policies if p.provider_name == "aws"]
start, end = _metrics_get_endpoints(options)
data = {}
for p in policies:
log.info("Getting %s metrics", p)
data[p.name] = p.get_metrics(start, end, options.period)
print(dumps(data, indent=2))
|
def metrics_cmd(options, policies):
start, end = _metrics_get_endpoints(options)
data = {}
for p in policies:
log.info("Getting %s metrics", p)
data[p.name] = p.get_metrics(start, end, options.period)
print(dumps(data, indent=2))
|
https://github.com/cloud-custodian/cloud-custodian/issues/3555
|
(cloud-custodian) $ custodian metrics policies/policy.yml
2019-02-20 11:19:18,346: custodian.azure.session:INFO Creating session with Azure CLI Authentication
2019-02-20 11:19:18,347: custodian.azure.session:INFO Session using Subscription ID: <my sub redacted>
2019-02-20 11:19:18,347: custodian.commands:INFO Getting <Policy resource: azure.resourcegroup name: delete-empty-resource-groups region: > metrics
Traceback (most recent call last):
File "/Users/andyluong/Projects/forks/cloud-custodian/bin/custodian", line 11, in <module>
load_entry_point('c7n', 'console_scripts', 'custodian')()
File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/cli.py", line 368, in main
command(config)
File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/commands.py", line 136, in _load_policies
return f(options, list(policies))
File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/commands.py", line 491, in metrics_cmd
data[p.name] = p.get_metrics(start, end, options.period)
File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/policy.py", line 912, in get_metrics
return mode.get_metrics(start, end, period)
File "/Users/andyluong/Projects/forks/cloud-custodian/c7n/policy.py", line 170, in get_metrics
client = session.client('cloudwatch')
File "/Users/andyluong/Projects/forks/cloud-custodian/tools/c7n_azure/c7n_azure/session.py", line 148, in client
service_name, client_name = client.rsplit('.', 1)
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def initialize(self, options):
""" """
_default_region(options)
_default_account_id(options)
if options.tracer and options.tracer.startswith("xray") and HAVE_XRAY:
XrayTracer.initialize()
return options
|
def initialize(self, options):
""" """
_default_region(options)
_default_account_id(options)
if options.tracer:
XrayTracer.initialize()
return options
|
https://github.com/cloud-custodian/cloud-custodian/issues/3301
|
$ pip list|grep c7n
c7n 0.8.33.0
c7n-org 0.5.0
$ cat list-ec2-instances.yaml
policies:
- name: list-ec2-instances
resource: ec2
$ custodian run -s ~/output -r us-east-1 list-ec2-instances.yaml
2019-01-03 22:20:42,075: custodian.policy:INFO policy: list-ec2-instances resource:ec2 region:us-east-1 count:1 time:0.37
$ custodian report -s ~/output -r us-east-1 list-ec2-instances.yaml
Traceback (most recent call last):
File "/home/ec2-user/custodian/bin/custodian", line 11, in <module>
sys.exit(main())
File "/home/ec2-user/custodian/local/lib/python2.7/site-packages/c7n/cli.py", line 368, in main
command(config)
File "/home/ec2-user/custodian/local/lib/python2.7/site-packages/c7n/commands.py", line 105, in _load_policies
p_options = provider.initialize(options)
File "/home/ec2-user/custodian/local/lib/python2.7/site-packages/c7n/resources/aws.py", line 399, in initialize
XrayTracer.initialize()
File "/home/ec2-user/custodian/local/lib/python2.7/site-packages/c7n/resources/aws.py", line 222, in initialize
xray_recorder.configure(
NameError: global name 'xray_recorder' is not defined
$ cat rollback.txt
c7n==0.8.32.0
$ pip install --upgrade --force-reinstall -r rollback.txt
$ pip list|grep c7n
c7n 0.8.32.0
c7n-org 0.5.0
$ custodian report -s ~/output -r us-east-1 list-ec2-instances.yaml
CustodianDate,InstanceId,tag:Name,InstanceType,LaunchTime,VpcId,PrivateIpAddress
2019-01-03 22:20:42.074099,i-028454455c3bb6e1c,Cloud Custodian Host,t3.micro,2019-01-03T21:23:51+00:00,vpc-b869c1c0,172.31.37.231
|
NameError
|
def process(self, resources, events=None):
if self.manager:
sweeper = AnnotationSweeper(self.manager.get_model().id, resources)
for f in self.filters:
resources = f.process(resources, events)
if not resources:
break
if self.manager:
sweeper.sweep(resources)
return resources
|
def process(self, resources, events=None):
if self.manager:
sweeper = AnnotationSweeper(self.manager.get_model().id, resources)
for f in self.filters:
resources = f.process(resources, events)
if self.manager:
sweeper.sweep(resources)
return resources
|
https://github.com/cloud-custodian/cloud-custodian/issues/3052
|
Traceback (most recent call last):
File "/cloud-custodian/c7n/policy.py", line 241, in run
resources = self.policy.resource_manager.resources()
File "/cloud-custodian/c7n/resources/account.py", line 71, in resources
return self.filter_resources([get_account(self.session_factory, self.config)])
File "/cloud-custodian/c7n/manager.py", line 104, in filter_resources
resources = f.process(resources, event)
File "/cloud-custodian/c7n/filters/core.py", line 243, in process
return self.process_set(resources, event)
File "/cloud-custodian/c7n/filters/core.py", line 262, in process_set
resources = f.process(resources, event)
File "/cloud-custodian/c7n/filters/core.py", line 225, in process
resources = f.process(resources, events)
File "/cloud-custodian/c7n/resources/account.py", line 1010, in process
resources[0]['c7n:XrayEncryptionConfig'] = gec_result
IndexError: list index out of range
|
IndexError
|
def process_set(self, resources, event):
resource_type = self.manager.get_model()
resource_map = {r[resource_type.id]: r for r in resources}
sweeper = AnnotationSweeper(resource_type.id, resources)
for f in self.filters:
resources = f.process(resources, event)
if not resources:
break
before = set(resource_map.keys())
after = set([r[resource_type.id] for r in resources])
results = before - after
sweeper.sweep([])
return [resource_map[r_id] for r_id in results]
|
def process_set(self, resources, event):
resource_type = self.manager.get_model()
resource_map = {r[resource_type.id]: r for r in resources}
sweeper = AnnotationSweeper(resource_type.id, resources)
for f in self.filters:
resources = f.process(resources, event)
before = set(resource_map.keys())
after = set([r[resource_type.id] for r in resources])
results = before - after
sweeper.sweep([])
return [resource_map[r_id] for r_id in results]
|
https://github.com/cloud-custodian/cloud-custodian/issues/3052
|
Traceback (most recent call last):
File "/cloud-custodian/c7n/policy.py", line 241, in run
resources = self.policy.resource_manager.resources()
File "/cloud-custodian/c7n/resources/account.py", line 71, in resources
return self.filter_resources([get_account(self.session_factory, self.config)])
File "/cloud-custodian/c7n/manager.py", line 104, in filter_resources
resources = f.process(resources, event)
File "/cloud-custodian/c7n/filters/core.py", line 243, in process
return self.process_set(resources, event)
File "/cloud-custodian/c7n/filters/core.py", line 262, in process_set
resources = f.process(resources, event)
File "/cloud-custodian/c7n/filters/core.py", line 225, in process
resources = f.process(resources, events)
File "/cloud-custodian/c7n/resources/account.py", line 1010, in process
resources[0]['c7n:XrayEncryptionConfig'] = gec_result
IndexError: list index out of range
|
IndexError
|
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client("logs")
accounts = self.get_accounts()
results = []
with self.executor_factory(max_workers=1) as w:
futures = []
for rset in chunks(resources, 50):
futures.append(w.submit(self.process_resource_set, client, accounts, rset))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Error checking log groups cross-account %s", f.exception()
)
continue
results.extend(f.result())
return results
|
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client("logs")
accounts = self.get_accounts()
results = []
with self.executor_factory(max_workers=2) as w:
futures = []
for rset in chunks(resources, 50):
futures.append(w.submit(self.process_resource_set, client, accounts, rset))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Error checking log groups cross-account %s", f.exception()
)
continue
results.extend(f.result())
return results
|
https://github.com/cloud-custodian/cloud-custodian/issues/2939
|
type object 'resource_type' has no attribute 'filter_type': AttributeError
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 91, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 776, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 441, in run
resources = self.resolve_resources(event)
File "/var/task/c7n/policy.py", line 418, in resolve_resources
resources = self.policy.resource_manager.get_resources(resource_ids)
File "/var/task/c7n/query.py", line 441, in get_resources
resources = self.source.get_resources(ids)
File "/var/task/c7n/query.py", line 212, in get_resources
return self.query.get(self.manager, ids)
File "/var/task/c7n/query.py", line 93, in get
if m.filter_type == 'list':
AttributeError: type object 'resource_type' has no attribute 'filter_type'
|
AttributeError
|
def get_snapshots(self, ec2, snap_ids):
"""get snapshots corresponding to id, but tolerant of invalid id's."""
while snap_ids:
try:
result = ec2.describe_snapshots(SnapshotIds=snap_ids)
except ClientError as e:
bad_snap = NotEncryptedFilter.get_bad_snapshot(e)
if bad_snap:
snap_ids.remove(bad_snap)
continue
raise
else:
return result.get("Snapshots", ())
return ()
|
def get_snapshots(self, ec2, snap_ids):
"""get snapshots corresponding to id, but tolerant of invalid id's."""
while True:
try:
result = ec2.describe_snapshots(SnapshotIds=snap_ids)
except ClientError as e:
bad_snap = NotEncryptedFilter.get_bad_snapshot(e)
if bad_snap:
snap_ids.remove(bad_snap)
continue
raise
else:
return result.get("Snapshots", ())
|
https://github.com/cloud-custodian/cloud-custodian/issues/2467
|
'snap-xxxxxxxx': KeyError
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 89, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 739, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 413, in run
resources, event)
File "/var/task/c7n/manager.py", line 90, in filter_resources
resources = f.process(resources, event)
File "/var/task/c7n/resources/asg.py", line 433, in process
self.initialize(asgs)
File "/var/task/c7n/resources/asg.py", line 457, in initialize
self.unencrypted_configs = self.get_unencrypted_configs(ec2)
File "/var/task/c7n/resources/asg.py", line 526, in get_unencrypted_configs
unencrypted_configs.update(snaps[s['SnapshotId']])
KeyError: 'snap-xxxxxxxx'
|
KeyError
|
def main():
parser = setup_parser()
argcomplete.autocomplete(parser)
options = parser.parse_args()
if options.subparser is None:
parser.print_help(file=sys.stderr)
return sys.exit(2)
_setup_logger(options)
# Support the deprecated -c option
if getattr(options, "config", None) is not None:
options.configs.append(options.config)
config = Config.empty(**vars(options))
try:
command = options.command
if not callable(command):
command = getattr(
importlib.import_module(command.rsplit(".", 1)[0]),
command.rsplit(".", 1)[-1],
)
# Set the process name to something cleaner
process_name = [os.path.basename(sys.argv[0])]
process_name.extend(sys.argv[1:])
setproctitle(" ".join(process_name))
command(config)
except Exception:
if not options.debug:
raise
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
|
def main():
parser = setup_parser()
argcomplete.autocomplete(parser)
options = parser.parse_args()
_setup_logger(options)
# Support the deprecated -c option
if getattr(options, "config", None) is not None:
options.configs.append(options.config)
config = Config.empty(**vars(options))
try:
command = options.command
if not callable(command):
command = getattr(
importlib.import_module(command.rsplit(".", 1)[0]),
command.rsplit(".", 1)[-1],
)
# Set the process name to something cleaner
process_name = [os.path.basename(sys.argv[0])]
process_name.extend(sys.argv[1:])
setproctitle(" ".join(process_name))
command(config)
except Exception:
if not options.debug:
raise
traceback.print_exc()
pdb.post_mortem(sys.exc_info()[-1])
|
https://github.com/cloud-custodian/cloud-custodian/issues/2332
|
$ custodian
Traceback (most recent call last):
File "/Users/kapil/projects/cloud-custodian/bin/custodian", line 11, in <module>
load_entry_point('c7n', 'console_scripts', 'custodian')()
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 335, in main
_setup_logger(options)
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 301, in _setup_logger
level = 3 + (options.verbose or 0) - (options.quiet or 0)
AttributeError: 'Namespace' object has no attribute 'verbose'
|
AttributeError
|
def add_directory(self, path):
"""Add ``*.py`` files under the directory ``path`` to the archive."""
for root, dirs, files in os.walk(path):
arc_prefix = os.path.relpath(root, os.path.dirname(path))
for f in files:
if f.endswith(".pyc") or f.endswith(".c"):
continue
f_path = os.path.join(root, f)
dest_path = os.path.join(arc_prefix, f)
self.add_file(f_path, dest_path)
|
def add_directory(self, path):
"""Add ``*.py`` files under the directory ``path`` to the archive."""
for root, dirs, files in os.walk(path):
arc_prefix = os.path.relpath(root, os.path.dirname(path))
for f in files:
if not f.endswith(".py"):
continue
f_path = os.path.join(root, f)
dest_path = os.path.join(arc_prefix, f)
self.add_file(f_path, dest_path)
|
https://github.com/cloud-custodian/cloud-custodian/issues/2332
|
$ custodian
Traceback (most recent call last):
File "/Users/kapil/projects/cloud-custodian/bin/custodian", line 11, in <module>
load_entry_point('c7n', 'console_scripts', 'custodian')()
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 335, in main
_setup_logger(options)
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 301, in _setup_logger
level = 3 + (options.verbose or 0) - (options.quiet or 0)
AttributeError: 'Namespace' object has no attribute 'verbose'
|
AttributeError
|
def __init__(self, ctx):
super(DirectoryOutput, self).__init__(ctx)
if self.root_dir.startswith("file://"):
self.root_dir = self.root_dir[len("file://") :]
if self.ctx.output_path is not None:
if not os.path.exists(self.root_dir):
os.makedirs(self.root_dir)
|
def __init__(self, ctx):
super(DirectoryOutput, self).__init__(ctx)
if self.root_dir.startswith("file://"):
self.root_dir = self.root_dir[len("file://") :]
if self.ctx.output_path is not None:
if not os.path.exists(self.ctx.output_path):
os.makedirs(self.ctx.output_path)
|
https://github.com/cloud-custodian/cloud-custodian/issues/2332
|
$ custodian
Traceback (most recent call last):
File "/Users/kapil/projects/cloud-custodian/bin/custodian", line 11, in <module>
load_entry_point('c7n', 'console_scripts', 'custodian')()
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 335, in main
_setup_logger(options)
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 301, in _setup_logger
level = 3 + (options.verbose or 0) - (options.quiet or 0)
AttributeError: 'Namespace' object has no attribute 'verbose'
|
AttributeError
|
def get_archive(config):
archive = PythonPackageArchive(
"c7n_mailer",
# core deps
"jinja2",
"markupsafe",
"ruamel",
"ldap3",
"pyasn1",
"redis",
# transport datadog - recursive deps
"datadog",
"simplejson",
"decorator",
# transport slack - recursive deps
"slackclient",
"websocket",
# requests (recursive deps), needed by datadog and slackclient
"requests",
"urllib3",
"idna",
"chardet",
"certifi",
)
template_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "msg-templates")
)
for t in os.listdir(template_dir):
with open(os.path.join(template_dir, t)) as fh:
archive.add_contents("msg-templates/%s" % t, fh.read())
archive.add_contents("config.json", json.dumps(config))
archive.add_contents("periodic.py", entry_source)
archive.close()
return archive
|
def get_archive(config):
archive = PythonPackageArchive(
"c7n_mailer",
"ldap3",
"pyasn1",
"jinja2",
"markupsafe",
"ruamel",
"redis",
"datadog",
"slackclient",
"requests",
)
template_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "msg-templates")
)
for t in os.listdir(template_dir):
with open(os.path.join(template_dir, t)) as fh:
archive.add_contents("msg-templates/%s" % t, fh.read())
archive.add_contents("config.json", json.dumps(config))
archive.add_contents("periodic.py", entry_source)
archive.close()
return archive
|
https://github.com/cloud-custodian/cloud-custodian/issues/2332
|
$ custodian
Traceback (most recent call last):
File "/Users/kapil/projects/cloud-custodian/bin/custodian", line 11, in <module>
load_entry_point('c7n', 'console_scripts', 'custodian')()
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 335, in main
_setup_logger(options)
File "/Users/kapil/projects/cloud-custodian/c7n/cli.py", line 301, in _setup_logger
level = 3 + (options.verbose or 0) - (options.quiet or 0)
AttributeError: 'Namespace' object has no attribute 'verbose'
|
AttributeError
|
def account_tags(account):
tags = {"AccountName": account["name"], "AccountId": account["account_id"]}
for t in account.get("tags", ()):
if ":" not in t:
continue
k, v = t.split(":", 1)
k = "Account%s" % k.capitalize()
tags[k] = v
return tags
|
def account_tags(account):
tags = {"AccountName": account["name"], "AccountId": account["account_id"]}
for t in account.get("tags"):
if not ":" in t:
continue
k, v = t.split(":", 1)
k = "Account%s" % k.capitalize()
tags[k] = v
return tags
|
https://github.com/cloud-custodian/cloud-custodian/issues/1828
|
c7n-org run --debug -c config/accounts.yml -a tscloud -u policies/tscloud/tag-compliance.yml -s /tmp/c7n --region us-west-2
Traceback (most recent call last):
File "/home/cbouscal/.local/bin/c7n-org", line 11, in <module>
sys.exit(cli())
File "/usr/lib/python2.7/site-packages/click/core.py", line 716, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 696, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 1060, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python2.7/site-packages/click/core.py", line 889, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/site-packages/click/core.py", line 534, in invoke
return callback(*args, **kwargs)
File "/home/cbouscal/.local/lib/python2.7/site-packages/c7n_org/cli.py", line 387, in run
debug)] = (a, r)
File "/usr/lib/python2.7/site-packages/c7n/executor.py", line 63, in submit
return MainThreadFuture(func(*args, **kw))
File "/home/cbouscal/.local/lib/python2.7/site-packages/c7n_org/cli.py", line 334, in run_account
with environ(**account_tags(account)):
File "/home/cbouscal/.local/lib/python2.7/site-packages/c7n_org/utils.py", line 6, in account_tags
for t in account.get('tags'):
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def handle_BucketLifecycleConfiguration(self, resource, item_value):
rules = []
for r in item_value.get("rules"):
rr = {}
rules.append(rr)
expiry = {}
for ek, ck in (
("Date", "expirationDate"),
("ExpiredObjectDeleteMarker", "expiredObjectDeleteMarker"),
("Days", "expirationInDays"),
):
if r[ck] and r[ck] != -1:
expiry[ek] = r[ck]
if expiry:
rr["Expiration"] = expiry
transitions = []
for t in r.get("transitions") or ():
tr = {}
for k in ("date", "days", "storageClass"):
if t[k]:
tr["%s%s" % (k[0].upper(), k[1:])] = t[k]
transitions.append(tr)
if transitions:
rr["Transitions"] = transitions
if r.get("abortIncompleteMultipartUpload"):
rr["AbortIncompleteMultipartUpload"] = {
"DaysAfterInitiation": r["abortIncompleteMultipartUpload"][
"daysAfterInitiation"
]
}
if r.get("noncurrentVersionExpirationInDays"):
rr["NoncurrentVersionExpiration"] = {
"NoncurrentDays": r["noncurrentVersionExpirationInDays"]
}
nonc_transitions = []
for t in r.get("noncurrentVersionTransitions") or ():
nonc_transitions.append(
{"NoncurrentDays": t["days"], "StorageClass": t["storageClass"]}
)
if nonc_transitions:
rr["NoncurrentVersionTransitions"] = nonc_transitions
rr["Status"] = r["status"]
rr["ID"] = r["id"]
if r.get("prefix"):
rr["Prefix"] = r["prefix"]
if "filter" not in r or not r["filter"]:
continue
if r["filter"]["predicate"]:
rr["Filter"] = self.convertLifePredicate(r["filter"]["predicate"])
resource["Lifecycle"] = {"Rules": rules}
|
def handle_BucketLifecycleConfiguration(self, resource, item_value):
rules = []
for r in item_value.get("rules"):
rr = {}
rules.append(rr)
expiry = {}
for ek, ck in (
("Date", "expirationDate"),
("ExpiredObjectDeleteMarker", "expiredObjectDeleteMarker"),
("Days", "expirationInDays"),
):
if r[ck] and r[ck] != -1:
expiry[ek] = r[ck]
if expiry:
rr["Expiration"] = expiry
transitions = []
for t in r.get("transitions") or ():
tr = {}
for k in ("date", "days", "storageClass"):
if t[k]:
tr["%s%s" % (k[0].upper(), k[1:])] = t[k]
transitions.append(tr)
if transitions:
rr["Transitions"] = transitions
if r.get("abortIncompleteMultipartUpload"):
rr["AbortIncompleteMultipartUpload"] = {
"DaysAfterInitiation": r["abortIncompleteMultipartUpload"][
"daysAfterInitiation"
]
}
if r.get("noncurrentVersionExpirationInDays"):
rr["NoncurrentVersionExpiration"] = {
"NoncurrentDays": r["noncurrentVersionExpirationInDays"]
}
nonc_transitions = []
for t in r.get("noncurrentVersionTransitions") or ():
nonc_transitions.append(
{"NoncurrentDays": t["days"], "StorageClass": t["storageClass"]}
)
if nonc_transitions:
rr["NoncurrentVersionTransitions"] = nonc_transitions
rr["Status"] = r["status"]
rr["ID"] = r["id"]
if r.get("prefix"):
rr["Prefix"] = r["prefix"]
if "filter" not in r or not r["filter"]:
continue
rr["Filter"] = self.convertLifePredicate(r["filter"]["predicate"])
resource["Lifecycle"] = {"Rules": rules}
|
https://github.com/cloud-custodian/cloud-custodian/issues/1691
|
2017-10-22 11:25:17,078: policyrun:DEBUG Running policy:blahblah account:acct1 region:us-east-1
2017-10-22 11:25:44,412: custodian.resources.s3:ERROR Exception getting resources from config
u'redirectsAllRequestsTo'
2017-10-22 11:25:44,414: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/usr/local/custodian/site-packages/c7n/policy.py", line 305, in run
resources = self.policy.resource_manager.resources()
File "/usr/local/custodian/site-packages/c7n/query.py", line 395, in resources
resources = self.augment(self.source.resources(query))
File "/usr/local/custodian/site-packages/c7n/query.py", line 305, in resources
results.extend(f.result())
File "/usr/local/custodian/site-packages/concurrent/futures/_base.py", line 422, in result
return self.__get_result()
File "/usr/local/custodian/site-packages/concurrent/futures/thread.py", line 62, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/custodian/site-packages/c7n/query.py", line 271, in get_resources
results.append(self.load_resource(revisions[0]))
File "/usr/local/custodian/site-packages/c7n/resources/s3.py", line 152, in load_resource
method(resource, v)
File "/usr/local/custodian/site-packages/c7n/resources/s3.py", line 343, in handle_BucketWebsiteConfiguration
'HostName': item_value['redirectsAllRequestsTo']['hostName'],
KeyError: u'redirectsAllRequestsTo'
|
KeyError
|
def handle_BucketWebsiteConfiguration(self, resource, item_value):
website = {}
if item_value["indexDocumentSuffix"]:
website["IndexDocument"] = {"Suffix": item_value["indexDocumentSuffix"]}
if item_value["errorDocument"]:
website["ErrorDocument"] = {"Key": item_value["errorDocument"]}
if item_value["redirectAllRequestsTo"]:
website["RedirectAllRequestsTo"] = {
"HostName": item_value["redirectAllRequestsTo"]["hostName"],
"Protocol": item_value["redirectAllRequestsTo"]["protocol"],
}
for r in item_value["routingRules"]:
redirect = {}
rule = {"Redirect": redirect}
website.setdefault("RoutingRules", []).append(rule)
if "condition" in r:
cond = {}
for ck, rk in (
("keyPrefixEquals", "KeyPrefixEquals"),
("httpErrorCodeReturnedEquals", "HttpErrorCodeReturnedEquals"),
):
if r["condition"][ck]:
cond[rk] = r["condition"][ck]
rule["Condition"] = cond
for ck, rk in (
("protocol", "Protocol"),
("hostName", "HostName"),
("replaceKeyPrefixWith", "ReplaceKeyPrefixWith"),
("replaceKeyWith", "ReplaceKeyWith"),
("httpRedirectCode", "HttpRedirectCode"),
):
if r["redirect"][ck]:
redirect[rk] = r["redirect"][ck]
resource["Website"] = website
|
def handle_BucketWebsiteConfiguration(self, resource, item_value):
website = {}
if item_value["indexDocumentSuffix"]:
website["IndexDocument"] = {"Suffix": item_value["indexDocumentSuffix"]}
if item_value["errorDocument"]:
website["ErrorDocument"] = {"Key": item_value["errorDocument"]}
if item_value["redirectAllRequestsTo"]:
website["RedirectAllRequestsTo"] = {
"HostName": item_value["redirectsAllRequestsTo"]["hostName"],
"Protocol": item_value["redirectsAllRequestsTo"]["protocol"],
}
for r in item_value["routingRules"]:
redirect = {}
rule = {"Redirect": redirect}
website.setdefault("RoutingRules", []).append(rule)
if "condition" in r:
cond = {}
for ck, rk in (
("keyPrefixEquals", "KeyPrefixEquals"),
("httpErrorCodeReturnedEquals", "HttpErrorCodeReturnedEquals"),
):
if r["condition"][ck]:
cond[rk] = r["condition"][ck]
rule["Condition"] = cond
for ck, rk in (
("protocol", "Protocol"),
("hostName", "HostName"),
("replaceKeyPrefixWith", "ReplaceKeyPrefixWith"),
("replaceKeyWith", "ReplaceKeyWith"),
("httpRedirectCode", "HttpRedirectCode"),
):
if r["redirect"][ck]:
redirect[rk] = r["redirect"][ck]
resource["Website"] = website
|
https://github.com/cloud-custodian/cloud-custodian/issues/1691
|
2017-10-22 11:25:17,078: policyrun:DEBUG Running policy:blahblah account:acct1 region:us-east-1
2017-10-22 11:25:44,412: custodian.resources.s3:ERROR Exception getting resources from config
u'redirectsAllRequestsTo'
2017-10-22 11:25:44,414: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/usr/local/custodian/site-packages/c7n/policy.py", line 305, in run
resources = self.policy.resource_manager.resources()
File "/usr/local/custodian/site-packages/c7n/query.py", line 395, in resources
resources = self.augment(self.source.resources(query))
File "/usr/local/custodian/site-packages/c7n/query.py", line 305, in resources
results.extend(f.result())
File "/usr/local/custodian/site-packages/concurrent/futures/_base.py", line 422, in result
return self.__get_result()
File "/usr/local/custodian/site-packages/concurrent/futures/thread.py", line 62, in run
result = self.fn(*self.args, **self.kwargs)
File "/usr/local/custodian/site-packages/c7n/query.py", line 271, in get_resources
results.append(self.load_resource(revisions[0]))
File "/usr/local/custodian/site-packages/c7n/resources/s3.py", line 152, in load_resource
method(resource, v)
File "/usr/local/custodian/site-packages/c7n/resources/s3.py", line 343, in handle_BucketWebsiteConfiguration
'HostName': item_value['redirectsAllRequestsTo']['hostName'],
KeyError: u'redirectsAllRequestsTo'
|
KeyError
|
def camelResource(obj):
"""Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later
"""
if not isinstance(obj, dict):
return obj
for k in list(obj.keys()):
v = obj.pop(k)
obj["%s%s" % (k[0].upper(), k[1:])] = v
if isinstance(v, dict):
camelResource(v)
elif isinstance(v, list):
list(map(camelResource, v))
return obj
|
def camelResource(obj):
"""Some sources from apis return lowerCased where as describe calls
always return TitleCase, this function turns the former to the later
"""
if not isinstance(obj, dict):
return obj
for k in list(obj.keys()):
v = obj.pop(k)
obj["%s%s" % (k[0].upper(), k[1:])] = v
if isinstance(v, dict):
camelResource(v)
elif isinstance(v, list):
map(camelResource, v)
return obj
|
https://github.com/cloud-custodian/cloud-custodian/issues/1221
|
argument of type 'NoneType' is not iterable: TypeError
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 86, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 579, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 489, in run
resources = super(ConfigRuleMode, self).run(event, lambda_context)
File "/var/task/c7n/policy.py", line 387, in run
resources, event)
File "/var/task/c7n/manager.py", line 86, in filter_resources
resources = f.process(resources, event)
File "/var/task/c7n/resources/vpc.py", line 692, in process
return super(SGPermission, self).process(resources, event)
File "/var/task/c7n/filters/core.py", line 161, in process
return filter(self, resources)
File "/var/task/c7n/resources/vpc.py", line 775, in __call__
cidr_found = self.process_cidrs(perm)
File "/var/task/c7n/resources/vpc.py", line 718, in process_cidrs
found = vf(ip_range)
File "/var/task/c7n/filters/core.py", line 330, in __call__
matched = self.match(i)
File "/var/task/c7n/filters/core.py", line 386, in match
v, r = self.process_value_type(self.v, r)
File "/var/task/c7n/filters/core.py", line 445, in process_value_type
v = parse_cidr(value)
File "/var/task/c7n/utils.py", line 325, in parse_cidr
if '/' not in value:
TypeError: argument of type 'NoneType' is not iterable
|
TypeError
|
def get(self, resource_type, identities):
"""Get resources by identities"""
m = self.resolve(resource_type)
params = {}
client_filter = False
# Try to formulate server side query
if m.filter_name:
if m.filter_type == "list":
params[m.filter_name] = identities
elif m.filter_type == "scalar":
assert len(identities) == 1, "Scalar server side filter"
params[m.filter_name] = identities[0]
else:
client_filter = True
resources = self.filter(resource_type, **params)
if client_filter:
# This logic was added to prevent the issue from:
# https://github.com/capitalone/cloud-custodian/issues/1398
if all(map(lambda r: isinstance(r, six.string_types), resources)):
resources = [r for r in resources if r in identities]
else:
resources = [r for r in resources if r[m.id] in identities]
return resources
|
def get(self, resource_type, identities):
"""Get resources by identities"""
m = self.resolve(resource_type)
params = {}
client_filter = False
# Try to formulate server side query
if m.filter_name:
if m.filter_type == "list":
params[m.filter_name] = identities
elif m.filter_type == "scalar":
assert len(identities) == 1, "Scalar server side filter"
params[m.filter_name] = identities[0]
else:
client_filter = True
resources = self.filter(resource_type, **params)
if client_filter:
resources = [r for r in resources if r[m.id] in identities]
return resources
|
https://github.com/cloud-custodian/cloud-custodian/issues/1398
|
string indices must be integers: TypeError
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 109, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 650, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 428, in run
resources = self.resolve_resources(event)
File "/var/task/c7n/policy.py", line 412, in resolve_resources
resources = self.policy.resource_manager.get_resources(resource_ids)
File "/var/task/c7n/query.py", line 308, in get_resources
resources = self.augment(self.source.get_resources(ids))
File "/var/task/c7n/query.py", line 147, in get_resources
return self.query.get(self.manager.resource_type, ids)
File "/var/task/c7n/query.py", line 94, in get
resources = [r for r in resources if r[m.id] in identities]
TypeError: string indices must be integers
|
TypeError
|
def get_appelb_target_groups(self):
manager = self.manager.get_resource_manager("app-elb-target-group")
return set([a["TargetGroupArn"] for a in manager.resources()])
|
def get_appelb_target_groups(self):
manager = self.manager.get_resource_manager("app-elb")
return set([a["TargetGroupArn"] for a in manager.resources()])
|
https://github.com/cloud-custodian/cloud-custodian/issues/825
|
2017-01-06 20:10:44,873: custodian.resources.keypair:INFO Filtered from 110 to 110 keypair
2017-01-06 20:10:46,097: custodian.resources.elb:INFO Filtered from 2 to 2 elb
2017-01-06 20:10:47,047: custodian.resources.appelb:INFO Filtered from 1 to 1 appelb
2017-01-06 20:10:47,048: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/src/c7n/policy.py", line 174, in run
resources = self.policy.resource_manager.resources()
File "/src/c7n/query.py", line 171, in resources
return self.filter_resources(resources)
File "/src/c7n/manager.py", line 73, in filter_resources
resources = f.process(resources, event)
File "/src/c7n/resources/asg.py", line 236, in process
self.initialize(asgs)
File "/src/c7n/resources/asg.py", line 188, in initialize
self.appelb_target_groups = self.get_appelb_target_groups()
File "/src/c7n/resources/asg.py", line 210, in get_appelb_target_groups
return set([a['TargetGroupArn'] for a in manager.resources()])
KeyError: 'TargetGroupArn'
|
KeyError
|
def check_cross_account(policy_text, allowed_accounts):
"""Find cross account access policy grant not explicitly allowed"""
if isinstance(policy_text, basestring):
policy = json.loads(policy_text)
else:
policy = policy_text
violations = []
for s in policy["Statement"]:
principal_ok = True
if s["Effect"] != "Allow":
continue
# Highly suspect in an allow
if "NotPrincipal" in s:
violations.append(s)
continue
# Does this wildcard
if "Principal" not in s:
violations.append(s)
continue
# Skip relays for events to sns
if "Service" in s["Principal"]:
s["Principal"].pop("Service")
if not s["Principal"]:
continue
assert len(s["Principal"]) == 1, "Too many principals %s" % s
# At this point principal is required?
p = (
isinstance(s["Principal"], basestring)
and s["Principal"]
or s["Principal"]["AWS"]
)
p = isinstance(p, basestring) and (p,) or p
for pid in p:
if pid == "*":
principal_ok = False
elif pid.startswith("arn:aws:iam::cloudfront:user"):
continue
else:
account_id = _account(pid)
if account_id not in allowed_accounts:
principal_ok = False
if principal_ok:
continue
if "Condition" not in s:
violations.append(s)
continue
if "StringEquals" in s["Condition"]:
# Default SNS Policy does this
if "AWS:SourceOwner" in s["Condition"]["StringEquals"]:
so = s["Condition"]["StringEquals"]["AWS:SourceOwner"]
if not isinstance(so, list):
so = [so]
so = [pso for pso in so if pso not in allowed_accounts]
if not so:
principal_ok = True
# Default keys in kms do this
if "kms:CallerAccount" in s["Condition"]["StringEquals"]:
so = s["Condition"]["StringEquals"]["kms:CallerAccount"]
if so in allowed_accounts:
principal_ok = True
## BEGIN S3 WhiteList
## Note these are transient white lists for s3
## we need to refactor this to verify ip against a
## cidr white list, and verify vpce/vpc against the
## accounts.
# For now allow vpce/vpc conditions as sufficient on s3
if s["Condition"]["StringEquals"].keys()[0] in (
"aws:sourceVpce",
"aws:sourceVpce",
):
principal_ok = True
if "StringLike" in s["Condition"]:
# For now allow vpce/vpc conditions as sufficient on s3
if s["Condition"]["StringLike"].keys()[0].lower() == "aws:sourcevpce":
principal_ok = True
if "ForAnyValue:StringLike" in s["Condition"]:
if (
s["Condition"]["ForAnyValue:StringLike"].keys()[0].lower()
== "aws:sourcevpce"
):
principal_ok = True
if "IpAddress" in s["Condition"]:
principal_ok = True
## END S3 WhiteList
if "ArnEquals" in s["Condition"]:
# Other valid arn equals? / are invalids allowed?
# duplicate block from below, inline closure func
# would remove, but slower, else move to class eval
principal_ok = True
keys = ("aws:SourceArn", "AWS:SourceArn")
for k in keys:
if k in s["Condition"]["ArnEquals"]:
v = s["Condition"]["ArnEquals"][k]
if v is None:
violations.append(s)
else:
v = isinstance(v, basestring) and (v,) or v
for arn in v:
aid = _account(arn)
if aid not in allowed_accounts:
violations.append(s)
if "ArnLike" in s["Condition"]:
# Other valid arn equals? / are invalids allowed?
for k in ("aws:SourceArn", "AWS:SourceArn"):
v = s["Condition"]["ArnLike"].get(k)
if v:
break
v = isinstance(v, basestring) and (v,) or v
principal_ok = True
for arn in v:
aid = _account(arn)
if aid not in allowed_accounts:
violations.append(s)
if not principal_ok:
violations.append(s)
return violations
|
def check_cross_account(policy_text, allowed_accounts):
"""Find cross account access policy grant not explicitly allowed"""
if isinstance(policy_text, basestring):
policy = json.loads(policy_text)
else:
policy = policy_text
violations = []
for s in policy["Statement"]:
principal_ok = True
if s["Effect"] != "Allow":
continue
# Highly suspect in an allow
if "NotPrincipal" in s:
violations.append(s)
continue
# Does this wildcard
if "Principal" not in s:
violations.append(s)
continue
# Skip relays for events to sns
if "Service" in s["Principal"]:
s["Principal"].pop("Service")
if not s["Principal"]:
continue
assert len(s["Principal"]) == 1, "Too many principals %s" % s
# At this point principal is required?
p = (
isinstance(s["Principal"], basestring)
and s["Principal"]
or s["Principal"]["AWS"]
)
p = isinstance(p, basestring) and (p,) or p
for pid in p:
if pid == "*":
principal_ok = False
elif pid.startswith("arn:aws:iam::cloudfront:user"):
continue
else:
account_id = _account(pid)
if account_id not in allowed_accounts:
principal_ok = False
if principal_ok:
continue
if "Condition" not in s:
violations.append(s)
continue
if "StringEquals" in s["Condition"]:
# Default SNS Policy does this
if "AWS:SourceOwner" in s["Condition"]["StringEquals"]:
so = s["Condition"]["StringEquals"]["AWS:SourceOwner"]
if not isinstance(so, list):
so = [so]
so = [pso for pso in so if pso not in allowed_accounts]
if not so:
principal_ok = True
# Default keys in kms do this
if "kms:CallerAccount" in s["Condition"]["StringEquals"]:
so = s["Condition"]["StringEquals"]["kms:CallerAccount"]
if so in allowed_accounts:
principal_ok = True
## BEGIN S3 WhiteList
## Note these are transient white lists for s3
## we need to refactor this to verify ip against a
## cidr white list, and verify vpce/vpc against the
## accounts.
# For now allow vpce/vpc conditions as sufficient on s3
if s["Condition"]["StringEquals"].keys()[0] in (
"aws:sourceVpce",
"aws:sourceVpce",
):
principal_ok = True
if "StringLike" in s["Condition"]:
# For now allow vpce/vpc conditions as sufficient on s3
if s["Condition"]["StringLike"].keys()[0].lower() == "aws:sourcevpce":
principal_ok = True
if "ForAnyValue:StringLike" in s["Condition"]:
if (
s["Condition"]["ForAnyValue:StringLike"].keys()[0].lower()
== "aws:sourcevpce"
):
principal_ok = True
if "IpAddress" in s["Condition"]:
principal_ok = True
## END S3 WhiteList
if "ArnEquals" in s["Condition"]:
# Other valid arn equals? / are invalids allowed?
# duplicate block from below, inline closure func
# would remove, but slower, else move to class eval
principal_ok = True
keys = ("aws:SourceArn", "AWS:SourceArn")
for k in keys:
if k in s["Condition"]["ArnEquals"]:
v = s["Condition"]["ArnEquals"][k]
if v is None:
violations.append(s)
else:
v = isinstance(v, basestring) and (v,) or v
for arn in v:
aid = _account(arn)
if aid not in allowed_accounts:
violations.append(s)
if "ArnLike" in s["Condition"]:
# Other valid arn equals? / are invalids allowed?
v = s["Condition"]["ArnLike"]["aws:SourceArn"]
v = isinstance(v, basestring) and (v,) or v
principal_ok = True
for arn in v:
aid = _account(arn)
if aid not in allowed_accounts:
violations.append(s)
if not principal_ok:
violations.append(s)
return violations
|
https://github.com/cloud-custodian/cloud-custodian/issues/805
|
2016-12-26 00:27:01,755 - ERROR - custodian.output - Error while executing policy
Traceback (most recent call last):
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/policy.py", line 167, in run
resources = self.policy.resource_manager.resources()
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/query.py", line 164, in resources
return self.filter_resources(resources)
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/manager.py", line 63, in filter_resources
resources = f.process(resources, event)
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/filters/iamaccess.py", line 54, in process
return super(CrossAccountAccessFilter, self).process(resources, event)
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/filters/core.py", line 154, in process
return filter(self, resources)
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/filters/iamaccess.py", line 72, in __call__
violations = check_cross_account(p, self.accounts)
File "/usr/local/custodian/local/lib/python2.7/site-packages/c7n/filters/iamaccess.py", line 205, in check_cross_account
v = s['Condition']['ArnLike']['aws:SourceArn']
KeyError: 'aws:SourceArn'
|
KeyError
|
def process(self, resources, event=None):
days = self.data.get("days", 14)
duration = timedelta(days)
self.metric = self.data["name"]
self.end = datetime.utcnow()
self.start = self.end - duration
self.period = int(self.data.get("period", duration.total_seconds()))
self.statistics = self.data.get("statistics", "Average")
self.model = self.manager.get_model()
self.op = OPERATORS[self.data.get("op", "less-than")]
self.value = self.data["value"]
ns = self.data.get("namespace")
if not ns:
ns = getattr(self.model, "default_namespace", None)
if not ns:
ns = self.DEFAULT_NAMESPACE[self.model.service]
self.namespace = ns
self.log.debug("Querying metrics for %d", len(resources))
matched = []
with self.executor_factory(max_workers=3) as w:
futures = []
for resource_set in chunks(resources, 50):
futures.append(w.submit(self.process_resource_set, resource_set))
for f in as_completed(futures):
if f.exception():
self.log.warning("CW Retrieval error: %s" % f.exception())
continue
matched.extend(f.result())
return matched
|
def process(self, resources, event=None):
days = self.data.get("days", 14)
duration = timedelta(days)
self.metric = self.data["name"]
self.end = datetime.utcnow()
self.start = self.end - duration
self.period = int(self.data.get("period", duration.total_seconds()))
self.statistics = self.data.get("statistics", "Average")
self.model = self.manager.query.resolve(self.manager.resource_type)
self.op = OPERATORS[self.data.get("op", "less-than")]
self.value = self.data["value"]
ns = self.data.get("namespace")
if not ns:
ns = getattr(self.model, "default_namespace", None)
if not ns:
ns = self.DEFAULT_NAMESPACE[self.model.service]
self.namespace = ns
self.log.debug("Querying metrics for %d", len(resources))
matched = []
with self.executor_factory(max_workers=3) as w:
futures = []
for resource_set in chunks(resources, 50):
futures.append(w.submit(self.process_resource_set, resource_set))
for f in as_completed(futures):
if f.exception():
self.log.warning("CW Retrieval error: %s" % f.exception())
continue
matched.extend(f.result())
return matched
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def augment(self, resources):
"""EC2 API and AWOL Tags
While ec2 api generally returns tags when doing describe_x on for
various resources, it may also silently fail to do so unless a tag
is used as a filter.
See footnote on http://goo.gl/YozD9Q for official documentation.
Apriori we may be using custodian to ensure tags (including
name), so there isn't a good default to ensure that we will
always get tags from describe_ calls.
"""
# First if we're in event based lambda go ahead and skip this,
# tags can't be trusted in ec2 instances anyways.
if not resources or self.data.get("mode", {}).get("type", "") in (
"cloudtrail",
"ec2-instance-state",
):
return resources
# AWOL detector, so we don't make extraneous api calls.
resource_count = len(resources)
search_count = min(int(resource_count % 0.05) + 1, 5)
if search_count > resource_count:
search_count = resource_count
found = False
for r in random.sample(resources, search_count):
if "Tags" in r:
found = True
break
if found:
return resources
# Okay go and do the tag lookup
client = utils.local_session(self.session_factory).client("ec2")
tag_set = client.describe_tags(
Filters=[{"Name": "resource-type", "Values": ["instance"]}]
)["Tags"]
resource_tags = {}
for t in tag_set:
t.pop("ResourceType")
rid = t.pop("ResourceId")
resource_tags.setdefault(rid, []).append(t)
m = self.get_model()
for r in resources:
r["Tags"] = resource_tags.get(r[m.id], ())
return resources
|
def augment(self, resources):
"""EC2 API and AWOL Tags
While ec2 api generally returns tags when doing describe_x on for
various resources, it may also silently fail to do so unless a tag
is used as a filter.
See footnote on http://goo.gl/YozD9Q for official documentation.
Apriori we may be using custodian to ensure tags (including
name), so there isn't a good default to ensure that we will
always get tags from describe_ calls.
"""
# First if we're in event based lambda go ahead and skip this,
# tags can't be trusted in ec2 instances anyways.
if not resources or self.data.get("mode", {}).get("type", "") in (
"cloudtrail",
"ec2-instance-state",
):
return resources
# AWOL detector, so we don't make extraneous api calls.
resource_count = len(resources)
search_count = min(int(resource_count % 0.05) + 1, 5)
if search_count > resource_count:
search_count = resource_count
found = False
for r in random.sample(resources, search_count):
if "Tags" in r:
found = True
break
if found:
return resources
# Okay go and do the tag lookup
client = utils.local_session(self.session_factory).client("ec2")
tag_set = client.describe_tags(
Filters=[{"Name": "resource-type", "Values": ["instance"]}]
)["Tags"]
resource_tags = {}
for t in tag_set:
t.pop("ResourceType")
rid = t.pop("ResourceId")
resource_tags.setdefault(rid, []).append(t)
m = self.query.resolve(self.resource_type)
for r in resources:
r["Tags"] = resource_tags.get(r[m.id], ())
return resources
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def augment(self, resources):
_rds_tags(
self.get_model(),
resources,
self.session_factory,
self.executor_factory,
self.arn_generator,
)
return resources
|
def augment(self, resources):
session = local_session(self.session_factory)
if self.account_id is None:
self.account_id = get_account_id(session)
_rds_tags(
self.query.resolve(self.resource_type),
resources,
self.session_factory,
self.executor_factory,
self.account_id,
region=self.config.region,
)
return resources
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def _rds_tags(model, dbs, session_factory, executor_factory, arn_generator):
"""Augment rds instances with their respective tags."""
def process_tags(db):
client = local_session(session_factory).client("rds")
arn = arn_generator.generate(db[model.id])
tag_list = client.list_tags_for_resource(ResourceName=arn)["TagList"]
db["Tags"] = tag_list or []
return db
# Rds maintains a low api call limit, so this can take some time :-(
with executor_factory(max_workers=1) as w:
list(w.map(process_tags, dbs))
|
def _rds_tags(model, dbs, session_factory, executor_factory, account_id, region):
"""Augment rds instances with their respective tags."""
def process_tags(db):
client = local_session(session_factory).client("rds")
arn = "arn:aws:rds:%s:%s:db:%s" % (region, account_id, db[model.id])
tag_list = client.list_tags_for_resource(ResourceName=arn)["TagList"]
db["Tags"] = tag_list or []
return db
# Rds maintains a low api call limit, so this can take some time :-(
with executor_factory(max_workers=1) as w:
list(w.map(process_tags, dbs))
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process_tags(db):
client = local_session(session_factory).client("rds")
arn = arn_generator.generate(db[model.id])
tag_list = client.list_tags_for_resource(ResourceName=arn)["TagList"]
db["Tags"] = tag_list or []
return db
|
def process_tags(db):
client = local_session(session_factory).client("rds")
arn = "arn:aws:rds:%s:%s:db:%s" % (region, account_id, db[model.id])
tag_list = client.list_tags_for_resource(ResourceName=arn)["TagList"]
db["Tags"] = tag_list or []
return db
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process_resource_set(self, resources, tags):
client = local_session(self.manager.session_factory).client("rds")
for r in resources:
arn = self.manager.arn_generator.generate(r["DBInstanceIdentifier"])
client.add_tags_to_resource(ResourceName=arn, Tags=tags)
|
def process_resource_set(self, resources, tags):
client = local_session(self.manager.session_factory).client("rds")
for r in resources:
arn = "arn:aws:rds:%s:%s:db:%s" % (
self.manager.config.region,
self.manager.account_id,
r["DBInstanceIdentifier"],
)
client.add_tags_to_resource(ResourceName=arn, Tags=tags)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process_resource_set(self, resources, tags):
client = local_session(self.manager.session_factory).client("rds")
for r in resources:
arn = self.manager.arn_generator.generate(r["DBInstanceIdentifier"])
client.add_tags_to_resource(ResourceName=arn, Tags=tags)
|
def process_resource_set(self, resources, tags):
client = local_session(self.manager.session_factory).client("rds")
for r in resources:
arn = "arn:aws:rds:%s:%s:db:%s" % (
self.manager.config.region,
self.manager.account_id,
r["DBInstanceIdentifier"],
)
client.add_tags_to_resource(ResourceName=arn, Tags=tags)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process_resource_set(self, resources, tag_keys):
client = local_session(self.manager.session_factory).client("rds")
for r in resources:
arn = self.manager.arn_generator.generate(r["DBInstanceIdentifier"])
client.remove_tags_from_resource(ResourceName=arn, TagKeys=tag_keys)
|
def process_resource_set(self, resources, tag_keys):
client = local_session(self.manager.session_factory).client("rds")
for r in resources:
arn = "arn:aws:rds:%s:%s:db:%s" % (
self.manager.config.region,
self.manager.account_id,
r["DBInstanceIdentifier"],
)
client.remove_tags_from_resource(ResourceName=arn, TagKeys=tag_keys)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def register_tags(filters, actions):
filters.register("marked-for-op", TagActionFilter)
filters.register("tag-count", TagCountFilter)
actions.register("mark-for-op", TagDelayedAction)
actions.register("tag-trim", TagTrim)
actions.register("mark", Tag)
actions.register("tag", Tag)
actions.register("unmark", RemoveTag)
actions.register("untag", RemoveTag)
actions.register("remove-tag", RemoveTag)
|
def register_tags(filters, actions, id_key):
filters.register("marked-for-op", TagActionFilter)
filters.register("tag-count", TagCountFilter)
actions.register("mark-for-op", TagDelayedAction.set_id(id_key))
actions.register("tag-trim", TagTrim.set_id(id_key))
tag = Tag.set_id(id_key)
actions.register("mark", tag)
actions.register("tag", tag)
remove_tag = RemoveTag.set_id(id_key)
actions.register("unmark", remove_tag)
actions.register("untag", remove_tag)
actions.register("remove-tag", remove_tag)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process(self, resources):
self.id_key = self.manager.get_model().id
self.preserve = set(self.data.get("preserve"))
self.space = self.data.get("space", 3)
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_resource, resources))
|
def process(self, resources):
self.preserve = set(self.data.get("preserve"))
self.space = self.data.get("space", 3)
with self.executor_factory(max_workers=3) as w:
list(w.map(self.process_resource, resources))
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process(self, resources):
self.id_key = self.manager.get_model().id
# Legacy
msg = self.data.get("msg")
msg = self.data.get("value") or msg
tag = self.data.get("tag", DEFAULT_TAG)
tag = self.data.get("key") or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get("tags")
if tags is None:
tags = []
else:
tags = [{"Key": k, "Value": v} for k, v in tags.items()]
if msg:
tags.append({"Key": tag, "Value": msg})
batch_size = self.data.get("batch_size", self.batch_size)
with self.executor_factory(max_workers=self.concurrency) as w:
futures = []
for resource_set in utils.chunks(resources, size=batch_size):
futures.append(w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception removing tags: %s on resources:%s \n %s"
% (
tags,
", ".join([r[self.id_key] for r in resource_set]),
f.exception(),
)
)
|
def process(self, resources):
# Legacy
msg = self.data.get("msg")
msg = self.data.get("value") or msg
tag = self.data.get("tag", DEFAULT_TAG)
tag = self.data.get("key") or tag
# Support setting multiple tags in a single go with a mapping
tags = self.data.get("tags")
if tags is None:
tags = []
else:
tags = [{"Key": k, "Value": v} for k, v in tags.items()]
if msg:
tags.append({"Key": tag, "Value": msg})
batch_size = self.data.get("batch_size", self.batch_size)
with self.executor_factory(max_workers=self.concurrency) as w:
futures = []
for resource_set in utils.chunks(resources, size=batch_size):
futures.append(w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception removing tags: %s on resources:%s \n %s"
% (tags, self.id_key, f.exception())
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process(self, resources):
self.id_key = self.manager.get_model().id
tags = self.data.get("tags", [DEFAULT_TAG])
batch_size = self.data.get("batch_size", self.batch_size)
with self.executor_factory(max_workers=self.concurrency) as w:
futures = []
for resource_set in utils.chunks(resources, size=batch_size):
futures.append(w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception removing tags: %s on resources:%s \n %s"
% (
tags,
", ".join([r[self.id_key] for r in resource_set]),
f.exception(),
)
)
|
def process(self, resources):
tags = self.data.get("tags", [DEFAULT_TAG])
batch_size = self.data.get("batch_size", self.batch_size)
with self.executor_factory(max_workers=self.concurrency) as w:
futures = []
for resource_set in utils.chunks(resources, size=batch_size):
futures.append(w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception removing tags: %s on resources:%s \n %s"
% (tags, self.id_key, f.exception())
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def process(self, resources):
self.id_key = self.manager.get_model().id
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get("msg", "Resource does not meet policy: {op}@{action_date}")
op = self.data.get("op", "stop")
tag = self.data.get("tag", DEFAULT_TAG)
date = self.data.get("days", 4)
n = datetime.now(tz=tzutc())
action_date = n + timedelta(days=date)
msg = msg_tmpl.format(op=op, action_date=action_date.strftime("%Y/%m/%d"))
self.log.info(
"Tagging %d resources for %s on %s"
% (len(resources), op, action_date.strftime("%Y/%m/%d"))
)
tags = [{"Key": tag, "Value": msg}]
with self.executor_factory(max_workers=2) as w:
futures = []
for resource_set in utils.chunks(resources, size=self.batch_size):
futures.append(w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception tagging resource set: %s \n %s" % (tags, f.exception())
)
|
def process(self, resources):
# Move this to policy? / no resources bypasses actions?
if not len(resources):
return
msg_tmpl = self.data.get("msg", "Resource does not meet policy: {op}@{action_date}")
op = self.data.get("op", "stop")
tag = self.data.get("tag", DEFAULT_TAG)
date = self.data.get("days", 4)
n = datetime.now(tz=tzutc())
action_date = n + timedelta(days=date)
msg = msg_tmpl.format(op=op, action_date=action_date.strftime("%Y/%m/%d"))
self.log.info(
"Tagging %d resources for %s on %s"
% (len(resources), op, action_date.strftime("%Y/%m/%d"))
)
tags = [{"Key": tag, "Value": msg}]
with self.executor_factory(max_workers=2) as w:
futures = []
for resource_set in utils.chunks(resources, size=self.batch_size):
futures.append(w.submit(self.process_resource_set, resource_set, tags))
for f in as_completed(futures):
if f.exception():
self.log.error(
"Exception tagging resource set: %s \n %s" % (tags, f.exception())
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/273
|
2016-07-06 17:38:43,679: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/development/workspace/cloud-custodian/c7n/policy.py", line 213, in poll
results = a.process(resources)
File "/development/workspace/cloud-custodian/c7n/tags.py", line 339, in process
tags, self.id_key, f.exception()))
File "/development/workspace/cloud-custodian/c7n/tags.py", line 60, in id_key
raise NotImplementedError()
NotImplementedError
|
NotImplementedError
|
def _getEmbedding(self, obj):
parent = obj.parent
if not parent:
# obj is probably dead.
return None
# optimisation: Passing an Offsets position checks nCharacters, which is an extra call we don't need.
info = self._makeRawTextInfo(parent, textInfos.POSITION_FIRST)
if isinstance(info, FakeEmbeddingTextInfo):
info._startOffset = obj.indexInParent
info._endOffset = info._startOffset + 1
return info
try:
hl = obj.IAccessibleObject.QueryInterface(
IAccessibleHandler.IAccessibleHyperlink
)
hlOffset = hl.startIndex
info._startOffset = hlOffset
info._endOffset = hlOffset + 1
return info
except COMError:
pass
return None
|
def _getEmbedding(self, obj):
# optimisation: Passing an Offsets position checks nCharacters, which is an extra call we don't need.
info = self._makeRawTextInfo(obj.parent, textInfos.POSITION_FIRST)
if isinstance(info, FakeEmbeddingTextInfo):
info._startOffset = obj.indexInParent
info._endOffset = info._startOffset + 1
return info
try:
hl = obj.IAccessibleObject.QueryInterface(
IAccessibleHandler.IAccessibleHyperlink
)
hlOffset = hl.startIndex
info._startOffset = hlOffset
info._endOffset = hlOffset + 1
return info
except COMError:
pass
return None
|
https://github.com/nvaccess/nvda/issues/5454
|
ERROR - scriptHandler.executeScript (20:38:17):
error executing script: <bound method Dynamic_EditableTextWithAutoSelectDetectionEditorMozillaIAccessible.script_caret_newLine of <NVDAObjects.Dynamic_EditableTextWithAutoSelectDetectionEditorMozillaIAccessible object at 0x02ABF190>> with gesture u'enter'
Traceback (most recent call last):
File "scriptHandler.pyc", line 186, in executeScript
File "editableText.pyc", line 128, in script_caret_newLine
File "speech.pyc", line 673, in speakTextInfo
File "NVDAObjects\IAccessible\ia2TextMozilla.pyc", line 291, in getTextWithFields
File "NVDAObjects\IAccessible\ia2TextMozilla.pyc", line 240, in _getText
AttributeError: 'NoneType' object has no attribute 'obj'
DEBUGWARNING - NVDAObjects.IAccessible.Dynamic_EditableTextWithAutoSelectDetectionEditorMozillaIAccessible._get_IA2Attributes (20:38:17):
IAccessibleObject.attributes COMError (-2147220995, 'Object is not connected to server', (None, None, None, 0, None))
DEBUGWARNING - NVDAObjects.IAccessible.IA2TextTextInfo._getCaretOffset (20:38:17):
IAccessibleText::caretOffset failed
Traceback (most recent call last):
File "NVDAObjects\IAccessible\__init__.pyc", line 151, in _getCaretOffset
COMError: (-2147220995, 'Object is not connected to server', (None, None, None, 0, None))
|
AttributeError
|
def _getText(self, withFields, formatConfig=None):
fields = []
if self.isCollapsed:
return fields
if withFields:
# Get the initial control fields.
controlStack = []
rootObj = self.obj
obj = self._startObj
ti = self._start
cannotBeStart = False
while obj and obj != rootObj:
field = self._getControlFieldForObject(obj)
if field:
if ti._startOffset == 0:
if not cannotBeStart:
field["_startOfNode"] = True
else:
# We're not at the start of this object, which also means we're not at the start of any ancestors.
cannotBeStart = True
fields.insert(0, textInfos.FieldCommand("controlStart", field))
controlStack.insert(0, field)
ti = self._getEmbedding(obj)
if not ti:
log.debugWarning(
"_getEmbedding returned None while getting initial fields. "
"Object probably dead."
)
return []
obj = ti.obj
else:
controlStack = None
# Get the fields for start.
fields += list(self._iterRecursiveText(self._start, controlStack, formatConfig))
if not fields:
# We're not getting anything, so the object must be dead.
# (We already handled collapsed above.)
return fields
obj = self._startObj
while fields[-1] is not None:
# The end hasn't yet been reached, which means it isn't a descendant of obj.
# Therefore, continue from where obj was embedded.
if withFields:
try:
field = controlStack.pop()
except IndexError:
# We're trying to walk up past our root. This can happen if a descendant
# object within the range died, in which case _iterRecursiveText will
# never reach our end object and thus won't yield None. This means this
# range is invalid, so just return nothing.
log.debugWarning(
"Tried to walk up past the root. Objects probably dead."
)
return []
if field:
# This object had a control field.
field["_endOfNode"] = True
fields.append(textInfos.FieldCommand("controlEnd", None))
ti = self._getEmbedding(obj)
if not ti:
log.debugWarning(
"_getEmbedding returned None while ascending to get more text. "
"Object probably dead."
)
return []
obj = ti.obj
if ti.move(textInfos.UNIT_OFFSET, 1) == 0:
# There's no more text in this object.
continue
ti.setEndPoint(self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd")
fields.extend(self._iterRecursiveText(ti, controlStack, formatConfig))
del fields[-1]
if withFields:
# Determine whether the range covers the end of any ancestors of endObj.
obj = self._endObj
ti = self._end
while obj and obj != rootObj:
field = controlStack.pop()
if field:
fields.append(textInfos.FieldCommand("controlEnd", None))
if (
ti.compareEndPoints(
self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd"
)
== 0
):
if field:
field["_endOfNode"] = True
else:
# We're not at the end of this object, which also means we're not at the end of any ancestors.
break
ti = self._getEmbedding(obj)
obj = ti.obj
return fields
|
def _getText(self, withFields, formatConfig=None):
fields = []
if self.isCollapsed:
return fields
if withFields:
# Get the initial control fields.
controlStack = []
rootObj = self.obj
obj = self._startObj
ti = self._start
cannotBeStart = False
while obj and obj != rootObj:
field = self._getControlFieldForObject(obj)
if field:
if ti._startOffset == 0:
if not cannotBeStart:
field["_startOfNode"] = True
else:
# We're not at the start of this object, which also means we're not at the start of any ancestors.
cannotBeStart = True
fields.insert(0, textInfos.FieldCommand("controlStart", field))
controlStack.insert(0, field)
ti = self._getEmbedding(obj)
obj = ti.obj
else:
controlStack = None
# Get the fields for start.
fields += list(self._iterRecursiveText(self._start, controlStack, formatConfig))
if not fields:
# We're not getting anything, so the object must be dead.
# (We already handled collapsed above.)
return fields
obj = self._startObj
while fields[-1] is not None:
# The end hasn't yet been reached, which means it isn't a descendant of obj.
# Therefore, continue from where obj was embedded.
if withFields:
try:
field = controlStack.pop()
except IndexError:
# We're trying to walk up past our root. This can happen if a descendant
# object within the range died, in which case _iterRecursiveText will
# never reach our end object and thus won't yield None. This means this
# range is invalid, so just return nothing.
log.debugWarning(
"Tried to walk up past the root. Objects probably dead."
)
return []
if field:
# This object had a control field.
field["_endOfNode"] = True
fields.append(textInfos.FieldCommand("controlEnd", None))
ti = self._getEmbedding(obj)
obj = ti.obj
if ti.move(textInfos.UNIT_OFFSET, 1) == 0:
# There's no more text in this object.
continue
ti.setEndPoint(self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd")
fields.extend(self._iterRecursiveText(ti, controlStack, formatConfig))
del fields[-1]
if withFields:
# Determine whether the range covers the end of any ancestors of endObj.
obj = self._endObj
ti = self._end
while obj and obj != rootObj:
field = controlStack.pop()
if field:
fields.append(textInfos.FieldCommand("controlEnd", None))
if (
ti.compareEndPoints(
self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd"
)
== 0
):
if field:
field["_endOfNode"] = True
else:
# We're not at the end of this object, which also means we're not at the end of any ancestors.
break
ti = self._getEmbedding(obj)
obj = ti.obj
return fields
|
https://github.com/nvaccess/nvda/issues/5454
|
ERROR - scriptHandler.executeScript (20:38:17):
error executing script: <bound method Dynamic_EditableTextWithAutoSelectDetectionEditorMozillaIAccessible.script_caret_newLine of <NVDAObjects.Dynamic_EditableTextWithAutoSelectDetectionEditorMozillaIAccessible object at 0x02ABF190>> with gesture u'enter'
Traceback (most recent call last):
File "scriptHandler.pyc", line 186, in executeScript
File "editableText.pyc", line 128, in script_caret_newLine
File "speech.pyc", line 673, in speakTextInfo
File "NVDAObjects\IAccessible\ia2TextMozilla.pyc", line 291, in getTextWithFields
File "NVDAObjects\IAccessible\ia2TextMozilla.pyc", line 240, in _getText
AttributeError: 'NoneType' object has no attribute 'obj'
DEBUGWARNING - NVDAObjects.IAccessible.Dynamic_EditableTextWithAutoSelectDetectionEditorMozillaIAccessible._get_IA2Attributes (20:38:17):
IAccessibleObject.attributes COMError (-2147220995, 'Object is not connected to server', (None, None, None, 0, None))
DEBUGWARNING - NVDAObjects.IAccessible.IA2TextTextInfo._getCaretOffset (20:38:17):
IAccessibleText::caretOffset failed
Traceback (most recent call last):
File "NVDAObjects\IAccessible\__init__.pyc", line 151, in _getCaretOffset
COMError: (-2147220995, 'Object is not connected to server', (None, None, None, 0, None))
|
AttributeError
|
def _getText(self, withFields, formatConfig=None):
fields = []
if self.isCollapsed:
return fields
if withFields:
# Get the initial control fields.
controlStack = []
rootObj = self.obj
obj = self._startObj
ti = self._start
cannotBeStart = False
while obj and obj != rootObj:
field = self._getControlFieldForObject(obj)
if field:
if ti._startOffset == 0:
if not cannotBeStart:
field["_startOfNode"] = True
else:
# We're not at the start of this object, which also means we're not at the start of any ancestors.
cannotBeStart = True
fields.insert(0, textInfos.FieldCommand("controlStart", field))
controlStack.insert(0, field)
ti = self._getEmbedding(obj)
obj = ti.obj
else:
controlStack = None
# Get the fields for start.
fields += list(self._iterRecursiveText(self._start, controlStack, formatConfig))
if not fields:
# We're not getting anything, so the object must be dead.
# (We already handled collapsed above.)
return fields
obj = self._startObj
while fields[-1] is not None:
# The end hasn't yet been reached, which means it isn't a descendant of obj.
# Therefore, continue from where obj was embedded.
if withFields:
try:
field = controlStack.pop()
except IndexError:
# We're trying to walk up past our root. This can happen if a descendant
# object within the range died, in which case _iterRecursiveText will
# never reach our end object and thus won't yield None. This means this
# range is invalid, so just return nothing.
log.debugWarning(
"Tried to walk up past the root. Objects probably dead."
)
return []
if field:
# This object had a control field.
field["_endOfNode"] = True
fields.append(textInfos.FieldCommand("controlEnd", None))
ti = self._getEmbedding(obj)
obj = ti.obj
if ti.move(textInfos.UNIT_OFFSET, 1) == 0:
# There's no more text in this object.
continue
ti.setEndPoint(self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd")
fields.extend(self._iterRecursiveText(ti, controlStack, formatConfig))
del fields[-1]
if withFields:
# Determine whether the range covers the end of any ancestors of endObj.
obj = self._endObj
ti = self._end
while obj and obj != rootObj:
field = controlStack.pop()
if field:
fields.append(textInfos.FieldCommand("controlEnd", None))
if (
ti.compareEndPoints(
self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd"
)
== 0
):
if field:
field["_endOfNode"] = True
else:
# We're not at the end of this object, which also means we're not at the end of any ancestors.
break
ti = self._getEmbedding(obj)
obj = ti.obj
return fields
|
def _getText(self, withFields, formatConfig=None):
fields = []
if self.isCollapsed:
return fields
if withFields:
# Get the initial control fields.
controlStack = []
rootObj = self.obj
obj = self._startObj
ti = self._start
cannotBeStart = False
while obj and obj != rootObj:
field = self._getControlFieldForObject(obj)
if field:
if ti._startOffset == 0:
if not cannotBeStart:
field["_startOfNode"] = True
else:
# We're not at the start of this object, which also means we're not at the start of any ancestors.
cannotBeStart = True
fields.insert(0, textInfos.FieldCommand("controlStart", field))
controlStack.insert(0, field)
ti = self._getEmbedding(obj)
obj = ti.obj
else:
controlStack = None
# Get the fields for start.
fields += list(self._iterRecursiveText(self._start, controlStack, formatConfig))
if not fields:
# We're not getting anything, so the object must be dead.
# (We already handled collapsed above.)
return fields
obj = self._startObj
while fields[-1] is not None:
# The end hasn't yet been reached, which means it isn't a descendant of obj.
# Therefore, continue from where obj was embedded.
if withFields:
field = controlStack.pop()
if field:
# This object had a control field.
field["_endOfNode"] = True
fields.append(textInfos.FieldCommand("controlEnd", None))
ti = self._getEmbedding(obj)
obj = ti.obj
if ti.move(textInfos.UNIT_OFFSET, 1) == 0:
# There's no more text in this object.
continue
ti.setEndPoint(self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd")
fields.extend(self._iterRecursiveText(ti, controlStack, formatConfig))
del fields[-1]
if withFields:
# Determine whether the range covers the end of any ancestors of endObj.
obj = self._endObj
ti = self._end
while obj and obj != rootObj:
field = controlStack.pop()
if field:
fields.append(textInfos.FieldCommand("controlEnd", None))
if (
ti.compareEndPoints(
self._makeRawTextInfo(obj, textInfos.POSITION_ALL), "endToEnd"
)
== 0
):
if field:
field["_endOfNode"] = True
else:
# We're not at the end of this object, which also means we're not at the end of any ancestors.
break
ti = self._getEmbedding(obj)
obj = ti.obj
return fields
|
https://github.com/nvaccess/nvda/issues/10619
|
ERROR - core.CorePump.run (08:48:53.754) - MainThread (9024):
errors in this core pump cycle
Traceback (most recent call last):
File "core.pyc", line 517, in run
File "braille.pyc", line 2216, in pumpAll
File "braille.pyc", line 1952, in handlePendingCaretUpdate
File "braille.pyc", line 1958, in _doCursorMove
File "braille.pyc", line 1011, in update
File "braille.pyc", line 849, in _addTextWithFields
File "NVDAObjects\IAccessible\ia2TextMozilla.pyc", line 342, in getTextWithFields
File "NVDAObjects\IAccessible\ia2TextMozilla.pyc", line 305, in _getText
IndexError: pop from empty list
|
IndexError
|
def _getAvailableAddonsFromPath(path):
"""Gets available add-ons from path.
An addon is only considered available if the manifest file is loaded with no errors.
@param path: path from where to find addon directories.
@type path: string
@rtype generator of Addon instances
"""
log.debug("Listing add-ons from %s", path)
for p in os.listdir(path):
if p.endswith(DELETEDIR_SUFFIX):
continue
addon_path = os.path.join(path, p)
if os.path.isdir(addon_path) and addon_path not in (".", ".."):
if not len(os.listdir(addon_path)):
log.error("Error loading Addon from path: %s", addon_path)
else:
log.debug("Loading add-on from %s", addon_path)
try:
a = Addon(addon_path)
name = a.manifest["name"]
log.debug(
"Found add-on {name} - {a.version}."
" Requires API: {a.minimumNVDAVersion}."
" Last-tested API: {a.lastTestedNVDAVersion}".format(
name=name, a=a
)
)
if a.isDisabled:
log.debug("Disabling add-on %s", name)
if not isAddonCompatible(a):
log.debugWarning("Add-on %s is considered incompatible", name)
_blockedAddons.add(a.name)
yield a
except:
log.error(
"Error loading Addon from path: %s", addon_path, exc_info=True
)
|
def _getAvailableAddonsFromPath(path):
"""Gets available add-ons from path.
An addon is only considered available if the manifest file is loaded with no errors.
@param path: path from where to find addon directories.
@type path: string
@rtype generator of Addon instances
"""
log.debug("Listing add-ons from %s", path)
for p in os.listdir(path):
if p.endswith(DELETEDIR_SUFFIX):
continue
addon_path = os.path.join(path, p)
if os.path.isdir(addon_path) and addon_path not in (".", ".."):
log.debug("Loading add-on from %s", addon_path)
try:
a = Addon(addon_path)
name = a.manifest["name"]
log.debug(
"Found add-on {name} - {a.version}."
" Requires API: {a.minimumNVDAVersion}."
" Last-tested API: {a.lastTestedNVDAVersion}".format(name=name, a=a)
)
if a.isDisabled:
log.debug("Disabling add-on %s", name)
if not isAddonCompatible(a):
log.debugWarning("Add-on %s is considered incompatible", name)
_blockedAddons.add(a.name)
yield a
except:
log.error(
"Error loading Addon from path: %s", addon_path, exc_info=True
)
|
https://github.com/nvaccess/nvda/issues/7686
|
INFO - __main__ (13:35:20.844):
Starting NVDA
INFO - core.main (13:35:21.078):
Config dir: C:\Users\Username\AppData\Roaming\nvda
INFO - config.ConfigManager._loadConfig (13:35:21.078):
Loading config: C:\Users\Username\AppData\Roaming\nvda\nvda.ini
INFO - core.main (13:35:21.128):
NVDA version next-14528,b476430a
INFO - core.main (13:35:21.128):
Using Windows version 10.0.15063 workstation
INFO - core.main (13:35:21.128):
Using Python version 2.7.13 (v2.7.13:a06454b1afa1, Dec 17 2016, 20:42:59) [MSC v.1500 32 bit (Intel)]
INFO - core.main (13:35:21.128):
Using comtypes version 0.6.2
CRITICAL - __main__ (13:35:21.134):
core failure
Traceback (most recent call last):
File "nvda.pyw", line 202, in <module>
File "core.pyc", line 202, in main
File "addonHandler.pyc", line 124, in initialize
File "addonHandler.pyc", line 74, in completePendingAddonRemoves
File "addonHandler.pyc", line 215, in __init__
IOError: [Errno 2] No such file or directory: u'C:\\Users\\Username\\AppData\\Roaming\\nvda\\addons\\ocr\\manifest.ini'
|
IOError
|
def _getFormatFieldAndOffsets(self, offset, formatConfig, calculateOffsets=True):
formatField = textInfos.FormatField()
versionMajor = int(self.obj.excelCellObject.Application.Version.split(".")[0])
if versionMajor >= excel2010VersionMajor:
# displayFormat includes conditional formatting calculated at runtime
# However it is only available in Excel 2010 and higher
cellObj = self.obj.excelCellObject.DisplayFormat
else:
cellObj = self.obj.excelCellObject
fontObj = cellObj.font
if formatConfig["reportAlignment"]:
value = alignmentLabels.get(self.obj.excelCellObject.horizontalAlignment)
if value:
formatField["text-align"] = value
value = alignmentLabels.get(self.obj.excelCellObject.verticalAlignment)
if value:
formatField["vertical-align"] = value
if formatConfig["reportFontName"]:
formatField["font-name"] = fontObj.name
if formatConfig["reportFontSize"]:
formatField["font-size"] = str(fontObj.size)
if formatConfig["reportFontAttributes"]:
formatField["bold"] = fontObj.bold
formatField["italic"] = fontObj.italic
underline = fontObj.underline
formatField["underline"] = (
False if underline is None or underline == xlUnderlineStyleNone else True
)
if formatConfig["reportStyle"]:
try:
styleName = self.obj.excelCellObject.style.nameLocal
except COMError:
styleName = None
if styleName:
formatField["style"] = styleName
if formatConfig["reportColor"]:
try:
formatField["color"] = colors.RGB.fromCOLORREF(int(fontObj.color))
except COMError:
pass
try:
pattern = cellObj.Interior.Pattern
formatField["background-pattern"] = backgroundPatternLabels.get(pattern)
if pattern in (xlPatternLinearGradient, xlPatternRectangularGradient):
formatField["background-color"] = colors.RGB.fromCOLORREF(
int(cellObj.Interior.Gradient.ColorStops(1).Color)
)
formatField["background-color2"] = colors.RGB.fromCOLORREF(
int(cellObj.Interior.Gradient.ColorStops(2).Color)
)
else:
formatField["background-color"] = colors.RGB.fromCOLORREF(
int(cellObj.interior.color)
)
except COMError:
pass
if formatConfig["reportBorderStyle"]:
borders = None
hasMergedCells = self.obj.excelCellObject.mergeCells
if hasMergedCells:
mergeArea = self.obj.excelCellObject.mergeArea
try:
borders = (
mergeArea.DisplayFormat.borders
) # for later versions of office
except COMError:
borders = mergeArea.borders # for office 2007
else:
borders = cellObj.borders
try:
formatField["border-style"] = getCellBorderStyleDescription(
borders, reportBorderColor=formatConfig["reportBorderColor"]
)
except COMError:
pass
return formatField, (self._startOffset, self._endOffset)
|
def _getFormatFieldAndOffsets(self, offset, formatConfig, calculateOffsets=True):
formatField = textInfos.FormatField()
if self.obj.excelCellObject.Application.Version > "12.0":
cellObj = self.obj.excelCellObject.DisplayFormat
else:
cellObj = self.obj.excelCellObject
fontObj = cellObj.font
if formatConfig["reportAlignment"]:
value = alignmentLabels.get(self.obj.excelCellObject.horizontalAlignment)
if value:
formatField["text-align"] = value
value = alignmentLabels.get(self.obj.excelCellObject.verticalAlignment)
if value:
formatField["vertical-align"] = value
if formatConfig["reportFontName"]:
formatField["font-name"] = fontObj.name
if formatConfig["reportFontSize"]:
formatField["font-size"] = str(fontObj.size)
if formatConfig["reportFontAttributes"]:
formatField["bold"] = fontObj.bold
formatField["italic"] = fontObj.italic
underline = fontObj.underline
formatField["underline"] = (
False if underline is None or underline == xlUnderlineStyleNone else True
)
if formatConfig["reportStyle"]:
try:
styleName = self.obj.excelCellObject.style.nameLocal
except COMError:
styleName = None
if styleName:
formatField["style"] = styleName
if formatConfig["reportColor"]:
try:
formatField["color"] = colors.RGB.fromCOLORREF(int(fontObj.color))
except COMError:
pass
try:
pattern = cellObj.Interior.Pattern
formatField["background-pattern"] = backgroundPatternLabels.get(pattern)
if pattern in (xlPatternLinearGradient, xlPatternRectangularGradient):
formatField["background-color"] = colors.RGB.fromCOLORREF(
int(cellObj.Interior.Gradient.ColorStops(1).Color)
)
formatField["background-color2"] = colors.RGB.fromCOLORREF(
int(cellObj.Interior.Gradient.ColorStops(2).Color)
)
else:
formatField["background-color"] = colors.RGB.fromCOLORREF(
int(cellObj.interior.color)
)
except COMError:
pass
if formatConfig["reportBorderStyle"]:
borders = None
hasMergedCells = self.obj.excelCellObject.mergeCells
if hasMergedCells:
mergeArea = self.obj.excelCellObject.mergeArea
try:
borders = (
mergeArea.DisplayFormat.borders
) # for later versions of office
except COMError:
borders = mergeArea.borders # for office 2007
else:
borders = cellObj.borders
try:
formatField["border-style"] = getCellBorderStyleDescription(
borders, reportBorderColor=formatConfig["reportBorderColor"]
)
except COMError:
pass
return formatField, (self._startOffset, self._endOffset)
|
https://github.com/nvaccess/nvda/issues/7243
|
ERROR - eventHandler.executeEvent (09:03:43):
error executing event: gainFocus on <NVDAObjects.window.excel.ExcelCell object at 0x04F92BB0> with extra args of {}
Traceback (most recent call last):
File "eventHandler.pyo", line 143, in executeEvent
File "eventHandler.pyo", line 91, in __init__
File "eventHandler.pyo", line 98, in next
File "C:\Users\User\AppData\Roaming\nvda\addons\remote\globalPlugins\remoteClient\__init__.py", line 341, in event_gainFocus
File "eventHandler.pyo", line 98, in next
File "NVDAObjects\__init__.pyo", line 899, in event_gainFocus
File "NVDAObjects\window\excel.pyo", line 1413, in reportFocus
File "textInfos\offsets.pyo", line 392, in getTextWithFields
File "NVDAObjects\window\excel.pyo", line 948, in _getFormatFieldAndOffsets
File "comtypesMonkeyPatches.pyo", line 37, in new__getattr__
File "comtypes\client\dynamic.pyo", line 93, in __getattr__
File "comtypes\automation.pyo", line 643, in GetIDsOfNames
COMError: (-2147352570, 'Unknown name.', (None, None, None, 0, None))
|
COMError
|
def __init__(self, dispatcher):
"""
Args:
dispatcher (:class:`telegram.ext.Dispatcher`):
"""
if not dispatcher.use_context:
raise ValueError(
"CallbackContext should not be used with a non context aware dispatcher!"
)
self._dispatcher = dispatcher
self._bot_data = dispatcher.bot_data
self._chat_data = None
self._user_data = None
self.args = None
self.matches = None
self.error = None
self.job = None
self.async_args = None
self.async_kwargs = None
|
def __init__(self, dispatcher):
"""
Args:
dispatcher (:class:`telegram.ext.Dispatcher`):
"""
if not dispatcher.use_context:
raise ValueError(
"CallbackContext should not be used with a non context aware dispatcher!"
)
self._dispatcher = dispatcher
self._bot_data = dispatcher.bot_data
self._chat_data = None
self._user_data = None
self.args = None
self.matches = None
self.error = None
self.job = None
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def from_error(cls, update, error, dispatcher, async_args=None, async_kwargs=None):
self = cls.from_update(update, dispatcher)
self.error = error
self.async_args = async_args
self.async_kwargs = async_kwargs
return self
|
def from_error(cls, update, error, dispatcher):
self = cls.from_update(update, dispatcher)
self.error = error
return self
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
callback,
pass_update_queue=False,
pass_job_queue=False,
pattern=None,
pass_groups=False,
pass_groupdict=False,
pass_user_data=False,
pass_chat_data=False,
run_async=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
run_async=run_async,
)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern = pattern
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
|
def __init__(
self,
callback,
pass_update_queue=False,
pass_job_queue=False,
pattern=None,
pass_groups=False,
pass_groupdict=False,
pass_user_data=False,
pass_chat_data=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern = pattern
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
command,
callback,
filters=None,
allow_edited=None,
pass_args=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
run_async=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
run_async=run_async,
)
if isinstance(command, str):
self.command = [command.lower()]
else:
self.command = [x.lower() for x in command]
for comm in self.command:
if not re.match(r"^[\da-z_]{1,32}$", comm):
raise ValueError("Command is not a valid bot command")
if filters:
self.filters = Filters.update.messages & filters
else:
self.filters = Filters.update.messages
if allow_edited is not None:
warnings.warn(
"allow_edited is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if not allow_edited:
self.filters &= ~Filters.update.edited_message
self.pass_args = pass_args
|
def __init__(
self,
command,
callback,
filters=None,
allow_edited=None,
pass_args=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
)
if isinstance(command, str):
self.command = [command.lower()]
else:
self.command = [x.lower() for x in command]
for comm in self.command:
if not re.match(r"^[\da-z_]{1,32}$", comm):
raise ValueError("Command is not a valid bot command")
if filters:
self.filters = Filters.update.messages & filters
else:
self.filters = Filters.update.messages
if allow_edited is not None:
warnings.warn(
"allow_edited is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if not allow_edited:
self.filters &= ~Filters.update.edited_message
self.pass_args = pass_args
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
prefix,
command,
callback,
filters=None,
pass_args=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
run_async=False,
):
self._prefix = list()
self._command = list()
self._commands = list()
super().__init__(
"nocommand",
callback,
filters=filters,
allow_edited=None,
pass_args=pass_args,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
run_async=run_async,
)
self.prefix = prefix
self.command = command
self._build_commands()
|
def __init__(
self,
prefix,
command,
callback,
filters=None,
pass_args=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
):
self._prefix = list()
self._command = list()
self._commands = list()
super().__init__(
"nocommand",
callback,
filters=filters,
allow_edited=None,
pass_args=pass_args,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
)
self.prefix = prefix
self.command = command
self._build_commands()
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
entry_points,
states,
fallbacks,
allow_reentry=False,
per_chat=True,
per_user=True,
per_message=False,
conversation_timeout=None,
name=None,
persistent=False,
map_to_parent=None,
):
self.run_async = False
self._entry_points = entry_points
self._states = states
self._fallbacks = fallbacks
self._allow_reentry = allow_reentry
self._per_user = per_user
self._per_chat = per_chat
self._per_message = per_message
self._conversation_timeout = conversation_timeout
self._name = name
if persistent and not self.name:
raise ValueError("Conversations can't be persistent when handler is unnamed.")
self.persistent = persistent
self._persistence = None
""":obj:`telegram.ext.BasePersistence`: The persistence used to store conversations.
Set by dispatcher"""
self._map_to_parent = map_to_parent
self.timeout_jobs = dict()
self._timeout_jobs_lock = Lock()
self._conversations = dict()
self._conversations_lock = Lock()
self.logger = logging.getLogger(__name__)
if not any((self.per_user, self.per_chat, self.per_message)):
raise ValueError(
"'per_user', 'per_chat' and 'per_message' can't all be 'False'"
)
if self.per_message and not self.per_chat:
warnings.warn(
"If 'per_message=True' is used, 'per_chat=True' should also be used, "
"since message IDs are not globally unique."
)
all_handlers = list()
all_handlers.extend(entry_points)
all_handlers.extend(fallbacks)
for state_handlers in states.values():
all_handlers.extend(state_handlers)
if self.per_message:
for handler in all_handlers:
if not isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=True', all entry points and state handlers"
" must be 'CallbackQueryHandler', since no other handlers "
"have a message context."
)
break
else:
for handler in all_handlers:
if isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=False', 'CallbackQueryHandler' will not be "
"tracked for every message."
)
break
if self.per_chat:
for handler in all_handlers:
if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)):
warnings.warn(
"If 'per_chat=True', 'InlineQueryHandler' can not be used, "
"since inline queries have no chat context."
)
break
|
def __init__(
self,
entry_points,
states,
fallbacks,
allow_reentry=False,
per_chat=True,
per_user=True,
per_message=False,
conversation_timeout=None,
name=None,
persistent=False,
map_to_parent=None,
):
self._entry_points = entry_points
self._states = states
self._fallbacks = fallbacks
self._allow_reentry = allow_reentry
self._per_user = per_user
self._per_chat = per_chat
self._per_message = per_message
self._conversation_timeout = conversation_timeout
self._name = name
if persistent and not self.name:
raise ValueError("Conversations can't be persistent when handler is unnamed.")
self.persistent = persistent
self._persistence = None
""":obj:`telegram.ext.BasePersistence`: The persistence used to store conversations.
Set by dispatcher"""
self._map_to_parent = map_to_parent
self.timeout_jobs = dict()
self._timeout_jobs_lock = Lock()
self._conversations = dict()
self._conversations_lock = Lock()
self.logger = logging.getLogger(__name__)
if not any((self.per_user, self.per_chat, self.per_message)):
raise ValueError(
"'per_user', 'per_chat' and 'per_message' can't all be 'False'"
)
if self.per_message and not self.per_chat:
warnings.warn(
"If 'per_message=True' is used, 'per_chat=True' should also be used, "
"since message IDs are not globally unique."
)
all_handlers = list()
all_handlers.extend(entry_points)
all_handlers.extend(fallbacks)
for state_handlers in states.values():
all_handlers.extend(state_handlers)
if self.per_message:
for handler in all_handlers:
if not isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=True', all entry points and state handlers"
" must be 'CallbackQueryHandler', since no other handlers "
"have a message context."
)
break
else:
for handler in all_handlers:
if isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=False', 'CallbackQueryHandler' will not be "
"tracked for every message."
)
break
if self.per_chat:
for handler in all_handlers:
if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)):
warnings.warn(
"If 'per_chat=True', 'InlineQueryHandler' can not be used, "
"since inline queries have no chat context."
)
break
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def run_async(self, func, *args, update=None, **kwargs):
"""
Queue a function (with given args/kwargs) to be run asynchronously. Exceptions raised
by the function will be handled by the error handlers registered with
:meth:`add_error_handler`.
Warning:
* If you're using ``@run_async``/:meth:`run_async` you cannot rely on adding custom
attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.
* Calling a function through :meth:`run_async` from within an error handler can lead to
an infinite error handling loop.
Args:
func (:obj:`callable`): The function to run in the thread.
*args (:obj:`tuple`, optional): Arguments to ``func``.
update (:class:`telegram.Update`, optional): The update associated with the functions
call. If passed, it will be available in the error handlers, in case an exception
is raised by :attr:`func`.
**kwargs (:obj:`dict`, optional): Keyword arguments to ``func``.
Returns:
Promise
"""
return self._run_async(func, *args, update=update, error_handling=True, **kwargs)
|
def run_async(self, func, *args, **kwargs):
"""Queue a function (with given args/kwargs) to be run asynchronously.
Warning:
If you're using @run_async you cannot rely on adding custom attributes to
:class:`telegram.ext.CallbackContext`. See its docs for more info.
Args:
func (:obj:`callable`): The function to run in the thread.
*args (:obj:`tuple`, optional): Arguments to `func`.
**kwargs (:obj:`dict`, optional): Keyword arguments to `func`.
Returns:
Promise
"""
# TODO: handle exception in async threads
# set a threading.Event to notify caller thread
promise = Promise(func, args, kwargs)
self.__async_queue.put(promise)
return promise
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def async_func(*args, **kwargs):
warnings.warn(
"The @run_async decorator is deprecated. Use the `run_async` parameter of"
"`Dispatcher.add_handler` or `Dispatcher.run_async` instead.",
TelegramDeprecationWarning,
stacklevel=2,
)
return Dispatcher.get_instance()._run_async(
func, *args, update=None, error_handling=False, **kwargs
)
|
def async_func(*args, **kwargs):
return Dispatcher.get_instance().run_async(func, *args, **kwargs)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
bot,
update_queue,
workers=4,
exception_event=None,
job_queue=None,
persistence=None,
use_context=False,
):
self.bot = bot
self.update_queue = update_queue
self.job_queue = job_queue
self.workers = workers
self.use_context = use_context
if not use_context:
warnings.warn(
"Old Handler API is deprecated - see https://git.io/fxJuV for details",
TelegramDeprecationWarning,
stacklevel=3,
)
self.user_data = defaultdict(dict)
self.chat_data = defaultdict(dict)
self.bot_data = {}
self._update_persistence_lock = Lock()
if persistence:
if not isinstance(persistence, BasePersistence):
raise TypeError("persistence must be based on telegram.ext.BasePersistence")
self.persistence = persistence
if self.persistence.store_user_data:
self.user_data = self.persistence.get_user_data()
if not isinstance(self.user_data, defaultdict):
raise ValueError("user_data must be of type defaultdict")
if self.persistence.store_chat_data:
self.chat_data = self.persistence.get_chat_data()
if not isinstance(self.chat_data, defaultdict):
raise ValueError("chat_data must be of type defaultdict")
if self.persistence.store_bot_data:
self.bot_data = self.persistence.get_bot_data()
if not isinstance(self.bot_data, dict):
raise ValueError("bot_data must be of type dict")
else:
self.persistence = None
self.handlers = {}
"""Dict[:obj:`int`, List[:class:`telegram.ext.Handler`]]: Holds the handlers per group."""
self.groups = []
"""List[:obj:`int`]: A list with all groups."""
self.error_handlers = {}
"""Dict[:obj:`callable`, :obj:`bool`]: A dict, where the keys are error handlers and the
values indicate whether they are to be run asynchronously."""
self.running = False
""":obj:`bool`: Indicates if this dispatcher is running."""
self.__stop_event = Event()
self.__exception_event = exception_event or Event()
self.__async_queue = Queue()
self.__async_threads = set()
# For backward compatibility, we allow a "singleton" mode for the dispatcher. When there's
# only one instance of Dispatcher, it will be possible to use the `run_async` decorator.
with self.__singleton_lock:
if self.__singleton_semaphore.acquire(blocking=0):
self._set_singleton(self)
else:
self._set_singleton(None)
|
def __init__(
self,
bot,
update_queue,
workers=4,
exception_event=None,
job_queue=None,
persistence=None,
use_context=False,
):
self.bot = bot
self.update_queue = update_queue
self.job_queue = job_queue
self.workers = workers
self.use_context = use_context
if not use_context:
warnings.warn(
"Old Handler API is deprecated - see https://git.io/fxJuV for details",
TelegramDeprecationWarning,
stacklevel=3,
)
self.user_data = defaultdict(dict)
self.chat_data = defaultdict(dict)
self.bot_data = {}
if persistence:
if not isinstance(persistence, BasePersistence):
raise TypeError(
"persistence should be based on telegram.ext.BasePersistence"
)
self.persistence = persistence
if self.persistence.store_user_data:
self.user_data = self.persistence.get_user_data()
if not isinstance(self.user_data, defaultdict):
raise ValueError("user_data must be of type defaultdict")
if self.persistence.store_chat_data:
self.chat_data = self.persistence.get_chat_data()
if not isinstance(self.chat_data, defaultdict):
raise ValueError("chat_data must be of type defaultdict")
if self.persistence.store_bot_data:
self.bot_data = self.persistence.get_bot_data()
if not isinstance(self.bot_data, dict):
raise ValueError("bot_data must be of type dict")
else:
self.persistence = None
self.handlers = {}
"""Dict[:obj:`int`, List[:class:`telegram.ext.Handler`]]: Holds the handlers per group."""
self.groups = []
"""List[:obj:`int`]: A list with all groups."""
self.error_handlers = []
"""List[:obj:`callable`]: A list of errorHandlers."""
self.running = False
""":obj:`bool`: Indicates if this dispatcher is running."""
self.__stop_event = Event()
self.__exception_event = exception_event or Event()
self.__async_queue = Queue()
self.__async_threads = set()
# For backward compatibility, we allow a "singleton" mode for the dispatcher. When there's
# only one instance of Dispatcher, it will be possible to use the `run_async` decorator.
with self.__singleton_lock:
if self.__singleton_semaphore.acquire(blocking=0):
self._set_singleton(self)
else:
self._set_singleton(None)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def _pooled(self):
thr_name = current_thread().getName()
while 1:
promise = self.__async_queue.get()
# If unpacking fails, the thread pool is being closed from Updater._join_async_threads
if not isinstance(promise, Promise):
self.logger.debug(
"Closing run_async thread %s/%d", thr_name, len(self.__async_threads)
)
break
promise.run()
if not promise.exception:
self.update_persistence(update=promise.update)
continue
if isinstance(promise.exception, DispatcherHandlerStop):
self.logger.warning(
"DispatcherHandlerStop is not supported with async functions; func: %s",
promise.pooled_function.__name__,
)
continue
# Avoid infinite recursion of error handlers.
if promise.pooled_function in self.error_handlers:
self.logger.error("An uncaught error was raised while handling the error.")
continue
# Don't perform error handling for a `Promise` with deactivated error handling. This
# should happen only via the deprecated `@run_async` decorator or `Promises` created
# within error handlers
if not promise.error_handling:
self.logger.error(
"A promise with deactivated error handling raised an error."
)
continue
# If we arrive here, an exception happened in the promise and was neither
# DispatcherHandlerStop nor raised by an error handler. So we can and must handle it
try:
self.dispatch_error(promise.update, promise.exception, promise=promise)
except Exception:
self.logger.exception(
"An uncaught error was raised while handling the error."
)
|
def _pooled(self):
thr_name = current_thread().getName()
while 1:
promise = self.__async_queue.get()
# If unpacking fails, the thread pool is being closed from Updater._join_async_threads
if not isinstance(promise, Promise):
self.logger.debug(
"Closing run_async thread %s/%d", thr_name, len(self.__async_threads)
)
break
promise.run()
if isinstance(promise.exception, DispatcherHandlerStop):
self.logger.warning(
"DispatcherHandlerStop is not supported with async functions; func: %s",
promise.pooled_function.__name__,
)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def process_update(self, update):
"""Processes a single update.
Args:
update (:obj:`str` | :class:`telegram.Update` | :class:`telegram.TelegramError`):
The update to process.
"""
# An error happened while polling
if isinstance(update, TelegramError):
try:
self.dispatch_error(None, update)
except Exception:
self.logger.exception(
"An uncaught error was raised while handling the error."
)
return
context = None
for group in self.groups:
try:
for handler in self.handlers[group]:
check = handler.check_update(update)
if check is not None and check is not False:
if not context and self.use_context:
context = CallbackContext.from_update(update, self)
handler.handle_update(update, self, check, context)
# If handler runs async updating immediately doesn't make sense
if not handler.run_async:
self.update_persistence(update=update)
break
# Stop processing with any other handler.
except DispatcherHandlerStop:
self.logger.debug("Stopping further handlers due to DispatcherHandlerStop")
self.update_persistence(update=update)
break
# Dispatch any error.
except Exception as e:
try:
self.dispatch_error(update, e)
except DispatcherHandlerStop:
self.logger.debug("Error handler stopped further handlers")
break
# Errors should not stop the thread.
except Exception:
self.logger.exception(
"An uncaught error was raised while handling the error."
)
|
def process_update(self, update):
"""Processes a single update.
Args:
update (:obj:`str` | :class:`telegram.Update` | :class:`telegram.TelegramError`):
The update to process.
"""
# An error happened while polling
if isinstance(update, TelegramError):
try:
self.dispatch_error(None, update)
except Exception:
self.logger.exception(
"An uncaught error was raised while handling the error"
)
return
context = None
for group in self.groups:
try:
for handler in self.handlers[group]:
check = handler.check_update(update)
if check is not None and check is not False:
if not context and self.use_context:
context = CallbackContext.from_update(update, self)
handler.handle_update(update, self, check, context)
self.update_persistence(update=update)
break
# Stop processing with any other handler.
except DispatcherHandlerStop:
self.logger.debug("Stopping further handlers due to DispatcherHandlerStop")
self.update_persistence(update=update)
break
# Dispatch any error.
except Exception as e:
try:
self.dispatch_error(update, e)
except DispatcherHandlerStop:
self.logger.debug("Error handler stopped further handlers")
break
# Errors should not stop the thread.
except Exception:
self.logger.exception(
"An error was raised while processing the update and an "
"uncaught error was raised while handling the error "
"with an error_handler"
)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def add_handler(self, handler, group=DEFAULT_GROUP):
"""Register a handler.
TL;DR: Order and priority counts. 0 or 1 handlers per group will be used. End handling of
update with :class:`telegram.ext.DispatcherHandlerStop`.
A handler must be an instance of a subclass of :class:`telegram.ext.Handler`. All handlers
are organized in groups with a numeric value. The default group is 0. All groups will be
evaluated for handling an update, but only 0 or 1 handler per group will be used. If
:class:`telegram.ext.DispatcherHandlerStop` is raised from one of the handlers, no further
handlers (regardless of the group) will be called.
The priority/order of handlers is determined as follows:
* Priority of the group (lower group number == higher priority)
* The first handler in a group which should handle an update (see
:attr:`telegram.ext.Handler.check_update`) will be used. Other handlers from the
group will not be used. The order in which handlers were added to the group defines the
priority.
Args:
handler (:class:`telegram.ext.Handler`): A Handler instance.
group (:obj:`int`, optional): The group identifier. Default is 0.
"""
# Unfortunately due to circular imports this has to be here
from .conversationhandler import ConversationHandler
if not isinstance(handler, Handler):
raise TypeError("handler is not an instance of {}".format(Handler.__name__))
if not isinstance(group, int):
raise TypeError("group is not int")
if isinstance(handler, ConversationHandler) and handler.persistent:
if not self.persistence:
raise ValueError(
"ConversationHandler {} can not be persistent if dispatcher has no "
"persistence".format(handler.name)
)
handler.persistence = self.persistence
handler.conversations = self.persistence.get_conversations(handler.name)
if group not in self.handlers:
self.handlers[group] = list()
self.groups.append(group)
self.groups = sorted(self.groups)
self.handlers[group].append(handler)
|
def add_handler(self, handler, group=DEFAULT_GROUP):
"""Register a handler.
TL;DR: Order and priority counts. 0 or 1 handlers per group will be used. End handling of
update with :class:`telegram.ext.DispatcherHandlerStop`.
A handler must be an instance of a subclass of :class:`telegram.ext.Handler`. All handlers
are organized in groups with a numeric value. The default group is 0. All groups will be
evaluated for handling an update, but only 0 or 1 handler per group will be used. If
:class:`telegram.ext.DispatcherHandlerStop` is raised from one of the handlers, no further
handlers (regardless of the group) will be called.
The priority/order of handlers is determined as follows:
* Priority of the group (lower group number == higher priority)
* The first handler in a group which should handle an update (see
:attr:`telegram.ext.Handler.check_update`) will be used. Other handlers from the
group will not be used. The order in which handlers were added to the group defines the
priority.
Args:
handler (:class:`telegram.ext.Handler`): A Handler instance.
group (:obj:`int`, optional): The group identifier. Default is 0.
"""
# Unfortunately due to circular imports this has to be here
from .conversationhandler import ConversationHandler
if not isinstance(handler, Handler):
raise TypeError("handler is not an instance of {}".format(Handler.__name__))
if not isinstance(group, int):
raise TypeError("group is not int")
if isinstance(handler, ConversationHandler) and handler.persistent:
if not self.persistence:
raise ValueError(
"Conversationhandler {} can not be persistent if dispatcher has no "
"persistence".format(handler.name)
)
handler.persistence = self.persistence
handler.conversations = self.persistence.get_conversations(handler.name)
if group not in self.handlers:
self.handlers[group] = list()
self.groups.append(group)
self.groups = sorted(self.groups)
self.handlers[group].append(handler)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def update_persistence(self, update=None):
"""Update :attr:`user_data`, :attr:`chat_data` and :attr:`bot_data` in :attr:`persistence`.
Args:
update (:class:`telegram.Update`, optional): The update to process. If passed, only the
corresponding ``user_data`` and ``chat_data`` will be updated.
"""
with self._update_persistence_lock:
self.__update_persistence(update)
|
def update_persistence(self, update=None):
"""Update :attr:`user_data`, :attr:`chat_data` and :attr:`bot_data` in :attr:`persistence`.
Args:
update (:class:`telegram.Update`, optional): The update to process. If passed, only the
corresponding ``user_data`` and ``chat_data`` will be updated.
"""
if self.persistence:
chat_ids = self.chat_data.keys()
user_ids = self.user_data.keys()
if isinstance(update, Update):
if update.effective_chat:
chat_ids = [update.effective_chat.id]
else:
chat_ids = []
if update.effective_user:
user_ids = [update.effective_user.id]
else:
user_ids = []
if self.persistence.store_bot_data:
try:
self.persistence.update_bot_data(self.bot_data)
except Exception as e:
try:
self.dispatch_error(update, e)
except Exception:
message = (
"Saving bot data raised an error and an "
"uncaught error was raised while handling "
"the error with an error_handler"
)
self.logger.exception(message)
if self.persistence.store_chat_data:
for chat_id in chat_ids:
try:
self.persistence.update_chat_data(chat_id, self.chat_data[chat_id])
except Exception as e:
try:
self.dispatch_error(update, e)
except Exception:
message = (
"Saving chat data raised an error and an "
"uncaught error was raised while handling "
"the error with an error_handler"
)
self.logger.exception(message)
if self.persistence.store_user_data:
for user_id in user_ids:
try:
self.persistence.update_user_data(user_id, self.user_data[user_id])
except Exception as e:
try:
self.dispatch_error(update, e)
except Exception:
message = (
"Saving user data raised an error and an "
"uncaught error was raised while handling "
"the error with an error_handler"
)
self.logger.exception(message)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def add_error_handler(self, callback, run_async=False):
"""Registers an error handler in the Dispatcher. This handler will receive every error
which happens in your bot.
Note:
Attempts to add the same callback multiple times will be ignored.
Warning:
The errors handled within these handlers won't show up in the logger, so you
need to make sure that you reraise the error.
Args:
callback (:obj:`callable`): The callback function for this error handler. Will be
called when an error is raised. Callback signature for context based API:
``def callback(update: Update, context: CallbackContext)``
The error that happened will be present in context.error.
run_async (:obj:`bool`, optional): Whether this handlers callback should be run
asynchronously using :meth:`run_async`. Defaults to :obj:`False`.
Note:
See https://git.io/fxJuV for more info about switching to context based API.
"""
if callback in self.error_handlers:
self.logger.debug(
"The callback is already registered as an error handler. Ignoring."
)
return
self.error_handlers[callback] = run_async
|
def add_error_handler(self, callback):
"""Registers an error handler in the Dispatcher. This handler will receive every error
which happens in your bot.
Warning: The errors handled within these handlers won't show up in the logger, so you
need to make sure that you reraise the error.
Args:
callback (:obj:`callable`): The callback function for this error handler. Will be
called when an error is raised. Callback signature for context based API:
``def callback(update: Update, context: CallbackContext)``
The error that happened will be present in context.error.
Note:
See https://git.io/fxJuV for more info about switching to context based API.
"""
self.error_handlers.append(callback)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def remove_error_handler(self, callback):
"""Removes an error handler.
Args:
callback (:obj:`callable`): The error handler to remove.
"""
self.error_handlers.pop(callback, None)
|
def remove_error_handler(self, callback):
"""Removes an error handler.
Args:
callback (:obj:`callable`): The error handler to remove.
"""
if callback in self.error_handlers:
self.error_handlers.remove(callback)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def dispatch_error(self, update, error, promise=None):
"""Dispatches an error.
Args:
update (:obj:`str` | :class:`telegram.Update` | None): The update that caused the error
error (:obj:`Exception`): The error that was raised.
promise (:class:`telegram.utils.Promise`, optional): The promise whose pooled function
raised the error.
"""
async_args = None if not promise else promise.args
async_kwargs = None if not promise else promise.kwargs
if self.error_handlers:
for callback, run_async in self.error_handlers.items():
if self.use_context:
context = CallbackContext.from_error(
update,
error,
self,
async_args=async_args,
async_kwargs=async_kwargs,
)
if run_async:
self.run_async(callback, update, context, update=update)
else:
callback(update, context)
else:
if run_async:
self.run_async(callback, self.bot, update, error, update=update)
else:
callback(self.bot, update, error)
else:
self.logger.exception(
"No error handlers are registered, logging exception.", exc_info=error
)
|
def dispatch_error(self, update, error):
"""Dispatches an error.
Args:
update (:obj:`str` | :class:`telegram.Update` | None): The update that caused the error
error (:obj:`Exception`): The error that was raised.
"""
if self.error_handlers:
for callback in self.error_handlers:
if self.use_context:
callback(update, CallbackContext.from_error(update, error, self))
else:
callback(self.bot, update, error)
else:
self.logger.exception(
"No error handlers are registered, logging exception.", exc_info=error
)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
callback,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
run_async=False,
):
self.callback = callback
self.pass_update_queue = pass_update_queue
self.pass_job_queue = pass_job_queue
self.pass_user_data = pass_user_data
self.pass_chat_data = pass_chat_data
self.run_async = run_async
|
def __init__(
self,
callback,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
):
self.callback = callback
self.pass_update_queue = pass_update_queue
self.pass_job_queue = pass_job_queue
self.pass_user_data = pass_user_data
self.pass_chat_data = pass_chat_data
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def handle_update(self, update, dispatcher, check_result, context=None):
"""
This method is called if it was determined that an update should indeed
be handled by this instance. Calls :attr:`callback` along with its respectful
arguments. To work with the :class:`telegram.ext.ConversationHandler`, this method
returns the value returned from :attr:`callback`.
Note that it can be overridden if needed by the subclassing handler.
Args:
update (:obj:`str` | :class:`telegram.Update`): The update to be handled.
dispatcher (:class:`telegram.ext.Dispatcher`): The calling dispatcher.
check_result: The result from :attr:`check_update`.
"""
if context:
self.collect_additional_context(context, update, dispatcher, check_result)
if self.run_async:
return dispatcher.run_async(self.callback, update, context, update=update)
else:
return self.callback(update, context)
else:
optional_args = self.collect_optional_args(dispatcher, update, check_result)
if self.run_async:
return dispatcher.run_async(
self.callback, dispatcher.bot, update, update=update, **optional_args
)
else:
return self.callback(dispatcher.bot, update, **optional_args)
|
def handle_update(self, update, dispatcher, check_result, context=None):
"""
This method is called if it was determined that an update should indeed
be handled by this instance. Calls :attr:`callback` along with its respectful
arguments. To work with the :class:`telegram.ext.ConversationHandler`, this method
returns the value returned from :attr:`callback`.
Note that it can be overridden if needed by the subclassing handler.
Args:
update (:obj:`str` | :class:`telegram.Update`): The update to be handled.
dispatcher (:class:`telegram.ext.Dispatcher`): The calling dispatcher.
check_result: The result from :attr:`check_update`.
"""
if context:
self.collect_additional_context(context, update, dispatcher, check_result)
return self.callback(update, context)
else:
optional_args = self.collect_optional_args(dispatcher, update, check_result)
return self.callback(dispatcher.bot, update, **optional_args)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
filters,
callback,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
message_updates=None,
channel_post_updates=None,
edited_updates=None,
run_async=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
run_async=run_async,
)
if (
message_updates is False
and channel_post_updates is False
and edited_updates is False
):
raise ValueError(
"message_updates, channel_post_updates and edited_updates are all False"
)
if filters is not None:
self.filters = Filters.update & filters
else:
self.filters = Filters.update
if message_updates is not None:
warnings.warn(
"message_updates is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if message_updates is False:
self.filters &= ~Filters.update.message
if channel_post_updates is not None:
warnings.warn(
"channel_post_updates is deprecated. See https://git.io/fxJuV "
"for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if channel_post_updates is False:
self.filters &= ~Filters.update.channel_post
if edited_updates is not None:
warnings.warn(
"edited_updates is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if edited_updates is False:
self.filters &= ~(
Filters.update.edited_message | Filters.update.edited_channel_post
)
|
def __init__(
self,
filters,
callback,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
message_updates=None,
channel_post_updates=None,
edited_updates=None,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
)
if (
message_updates is False
and channel_post_updates is False
and edited_updates is False
):
raise ValueError(
"message_updates, channel_post_updates and edited_updates are all False"
)
if filters is not None:
self.filters = Filters.update & filters
else:
self.filters = Filters.update
if message_updates is not None:
warnings.warn(
"message_updates is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if message_updates is False:
self.filters &= ~Filters.update.message
if channel_post_updates is not None:
warnings.warn(
"channel_post_updates is deprecated. See https://git.io/fxJuV "
"for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if channel_post_updates is False:
self.filters &= ~Filters.update.channel_post
if edited_updates is not None:
warnings.warn(
"edited_updates is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
if edited_updates is False:
self.filters &= ~(
Filters.update.edited_message | Filters.update.edited_channel_post
)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
pattern,
callback,
pass_groups=False,
pass_groupdict=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
allow_edited=False,
message_updates=True,
channel_post_updates=False,
edited_updates=False,
run_async=False,
):
warnings.warn(
"RegexHandler is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
super().__init__(
Filters.regex(pattern),
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
message_updates=message_updates,
channel_post_updates=channel_post_updates,
edited_updates=edited_updates,
run_async=run_async,
)
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
|
def __init__(
self,
pattern,
callback,
pass_groups=False,
pass_groupdict=False,
pass_update_queue=False,
pass_job_queue=False,
pass_user_data=False,
pass_chat_data=False,
allow_edited=False,
message_updates=True,
channel_post_updates=False,
edited_updates=False,
):
warnings.warn(
"RegexHandler is deprecated. See https://git.io/fxJuV for more info",
TelegramDeprecationWarning,
stacklevel=2,
)
super().__init__(
Filters.regex(pattern),
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
pass_user_data=pass_user_data,
pass_chat_data=pass_chat_data,
message_updates=message_updates,
channel_post_updates=channel_post_updates,
edited_updates=edited_updates,
)
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
command,
callback,
pass_args=False,
pass_update_queue=False,
pass_job_queue=False,
run_async=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
run_async=run_async,
)
self.command = command
self.pass_args = pass_args
|
def __init__(
self,
command,
callback,
pass_args=False,
pass_update_queue=False,
pass_job_queue=False,
):
super().__init__(
callback, pass_update_queue=pass_update_queue, pass_job_queue=pass_job_queue
)
self.command = command
self.pass_args = pass_args
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
pattern,
callback,
pass_groups=False,
pass_groupdict=False,
pass_update_queue=False,
pass_job_queue=False,
run_async=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
run_async=run_async,
)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern = pattern
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
|
def __init__(
self,
pattern,
callback,
pass_groups=False,
pass_groupdict=False,
pass_update_queue=False,
pass_job_queue=False,
):
super().__init__(
callback, pass_update_queue=pass_update_queue, pass_job_queue=pass_job_queue
)
if isinstance(pattern, str):
pattern = re.compile(pattern)
self.pattern = pattern
self.pass_groups = pass_groups
self.pass_groupdict = pass_groupdict
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(
self,
type,
callback,
strict=False,
pass_update_queue=False,
pass_job_queue=False,
run_async=False,
):
super().__init__(
callback,
pass_update_queue=pass_update_queue,
pass_job_queue=pass_job_queue,
run_async=run_async,
)
self.type = type
self.strict = strict
|
def __init__(
self, type, callback, strict=False, pass_update_queue=False, pass_job_queue=False
):
super().__init__(
callback, pass_update_queue=pass_update_queue, pass_job_queue=pass_job_queue
)
self.type = type
self.strict = strict
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def __init__(self, pooled_function, args, kwargs, update=None, error_handling=True):
self.pooled_function = pooled_function
self.args = args
self.kwargs = kwargs
self.update = update
self.error_handling = error_handling
self.done = Event()
self._result = None
self._exception = None
|
def __init__(self, pooled_function, args, kwargs):
self.pooled_function = pooled_function
self.args = args
self.kwargs = kwargs
self.done = Event()
self._result = None
self._exception = None
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def run(self):
"""Calls the :attr:`pooled_function` callable."""
try:
self._result = self.pooled_function(*self.args, **self.kwargs)
except Exception as exc:
self._exception = exc
finally:
self.done.set()
|
def run(self):
"""Calls the :attr:`pooled_function` callable."""
try:
self._result = self.pooled_function(*self.args, **self.kwargs)
except Exception as exc:
logger.exception("An uncaught error was raised while running the promise")
self._exception = exc
finally:
self.done.set()
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/682
|
<project_path>/venv/bin/python <project_path>/issue.py
2017-06-20 23:24:11,743 - __main__ - INFO - /start
2017-06-20 23:24:11,906 - telegram.ext.dispatcher - WARNING - A TelegramError was raised while processing the Update.
2017-06-20 23:24:11,907 - __main__ - ERROR - Boom!
2017-06-20 23:24:11,907 - __main__ - INFO - /async_start
2017-06-20 23:24:12,188 - telegram.utils.promise - ERROR - An uncaught error was raised while running the promise
Traceback (most recent call last):
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/promise.py", line 42, in run
self._result = self.pooled_function(*self.args, **self.kwargs)
File "<project_path>/issue.py", line 25, in action_async_start
update.message.reply_text(msg, parse_mode=ParseMode.HTML)
File "<project_path>/venv/src/python-telegram-bot/telegram/message.py", line 340, in reply_text
return self.bot.send_message(self.chat_id, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 125, in decorator
result = func(self, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 158, in decorator
return Bot._message_wrapper(self, url, data, *args, **kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/bot.py", line 146, in _message_wrapper
result = self._request.post(url, data, timeout=kwargs.get('timeout'))
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 252, in post
**urlopen_kwargs)
File "<project_path>/venv/src/python-telegram-bot/telegram/utils/request.py", line 194, in _request_wrapper
raise BadRequest(message)
telegram.error.BadRequest: Can't parse entities in message text: unsupported start tag "invalid_tag" at byte offset 0
Process finished with exit code 0
|
telegram.error.BadRequest
|
def start_polling(
self,
poll_interval=0.0,
timeout=10,
clean=False,
bootstrap_retries=-1,
read_latency=2.0,
allowed_updates=None,
):
"""Starts polling updates from Telegram.
Args:
poll_interval (:obj:`float`, optional): Time to wait between polling updates from
Telegram in seconds. Default is 0.0.
timeout (:obj:`float`, optional): Passed to :attr:`telegram.Bot.get_updates`.
clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
before actually starting to poll. Default is :obj:`False`.
bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
`Updater` will retry on failures on the Telegram server.
* < 0 - retry indefinitely (default)
* 0 - no retries
* > 0 - retry up to X times
allowed_updates (List[:obj:`str`], optional): Passed to
:attr:`telegram.Bot.get_updates`.
read_latency (:obj:`float` | :obj:`int`, optional): Grace time in seconds for receiving
the reply from server. Will be added to the `timeout` value and used as the read
timeout from server (Default: 2).
Returns:
:obj:`Queue`: The update queue that can be filled from the main thread.
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
self.job_queue.start()
dispatcher_ready = Event()
polling_ready = Event()
self._init_thread(
self.dispatcher.start, "dispatcher", ready=dispatcher_ready
)
self._init_thread(
self._start_polling,
"updater",
poll_interval,
timeout,
read_latency,
bootstrap_retries,
clean,
allowed_updates,
ready=polling_ready,
)
self.logger.debug("Waiting for Dispatcher and polling to start")
dispatcher_ready.wait()
polling_ready.wait()
# Return the update queue so the main thread can insert updates
return self.update_queue
|
def start_polling(
self,
poll_interval=0.0,
timeout=10,
clean=False,
bootstrap_retries=-1,
read_latency=2.0,
allowed_updates=None,
):
"""Starts polling updates from Telegram.
Args:
poll_interval (:obj:`float`, optional): Time to wait between polling updates from
Telegram in seconds. Default is 0.0.
timeout (:obj:`float`, optional): Passed to :attr:`telegram.Bot.get_updates`.
clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
before actually starting to poll. Default is :obj:`False`.
bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
`Updater` will retry on failures on the Telegram server.
* < 0 - retry indefinitely (default)
* 0 - no retries
* > 0 - retry up to X times
allowed_updates (List[:obj:`str`], optional): Passed to
:attr:`telegram.Bot.get_updates`.
read_latency (:obj:`float` | :obj:`int`, optional): Grace time in seconds for receiving
the reply from server. Will be added to the `timeout` value and used as the read
timeout from server (Default: 2).
Returns:
:obj:`Queue`: The update queue that can be filled from the main thread.
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
self.job_queue.start()
dispatcher_ready = Event()
self._init_thread(
self.dispatcher.start, "dispatcher", ready=dispatcher_ready
)
self._init_thread(
self._start_polling,
"updater",
poll_interval,
timeout,
read_latency,
bootstrap_retries,
clean,
allowed_updates,
)
dispatcher_ready.wait()
# Return the update queue so the main thread can insert updates
return self.update_queue
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1977
|
Exception in thread Bot:377489430:updater:
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Program Files\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 216, in _thread_wrapper
target(*args, **kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 452, in _start_webhook
self.httpd.serve_forever()
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\utils\webhookhandler.py", line 52, in serve_forever
self.http_server.listen(self.port, address=self.listen)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def start_webhook(
self,
listen="127.0.0.1",
port=80,
url_path="",
cert=None,
key=None,
clean=False,
bootstrap_retries=0,
webhook_url=None,
allowed_updates=None,
force_event_loop=False,
):
"""
Starts a small http server to listen for updates via webhook. If cert
and key are not provided, the webhook will be started directly on
http://listen:port/url_path, so SSL can be handled by another
application. Else, the webhook will be started on
https://listen:port/url_path
Note:
Due to an incompatibility of the Tornado library PTB uses for the webhook with Python
3.8+ on Windows machines, PTB will attempt to set the event loop to
:attr:`asyncio.SelectorEventLoop` and raise an exception, if an incompatible event loop
has already been specified. See this `thread`_ for more details. To suppress the
exception, set :attr:`force_event_loop` to :obj:`True`.
.. _thread: https://github.com/tornadoweb/tornado/issues/2608
Args:
listen (:obj:`str`, optional): IP-Address to listen on. Default ``127.0.0.1``.
port (:obj:`int`, optional): Port the bot should be listening on. Default ``80``.
url_path (:obj:`str`, optional): Path inside url.
cert (:obj:`str`, optional): Path to the SSL certificate file.
key (:obj:`str`, optional): Path to the SSL key file.
clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
before actually starting the webhook. Default is :obj:`False`.
bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
`Updater` will retry on failures on the Telegram server.
* < 0 - retry indefinitely (default)
* 0 - no retries
* > 0 - retry up to X times
webhook_url (:obj:`str`, optional): Explicitly specify the webhook url. Useful behind
NAT, reverse proxy, etc. Default is derived from `listen`, `port` & `url_path`.
allowed_updates (List[:obj:`str`], optional): Passed to
:attr:`telegram.Bot.set_webhook`.
force_event_loop (:obj:`bool`, optional): Force using the current event loop. See above
note for details. Defaults to :obj:`False`
Returns:
:obj:`Queue`: The update queue that can be filled from the main thread.
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
webhook_ready = Event()
dispatcher_ready = Event()
self.job_queue.start()
self._init_thread(self.dispatcher.start, "dispatcher", dispatcher_ready)
self._init_thread(
self._start_webhook,
"updater",
listen,
port,
url_path,
cert,
key,
bootstrap_retries,
clean,
webhook_url,
allowed_updates,
ready=webhook_ready,
force_event_loop=force_event_loop,
)
self.logger.debug("Waiting for Dispatcher and Webhook to start")
webhook_ready.wait()
dispatcher_ready.wait()
# Return the update queue so the main thread can insert updates
return self.update_queue
|
def start_webhook(
self,
listen="127.0.0.1",
port=80,
url_path="",
cert=None,
key=None,
clean=False,
bootstrap_retries=0,
webhook_url=None,
allowed_updates=None,
):
"""
Starts a small http server to listen for updates via webhook. If cert
and key are not provided, the webhook will be started directly on
http://listen:port/url_path, so SSL can be handled by another
application. Else, the webhook will be started on
https://listen:port/url_path
Args:
listen (:obj:`str`, optional): IP-Address to listen on. Default ``127.0.0.1``.
port (:obj:`int`, optional): Port the bot should be listening on. Default ``80``.
url_path (:obj:`str`, optional): Path inside url.
cert (:obj:`str`, optional): Path to the SSL certificate file.
key (:obj:`str`, optional): Path to the SSL key file.
clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
before actually starting the webhook. Default is :obj:`False`.
bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
`Updater` will retry on failures on the Telegram server.
* < 0 - retry indefinitely (default)
* 0 - no retries
* > 0 - retry up to X times
webhook_url (:obj:`str`, optional): Explicitly specify the webhook url. Useful behind
NAT, reverse proxy, etc. Default is derived from `listen`, `port` & `url_path`.
allowed_updates (List[:obj:`str`], optional): Passed to
:attr:`telegram.Bot.set_webhook`.
Returns:
:obj:`Queue`: The update queue that can be filled from the main thread.
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
self.job_queue.start()
(self._init_thread(self.dispatcher.start, "dispatcher"),)
self._init_thread(
self._start_webhook,
"updater",
listen,
port,
url_path,
cert,
key,
bootstrap_retries,
clean,
webhook_url,
allowed_updates,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1977
|
Exception in thread Bot:377489430:updater:
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Program Files\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 216, in _thread_wrapper
target(*args, **kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 452, in _start_webhook
self.httpd.serve_forever()
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\utils\webhookhandler.py", line 52, in serve_forever
self.http_server.listen(self.port, address=self.listen)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def _start_polling(
self,
poll_interval,
timeout,
read_latency,
bootstrap_retries,
clean,
allowed_updates,
ready=None,
): # pragma: no cover
# Thread target of thread 'updater'. Runs in background, pulls
# updates from Telegram and inserts them in the update queue of the
# Dispatcher.
self.logger.debug("Updater thread started (polling)")
self._bootstrap(
bootstrap_retries, clean=clean, webhook_url="", allowed_updates=None
)
self.logger.debug("Bootstrap done")
def polling_action_cb():
updates = self.bot.get_updates(
self.last_update_id,
timeout=timeout,
read_latency=read_latency,
allowed_updates=allowed_updates,
)
if updates:
if not self.running:
self.logger.debug("Updates ignored and will be pulled again on restart")
else:
for update in updates:
self.update_queue.put(update)
self.last_update_id = updates[-1].update_id + 1
return True
def polling_onerr_cb(exc):
# Put the error into the update queue and let the Dispatcher
# broadcast it
self.update_queue.put(exc)
if ready is not None:
ready.set()
self._network_loop_retry(
polling_action_cb, polling_onerr_cb, "getting Updates", poll_interval
)
|
def _start_polling(
self,
poll_interval,
timeout,
read_latency,
bootstrap_retries,
clean,
allowed_updates,
): # pragma: no cover
# Thread target of thread 'updater'. Runs in background, pulls
# updates from Telegram and inserts them in the update queue of the
# Dispatcher.
self.logger.debug("Updater thread started (polling)")
self._bootstrap(
bootstrap_retries, clean=clean, webhook_url="", allowed_updates=None
)
self.logger.debug("Bootstrap done")
def polling_action_cb():
updates = self.bot.get_updates(
self.last_update_id,
timeout=timeout,
read_latency=read_latency,
allowed_updates=allowed_updates,
)
if updates:
if not self.running:
self.logger.debug("Updates ignored and will be pulled again on restart")
else:
for update in updates:
self.update_queue.put(update)
self.last_update_id = updates[-1].update_id + 1
return True
def polling_onerr_cb(exc):
# Put the error into the update queue and let the Dispatcher
# broadcast it
self.update_queue.put(exc)
self._network_loop_retry(
polling_action_cb, polling_onerr_cb, "getting Updates", poll_interval
)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1977
|
Exception in thread Bot:377489430:updater:
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Program Files\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 216, in _thread_wrapper
target(*args, **kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 452, in _start_webhook
self.httpd.serve_forever()
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\utils\webhookhandler.py", line 52, in serve_forever
self.http_server.listen(self.port, address=self.listen)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def _start_webhook(
self,
listen,
port,
url_path,
cert,
key,
bootstrap_retries,
clean,
webhook_url,
allowed_updates,
ready=None,
force_event_loop=False,
):
self.logger.debug("Updater thread started (webhook)")
use_ssl = cert is not None and key is not None
if not url_path.startswith("/"):
url_path = "/{}".format(url_path)
# Create Tornado app instance
app = WebhookAppClass(
url_path, self.bot, self.update_queue, default_quote=self._default_quote
)
# Form SSL Context
# An SSLError is raised if the private key does not match with the certificate
if use_ssl:
try:
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(cert, key)
except ssl.SSLError:
raise TelegramError("Invalid SSL Certificate")
else:
ssl_ctx = None
# Create and start server
self.httpd = WebhookServer(listen, port, app, ssl_ctx)
if use_ssl:
# DO NOT CHANGE: Only set webhook if SSL is handled by library
if not webhook_url:
webhook_url = self._gen_webhook_url(listen, port, url_path)
self._bootstrap(
max_retries=bootstrap_retries,
clean=clean,
webhook_url=webhook_url,
cert=open(cert, "rb"),
allowed_updates=allowed_updates,
)
elif clean:
self.logger.warning(
"cleaning updates is not supported if "
"SSL-termination happens elsewhere; skipping"
)
self.httpd.serve_forever(force_event_loop=force_event_loop, ready=ready)
|
def _start_webhook(
self,
listen,
port,
url_path,
cert,
key,
bootstrap_retries,
clean,
webhook_url,
allowed_updates,
):
self.logger.debug("Updater thread started (webhook)")
use_ssl = cert is not None and key is not None
if not url_path.startswith("/"):
url_path = "/{}".format(url_path)
# Create Tornado app instance
app = WebhookAppClass(
url_path, self.bot, self.update_queue, default_quote=self._default_quote
)
# Form SSL Context
# An SSLError is raised if the private key does not match with the certificate
if use_ssl:
try:
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(cert, key)
except ssl.SSLError:
raise TelegramError("Invalid SSL Certificate")
else:
ssl_ctx = None
# Create and start server
self.httpd = WebhookServer(listen, port, app, ssl_ctx)
if use_ssl:
# DO NOT CHANGE: Only set webhook if SSL is handled by library
if not webhook_url:
webhook_url = self._gen_webhook_url(listen, port, url_path)
self._bootstrap(
max_retries=bootstrap_retries,
clean=clean,
webhook_url=webhook_url,
cert=open(cert, "rb"),
allowed_updates=allowed_updates,
)
elif clean:
self.logger.warning(
"cleaning updates is not supported if "
"SSL-termination happens elsewhere; skipping"
)
self.httpd.serve_forever()
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1977
|
Exception in thread Bot:377489430:updater:
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Program Files\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 216, in _thread_wrapper
target(*args, **kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 452, in _start_webhook
self.httpd.serve_forever()
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\utils\webhookhandler.py", line 52, in serve_forever
self.http_server.listen(self.port, address=self.listen)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def serve_forever(self, force_event_loop=False, ready=None):
with self.server_lock:
self.is_running = True
self.logger.debug("Webhook Server started.")
self._ensure_event_loop(force_event_loop=force_event_loop)
self.loop = IOLoop.current()
self.http_server.listen(self.port, address=self.listen)
if ready is not None:
ready.set()
self.loop.start()
self.logger.debug("Webhook Server stopped.")
self.is_running = False
|
def serve_forever(self):
with self.server_lock:
IOLoop().make_current()
self.is_running = True
self.logger.debug("Webhook Server started.")
self.http_server.listen(self.port, address=self.listen)
self.loop = IOLoop.current()
self.loop.start()
self.logger.debug("Webhook Server stopped.")
self.is_running = False
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1977
|
Exception in thread Bot:377489430:updater:
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Program Files\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 216, in _thread_wrapper
target(*args, **kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 452, in _start_webhook
self.httpd.serve_forever()
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\utils\webhookhandler.py", line 52, in serve_forever
self.http_server.listen(self.port, address=self.listen)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
self.logger = logging.getLogger(__name__)
|
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
self.logger = logging.getLogger(__name__)
self._init_asyncio_patch()
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1977
|
Exception in thread Bot:377489430:updater:
Traceback (most recent call last):
File "C:\Program Files\Python38\lib\threading.py", line 932, in _bootstrap_inner
self.run()
File "C:\Program Files\Python38\lib\threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 216, in _thread_wrapper
target(*args, **kwargs)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\ext\updater.py", line 452, in _start_webhook
self.httpd.serve_forever()
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\telegram\utils\webhookhandler.py", line 52, in serve_forever
self.http_server.listen(self.port, address=self.listen)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Users\User\.virtualenvs\sandbox38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
|
NotImplementedError
|
def __init__(
self,
entry_points,
states,
fallbacks,
allow_reentry=False,
per_chat=True,
per_user=True,
per_message=False,
conversation_timeout=None,
name=None,
persistent=False,
map_to_parent=None,
):
self.entry_points = entry_points
self.states = states
self.fallbacks = fallbacks
self.allow_reentry = allow_reentry
self.per_user = per_user
self.per_chat = per_chat
self.per_message = per_message
self.conversation_timeout = conversation_timeout
self.name = name
if persistent and not self.name:
raise ValueError("Conversations can't be persistent when handler is unnamed.")
self.persistent = persistent
self._persistence = None
""":obj:`telegram.ext.BasePersistance`: The persistence used to store conversations.
Set by dispatcher"""
self.map_to_parent = map_to_parent
self.timeout_jobs = dict()
self._timeout_jobs_lock = Lock()
self._conversations = dict()
self._conversations_lock = Lock()
self.logger = logging.getLogger(__name__)
if not any((self.per_user, self.per_chat, self.per_message)):
raise ValueError(
"'per_user', 'per_chat' and 'per_message' can't all be 'False'"
)
if self.per_message and not self.per_chat:
warnings.warn(
"If 'per_message=True' is used, 'per_chat=True' should also be used, "
"since message IDs are not globally unique."
)
all_handlers = list()
all_handlers.extend(entry_points)
all_handlers.extend(fallbacks)
for state_handlers in states.values():
all_handlers.extend(state_handlers)
if self.per_message:
for handler in all_handlers:
if not isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=True', all entry points and state handlers"
" must be 'CallbackQueryHandler', since no other handlers "
"have a message context."
)
break
else:
for handler in all_handlers:
if isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=False', 'CallbackQueryHandler' will not be "
"tracked for every message."
)
break
if self.per_chat:
for handler in all_handlers:
if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)):
warnings.warn(
"If 'per_chat=True', 'InlineQueryHandler' can not be used, "
"since inline queries have no chat context."
)
break
|
def __init__(
self,
entry_points,
states,
fallbacks,
allow_reentry=False,
per_chat=True,
per_user=True,
per_message=False,
conversation_timeout=None,
name=None,
persistent=False,
map_to_parent=None,
):
self.entry_points = entry_points
self.states = states
self.fallbacks = fallbacks
self.allow_reentry = allow_reentry
self.per_user = per_user
self.per_chat = per_chat
self.per_message = per_message
self.conversation_timeout = conversation_timeout
self.name = name
if persistent and not self.name:
raise ValueError("Conversations can't be persistent when handler is unnamed.")
self.persistent = persistent
self.persistence = None
""":obj:`telegram.ext.BasePersistance`: The persistence used to store conversations.
Set by dispatcher"""
self.map_to_parent = map_to_parent
self.timeout_jobs = dict()
self._timeout_jobs_lock = Lock()
self.conversations = dict()
self._conversations_lock = Lock()
self.logger = logging.getLogger(__name__)
if not any((self.per_user, self.per_chat, self.per_message)):
raise ValueError(
"'per_user', 'per_chat' and 'per_message' can't all be 'False'"
)
if self.per_message and not self.per_chat:
warnings.warn(
"If 'per_message=True' is used, 'per_chat=True' should also be used, "
"since message IDs are not globally unique."
)
all_handlers = list()
all_handlers.extend(entry_points)
all_handlers.extend(fallbacks)
for state_handlers in states.values():
all_handlers.extend(state_handlers)
if self.per_message:
for handler in all_handlers:
if not isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=True', all entry points and state handlers"
" must be 'CallbackQueryHandler', since no other handlers "
"have a message context."
)
break
else:
for handler in all_handlers:
if isinstance(handler, CallbackQueryHandler):
warnings.warn(
"If 'per_message=False', 'CallbackQueryHandler' will not be "
"tracked for every message."
)
break
if self.per_chat:
for handler in all_handlers:
if isinstance(handler, (InlineQueryHandler, ChosenInlineResultHandler)):
warnings.warn(
"If 'per_chat=True', 'InlineQueryHandler' can not be used, "
"since inline queries have no chat context."
)
break
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1710
|
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/dispatcher.py", line 372, in process_update
handler.handle_update(update, self, check, context)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/conversationhandler.py", line 343, in handle_update
new_state = handler.handle_update(update, dispatcher, check_result, context)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/conversationhandler.py", line 357, in handle_update
self.update_state(new_state, conversation_key)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/conversationhandler.py", line 376, in update_state
self.persistence.update_conversation(self.name, key, new_state)
AttributeError: 'NoneType' object has no attribute 'update_conversation'
|
AttributeError
|
def add_handler(self, handler, group=DEFAULT_GROUP):
"""Register a handler.
TL;DR: Order and priority counts. 0 or 1 handlers per group will be used. End handling of
update with :class:`telegram.ext.DispatcherHandlerStop`.
A handler must be an instance of a subclass of :class:`telegram.ext.Handler`. All handlers
are organized in groups with a numeric value. The default group is 0. All groups will be
evaluated for handling an update, but only 0 or 1 handler per group will be used. If
:class:`telegram.ext.DispatcherHandlerStop` is raised from one of the handlers, no further
handlers (regardless of the group) will be called.
The priority/order of handlers is determined as follows:
* Priority of the group (lower group number == higher priority)
* The first handler in a group which should handle an update (see
:attr:`telegram.ext.Handler.check_update`) will be used. Other handlers from the
group will not be used. The order in which handlers were added to the group defines the
priority.
Args:
handler (:class:`telegram.ext.Handler`): A Handler instance.
group (:obj:`int`, optional): The group identifier. Default is 0.
"""
# Unfortunately due to circular imports this has to be here
from .conversationhandler import ConversationHandler
if not isinstance(handler, Handler):
raise TypeError("handler is not an instance of {0}".format(Handler.__name__))
if not isinstance(group, int):
raise TypeError("group is not int")
if isinstance(handler, ConversationHandler) and handler.persistent:
if not self.persistence:
raise ValueError(
"Conversationhandler {} can not be persistent if dispatcher has no "
"persistence".format(handler.name)
)
handler.persistence = self.persistence
handler.conversations = self.persistence.get_conversations(handler.name)
if group not in self.handlers:
self.handlers[group] = list()
self.groups.append(group)
self.groups = sorted(self.groups)
self.handlers[group].append(handler)
|
def add_handler(self, handler, group=DEFAULT_GROUP):
"""Register a handler.
TL;DR: Order and priority counts. 0 or 1 handlers per group will be used. End handling of
update with :class:`telegram.ext.DispatcherHandlerStop`.
A handler must be an instance of a subclass of :class:`telegram.ext.Handler`. All handlers
are organized in groups with a numeric value. The default group is 0. All groups will be
evaluated for handling an update, but only 0 or 1 handler per group will be used. If
:class:`telegram.ext.DispatcherHandlerStop` is raised from one of the handlers, no further
handlers (regardless of the group) will be called.
The priority/order of handlers is determined as follows:
* Priority of the group (lower group number == higher priority)
* The first handler in a group which should handle an update (see
:attr:`telegram.ext.Handler.check_update`) will be used. Other handlers from the
group will not be used. The order in which handlers were added to the group defines the
priority.
Args:
handler (:class:`telegram.ext.Handler`): A Handler instance.
group (:obj:`int`, optional): The group identifier. Default is 0.
"""
# Unfortunately due to circular imports this has to be here
from .conversationhandler import ConversationHandler
if not isinstance(handler, Handler):
raise TypeError("handler is not an instance of {0}".format(Handler.__name__))
if not isinstance(group, int):
raise TypeError("group is not int")
if isinstance(handler, ConversationHandler) and handler.persistent:
if not self.persistence:
raise ValueError(
"Conversationhandler {} can not be persistent if dispatcher has no "
"persistence".format(handler.name)
)
handler.conversations = self.persistence.get_conversations(handler.name)
handler.persistence = self.persistence
if group not in self.handlers:
self.handlers[group] = list()
self.groups.append(group)
self.groups = sorted(self.groups)
self.handlers[group].append(handler)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1710
|
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/dispatcher.py", line 372, in process_update
handler.handle_update(update, self, check, context)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/conversationhandler.py", line 343, in handle_update
new_state = handler.handle_update(update, dispatcher, check_result, context)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/conversationhandler.py", line 357, in handle_update
self.update_state(new_state, conversation_key)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/conversationhandler.py", line 376, in update_state
self.persistence.update_conversation(self.name, key, new_state)
AttributeError: 'NoneType' object has no attribute 'update_conversation'
|
AttributeError
|
def _trigger_timeout(self, context, job=None):
self.logger.debug("conversation timeout was triggered!")
# Backward compatibility with bots that do not use CallbackContext
if isinstance(context, CallbackContext):
context = context.job.context
else:
context = job.context
del self.timeout_jobs[context.conversation_key]
handlers = self.states.get(self.TIMEOUT, [])
for handler in handlers:
check = handler.check_update(context.update)
if check is not None and check is not False:
handler.handle_update(context.update, context.dispatcher, check)
self.update_state(self.END, context.conversation_key)
|
def _trigger_timeout(self, bot, job):
self.logger.debug("conversation timeout was triggered!")
del self.timeout_jobs[job.context.conversation_key]
handlers = self.states.get(self.TIMEOUT, [])
for handler in handlers:
check = handler.check_update(job.context.update)
if check is not None and check is not False:
handler.handle_update(job.context.update, job.context.dispatcher, check)
self.update_state(self.END, job.context.conversation_key)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1366
|
2019-03-18 10:33:52,327 - JobQueue - DEBUG - Ticking jobs with t=1552876432.327480
2019-03-18 10:33:52,329 - JobQueue - DEBUG - Peeked at _trigger_timeout with t=1552876432.325519
2019-03-18 10:33:52,331 - JobQueue - DEBUG - Running job _trigger_timeout
2019-03-18 10:33:52,332 - JobQueue - ERROR - An uncaught error was raised while executing job _trigger_timeout
Traceback (most recent call last):
File "/venv/lib/python3.7/site-packages/telegram/ext/jobqueue.py", line 260, in tick
job.run(self._dispatcher)
File "/venv/lib/python3.7/site-packages/telegram/ext/jobqueue.py", line 388, in run
self.callback(CallbackContext.from_job(self, dispatcher))
TypeError: _trigger_timeout() missing 1 required positional argument: 'job'
2019-03-18 10:33:52,338 - JobQueue - DEBUG - Dropping non-repeating or removed job _trigger_timeout
|
TypeError
|
def post(self, url, data, timeout=None):
"""Request an URL.
Args:
url (:obj:`str`): The web location we want to retrieve.
data (dict[str, str|int]): A dict of key/value pairs. Note: On py2.7 value is unicode.
timeout (:obj:`int` | :obj:`float`): If this value is specified, use it as the read
timeout from the server (instead of the one specified during creation of the
connection pool).
Returns:
A JSON object.
"""
urlopen_kwargs = {}
if timeout is not None:
urlopen_kwargs["timeout"] = Timeout(read=timeout, connect=self._connect_timeout)
# Are we uploading files?
files = False
for key, val in data.copy().items():
if isinstance(val, InputFile):
# Convert the InputFile to urllib3 field format
data[key] = val.field_tuple
files = True
elif isinstance(val, (float, int)):
# Urllib3 doesn't like floats it seems
data[key] = str(val)
elif key == "media":
# One media or multiple
if isinstance(val, InputMedia):
# Attach and set val to attached name
data[key] = val.to_json()
if isinstance(val.media, InputFile):
data[val.media.attach] = val.media.field_tuple
else:
# Attach and set val to attached name for all
media = []
for m in val:
media.append(m.to_dict())
if isinstance(m.media, InputFile):
data[m.media.attach] = m.media.field_tuple
data[key] = json.dumps(media)
files = True
# Use multipart upload if we're uploading files, otherwise use JSON
if files:
result = self._request_wrapper("POST", url, fields=data, **urlopen_kwargs)
else:
result = self._request_wrapper(
"POST",
url,
body=json.dumps(data).encode("utf-8"),
headers={"Content-Type": "application/json"},
**urlopen_kwargs,
)
return self._parse(result)
|
def post(self, url, data, timeout=None):
"""Request an URL.
Args:
url (:obj:`str`): The web location we want to retrieve.
data (dict[str, str|int]): A dict of key/value pairs. Note: On py2.7 value is unicode.
timeout (:obj:`int` | :obj:`float`): If this value is specified, use it as the read
timeout from the server (instead of the one specified during creation of the
connection pool).
Returns:
A JSON object.
"""
urlopen_kwargs = {}
if timeout is not None:
urlopen_kwargs["timeout"] = Timeout(read=timeout, connect=self._connect_timeout)
# Are we uploading files?
files = False
for key, val in data.copy().items():
if isinstance(val, InputFile):
# Convert the InputFile to urllib3 field format
data[key] = val.field_tuple
files = True
elif isinstance(val, (float, int)):
# Urllib3 doesn't like floats it seems
data[key] = str(val)
elif key == "media":
# One media or multiple
if isinstance(val, InputMedia):
# Attach and set val to attached name
data[key] = val.to_json()
if isinstance(val.media, InputFile):
data[val.media.attach] = val.media.field_tuple
else:
# Attach and set val to attached name for all
media = []
for m in val:
media.append(m.to_dict())
if isinstance(m.media, InputFile):
data[m.media.attach] = m.media.field_tuple
data[key] = json.dumps(media)
files = True
# Use multipart upload if we're uploading files, otherwise use JSON
if files:
result = self._request_wrapper("POST", url, fields=data, **urlopen_kwargs)
else:
result = self._request_wrapper(
"POST",
url,
body=json.dumps(data).encode("utf-8"),
headers={"Content-Type": "application/json"},
)
return self._parse(result)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/1328
|
Traceback (most recent call last):
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 402, in _make_request
six.raise_from(e, None)
File "<string>", line 2, in raise_from
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 398, in _make_request
httplib_response = conn.getresponse()
File "/usr/lib/python3.5/http/client.py", line 1198, in getresponse
response.begin()
File "/usr/lib/python3.5/http/client.py", line 297, in begin
version, status, reason = self._read_status()
File "/usr/lib/python3.5/http/client.py", line 258, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "/usr/lib/python3.5/socket.py", line 576, in readinto
return self._sock.recv_into(b)
File "/usr/lib/python3.5/ssl.py", line 937, in recv_into
return self.read(nbytes, buffer)
File "/usr/lib/python3.5/ssl.py", line 799, in read
return self._sslobj.read(len, buffer)
File "/usr/lib/python3.5/ssl.py", line 583, in read
v = self._sslobj.read(len, buffer)
socket.timeout: The read operation timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/z10/lib/python3.5/site-packages/telegram/utils/request.py", line 203, in _request_wrapper
resp = self._con_pool.request(*args, **kwargs)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/request.py", line 70, in request
**urlopen_kw)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/request.py", line 148, in request_encode_body
return self.urlopen(method, url, **extra_kw)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/poolmanager.py", line 244, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 666, in urlopen
_stacktrace=sys.exc_info()[2])
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/util/retry.py", line 347, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/packages/six.py", line 686, in reraise
raise value
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 617, in urlopen
chunked=chunked)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 405, in _make_request
exc_cls=ReadTimeoutError)
File "/tmp/z10/lib/python3.5/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 321, in _raise_timeout
raise exc_cls(*args)
telegram.vendor.ptb_urllib3.urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='api.telegram.org', port=443): Read timed out. (read timeout=5.0)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 4, in <module>
bot.get_chat_administrators(-1001071177546, timeout=60)
File "/tmp/z10/lib/python3.5/site-packages/telegram/bot.py", line 65, in decorator
result = func(self, *args, **kwargs)
File "/tmp/z10/lib/python3.5/site-packages/telegram/bot.py", line 2187, in get_chat_administrators
result = self._request.post(url, data, timeout=timeout)
File "/tmp/z10/lib/python3.5/site-packages/telegram/utils/request.py", line 309, in post
headers={'Content-Type': 'application/json'})
File "/tmp/z10/lib/python3.5/site-packages/telegram/utils/request.py", line 205, in _request_wrapper
raise TimedOut()
telegram.error.TimedOut: Timed out
|
telegram.vendor.ptb_urllib3.urllib3.exceptions.ReadTimeoutError
|
def shutdown(self):
with self.shutdown_lock:
if not self.is_running:
self.logger.warning("Webhook Server already stopped.")
return
else:
super(WebhookServer, self).shutdown()
self.is_running = False
|
def shutdown(self):
with self.shutdown_lock:
if not self.is_running:
self.logger.warn("Webhook Server already stopped.")
return
else:
super(WebhookServer, self).shutdown()
self.is_running = False
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/970
|
2018-01-10 03:54:56,480 - telegram.ext.updater - ERROR - unhandled exception
Traceback (most recent call last):
File "/usr/lib/python3.5/socketserver.py", line 313, in _handle_request_noblock
self.process_request(request, client_address)
File "/usr/lib/python3.5/socketserver.py", line 341, in process_request
self.finish_request(request, client_address)
File "/usr/lib/python3.5/socketserver.py", line 354, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/local/lib/python3.5/dist-packages/telegram/utils/webhookhandler.py", line 62, in __init__
super(WebhookHandler, self).__init__(request, client_address, server)
File "/usr/lib/python3.5/socketserver.py", line 681, in __init__
self.handle()
File "/usr/lib/python3.5/http/server.py", line 422, in handle
self.handle_one_request()
File "/usr/lib/python3.5/http/server.py", line 390, in handle_one_request
self.raw_requestline = self.rfile.readline(65537)
File "/usr/lib/python3.5/socket.py", line 575, in readinto
return self._sock.recv_into(b)
ConnectionResetError: [Errno 104] Connection reset by peer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/updater.py", line 149, in _thread_wrapper
target(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/updater.py", line 353, in _start_webhook
self.httpd.serve_forever(poll_interval=1)
File "/usr/local/lib/python3.5/dist-packages/telegram/utils/webhookhandler.py", line 41, in serve_forever
super(WebhookServer, self).serve_forever(poll_interval)
File "/usr/lib/python3.5/socketserver.py", line 234, in serve_forever
self._handle_request_noblock()
File "/usr/lib/python3.5/socketserver.py", line 315, in _handle_request_noblock
self.handle_error(request, client_address)
File "/usr/lib/python3.5/socketserver.py", line 371, in handle_error
print('Exception happened during processing of request from', end=' ')
BrokenPipeError: [Errno 32] Broken pipe
Exception in thread updater:
Traceback (most recent call last):
File "/usr/lib/python3.5/socketserver.py", line 313, in _handle_request_noblock
self.process_request(request, client_address)
File "/usr/lib/python3.5/socketserver.py", line 341, in process_request
self.finish_request(request, client_address)
File "/usr/lib/python3.5/socketserver.py", line 354, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/local/lib/python3.5/dist-packages/telegram/utils/webhookhandler.py", line 62, in __init__
super(WebhookHandler, self).__init__(request, client_address, server)
File "/usr/lib/python3.5/socketserver.py", line 681, in __init__
self.handle()
File "/usr/lib/python3.5/http/server.py", line 422, in handle
self.handle_one_request()
File "/usr/lib/python3.5/http/server.py", line 390, in handle_one_request
self.raw_requestline = self.rfile.readline(65537)
File "/usr/lib/python3.5/socket.py", line 575, in readinto
return self._sock.recv_into(b)
ConnectionResetError: [Errno 104] Connection reset by peer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.5/threading.py", line 914, in _bootstrap_inner
self.run()
File "/usr/lib/python3.5/threading.py", line 862, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/updater.py", line 149, in _thread_wrapper
target(*args, **kwargs)
File "/usr/local/lib/python3.5/dist-packages/telegram/ext/updater.py", line 353, in _start_webhook
self.httpd.serve_forever(poll_interval=1)
File "/usr/local/lib/python3.5/dist-packages/telegram/utils/webhookhandler.py", line 41, in serve_forever
super(WebhookServer, self).serve_forever(poll_interval)
File "/usr/lib/python3.5/socketserver.py", line 234, in serve_forever
self._handle_request_noblock()
File "/usr/lib/python3.5/socketserver.py", line 315, in _handle_request_noblock
self.handle_error(request, client_address)
File "/usr/lib/python3.5/socketserver.py", line 371, in handle_error
print('Exception happened during processing of request from', end=' ')
BrokenPipeError: [Errno 32] Broken pipe
2018-01-10 03:54:56,601 - telegram.ext.dispatcher - CRITICAL - stopping due to exception in another thread
2018-01-10 03:54:56,601 - telegram.ext.dispatcher - DEBUG - Dispatcher thread stopped
2018-01-10 03:54:56,601 - telegram.ext.updater - DEBUG - dispatcher - ended
|
ConnectionResetError
|
def __init__(self, file_id, bot, file_size=None, file_path=None, **kwargs):
# Required
self.file_id = str(file_id)
# Optionals
self.file_size = file_size
self.file_path = file_path
self.bot = bot
self._id_attrs = (self.file_id,)
|
def __init__(self, file_id, bot, file_size=None, file_path=None, **kwargs):
# Required
self.file_id = str(file_id)
# Optionals
self.file_size = file_size
if file_path:
self.file_path = str(file_path)
self.bot = bot
self._id_attrs = (self.file_id,)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/650
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/telegram/file.py", line 106, in download
self.bot.request.download(url, filename, timeout=timeout)
File "/usr/local/lib/python3.6/site-packages/telegram/utils/request.py", line 284, in download
buf = self.retrieve(url, timeout=timeout)
File "/usr/local/lib/python3.6/site-packages/telegram/utils/request.py", line 270, in retrieve
return self._request_wrapper('GET', url, **urlopen_kwargs)
File "/usr/local/lib/python3.6/site-packages/telegram/utils/request.py", line 174, in _request_wrapper
resp = self._con_pool.request(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/request.py", line 66, in request
**urlopen_kw)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/request.py", line 87, in request_encode_url
return self.urlopen(method, url, **extra_kw)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/poolmanager.py", line 244, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 617, in urlopen
chunked=chunked)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 390, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/http/client.py", line 1250, in _send_request
self.putrequest(method, url, **skips)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/http/client.py", line 1117, in putrequest
self._output(request.encode('ascii'))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 69-75: ordinal not in range(128)
'ascii' codec can't encode characters in position 69-75: ordinal not in range(128)
|
UnicodeEncodeError
|
def download(self, custom_path=None, out=None, timeout=None):
"""
Download this file. By default, the file is saved in the current working directory with its
original filename as reported by Telegram. If a ``custom_path`` is supplied, it will be
saved to that path instead. If ``out`` is defined, the file contents will be saved to that
object using the ``out.write`` method. ``custom_path`` and ``out`` are mutually exclusive.
Args:
custom_path (Optional[str]): Custom path.
out (Optional[object]): A file-like object. Must be opened in binary mode, if
applicable.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
Raises:
ValueError: If both ``custom_path`` and ``out`` are passed.
"""
if custom_path is not None and out is not None:
raise ValueError("custom_path and out are mutually exclusive")
# Convert any UTF-8 char into a url encoded ASCII string.
sres = urllib_parse.urlsplit(self.file_path)
url = urllib_parse.urlunsplit(
urllib_parse.SplitResult(
sres.scheme,
sres.netloc,
urllib_parse.quote(sres.path),
sres.query,
sres.fragment,
)
)
if out:
buf = self.bot.request.retrieve(url)
out.write(buf)
else:
if custom_path:
filename = custom_path
else:
filename = basename(self.file_path)
self.bot.request.download(url, filename, timeout=timeout)
|
def download(self, custom_path=None, out=None, timeout=None):
"""
Download this file. By default, the file is saved in the current working directory with its
original filename as reported by Telegram. If a ``custom_path`` is supplied, it will be
saved to that path instead. If ``out`` is defined, the file contents will be saved to that
object using the ``out.write`` method. ``custom_path`` and ``out`` are mutually exclusive.
Args:
custom_path (Optional[str]): Custom path.
out (Optional[object]): A file-like object. Must be opened in binary mode, if
applicable.
timeout (Optional[int|float]): If this value is specified, use it as the read timeout
from the server (instead of the one specified during creation of the connection
pool).
Raises:
ValueError: If both ``custom_path`` and ``out`` are passed.
"""
if custom_path is not None and out is not None:
raise ValueError("custom_path and out are mutually exclusive")
url = self.file_path
if out:
buf = self.bot.request.retrieve(url)
out.write(buf)
else:
if custom_path:
filename = custom_path
else:
filename = basename(url)
self.bot.request.download(url, filename, timeout=timeout)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/650
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/site-packages/telegram/file.py", line 106, in download
self.bot.request.download(url, filename, timeout=timeout)
File "/usr/local/lib/python3.6/site-packages/telegram/utils/request.py", line 284, in download
buf = self.retrieve(url, timeout=timeout)
File "/usr/local/lib/python3.6/site-packages/telegram/utils/request.py", line 270, in retrieve
return self._request_wrapper('GET', url, **urlopen_kwargs)
File "/usr/local/lib/python3.6/site-packages/telegram/utils/request.py", line 174, in _request_wrapper
resp = self._con_pool.request(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/request.py", line 66, in request
**urlopen_kw)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/request.py", line 87, in request_encode_url
return self.urlopen(method, url, **extra_kw)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/poolmanager.py", line 244, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 617, in urlopen
chunked=chunked)
File "/usr/local/lib/python3.6/site-packages/telegram/vendor/ptb_urllib3/urllib3/connectionpool.py", line 390, in _make_request
conn.request(method, url, **httplib_request_kw)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/http/client.py", line 1239, in request
self._send_request(method, url, body, headers, encode_chunked)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/http/client.py", line 1250, in _send_request
self.putrequest(method, url, **skips)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/http/client.py", line 1117, in putrequest
self._output(request.encode('ascii'))
UnicodeEncodeError: 'ascii' codec can't encode characters in position 69-75: ordinal not in range(128)
'ascii' codec can't encode characters in position 69-75: ordinal not in range(128)
|
UnicodeEncodeError
|
def getUpdates(self, offset=None, limit=100, timeout=0, network_delay=5.0, **kwargs):
"""Use this method to receive incoming updates using long polling.
Args:
offset (Optional[int]):
Identifier of the first update to be returned. Must be greater by one than the highest
among the identifiers of previously received updates. By default, updates starting with
the earliest unconfirmed update are returned. An update is considered confirmed as soon
as getUpdates is called with an offset higher than its update_id.
limit (Optional[int]):
Limits the number of updates to be retrieved. Values between 1-100 are accepted.
Defaults to 100.
timeout (Optional[int]):
Timeout in seconds for long polling. Defaults to 0, i.e. usual short polling.
network_delay (Optional[float]):
Additional timeout in seconds to allow the response from Telegram servers. This should
cover network latency around the globe, SSL handshake and slowness of the Telegram
servers (which unfortunately happens a lot recently - 2016-05-28). Defaults to 5.
Returns:
list[:class:`telegram.Update`]
Raises:
:class:`telegram.TelegramError`
"""
url = "{0}/getUpdates".format(self.base_url)
data = {"timeout": timeout}
if offset:
data["offset"] = offset
if limit:
data["limit"] = limit
urlopen_timeout = timeout + network_delay
result = request.post(url, data, timeout=urlopen_timeout)
if result:
self.logger.debug("Getting updates: %s", [u["update_id"] for u in result])
else:
self.logger.debug("No new updates found.")
return [Update.de_json(x) for x in result]
|
def getUpdates(self, offset=None, limit=100, timeout=0, network_delay=0.2, **kwargs):
"""Use this method to receive incoming updates using long polling.
Args:
offset:
Identifier of the first update to be returned. Must be greater by
one than the highest among the identifiers of previously received
updates. By default, updates starting with the earliest unconfirmed
update are returned. An update is considered confirmed as soon as
getUpdates is called with an offset higher than its update_id.
limit:
Limits the number of updates to be retrieved. Values between 1-100
are accepted. Defaults to 100.
timeout:
Timeout in seconds for long polling. Defaults to 0, i.e. usual
short polling.
network_delay:
Additional timeout in seconds to allow the response from Telegram
to take some time when using long polling. Defaults to 2, which
should be enough for most connections. Increase it if it takes very
long for data to be transmitted from and to the Telegram servers.
Returns:
list[:class:`telegram.Update`]: A list of :class:`telegram.Update`
objects are returned.
Raises:
:class:`telegram.TelegramError`
"""
url = "{0}/getUpdates".format(self.base_url)
data = {"timeout": timeout}
if offset:
data["offset"] = offset
if limit:
data["limit"] = limit
urlopen_timeout = timeout + network_delay
result = request.post(url, data, timeout=urlopen_timeout)
if result:
self.logger.debug("Getting updates: %s", [u["update_id"] for u in result])
else:
self.logger.debug("No new updates found.")
return [Update.de_json(x) for x in result]
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/309
|
Python 2.7.9 (default, Mar 8 2015, 00:52:26)
[GCC 4.9.2] on linux2
Type "copyright", "credits" or "license()" for more information.
import telegram
bot = telegram.Bot(token='123123:My_token')
print(bot.getMe())
{'username': u'My_bot', 'first_name': u'Flexget', 'last_name': '', 'type': '', 'id': 123123}
updates = bot.getUpdates()
Traceback (most recent call last):
File "<pyshell#3>", line 1, in <module>
updates = bot.getUpdates()
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 109, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 1215, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python2.7/dist-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
NetworkError: URLError: [Errno 101] Network is unreachable
chat_id = bot.getUpdates()[-1].message.chat_id
Traceback (most recent call last):
File "<pyshell#4>", line 1, in <module>
chat_id = bot.getUpdates()[-1].message.chat_id
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 109, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 1215, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python2.7/dist-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
NetworkError: URLError: [Errno 101] Network is unreachable
|
NetworkError
|
def start_polling(
self,
poll_interval=0.0,
timeout=10,
network_delay=5.0,
clean=False,
bootstrap_retries=0,
):
"""
Starts polling updates from Telegram.
Args:
poll_interval (Optional[float]): Time to wait between polling updates from Telegram in
seconds. Default is 0.0
timeout (Optional[float]): Passed to Bot.getUpdates
network_delay (Optional[float]): Passed to Bot.getUpdates
clean (Optional[bool]): Whether to clean any pending updates on Telegram servers before
actually starting to poll. Default is False.
bootstrap_retries (Optional[int]): Whether the bootstrapping phase of the `Updater`
will retry on failures on the Telegram server.
| < 0 - retry indefinitely
| 0 - no retries (default)
| > 0 - retry up to X times
Returns:
Queue: The update queue that can be filled from the main thread
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
self._init_thread(self.dispatcher.start, "dispatcher")
self._init_thread(
self._start_polling,
"updater",
poll_interval,
timeout,
network_delay,
bootstrap_retries,
clean,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
def start_polling(
self,
poll_interval=0.0,
timeout=10,
network_delay=2,
clean=False,
bootstrap_retries=0,
):
"""
Starts polling updates from Telegram.
Args:
poll_interval (Optional[float]): Time to wait between polling
updates from Telegram in seconds. Default is 0.0
timeout (Optional[float]): Passed to Bot.getUpdates
network_delay (Optional[float]): Passed to Bot.getUpdates
clean (Optional[bool]): Whether to clean any pending updates on
Telegram servers before actually starting to poll. Default is
False.
bootstrap_retries (Optional[int[): Whether the bootstrapping phase
of the `Updater` will retry on failures on the Telegram server.
| < 0 - retry indefinitely
| 0 - no retries (default)
| > 0 - retry up to X times
Returns:
Queue: The update queue that can be filled from the main thread
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
self._init_thread(self.dispatcher.start, "dispatcher")
self._init_thread(
self._start_polling,
"updater",
poll_interval,
timeout,
network_delay,
bootstrap_retries,
clean,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/309
|
Python 2.7.9 (default, Mar 8 2015, 00:52:26)
[GCC 4.9.2] on linux2
Type "copyright", "credits" or "license()" for more information.
import telegram
bot = telegram.Bot(token='123123:My_token')
print(bot.getMe())
{'username': u'My_bot', 'first_name': u'Flexget', 'last_name': '', 'type': '', 'id': 123123}
updates = bot.getUpdates()
Traceback (most recent call last):
File "<pyshell#3>", line 1, in <module>
updates = bot.getUpdates()
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 109, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 1215, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python2.7/dist-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
NetworkError: URLError: [Errno 101] Network is unreachable
chat_id = bot.getUpdates()[-1].message.chat_id
Traceback (most recent call last):
File "<pyshell#4>", line 1, in <module>
chat_id = bot.getUpdates()[-1].message.chat_id
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 109, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/telegram/bot.py", line 1215, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python2.7/dist-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
NetworkError: URLError: [Errno 101] Network is unreachable
|
NetworkError
|
def start_polling(
self,
poll_interval=0.0,
timeout=10,
network_delay=2,
clean=False,
bootstrap_retries=0,
):
"""
Starts polling updates from Telegram.
Args:
poll_interval (Optional[float]): Time to wait between polling
updates from Telegram in seconds. Default is 0.0
timeout (Optional[float]): Passed to Bot.getUpdates
network_delay (Optional[float]): Passed to Bot.getUpdates
clean (Optional[bool]): Whether to clean any pending updates on
Telegram servers before actually starting to poll. Default is
False.
bootstrap_retries (Optional[int[): Whether the bootstrapping phase
of the `Updater` will retry on failures on the Telegram server.
| < 0 - retry indefinitely
| 0 - no retries (default)
| > 0 - retry up to X times
Returns:
Queue: The update queue that can be filled from the main thread
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
self._init_thread(self.dispatcher.start, "dispatcher")
self._init_thread(
self._start_polling,
"updater",
poll_interval,
timeout,
network_delay,
bootstrap_retries,
clean,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
def start_polling(
self,
poll_interval=0.0,
timeout=10,
network_delay=2,
clean=False,
bootstrap_retries=0,
):
"""
Starts polling updates from Telegram.
Args:
poll_interval (Optional[float]): Time to wait between polling
updates from Telegram in seconds. Default is 0.0
timeout (Optional[float]): Passed to Bot.getUpdates
network_delay (Optional[float]): Passed to Bot.getUpdates
clean (Optional[bool]): Whether to clean any pending updates on
Telegram servers before actually starting to poll. Default is
False.
bootstrap_retries (Optional[int[): Whether the bootstrapping phase
of the `Updater` will retry on failures on the Telegram server.
| < 0 - retry indefinitely
| 0 - no retries (default)
| > 0 - retry up to X times
Returns:
Queue: The update queue that can be filled from the main thread
"""
with self.__lock:
if not self.running:
self.running = True
if clean:
self._clean_updates()
# Create & start threads
self._init_thread(self.dispatcher.start, "dispatcher")
self._init_thread(
self._start_polling,
"updater",
poll_interval,
timeout,
network_delay,
bootstrap_retries,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/275
|
Traceback (most recent call last):
File "main.py", line 43, in <module>
main()
File "main.py", line 34, in main
updates = bot.getUpdates()
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 114, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 1315, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python3.5/site-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
telegram.error.NetworkError: URLError: _ssl.c:629: The handshake operation timed out
|
telegram.error.NetworkError
|
def start_webhook(
self,
listen="127.0.0.1",
port=80,
url_path="",
cert=None,
key=None,
clean=False,
bootstrap_retries=0,
webhook_url=None,
):
"""
Starts a small http server to listen for updates via webhook. If cert
and key are not provided, the webhook will be started directly on
http://listen:port/url_path, so SSL can be handled by another
application. Else, the webhook will be started on
https://listen:port/url_path
Args:
listen (Optional[str]): IP-Address to listen on
port (Optional[int]): Port the bot should be listening on
url_path (Optional[str]): Path inside url
cert (Optional[str]): Path to the SSL certificate file
key (Optional[str]): Path to the SSL key file
clean (Optional[bool]): Whether to clean any pending updates on
Telegram servers before actually starting the webhook. Default
is False.
bootstrap_retries (Optional[int[): Whether the bootstrapping phase
of the `Updater` will retry on failures on the Telegram server.
| < 0 - retry indefinitely
| 0 - no retries (default)
| > 0 - retry up to X times
webhook_url (Optional[str]): Explicitly specifiy the webhook url.
Useful behind NAT, reverse proxy, etc. Default is derived from
`listen`, `port` & `url_path`.
Returns:
Queue: The update queue that can be filled from the main thread
"""
with self.__lock:
if not self.running:
self.running = True
# Create & start threads
(self._init_thread(self.dispatcher.start, "dispatcher"),)
self._init_thread(
self._start_webhook,
"updater",
listen,
port,
url_path,
cert,
key,
bootstrap_retries,
clean,
webhook_url,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
def start_webhook(
self,
listen="127.0.0.1",
port=80,
url_path="",
cert=None,
key=None,
clean=False,
bootstrap_retries=0,
webhook_url=None,
):
"""
Starts a small http server to listen for updates via webhook. If cert
and key are not provided, the webhook will be started directly on
http://listen:port/url_path, so SSL can be handled by another
application. Else, the webhook will be started on
https://listen:port/url_path
Args:
listen (Optional[str]): IP-Address to listen on
port (Optional[int]): Port the bot should be listening on
url_path (Optional[str]): Path inside url
cert (Optional[str]): Path to the SSL certificate file
key (Optional[str]): Path to the SSL key file
clean (Optional[bool]): Whether to clean any pending updates on
Telegram servers before actually starting the webhook. Default
is False.
bootstrap_retries (Optional[int[): Whether the bootstrapping phase
of the `Updater` will retry on failures on the Telegram server.
| < 0 - retry indefinitely
| 0 - no retries (default)
| > 0 - retry up to X times
webhook_url (Optional[str]): Explicitly specifiy the webhook url.
Useful behind NAT, reverse proxy, etc. Default is derived from
`listen`, `port` & `url_path`.
Returns:
Queue: The update queue that can be filled from the main thread
"""
with self.__lock:
if not self.running:
self.running = True
if clean:
self._clean_updates()
# Create & start threads
(self._init_thread(self.dispatcher.start, "dispatcher"),)
self._init_thread(
self._start_webhook,
"updater",
listen,
port,
url_path,
cert,
key,
bootstrap_retries,
webhook_url,
)
# Return the update queue so the main thread can insert updates
return self.update_queue
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/275
|
Traceback (most recent call last):
File "main.py", line 43, in <module>
main()
File "main.py", line 34, in main
updates = bot.getUpdates()
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 114, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 1315, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python3.5/site-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
telegram.error.NetworkError: URLError: _ssl.c:629: The handshake operation timed out
|
telegram.error.NetworkError
|
def _start_polling(
self, poll_interval, timeout, network_delay, bootstrap_retries, clean
):
"""
Thread target of thread 'updater'. Runs in background, pulls
updates from Telegram and inserts them in the update queue of the
Dispatcher.
"""
cur_interval = poll_interval
self.logger.debug("Updater thread started")
self._bootstrap(bootstrap_retries, clean=clean, webhook_url="")
while self.running:
try:
updates = self.bot.getUpdates(
self.last_update_id, timeout=timeout, network_delay=network_delay
)
except TelegramError as te:
self.logger.error("Error while getting Updates: {0}".format(te))
# Put the error into the update queue and let the Dispatcher
# broadcast it
self.update_queue.put(te)
cur_interval = self._increase_poll_interval(cur_interval)
else:
if not self.running:
if len(updates) > 0:
self.logger.debug(
"Updates ignored and will be pulled again on restart."
)
break
if updates:
for update in updates:
self.update_queue.put(update)
self.last_update_id = updates[-1].update_id + 1
cur_interval = poll_interval
sleep(cur_interval)
|
def _start_polling(self, poll_interval, timeout, network_delay, bootstrap_retries):
"""
Thread target of thread 'updater'. Runs in background, pulls
updates from Telegram and inserts them in the update queue of the
Dispatcher.
"""
cur_interval = poll_interval
self.logger.debug("Updater thread started")
self._set_webhook(None, bootstrap_retries, None)
while self.running:
try:
updates = self.bot.getUpdates(
self.last_update_id, timeout=timeout, network_delay=network_delay
)
except TelegramError as te:
self.logger.error("Error while getting Updates: {0}".format(te))
# Put the error into the update queue and let the Dispatcher
# broadcast it
self.update_queue.put(te)
cur_interval = self._increase_poll_interval(cur_interval)
else:
if not self.running:
if len(updates) > 0:
self.logger.debug(
"Updates ignored and will be pulled again on restart."
)
break
if updates:
for update in updates:
self.update_queue.put(update)
self.last_update_id = updates[-1].update_id + 1
cur_interval = poll_interval
sleep(cur_interval)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/275
|
Traceback (most recent call last):
File "main.py", line 43, in <module>
main()
File "main.py", line 34, in main
updates = bot.getUpdates()
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 114, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 1315, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python3.5/site-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
telegram.error.NetworkError: URLError: _ssl.c:629: The handshake operation timed out
|
telegram.error.NetworkError
|
def _start_webhook(
self, listen, port, url_path, cert, key, bootstrap_retries, clean, webhook_url
):
self.logger.debug("Updater thread started")
use_ssl = cert is not None and key is not None
if not url_path.startswith("/"):
url_path = "/{0}".format(url_path)
# Create and start server
self.httpd = WebhookServer(
(listen, port), WebhookHandler, self.update_queue, url_path
)
if use_ssl:
self._check_ssl_cert(cert, key)
# DO NOT CHANGE: Only set webhook if SSL is handled by library
if not webhook_url:
webhook_url = self._gen_webhook_url(listen, port, url_path)
self._bootstrap(
max_retries=bootstrap_retries,
clean=clean,
webhook_url=webhook_url,
cert=open(cert, "rb"),
)
elif clean:
self.logger.warning(
"cleaning updates is not supported if "
"SSL-termination happens elsewhere; skipping"
)
self.httpd.serve_forever(poll_interval=1)
|
def _start_webhook(
self, listen, port, url_path, cert, key, bootstrap_retries, webhook_url
):
self.logger.debug("Updater thread started")
use_ssl = cert is not None and key is not None
if not url_path.startswith("/"):
url_path = "/{0}".format(url_path)
# Create and start server
self.httpd = WebhookServer(
(listen, port), WebhookHandler, self.update_queue, url_path
)
if use_ssl:
self._check_ssl_cert(cert, key)
# DO NOT CHANGE: Only set webhook if SSL is handled by library
if not webhook_url:
webhook_url = self._gen_webhook_url(listen, port, url_path)
self._set_webhook(webhook_url, bootstrap_retries, open(cert, "rb"))
self.httpd.serve_forever(poll_interval=1)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/275
|
Traceback (most recent call last):
File "main.py", line 43, in <module>
main()
File "main.py", line 34, in main
updates = bot.getUpdates()
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 114, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 1315, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python3.5/site-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
telegram.error.NetworkError: URLError: _ssl.c:629: The handshake operation timed out
|
telegram.error.NetworkError
|
def _gen_webhook_url(listen, port, url_path):
return "https://{listen}:{port}{path}".format(
listen=listen, port=port, path=url_path
)
|
def _gen_webhook_url(self, listen, port, url_path):
return "https://{listen}:{port}{path}".format(
listen=listen, port=port, path=url_path
)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/275
|
Traceback (most recent call last):
File "main.py", line 43, in <module>
main()
File "main.py", line 34, in main
updates = bot.getUpdates()
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 114, in decorator
result = func(self, *args, **kwargs)
File "/usr/local/lib/python3.5/site-packages/telegram/bot.py", line 1315, in getUpdates
result = request.post(url, data, timeout=urlopen_timeout)
File "/usr/local/lib/python3.5/site-packages/telegram/utils/request.py", line 83, in decorator
raise NetworkError('URLError: {0}'.format(error.reason))
telegram.error.NetworkError: URLError: _ssl.c:629: The handshake operation timed out
|
telegram.error.NetworkError
|
def main():
global job_queue
updater = Updater("TOKEN")
job_queue = updater.job_queue
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.addHandler(CommandHandler("start", start))
dp.addHandler(CommandHandler("help", start))
dp.addHandler(CommandHandler("set", set, pass_args=True))
# log all errors
dp.addErrorHandler(error)
# Start the Bot
updater.start_polling()
# Block until the you presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
|
def main():
global job_queue
updater = Updater("TOKEN")
job_queue = updater.job_queue
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.addHandler(CommandHandler("start", start))
dp.addHandler(CommandHandler("help", start))
dp.addHandler(CommandHandler("set", set))
# log all errors
dp.addErrorHandler(error)
# Start the Bot
updater.start_polling()
# Block until the you presses Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/270
|
2016-04-29 11:38:22,002 - telegram.ext.dispatcher - ERROR - An uncaught error was raised while processing the update
Traceback (most recent call last):
File "build/bdist.linux-i686/egg/telegram/ext/dispatcher.py", line 180, in processUpdate
handler.handleUpdate(update, self)
File "build/bdist.linux-i686/egg/telegram/ext/commandhandler.py", line 66, in handleUpdate
self.callback(dispatcher.bot, update, **optional_args)
TypeError: set() takes exactly 3 arguments (2 given)
|
TypeError
|
def message(bot, update, **kwargs):
"""
Example for an asynchronous handler. It's not guaranteed that replies will
be in order when using @run_async. Also, you have to include **kwargs in
your parameter list.
"""
print(kwargs)
sleep(2) # IO-heavy operation here
bot.sendMessage(update.message.chat_id, text="Echo: %s" % update.message.text)
|
def message(bot, update):
"""
Example for an asynchronous handler. It's not guaranteed that replies will
be in order when using @run_async.
"""
sleep(2) # IO-heavy operation here
bot.sendMessage(update.message.chat_id, text="Echo: %s" % update.message.text)
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
def run_async(func):
"""
Function decorator that will run the function in a new thread. A function
decorated with this will have to include **kwargs in their parameter list,
which will contain all optional parameters.
Args:
func (function): The function to run in the thread.
Returns:
function:
"""
@wraps(func)
def pooled(*pargs, **kwargs):
"""
A wrapper to run a thread in a thread pool
"""
global running_async, async_lock
result = func(*pargs, **kwargs)
semaphore.release()
with async_lock:
running_async -= 1
return result
@wraps(func)
def async_func(*pargs, **kwargs):
"""
A wrapper to run a function in a thread
"""
global running_async, async_lock
thread = Thread(target=pooled, args=pargs, kwargs=kwargs)
semaphore.acquire()
with async_lock:
running_async += 1
thread.start()
return thread
return async_func
|
def run_async(func):
"""
Function decorator that will run the function in a new thread.
Args:
func (function): The function to run in the thread.
Returns:
function:
"""
@wraps(func)
def pooled(*args, **kwargs):
"""
A wrapper to run a thread in a thread pool
"""
global running_async, async_lock
result = func(*args, **kwargs)
semaphore.release()
with async_lock:
running_async -= 1
return result
@wraps(func)
def async_func(*args, **kwargs):
"""
A wrapper to run a function in a thread
"""
global running_async, async_lock
thread = Thread(target=pooled, args=args, kwargs=kwargs)
semaphore.acquire()
with async_lock:
running_async += 1
thread.start()
return thread
return async_func
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
def pooled(*pargs, **kwargs):
"""
A wrapper to run a thread in a thread pool
"""
global running_async, async_lock
result = func(*pargs, **kwargs)
semaphore.release()
with async_lock:
running_async -= 1
return result
|
def pooled(*args, **kwargs):
"""
A wrapper to run a thread in a thread pool
"""
global running_async, async_lock
result = func(*args, **kwargs)
semaphore.release()
with async_lock:
running_async -= 1
return result
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
def async_func(*pargs, **kwargs):
"""
A wrapper to run a function in a thread
"""
global running_async, async_lock
thread = Thread(target=pooled, args=pargs, kwargs=kwargs)
semaphore.acquire()
with async_lock:
running_async += 1
thread.start()
return thread
|
def async_func(*args, **kwargs):
"""
A wrapper to run a function in a thread
"""
global running_async, async_lock
thread = Thread(target=pooled, args=args, kwargs=kwargs)
semaphore.acquire()
with async_lock:
running_async += 1
thread.start()
return thread
|
https://github.com/python-telegram-bot/python-telegram-bot/issues/123
|
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Program Files\Python 3.5\lib\threading.py", line 923, in _bootstrap_inner
self.run()
File "C:\Program Files\Python 3.5\lib\threading.py", line 871, in run
self._target(*self._args, **self._kwargs)
File "C:\Program Files\Python 3.5\lib\site-packages\telegram\dispatcher.py", line 39, in pooled
result = func(*args, **kwargs)
TypeError: akkarin() missing 1 required positional argument: 'args'
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.