hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f727a4a852b255e576e4ed8f9b5db24f9d41a4fc | 27,397 | py | Python | openstack_dashboard/test/api_tests/neutron_tests.py | HaManhDong/Custom-Horizon | 17513ebbe03b8ae58e0925f826801343e1e3e3e0 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/api_tests/neutron_tests.py | HaManhDong/Custom-Horizon | 17513ebbe03b8ae58e0925f826801343e1e3e3e0 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/api_tests/neutron_tests.py | HaManhDong/Custom-Horizon | 17513ebbe03b8ae58e0925f826801343e1e3e3e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from mox3.mox import IsA # noqa
from django import http
from django.test.utils import override_settings
from neutronclient.common import exceptions as neutron_exc
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.test import helpers as test
class NeutronApiTests(test.APITestCase):
    def test_network_list(self):
        # network_list() must wrap every returned network in the
        # dashboard's Network class; subnets are listed in the same call
        # so they can be attached to their networks.
        networks = {'networks': self.api_networks.list()}
        subnets = {'subnets': self.api_subnets.list()}
        neutronclient = self.stub_neutronclient()
        # mox record phase: the calls below are replayed and verified
        # in this exact order.
        neutronclient.list_networks().AndReturn(networks)
        neutronclient.list_subnets().AndReturn(subnets)
        self.mox.ReplayAll()
        ret_val = api.neutron.network_list(self.request)
        for n in ret_val:
            self.assertIsInstance(n, api.neutron.Network)
    @test.create_stubs({api.neutron: ('network_list',
                                      'subnet_list')})
    def _test_network_list_for_tenant(self, include_external):
        # Shared driver for the network_list_for_tenant tests.
        # Records two (or three) network_list calls: the tenant's own
        # networks, shared networks, and -- only when include_external is
        # True -- external ('router:external') networks.
        all_networks = self.networks.list()
        tenant_id = '1'
        api.neutron.network_list(
            IsA(http.HttpRequest),
            tenant_id=tenant_id,
            shared=False).AndReturn([
                network for network in all_networks
                if network['tenant_id'] == tenant_id
            ])
        api.neutron.network_list(
            IsA(http.HttpRequest),
            shared=True).AndReturn([
                network for network in all_networks
                if network.get('shared')
            ])
        if include_external:
            # 'router:external' is not a valid Python identifier, so it
            # must be passed via keyword-argument unpacking.
            api.neutron.network_list(
                IsA(http.HttpRequest),
                **{'router:external': True}).AndReturn([
                    network for network in all_networks
                    if network.get('router:external')
                ])
        self.mox.ReplayAll()
        ret_val = api.neutron.network_list_for_tenant(
            self.request, tenant_id,
            include_external=include_external)
        # Expected result is the union of the sources above; compared as
        # id sets because ordering is not part of the contract.
        expected = [n for n in all_networks
                    if (n['tenant_id'] == tenant_id or
                        n['shared'] or
                        (include_external and n['router:external']))]
        self.assertEqual(set(n.id for n in expected),
                         set(n.id for n in ret_val))
def test_network_list_for_tenant(self):
self._test_network_list_for_tenant(include_external=False)
def test_network_list_for_tenant_with_external(self):
self._test_network_list_for_tenant(include_external=True)
def test_network_get(self):
network = {'network': self.api_networks.first()}
subnet = {'subnet': self.api_subnets.first()}
network_id = self.api_networks.first()['id']
subnet_id = self.api_networks.first()['subnets'][0]
neutronclient = self.stub_neutronclient()
neutronclient.show_network(network_id).AndReturn(network)
neutronclient.show_subnet(subnet_id).AndReturn(subnet)
self.mox.ReplayAll()
ret_val = api.neutron.network_get(self.request, network_id)
self.assertIsInstance(ret_val, api.neutron.Network)
    def _test_network_create(self, with_n1kv=False):
        # Shared driver for the network_create tests.
        # with_n1kv=True exercises the Cisco N1KV extension path, where a
        # 'net_profile_id' kwarg must be translated into the
        # 'n1kv:profile' attribute of the request body.
        network = {'network': self.api_networks.first()}
        form_data = {'network': {'name': 'net1',
                                 'tenant_id': self.request.user.project_id}}
        neutronclient = self.stub_neutronclient()
        if with_n1kv:
            n1kv_profile = 'n1kv:profile'
            test_net_profile = 'test_net_profile'
            network['network'][n1kv_profile] = test_net_profile
            form_data['network'][n1kv_profile] = test_net_profile
            neutronclient.create_network(body=form_data).AndReturn(network)
            self.mox.ReplayAll()
            ret_val = api.neutron.network_create(
                self.request,
                name='net1',
                net_profile_id=test_net_profile)
            # assert that when 'net_profile_id' is passed as a param to
            # network_create function, 'n1kv:profile' is appended as a key to
            # the returned network dictionary with value TEST_NET_PROFILE
            self.assertEqual(test_net_profile, ret_val[n1kv_profile])
            # also assert that 'net_profile_id' isn't there in the returned
            # network dictionary
            self.assertNotIn('net_profile_id', [k for k, _ in ret_val.items()])
        else:
            neutronclient.create_network(body=form_data).AndReturn(network)
            self.mox.ReplayAll()
            ret_val = api.neutron.network_create(self.request, name='net1')
        self.assertIsInstance(ret_val, api.neutron.Network)
def test_network_create(self):
self._test_network_create()
def test_network_create_with_net_profile(self):
self._test_network_create(with_n1kv=True)
def test_network_update(self):
network = {'network': self.api_networks.first()}
network_id = self.api_networks.first()['id']
neutronclient = self.stub_neutronclient()
form_data = {'network': {'name': 'net1'}}
neutronclient.update_network(network_id, body=form_data)\
.AndReturn(network)
self.mox.ReplayAll()
ret_val = api.neutron.network_update(self.request, network_id,
name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
def test_network_delete(self):
network_id = self.api_networks.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_network(network_id)
self.mox.ReplayAll()
api.neutron.network_delete(self.request, network_id)
def test_get_network_ip_availability(self):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = self.stub_neutronclient()
neutronclient.show_network_ip_availability(network).\
AndReturn(mock_ip_availability)
self.mox.ReplayAll()
ret_val = api.neutron.show_network_ip_availability(self.request,
network)
self.assertIsInstance(ret_val, dict)
def test_subnet_network_ip_availability(self):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = self.stub_neutronclient()
neutronclient.show_network_ip_availability(network).\
AndReturn(mock_ip_availability)
self.mox.ReplayAll()
ip_availability = api.neutron. \
show_network_ip_availability(self.request, network)
availabilities = ip_availability.get("network_ip_availability",
{})
ret_val = availabilities.get("subnet_ip_availability", [])
self.assertIsInstance(ret_val, list)
def test_subnet_list(self):
subnets = {'subnets': self.api_subnets.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_subnets().AndReturn(subnets)
self.mox.ReplayAll()
ret_val = api.neutron.subnet_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Subnet)
def test_subnet_get(self):
subnet = {'subnet': self.api_subnets.first()}
subnet_id = self.api_subnets.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_subnet(subnet_id).AndReturn(subnet)
self.mox.ReplayAll()
ret_val = api.neutron.subnet_get(self.request, subnet_id)
self.assertIsInstance(ret_val, api.neutron.Subnet)
def test_subnet_create(self):
subnet_data = self.api_subnets.first()
params = {'network_id': subnet_data['network_id'],
'tenant_id': subnet_data['tenant_id'],
'name': subnet_data['name'],
'cidr': subnet_data['cidr'],
'ip_version': subnet_data['ip_version'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = self.stub_neutronclient()
neutronclient.create_subnet(body={'subnet': params})\
.AndReturn({'subnet': subnet_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnet_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
def test_subnet_update(self):
subnet_data = self.api_subnets.first()
subnet_id = subnet_data['id']
params = {'name': subnet_data['name'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = self.stub_neutronclient()
neutronclient.update_subnet(subnet_id, body={'subnet': params})\
.AndReturn({'subnet': subnet_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnet_update(self.request, subnet_id, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
def test_subnet_delete(self):
subnet_id = self.api_subnets.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_subnet(subnet_id)
self.mox.ReplayAll()
api.neutron.subnet_delete(self.request, subnet_id)
def test_subnetpool_list(self):
subnetpools = {'subnetpools': self.api_subnetpools.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_subnetpools().AndReturn(subnetpools)
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.SubnetPool)
def test_subnetpool_get(self):
subnetpool = {'subnetpool': self.api_subnetpools.first()}
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_subnetpool(subnetpool_id).AndReturn(subnetpool)
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_get(self.request, subnetpool_id)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
def test_subnetpool_create(self):
subnetpool_data = self.api_subnetpools.first()
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes'],
'tenant_id': subnetpool_data['tenant_id']}
neutronclient = self.stub_neutronclient()
neutronclient.create_subnetpool(body={'subnetpool': params})\
.AndReturn({'subnetpool': subnetpool_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
def test_subnetpool_update(self):
subnetpool_data = self.api_subnetpools.first()
subnetpool_id = subnetpool_data['id']
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes']}
neutronclient = self.stub_neutronclient()
neutronclient.update_subnetpool(subnetpool_id, body={'subnetpool': params})\
.AndReturn({'subnetpool': subnetpool_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_update(self.request, subnetpool_id,
**params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
def test_subnetpool_delete(self):
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_subnetpool(subnetpool_id)
self.mox.ReplayAll()
api.neutron.subnetpool_delete(self.request, subnetpool_id)
def test_port_list(self):
ports = {'ports': self.api_ports.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_ports().AndReturn(ports)
self.mox.ReplayAll()
ret_val = api.neutron.port_list(self.request)
for p in ret_val:
self.assertIsInstance(p, api.neutron.Port)
def test_port_get(self):
port = {'port': self.api_ports.first()}
port_id = self.api_ports.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_port(port_id).AndReturn(port)
self.mox.ReplayAll()
ret_val = api.neutron.port_get(self.request, port_id)
self.assertIsInstance(ret_val, api.neutron.Port)
    def _test_port_create(self, with_n1kv=False):
        # Shared driver for the port_create tests.
        # with_n1kv=True exercises the Cisco N1KV extension path, where a
        # 'policy_profile_id' kwarg must be translated into the
        # 'n1kv:profile' attribute of the request body.
        port = {'port': self.api_ports.first()}
        params = {'network_id': port['port']['network_id'],
                  'tenant_id': port['port']['tenant_id'],
                  'name': port['port']['name'],
                  'device_id': port['port']['device_id']}
        neutronclient = self.stub_neutronclient()
        if with_n1kv:
            n1kv_profile = 'n1kv:profile'
            test_policy_profile = 'test_policy_profile'
            port['port'][n1kv_profile] = test_policy_profile
            # Copy params and add the profile key to build the body the
            # stubbed client is expected to receive.
            body = {k: v for (k, v) in params.items()}
            body[n1kv_profile] = port['port'][n1kv_profile]
            neutronclient.create_port(body={'port': body}).AndReturn(port)
            self.mox.ReplayAll()
            ret_val = api.neutron.port_create(
                self.request,
                policy_profile_id=test_policy_profile,
                **params)
            # assert that when 'policy_profile_id' is passed as a param to
            # port_create function, 'n1kv:profile' is appended as a key to the
            # returned port dictionary with value TEST_POLICY_PROFILE
            self.assertEqual(test_policy_profile, ret_val[n1kv_profile])
            # also assert that 'policy_profile_id' isn't there in the returned
            # port dictionary
            self.assertNotIn('policy_profile_id',
                             [k for k, _ in ret_val.items()])
        else:
            neutronclient.create_port(body={'port': params}).AndReturn(port)
            self.mox.ReplayAll()
            ret_val = api.neutron.port_create(self.request, **params)
        self.assertIsInstance(ret_val, api.neutron.Port)
        self.assertEqual(api.neutron.Port(port['port']).id, ret_val.id)
def test_port_create(self):
self._test_port_create()
def test_port_create_with_policy_profile(self):
self._test_port_create(with_n1kv=True)
def test_port_update(self):
port_data = self.api_ports.first()
port_id = port_data['id']
params = {'name': port_data['name'],
'device_id': port_data['device_id']}
neutronclient = self.stub_neutronclient()
neutronclient.update_port(port_id, body={'port': params})\
.AndReturn({'port': port_data})
self.mox.ReplayAll()
ret_val = api.neutron.port_update(self.request, port_id, **params)
self.assertIsInstance(ret_val, api.neutron.Port)
self.assertEqual(api.neutron.Port(port_data).id, ret_val.id)
def test_port_delete(self):
port_id = self.api_ports.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_port(port_id)
self.mox.ReplayAll()
api.neutron.port_delete(self.request, port_id)
def test_router_list(self):
routers = {'routers': self.api_routers.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_routers().AndReturn(routers)
self.mox.ReplayAll()
ret_val = api.neutron.router_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Router)
def test_router_get(self):
router = {'router': self.api_routers.first()}
router_id = self.api_routers.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_router(router_id).AndReturn(router)
self.mox.ReplayAll()
ret_val = api.neutron.router_get(self.request, router_id)
self.assertIsInstance(ret_val, api.neutron.Router)
def test_router_create(self):
router = {'router': self.api_routers.first()}
neutronclient = self.stub_neutronclient()
form_data = {'router': {'name': 'router1',
'tenant_id': self.request.user.project_id}}
neutronclient.create_router(body=form_data).AndReturn(router)
self.mox.ReplayAll()
ret_val = api.neutron.router_create(self.request, name='router1')
self.assertIsInstance(ret_val, api.neutron.Router)
def test_router_delete(self):
router_id = self.api_routers.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_router(router_id)
self.mox.ReplayAll()
api.neutron.router_delete(self.request, router_id)
def test_router_add_interface(self):
subnet_id = self.api_subnets.first()['id']
router_id = self.api_routers.first()['id']
neutronclient = self.stub_neutronclient()
form_data = {'subnet_id': subnet_id}
neutronclient.add_interface_router(
router_id, form_data).AndReturn(None)
self.mox.ReplayAll()
api.neutron.router_add_interface(
self.request, router_id, subnet_id=subnet_id)
def test_router_remove_interface(self):
router_id = self.api_routers.first()['id']
fake_port = self.api_ports.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.remove_interface_router(
router_id, {'port_id': fake_port})
self.mox.ReplayAll()
api.neutron.router_remove_interface(
self.request, router_id, port_id=fake_port)
@test.create_stubs({api.neutron: ('is_extension_supported',)})
def test_is_extension_supported(self):
api.neutron.is_extension_supported(self.request, "quotas")\
.AndReturn(True)
api.neutron.is_extension_supported(self.request, "doesntexist") \
.AndReturn(False)
self.mox.ReplayAll()
self.assertTrue(
api.neutron.is_extension_supported(self.request, 'quotas'))
self.assertFalse(
api.neutron.is_extension_supported(self.request, 'doesntexist'))
def test_router_static_route_list(self):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_router(router_id).AndReturn(router)
self.mox.ReplayAll()
ret_val = api.neutron.router_static_route_list(self.request, router_id)
self.assertIsInstance(ret_val[0], api.neutron.RouterStaticRoute)
    def test_router_static_route_remove(self):
        # Removing one static route should trigger an update_router call
        # whose body carries only the remaining routes.
        router = {'router': self.api_routers_with_routes.first()}
        router_id = self.api_routers_with_routes.first()['id']
        post_router = copy.deepcopy(router)
        # Pop the route to be removed; RouterStaticRoute derives the id
        # used in the API call from the route data.
        route = api.neutron.RouterStaticRoute(post_router['router']
                                              ['routes'].pop())
        neutronclient = self.stub_neutronclient()
        neutronclient.show_router(router_id).AndReturn(router)
        body = {'router': {'routes': post_router['router']['routes']}}
        neutronclient.update_router(router_id, body=body)\
            .AndReturn(post_router)
        self.mox.ReplayAll()
        api.neutron.router_static_route_remove(self.request,
                                               router_id, route.id)
def test_router_static_route_add(self):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
post_router = copy.deepcopy(router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['router']['routes'].insert(0, route)
body = {'router': {'routes': post_router['router']['routes']}}
neutronclient = self.stub_neutronclient()
neutronclient.show_router(router_id).AndReturn(router)
neutronclient.update_router(router_id, body=body)\
.AndReturn(post_router)
self.mox.ReplayAll()
api.neutron.router_static_route_add(self.request, router_id, route)
    # NOTE(amotoki): "dvr" permission tests check most of
    # get_feature_permission features.
    # These tests are not specific to "dvr" extension.
    # Please be careful if you drop "dvr" extension in future.
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
                                                  True},
                       POLICY_CHECK_FUNCTION=None)
    @test.create_stubs({api.neutron: ('is_extension_supported',)})
    def _test_get_dvr_permission_dvr_supported(self, dvr_enabled):
        # With policy checks disabled (POLICY_CHECK_FUNCTION=None), the
        # feature permission should simply mirror whether the 'dvr'
        # extension is reported as supported.
        api.neutron.is_extension_supported(self.request, 'dvr').\
            AndReturn(dvr_enabled)
        self.mox.ReplayAll()
        self.assertEqual(dvr_enabled,
                         api.neutron.get_feature_permission(self.request,
                                                            'dvr', 'get'))
def test_get_dvr_permission_dvr_supported(self):
self._test_get_dvr_permission_dvr_supported(dvr_enabled=True)
def test_get_dvr_permission_dvr_not_supported(self):
self._test_get_dvr_permission_dvr_supported(dvr_enabled=False)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
                                                  True},
                       POLICY_CHECK_FUNCTION=policy.check)
    @test.create_stubs({api.neutron: ('is_extension_supported',)})
    def _test_get_dvr_permission_with_policy_check(self, policy_check_allowed,
                                                   operation):
        # When a policy check function is configured, get_feature_permission
        # must consult policy first; the 'dvr' extension is queried only if
        # the policy check passes.
        self.mox.StubOutWithMock(policy, 'check')
        if operation == "create":
            role = (("network", "create_router:distributed"),)
        elif operation == "get":
            role = (("network", "get_router:distributed"),)
        policy.check(role, self.request).AndReturn(policy_check_allowed)
        if policy_check_allowed:
            # Extension support is checked only on the policy-allowed path.
            api.neutron.is_extension_supported(self.request, 'dvr').\
                AndReturn(policy_check_allowed)
        self.mox.ReplayAll()
        self.assertEqual(policy_check_allowed,
                         api.neutron.get_feature_permission(self.request,
                                                            'dvr', operation))
def test_get_dvr_permission_with_policy_check_allowed(self):
self._test_get_dvr_permission_with_policy_check(True, "get")
def test_get_dvr_permission_with_policy_check_disallowed(self):
self._test_get_dvr_permission_with_policy_check(False, "get")
def test_get_dvr_permission_create_with_policy_check_allowed(self):
self._test_get_dvr_permission_with_policy_check(True, "create")
def test_get_dvr_permission_create_with_policy_check_disallowed(self):
self._test_get_dvr_permission_with_policy_check(False, "create")
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
False})
def test_get_dvr_permission_dvr_disabled_by_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION=policy.check)
def test_get_dvr_permission_dvr_unsupported_operation(self):
self.assertRaises(ValueError,
api.neutron.get_feature_permission,
self.request, 'dvr', 'unSupported')
@override_settings(OPENSTACK_NEUTRON_NETWORK={})
def test_get_dvr_permission_dvr_default_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
@override_settings(OPENSTACK_NEUTRON_NETWORK={})
def test_get_dvr_permission_router_ha_default_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'l3-ha', 'get'))
# NOTE(amotoki): Most of get_feature_permission are checked by "dvr" check
# above. l3-ha check only checks l3-ha specific code.
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ha_router': True},
POLICY_CHECK_FUNCTION=policy.check)
@test.create_stubs({api.neutron: ('is_extension_supported', )})
def _test_get_router_ha_permission_with_policy_check(self, ha_enabled):
self.mox.StubOutWithMock(policy, 'check')
role = (("network", "create_router:ha"),)
policy.check(role, self.request).AndReturn(True)
api.neutron.is_extension_supported(self.request, 'l3-ha')\
.AndReturn(ha_enabled)
self.mox.ReplayAll()
self.assertEqual(ha_enabled,
api.neutron.get_feature_permission(self.request,
'l3-ha', 'create'))
def test_get_router_ha_permission_with_l3_ha_extension(self):
self._test_get_router_ha_permission_with_policy_check(True)
def test_get_router_ha_permission_without_l3_ha_extension(self):
self._test_get_router_ha_permission_with_policy_check(False)
    def test_list_resources_with_long_filters(self):
        # In this tests, port_list is called with id=[10 port ID]
        # filter. It generates about 40*10 char length URI.
        # Each port ID is converted to "id=<UUID>&" in URI and
        # it means 40 chars (len(UUID)=36).
        # If excess length is 220, it means 400-220=180 chars
        # can be sent in the first request.
        # As a result three API calls with 4, 4, 2 port ID
        # are expected.
        ports = [{'id': str(uuid.uuid4()),
                  'name': 'port%s' % i,
                  'admin_state_up': True}
                 for i in range(10)]
        port_ids = [port['id'] for port in ports]
        neutronclient = self.stub_neutronclient()
        # First attempt with all 10 IDs is recorded to fail with
        # RequestURITooLong, forcing the helper to split the filter values.
        uri_len_exc = neutron_exc.RequestURITooLong(excess=220)
        neutronclient.list_ports(id=port_ids).AndRaise(uri_len_exc)
        # The helper then retries in chunks (4, 4, and the 2 leftovers).
        for i in range(0, 10, 4):
            neutronclient.list_ports(id=port_ids[i:i + 4]) \
                .AndReturn({'ports': ports[i:i + 4]})
        self.mox.ReplayAll()
        ret_val = api.neutron.list_resources_with_long_filters(
            api.neutron.port_list, 'id', port_ids,
            request=self.request)
        # Every port must come back, preserving the original order.
        self.assertEqual(10, len(ret_val))
        self.assertEqual(port_ids, [p.id for p in ret_val])
| 42.019939 | 84 | 0.638756 |
import copy
import uuid
from mox3.mox import IsA
from django import http
from django.test.utils import override_settings
from neutronclient.common import exceptions as neutron_exc
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.test import helpers as test
class NeutronApiTests(test.APITestCase):
def test_network_list(self):
networks = {'networks': self.api_networks.list()}
subnets = {'subnets': self.api_subnets.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_networks().AndReturn(networks)
neutronclient.list_subnets().AndReturn(subnets)
self.mox.ReplayAll()
ret_val = api.neutron.network_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Network)
@test.create_stubs({api.neutron: ('network_list',
'subnet_list')})
def _test_network_list_for_tenant(self, include_external):
all_networks = self.networks.list()
tenant_id = '1'
api.neutron.network_list(
IsA(http.HttpRequest),
tenant_id=tenant_id,
shared=False).AndReturn([
network for network in all_networks
if network['tenant_id'] == tenant_id
])
api.neutron.network_list(
IsA(http.HttpRequest),
shared=True).AndReturn([
network for network in all_networks
if network.get('shared')
])
if include_external:
api.neutron.network_list(
IsA(http.HttpRequest),
**{'router:external': True}).AndReturn([
network for network in all_networks
if network.get('router:external')
])
self.mox.ReplayAll()
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id,
include_external=include_external)
expected = [n for n in all_networks
if (n['tenant_id'] == tenant_id or
n['shared'] or
(include_external and n['router:external']))]
self.assertEqual(set(n.id for n in expected),
set(n.id for n in ret_val))
def test_network_list_for_tenant(self):
self._test_network_list_for_tenant(include_external=False)
def test_network_list_for_tenant_with_external(self):
self._test_network_list_for_tenant(include_external=True)
def test_network_get(self):
network = {'network': self.api_networks.first()}
subnet = {'subnet': self.api_subnets.first()}
network_id = self.api_networks.first()['id']
subnet_id = self.api_networks.first()['subnets'][0]
neutronclient = self.stub_neutronclient()
neutronclient.show_network(network_id).AndReturn(network)
neutronclient.show_subnet(subnet_id).AndReturn(subnet)
self.mox.ReplayAll()
ret_val = api.neutron.network_get(self.request, network_id)
self.assertIsInstance(ret_val, api.neutron.Network)
def _test_network_create(self, with_n1kv=False):
network = {'network': self.api_networks.first()}
form_data = {'network': {'name': 'net1',
'tenant_id': self.request.user.project_id}}
neutronclient = self.stub_neutronclient()
if with_n1kv:
n1kv_profile = 'n1kv:profile'
test_net_profile = 'test_net_profile'
network['network'][n1kv_profile] = test_net_profile
form_data['network'][n1kv_profile] = test_net_profile
neutronclient.create_network(body=form_data).AndReturn(network)
self.mox.ReplayAll()
ret_val = api.neutron.network_create(
self.request,
name='net1',
net_profile_id=test_net_profile)
self.assertEqual(test_net_profile, ret_val[n1kv_profile])
# network dictionary
self.assertNotIn('net_profile_id', [k for k, _ in ret_val.items()])
else:
neutronclient.create_network(body=form_data).AndReturn(network)
self.mox.ReplayAll()
ret_val = api.neutron.network_create(self.request, name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
def test_network_create(self):
self._test_network_create()
def test_network_create_with_net_profile(self):
self._test_network_create(with_n1kv=True)
def test_network_update(self):
network = {'network': self.api_networks.first()}
network_id = self.api_networks.first()['id']
neutronclient = self.stub_neutronclient()
form_data = {'network': {'name': 'net1'}}
neutronclient.update_network(network_id, body=form_data)\
.AndReturn(network)
self.mox.ReplayAll()
ret_val = api.neutron.network_update(self.request, network_id,
name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
def test_network_delete(self):
network_id = self.api_networks.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_network(network_id)
self.mox.ReplayAll()
api.neutron.network_delete(self.request, network_id)
def test_get_network_ip_availability(self):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = self.stub_neutronclient()
neutronclient.show_network_ip_availability(network).\
AndReturn(mock_ip_availability)
self.mox.ReplayAll()
ret_val = api.neutron.show_network_ip_availability(self.request,
network)
self.assertIsInstance(ret_val, dict)
def test_subnet_network_ip_availability(self):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = self.stub_neutronclient()
neutronclient.show_network_ip_availability(network).\
AndReturn(mock_ip_availability)
self.mox.ReplayAll()
ip_availability = api.neutron. \
show_network_ip_availability(self.request, network)
availabilities = ip_availability.get("network_ip_availability",
{})
ret_val = availabilities.get("subnet_ip_availability", [])
self.assertIsInstance(ret_val, list)
def test_subnet_list(self):
subnets = {'subnets': self.api_subnets.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_subnets().AndReturn(subnets)
self.mox.ReplayAll()
ret_val = api.neutron.subnet_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Subnet)
def test_subnet_get(self):
subnet = {'subnet': self.api_subnets.first()}
subnet_id = self.api_subnets.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_subnet(subnet_id).AndReturn(subnet)
self.mox.ReplayAll()
ret_val = api.neutron.subnet_get(self.request, subnet_id)
self.assertIsInstance(ret_val, api.neutron.Subnet)
def test_subnet_create(self):
subnet_data = self.api_subnets.first()
params = {'network_id': subnet_data['network_id'],
'tenant_id': subnet_data['tenant_id'],
'name': subnet_data['name'],
'cidr': subnet_data['cidr'],
'ip_version': subnet_data['ip_version'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = self.stub_neutronclient()
neutronclient.create_subnet(body={'subnet': params})\
.AndReturn({'subnet': subnet_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnet_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
def test_subnet_update(self):
subnet_data = self.api_subnets.first()
subnet_id = subnet_data['id']
params = {'name': subnet_data['name'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = self.stub_neutronclient()
neutronclient.update_subnet(subnet_id, body={'subnet': params})\
.AndReturn({'subnet': subnet_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnet_update(self.request, subnet_id, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
def test_subnet_delete(self):
subnet_id = self.api_subnets.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.delete_subnet(subnet_id)
self.mox.ReplayAll()
api.neutron.subnet_delete(self.request, subnet_id)
def test_subnetpool_list(self):
subnetpools = {'subnetpools': self.api_subnetpools.list()}
neutronclient = self.stub_neutronclient()
neutronclient.list_subnetpools().AndReturn(subnetpools)
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.SubnetPool)
def test_subnetpool_get(self):
subnetpool = {'subnetpool': self.api_subnetpools.first()}
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = self.stub_neutronclient()
neutronclient.show_subnetpool(subnetpool_id).AndReturn(subnetpool)
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_get(self.request, subnetpool_id)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
def test_subnetpool_create(self):
subnetpool_data = self.api_subnetpools.first()
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes'],
'tenant_id': subnetpool_data['tenant_id']}
neutronclient = self.stub_neutronclient()
neutronclient.create_subnetpool(body={'subnetpool': params})\
.AndReturn({'subnetpool': subnetpool_data})
self.mox.ReplayAll()
ret_val = api.neutron.subnetpool_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
    def test_subnetpool_update(self):
        """subnetpool_update should PUT the params and wrap the result."""
        subnetpool_data = self.api_subnetpools.first()
        subnetpool_id = subnetpool_data['id']
        params = {'name': subnetpool_data['name'],
                  'prefixes': subnetpool_data['prefixes']}
        neutronclient = self.stub_neutronclient()
        neutronclient.update_subnetpool(subnetpool_id, body={'subnetpool': params})\
            .AndReturn({'subnetpool': subnetpool_data})
        self.mox.ReplayAll()
        ret_val = api.neutron.subnetpool_update(self.request, subnetpool_id,
                                                **params)
        self.assertIsInstance(ret_val, api.neutron.SubnetPool)
    def test_subnetpool_delete(self):
        """subnetpool_delete should delegate to delete_subnetpool."""
        subnetpool_id = self.api_subnetpools.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.delete_subnetpool(subnetpool_id)
        self.mox.ReplayAll()
        api.neutron.subnetpool_delete(self.request, subnetpool_id)
    def test_port_list(self):
        """port_list should wrap each item in api.neutron.Port."""
        ports = {'ports': self.api_ports.list()}
        neutronclient = self.stub_neutronclient()
        neutronclient.list_ports().AndReturn(ports)
        self.mox.ReplayAll()
        ret_val = api.neutron.port_list(self.request)
        for p in ret_val:
            self.assertIsInstance(p, api.neutron.Port)
    def test_port_get(self):
        """port_get should fetch one port and wrap it in Port."""
        port = {'port': self.api_ports.first()}
        port_id = self.api_ports.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.show_port(port_id).AndReturn(port)
        self.mox.ReplayAll()
        ret_val = api.neutron.port_get(self.request, port_id)
        self.assertIsInstance(ret_val, api.neutron.Port)
    def _test_port_create(self, with_n1kv=False):
        """Shared body for port_create tests.

        With ``with_n1kv=True``, verifies that a ``policy_profile_id`` kwarg
        is translated into the Cisco N1Kv 'n1kv:profile' attribute on the
        created port and does not leak through as-is.
        """
        port = {'port': self.api_ports.first()}
        params = {'network_id': port['port']['network_id'],
                  'tenant_id': port['port']['tenant_id'],
                  'name': port['port']['name'],
                  'device_id': port['port']['device_id']}
        neutronclient = self.stub_neutronclient()
        if with_n1kv:
            n1kv_profile = 'n1kv:profile'
            test_policy_profile = 'test_policy_profile'
            port['port'][n1kv_profile] = test_policy_profile
            body = {k: v for (k, v) in params.items()}
            body[n1kv_profile] = port['port'][n1kv_profile]
            neutronclient.create_port(body={'port': body}).AndReturn(port)
            self.mox.ReplayAll()
            ret_val = api.neutron.port_create(
                self.request,
                policy_profile_id=test_policy_profile,
                **params)
            # assert that when 'policy_profile_id' is passed as a param to
            # port_create function, 'n1kv:profile' is appended as a key to the
            # returned port dictionary with value TEST_POLICY_PROFILE
            self.assertEqual(test_policy_profile, ret_val[n1kv_profile])
            # also assert that 'policy_profile_id' isn't there in the returned
            self.assertNotIn('policy_profile_id',
                             [k for k, _ in ret_val.items()])
        else:
            neutronclient.create_port(body={'port': params}).AndReturn(port)
            self.mox.ReplayAll()
            ret_val = api.neutron.port_create(self.request, **params)
            self.assertIsInstance(ret_val, api.neutron.Port)
            self.assertEqual(api.neutron.Port(port['port']).id, ret_val.id)
    def test_port_create(self):
        """Plain port creation (no N1Kv policy profile)."""
        self._test_port_create()
    def test_port_create_with_policy_profile(self):
        """Port creation with a Cisco N1Kv policy profile attached."""
        self._test_port_create(with_n1kv=True)
    def test_port_update(self):
        """port_update should PUT the params and wrap the result in Port."""
        port_data = self.api_ports.first()
        port_id = port_data['id']
        params = {'name': port_data['name'],
                  'device_id': port_data['device_id']}
        neutronclient = self.stub_neutronclient()
        neutronclient.update_port(port_id, body={'port': params})\
            .AndReturn({'port': port_data})
        self.mox.ReplayAll()
        ret_val = api.neutron.port_update(self.request, port_id, **params)
        self.assertIsInstance(ret_val, api.neutron.Port)
        self.assertEqual(api.neutron.Port(port_data).id, ret_val.id)
    def test_port_delete(self):
        """port_delete should delegate to the client's delete_port."""
        port_id = self.api_ports.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.delete_port(port_id)
        self.mox.ReplayAll()
        api.neutron.port_delete(self.request, port_id)
    def test_router_list(self):
        """router_list should wrap each item in api.neutron.Router."""
        routers = {'routers': self.api_routers.list()}
        neutronclient = self.stub_neutronclient()
        neutronclient.list_routers().AndReturn(routers)
        self.mox.ReplayAll()
        ret_val = api.neutron.router_list(self.request)
        for n in ret_val:
            self.assertIsInstance(n, api.neutron.Router)
    def test_router_get(self):
        """router_get should fetch one router and wrap it in Router."""
        router = {'router': self.api_routers.first()}
        router_id = self.api_routers.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.show_router(router_id).AndReturn(router)
        self.mox.ReplayAll()
        ret_val = api.neutron.router_get(self.request, router_id)
        self.assertIsInstance(ret_val, api.neutron.Router)
    def test_router_create(self):
        """router_create should add the request's tenant_id to the POST body."""
        router = {'router': self.api_routers.first()}
        neutronclient = self.stub_neutronclient()
        form_data = {'router': {'name': 'router1',
                                'tenant_id': self.request.user.project_id}}
        neutronclient.create_router(body=form_data).AndReturn(router)
        self.mox.ReplayAll()
        ret_val = api.neutron.router_create(self.request, name='router1')
        self.assertIsInstance(ret_val, api.neutron.Router)
    def test_router_delete(self):
        """router_delete should delegate to the client's delete_router."""
        router_id = self.api_routers.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.delete_router(router_id)
        self.mox.ReplayAll()
        api.neutron.router_delete(self.request, router_id)
    def test_router_add_interface(self):
        """router_add_interface should pass subnet_id to add_interface_router."""
        subnet_id = self.api_subnets.first()['id']
        router_id = self.api_routers.first()['id']
        neutronclient = self.stub_neutronclient()
        form_data = {'subnet_id': subnet_id}
        neutronclient.add_interface_router(
            router_id, form_data).AndReturn(None)
        self.mox.ReplayAll()
        api.neutron.router_add_interface(
            self.request, router_id, subnet_id=subnet_id)
    def test_router_remove_interface(self):
        """router_remove_interface should pass port_id to remove_interface_router."""
        router_id = self.api_routers.first()['id']
        fake_port = self.api_ports.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.remove_interface_router(
            router_id, {'port_id': fake_port})
        self.mox.ReplayAll()
        api.neutron.router_remove_interface(
            self.request, router_id, port_id=fake_port)
    @test.create_stubs({api.neutron: ('is_extension_supported',)})
    def test_is_extension_supported(self):
        """is_extension_supported reports True/False per extension alias."""
        api.neutron.is_extension_supported(self.request, "quotas")\
            .AndReturn(True)
        api.neutron.is_extension_supported(self.request, "doesntexist") \
            .AndReturn(False)
        self.mox.ReplayAll()
        self.assertTrue(
            api.neutron.is_extension_supported(self.request, 'quotas'))
        self.assertFalse(
            api.neutron.is_extension_supported(self.request, 'doesntexist'))
    def test_router_static_route_list(self):
        """Static routes on a router come back as RouterStaticRoute objects."""
        router = {'router': self.api_routers_with_routes.first()}
        router_id = self.api_routers_with_routes.first()['id']
        neutronclient = self.stub_neutronclient()
        neutronclient.show_router(router_id).AndReturn(router)
        self.mox.ReplayAll()
        ret_val = api.neutron.router_static_route_list(self.request, router_id)
        self.assertIsInstance(ret_val[0], api.neutron.RouterStaticRoute)
    def test_router_static_route_remove(self):
        """Removing one static route re-PUTs the remaining routes list."""
        router = {'router': self.api_routers_with_routes.first()}
        router_id = self.api_routers_with_routes.first()['id']
        # post_router holds the expected state after removal; pop() takes the
        # route that will be deleted.
        post_router = copy.deepcopy(router)
        route = api.neutron.RouterStaticRoute(post_router['router']
                                              ['routes'].pop())
        neutronclient = self.stub_neutronclient()
        neutronclient.show_router(router_id).AndReturn(router)
        body = {'router': {'routes': post_router['router']['routes']}}
        neutronclient.update_router(router_id, body=body)\
            .AndReturn(post_router)
        self.mox.ReplayAll()
        api.neutron.router_static_route_remove(self.request,
                                               router_id, route.id)
    def test_router_static_route_add(self):
        """Adding a static route PUTs the routes list with the new entry first."""
        router = {'router': self.api_routers_with_routes.first()}
        router_id = self.api_routers_with_routes.first()['id']
        post_router = copy.deepcopy(router)
        route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
        post_router['router']['routes'].insert(0, route)
        body = {'router': {'routes': post_router['router']['routes']}}
        neutronclient = self.stub_neutronclient()
        neutronclient.show_router(router_id).AndReturn(router)
        neutronclient.update_router(router_id, body=body)\
            .AndReturn(post_router)
        self.mox.ReplayAll()
        api.neutron.router_static_route_add(self.request, router_id, route)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
                                                  True},
                       POLICY_CHECK_FUNCTION=None)
    @test.create_stubs({api.neutron: ('is_extension_supported',)})
    def _test_get_dvr_permission_dvr_supported(self, dvr_enabled):
        """With policy checks disabled, DVR permission tracks extension support."""
        api.neutron.is_extension_supported(self.request, 'dvr').\
            AndReturn(dvr_enabled)
        self.mox.ReplayAll()
        self.assertEqual(dvr_enabled,
                         api.neutron.get_feature_permission(self.request,
                                                            'dvr', 'get'))
    def test_get_dvr_permission_dvr_supported(self):
        """DVR permission is granted when the 'dvr' extension is supported."""
        self._test_get_dvr_permission_dvr_supported(dvr_enabled=True)
    def test_get_dvr_permission_dvr_not_supported(self):
        """DVR permission is denied when the 'dvr' extension is unsupported."""
        self._test_get_dvr_permission_dvr_supported(dvr_enabled=False)
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
                                                  True},
                       POLICY_CHECK_FUNCTION=policy.check)
    @test.create_stubs({api.neutron: ('is_extension_supported',)})
    def _test_get_dvr_permission_with_policy_check(self, policy_check_allowed,
                                                   operation):
        """DVR permission requires both the policy check and extension support.

        The extension lookup is only recorded when the policy check passes,
        mirroring the short-circuit in get_feature_permission.
        """
        self.mox.StubOutWithMock(policy, 'check')
        if operation == "create":
            role = (("network", "create_router:distributed"),)
        elif operation == "get":
            role = (("network", "get_router:distributed"),)
        policy.check(role, self.request).AndReturn(policy_check_allowed)
        if policy_check_allowed:
            api.neutron.is_extension_supported(self.request, 'dvr').\
                AndReturn(policy_check_allowed)
        self.mox.ReplayAll()
        self.assertEqual(policy_check_allowed,
                         api.neutron.get_feature_permission(self.request,
                                                            'dvr', operation))
    def test_get_dvr_permission_with_policy_check_allowed(self):
        """'get' operation allowed by policy."""
        self._test_get_dvr_permission_with_policy_check(True, "get")
    def test_get_dvr_permission_with_policy_check_disallowed(self):
        """'get' operation denied by policy."""
        self._test_get_dvr_permission_with_policy_check(False, "get")
    def test_get_dvr_permission_create_with_policy_check_allowed(self):
        """'create' operation allowed by policy."""
        self._test_get_dvr_permission_with_policy_check(True, "create")
    def test_get_dvr_permission_create_with_policy_check_disallowed(self):
        """'create' operation denied by policy."""
        self._test_get_dvr_permission_with_policy_check(False, "create")
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
                                                  False})
    def test_get_dvr_permission_dvr_disabled_by_config(self):
        """DVR permission is denied outright when disabled in settings."""
        self.assertFalse(api.neutron.get_feature_permission(self.request,
                                                            'dvr', 'get'))
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
                                                  True},
                       POLICY_CHECK_FUNCTION=policy.check)
    def test_get_dvr_permission_dvr_unsupported_operation(self):
        """Unknown operations raise ValueError from get_feature_permission."""
        self.assertRaises(ValueError,
                          api.neutron.get_feature_permission,
                          self.request, 'dvr', 'unSupported')
    @override_settings(OPENSTACK_NEUTRON_NETWORK={})
    def test_get_dvr_permission_dvr_default_config(self):
        """DVR defaults to disabled when the setting is absent."""
        self.assertFalse(api.neutron.get_feature_permission(self.request,
                                                            'dvr', 'get'))
    @override_settings(OPENSTACK_NEUTRON_NETWORK={})
    def test_get_dvr_permission_router_ha_default_config(self):
        """HA router support defaults to disabled when the setting is absent."""
        self.assertFalse(api.neutron.get_feature_permission(self.request,
                                                            'l3-ha', 'get'))
    @override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ha_router': True},
                       POLICY_CHECK_FUNCTION=policy.check)
    @test.create_stubs({api.neutron: ('is_extension_supported', )})
    def _test_get_router_ha_permission_with_policy_check(self, ha_enabled):
        """With policy allowed, HA permission tracks 'l3-ha' extension support."""
        self.mox.StubOutWithMock(policy, 'check')
        role = (("network", "create_router:ha"),)
        policy.check(role, self.request).AndReturn(True)
        api.neutron.is_extension_supported(self.request, 'l3-ha')\
            .AndReturn(ha_enabled)
        self.mox.ReplayAll()
        self.assertEqual(ha_enabled,
                         api.neutron.get_feature_permission(self.request,
                                                            'l3-ha', 'create'))
    def test_get_router_ha_permission_with_l3_ha_extension(self):
        """HA creation allowed when 'l3-ha' extension is present."""
        self._test_get_router_ha_permission_with_policy_check(True)
    def test_get_router_ha_permission_without_l3_ha_extension(self):
        """HA creation denied when 'l3-ha' extension is absent."""
        self._test_get_router_ha_permission_with_policy_check(False)
    def test_list_resources_with_long_filters(self):
        """When the filter list overflows the URI, requests are split into
        chunks and the partial results are concatenated in order."""
        ports = [{'id': str(uuid.uuid4()),
                  'name': 'port%s' % i,
                  'admin_state_up': True}
                 for i in range(10)]
        port_ids = [port['id'] for port in ports]
        neutronclient = self.stub_neutronclient()
        # First attempt with all ten IDs fails with RequestURITooLong
        # (excess=220 forces a chunk size of 4), then three chunked calls
        # succeed.
        uri_len_exc = neutron_exc.RequestURITooLong(excess=220)
        neutronclient.list_ports(id=port_ids).AndRaise(uri_len_exc)
        for i in range(0, 10, 4):
            neutronclient.list_ports(id=port_ids[i:i + 4]) \
                .AndReturn({'ports': ports[i:i + 4]})
        self.mox.ReplayAll()
        ret_val = api.neutron.list_resources_with_long_filters(
            api.neutron.port_list, 'id', port_ids,
            request=self.request)
        self.assertEqual(10, len(ret_val))
        self.assertEqual(port_ids, [p.id for p in ret_val])
| true | true |
f727a6fbe910664403fa49773ddc8bc6ca0f017c | 3,896 | py | Python | cfgs_fatdet/voc/fatnat/ttfnet_fatnet_ttfheadfull_96_10x_aug_no_pretrain.py | Leotju/ttfnet | 94eea28ea22215310140caee492d5de2b01b3d04 | [
"Apache-2.0"
] | null | null | null | cfgs_fatdet/voc/fatnat/ttfnet_fatnet_ttfheadfull_96_10x_aug_no_pretrain.py | Leotju/ttfnet | 94eea28ea22215310140caee492d5de2b01b3d04 | [
"Apache-2.0"
] | null | null | null | cfgs_fatdet/voc/fatnat/ttfnet_fatnet_ttfheadfull_96_10x_aug_no_pretrain.py | Leotju/ttfnet | 94eea28ea22215310140caee492d5de2b01b3d04 | [
"Apache-2.0"
] | null | null | null | # model settings
# TTFNet (FatNetSimple backbone) config for PASCAL VOC, 96x96 input,
# heavy photometric/expand/crop augmentation, trained from scratch
# (no ImageNet pretraining).
# model settings
model = dict(
    type='TTFNet',
    # pretrained='modelzoo://resnet18',
    pretrained=None,  # train from scratch
    backbone=dict(
        type='FatNetSimple',
        norm_cfg = dict(type='BN', requires_grad=True),
    ),
    neck=None,
    bbox_head=dict(
        type='TTFHeadFull',
        inplanes=16,
        planes=64,
        head_conv=64,
        down_ratio=1,  # full-resolution head output
        wh_conv=64,
        hm_head_conv_num=2,
        wh_head_conv_num=1,
        num_classes=21,  # 20 VOC classes + background
        wh_agnostic=True,
        wh_gaussian=True,
        norm_cfg=dict(type='BN'),
        alpha=0.54,
        hm_weight=1.,
        wh_weight=5.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
    vis_every_n_iters=100,
    debug=False)
test_cfg = dict(
    score_thr=0.01,
    max_per_img=100)
# dataset settings
dataset_type = 'VOCDataset'
data_root = '../data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=16,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',  # to avoid reloading datasets frequently
        times=30,
        dataset=dict(
            type=dataset_type,
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt',
                data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            img_scale=(96, 96),
            img_norm_cfg=img_norm_cfg,
            size_divisor=32,
            flip_ratio=0.5,
            with_mask=False,
            with_crowd=False,
            with_label=True,
            extra_aug=dict(
                photo_metric_distortion=dict(
                    brightness_delta=32,
                    contrast_range=(0.5, 1.5),
                    saturation_range=(0.5, 1.5),
                    hue_delta=18),
                expand=dict(
                    mean=img_norm_cfg['mean'],
                    to_rgb=img_norm_cfg['to_rgb'],
                    ratio_range=(1, 4)),
                random_crop=dict(
                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
            resize_keep_ratio=False)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        img_scale=(512, 512),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=False,
        with_label=True,
        resize_keep_ratio=False),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        img_scale=(96, 96),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=False,
        with_label=False,
        test_mode=True,
        resize_keep_ratio=False))
# optimizer
optimizer = dict(type='SGD', lr=0.016, momentum=0.9, weight_decay=0.0004,
                 paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 5,
    step=[3])
checkpoint_config = dict(interval=1)
bbox_head_hist_config = dict(
    model_type=['ConvModule', 'DeformConvPack'],
    sub_modules=['bbox_head'],
    save_every_n_steps=500)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
# yapf:enable
# runtime settings
total_epochs = 4
device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '../work_dirs/pascal/baseline/ttfnet_fatnet_ttfhead_full_96_10x_aug_no_pretrain'
load_from = None
resume_from = None
workflow = [('train', 1)]
# 26.6
| 29.969231 | 91 | 0.592402 |
# TTFNet (FatNetSimple backbone) config for PASCAL VOC, 96x96 input,
# trained from scratch (no ImageNet pretraining).
# model settings
model = dict(
    type='TTFNet',
    pretrained=None,
    backbone=dict(
        type='FatNetSimple',
        norm_cfg = dict(type='BN', requires_grad=True),
    ),
    neck=None,
    bbox_head=dict(
        type='TTFHeadFull',
        inplanes=16,
        planes=64,
        head_conv=64,
        down_ratio=1,
        wh_conv=64,
        hm_head_conv_num=2,
        wh_head_conv_num=1,
        num_classes=21,
        wh_agnostic=True,
        wh_gaussian=True,
        norm_cfg=dict(type='BN'),
        alpha=0.54,
        hm_weight=1.,
        wh_weight=5.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
    vis_every_n_iters=100,
    debug=False)
test_cfg = dict(
    score_thr=0.01,
    max_per_img=100)
# dataset settings
dataset_type = 'VOCDataset'
data_root = '../data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
data = dict(
    imgs_per_gpu=16,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=30,
        dataset=dict(
            type=dataset_type,
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt',
                data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            img_scale=(96, 96),
            img_norm_cfg=img_norm_cfg,
            size_divisor=32,
            flip_ratio=0.5,
            with_mask=False,
            with_crowd=False,
            with_label=True,
            extra_aug=dict(
                photo_metric_distortion=dict(
                    brightness_delta=32,
                    contrast_range=(0.5, 1.5),
                    saturation_range=(0.5, 1.5),
                    hue_delta=18),
                expand=dict(
                    mean=img_norm_cfg['mean'],
                    to_rgb=img_norm_cfg['to_rgb'],
                    ratio_range=(1, 4)),
                random_crop=dict(
                    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3)),
            resize_keep_ratio=False)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        img_scale=(512, 512),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=False,
        with_label=True,
        resize_keep_ratio=False),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        img_scale=(96, 96),
        img_norm_cfg=img_norm_cfg,
        size_divisor=32,
        flip_ratio=0,
        with_mask=False,
        with_crowd=False,
        with_label=False,
        test_mode=True,
        resize_keep_ratio=False))
# optimizer
optimizer = dict(type='SGD', lr=0.016, momentum=0.9, weight_decay=0.0004,
                 paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 5,
    step=[3])
checkpoint_config = dict(interval=1)
bbox_head_hist_config = dict(
    model_type=['ConvModule', 'DeformConvPack'],
    sub_modules=['bbox_head'],
    save_every_n_steps=500)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
# runtime settings
total_epochs = 4
device_ids = range(8)
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = '../work_dirs/pascal/baseline/ttfnet_fatnet_ttfhead_full_96_10x_aug_no_pretrain'
load_from = None
resume_from = None
workflow = [('train', 1)]
f727a95057ac0c7fae2ad50f1000ff55d2fd438e | 35,520 | py | Python | tests/test_project.py | Anton-Latukha/wakatime | 3035a28a3f996a11d928802dcb05844bb0a52655 | [
"BSD-3-Clause"
] | 1,198 | 2015-01-02T12:08:49.000Z | 2021-10-07T02:46:59.000Z | tests/test_project.py | Anton-Latukha/wakatime | 3035a28a3f996a11d928802dcb05844bb0a52655 | [
"BSD-3-Clause"
] | 249 | 2015-01-22T13:31:12.000Z | 2021-05-01T08:01:22.000Z | tests/test_project.py | Anton-Latukha/wakatime | 3035a28a3f996a11d928802dcb05844bb0a52655 | [
"BSD-3-Clause"
] | 118 | 2015-01-16T19:13:15.000Z | 2021-07-21T15:09:15.000Z | # -*- coding: utf-8 -*-
from wakatime.main import execute
from wakatime.packages import requests
from wakatime.packages.requests.models import Response
import logging
import os
import shutil
import tempfile
import time
from testfixtures import log_capture
from wakatime.compat import u, open
from wakatime.constants import API_ERROR, SUCCESS
from wakatime.exceptions import NotYetImplemented
from wakatime.project import generate_project_name
from wakatime.projects.base import BaseProject
from wakatime.projects.git import Git
from .utils import ANY, DynamicIterable, TestCase, TemporaryDirectory, CustomResponse, mock, json
class ProjectTestCase(TestCase):
    """Tests for wakatime project/branch auto-detection (git, hg, svn,
    .wakatime-project files) and the --project/--alternate-project args."""

    # Attributes patched for every test by the TestCase machinery; entries
    # are either a dotted path (plain MagicMock) or [path, return_value].
    patch_these = [
        'wakatime.packages.requests.adapters.HTTPAdapter.send',
        'wakatime.offlinequeue.Queue.push',
        ['wakatime.offlinequeue.Queue.pop', None],
        ['wakatime.offlinequeue.Queue.connect', None],
        'wakatime.session_cache.SessionCache.save',
        'wakatime.session_cache.SessionCache.delete',
        ['wakatime.session_cache.SessionCache.get', requests.session],
        ['wakatime.session_cache.SessionCache.connect', None],
    ]
def shared(self, expected_project='', expected_branch=ANY, entity='', config='good_config.cfg', extra_args=[]):
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
config = os.path.join('tests/samples/configs', config)
if not os.path.exists(entity):
entity = os.path.realpath(os.path.join('tests/samples', entity))
now = u(int(time.time()))
args = ['--file', entity, '--config', config, '--time', now] + extra_args
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
heartbeat = {
'language': ANY,
'lines': ANY,
'entity': os.path.realpath(entity),
'project': expected_project,
'branch': expected_branch,
'dependencies': ANY,
'time': float(now),
'type': 'file',
'is_write': False,
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
    def test_project_base(self):
        """BaseProject's abstract methods must raise NotYetImplemented."""
        path = 'tests/samples/codefiles/see.h'
        project = BaseProject(path)
        with self.assertRaises(NotYetImplemented):
            project.process()
        with self.assertRaises(NotYetImplemented):
            project.name()
        with self.assertRaises(NotYetImplemented):
            project.branch()
    def test_project_argument_overrides_detected_project(self):
        """--project wins over the project detected from the git repo."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now = u(int(time.time()))
        entity = 'tests/samples/projects/git/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--project', 'forced-project', '--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertEquals('forced-project', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    def test_alternate_project_argument_does_not_override_detected_project(self):
        """--alternate-project loses to a project detected from the folder."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now = u(int(time.time()))
        entity = 'tests/samples/projects/git/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        # The detected project here is this repo's own folder name, since the
        # sample 'dot_git' dir is not a real .git dir for this path.
        project = os.path.basename(os.path.abspath('.'))
        args = ['--alternate-project', 'alt-project', '--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertEquals(project, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    def test_alternate_project_argument_does_not_override_project_argument(self):
        """--project wins over --alternate-project when both are given."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now = u(int(time.time()))
        entity = 'tests/samples/projects/git/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--project', 'forced-project', '--alternate-project', 'alt-project', '--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertEquals('forced-project', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    def test_alternate_project_argument_used_when_project_not_detected(self):
        """--alternate-project is used when no project can be detected."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        # Copy the file into a bare temp dir so no VCS/project is detectable.
        # NOTE(review): tempdir from mkdtemp is never cleaned up; consider
        # the TemporaryDirectory helper already imported from .utils.
        tempdir = tempfile.mkdtemp()
        entity = 'tests/samples/projects/git/emptyfile.txt'
        shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
        now = u(int(time.time()))
        entity = os.path.join(tempdir, 'emptyfile.txt')
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        execute(args)
        args = ['--file', entity, '--config', config, '--time', now, '--alternate-project', 'alt-project']
        execute(args)
        calls = self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].call_args_list
        # First run: no project at all; second run: the alternate project.
        body = calls[0][0][0].body
        data = json.loads(body)[0]
        self.assertEquals(None, data.get('project'))
        body = calls[1][0][0].body
        data = json.loads(body)[0]
        self.assertEquals('alt-project', data['project'])
    def test_wakatime_project_file(self):
        """A .wakatime-project file sets the project name."""
        self.shared(
            expected_project='waka-project-file',
            entity='projects/wakatime_project_file/emptyfile.txt',
        )
    def test_wakatime_project_file_used_even_when_project_names_hidden(self):
        """An explicit .wakatime-project file overrides --hide-project-names."""
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/wakatime_project_file', os.path.join(tempdir, 'wakatime_project_file'))
        self.shared(
            expected_project='waka-project-file',
            entity=os.path.join(tempdir, 'wakatime_project_file', 'emptyfile.txt'),
            extra_args=['--hide-project-names'],
        )
    def test_git_project_detected(self):
        """Folder containing a .git dir yields project + current branch."""
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        # Samples ship 'dot_git' so the repo's own git doesn't track it;
        # rename it into a real .git directory for the test.
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        self.shared(
            expected_project='git',
            expected_branch='master',
            entity=os.path.join(tempdir, 'git', 'emptyfile.txt'),
        )
    def test_git_project_not_used_when_project_names_hidden(self):
        """--hide-project-names replaces the git project with a generated name
        persisted to .wakatime-project, and strips the branch."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        now = u(int(time.time()))
        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--hide-project-names', '--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertHeartbeatSavedOffline()
        self.assertNotEquals('git', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertEquals(None, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
        # The generated obfuscated name is written to .wakatime-project and
        # must be reused on subsequent runs.
        proj = open(os.path.join(tempdir, 'git', '.wakatime-project')).read()
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        execute(args)
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    def test_git_branch_not_used_when_branch_names_hidden(self):
        """--hide-branch-names keeps the project but drops the branch."""
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        self.shared(
            expected_project='git',
            expected_branch=None,
            entity=os.path.join(tempdir, 'git', 'emptyfile.txt'),
            extra_args=['--hide-branch-names'],
        )
    def test_branch_used_when_project_names_hidden_but_branch_names_visible(self):
        """With a config that shows branch names, --hide-project-names still
        obfuscates the project but the real branch is sent."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        now = u(int(time.time()))
        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        config = 'tests/samples/configs/show_branch_names.cfg'
        args = ['--hide-project-names', '--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertHeartbeatSavedOffline()
        self.assertNotEquals('git', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertNotEquals(None, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
        proj = open(os.path.join(tempdir, 'git', '.wakatime-project')).read()
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        execute(args)
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    @log_capture()
    def test_ioerror_when_reading_git_branch(self, logs):
        """An IOError reading .git/HEAD is logged but detection still sends
        the project with the default 'master' branch."""
        logging.disable(logging.NOTSET)
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        with mock.patch('wakatime.projects.git.open') as mock_open:
            mock_open.side_effect = IOError('')
            self.shared(
                expected_project='git',
                expected_branch='master',
                entity=entity,
            )
        self.assertNothingPrinted()
        actual = self.getLogOutput(logs)
        # Python 3.3+ aliases IOError to OSError in tracebacks.
        expected = 'OSError' if self.isPy33OrNewer else 'IOError'
        self.assertIn(expected, actual)
    def test_git_detached_head_not_used_as_branch(self):
        """A detached HEAD (bare commit sha) must not be reported as a branch."""
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git-with-detached-head', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        self.shared(
            expected_project='git',
            expected_branch=None,
            entity=entity,
        )
    def test_svn_project_detected(self):
        """With git detection disabled, `svn info` output yields the project."""
        with mock.patch('wakatime.projects.git.Git.process') as mock_git:
            mock_git.return_value = False
            with mock.patch('wakatime.projects.subversion.Subversion._has_xcode_tools') as mock_has_xcode:
                mock_has_xcode.return_value = True
                with mock.patch('wakatime.projects.subversion.Popen.communicate') as mock_popen:
                    # Canned `svn info` output from the samples dir.
                    stdout = open('tests/samples/output/svn').read()
                    stderr = ''
                    mock_popen.return_value = DynamicIterable((stdout, stderr), max_calls=1)
                    self.shared(
                        expected_project='svn',
                        entity='projects/svn/afolder/emptyfile.txt',
                    )
    @log_capture()
    def test_svn_exception_handled(self, logs):
        """OSError from the svn subprocess is swallowed: no project, no logs."""
        logging.disable(logging.NOTSET)
        with mock.patch('wakatime.projects.git.Git.process') as mock_git:
            mock_git.return_value = False
            with mock.patch('wakatime.projects.subversion.Subversion._has_xcode_tools') as mock_has_xcode:
                mock_has_xcode.return_value = True
                with mock.patch('wakatime.projects.subversion.Popen') as mock_popen:
                    mock_popen.side_effect = OSError('')
                    with mock.patch('wakatime.projects.subversion.Popen.communicate') as mock_communicate:
                        mock_communicate.side_effect = OSError('')
                        self.shared(
                            expected_project=None,
                            entity='projects/svn/afolder/emptyfile.txt',
                        )
        self.assertNothingPrinted()
        self.assertNothingLogged(logs)
    def test_svn_on_mac_without_xcode_tools_installed(self):
        """On Darwin without Xcode CLI tools, svn detection must be skipped."""
        with mock.patch('wakatime.projects.git.Git.process') as mock_git:
            mock_git.return_value = False
            with mock.patch('wakatime.projects.subversion.platform.system') as mock_system:
                mock_system.return_value = 'Darwin'
                with mock.patch('wakatime.projects.subversion.Popen.communicate') as mock_popen:
                    stdout = open('tests/samples/output/svn').read()
                    stderr = ''
                    # First call (the xcode-tools probe) raises OSError.
                    mock_popen.return_value = DynamicIterable((stdout, stderr), raise_on_calls=[OSError('')])
                    self.shared(
                        expected_project=None,
                        entity='projects/svn/afolder/emptyfile.txt',
                    )
    def test_svn_on_mac_with_xcode_tools_installed(self):
        """On Darwin with Xcode CLI tools present, svn detection proceeds."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now = u(int(time.time()))
        entity = 'tests/samples/projects/svn/afolder/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        with mock.patch('wakatime.projects.git.Git.process') as mock_git:
            mock_git.return_value = False
            with mock.patch('wakatime.projects.subversion.platform.system') as mock_system:
                mock_system.return_value = 'Darwin'
                with mock.patch('wakatime.projects.subversion.Popen') as mock_popen:
                    stdout = open('tests/samples/output/svn').read()
                    stderr = ''

                    # Fake Popen: first call is the xcode-tools probe
                    # (wait() returns 0), second call returns `svn info`.
                    class Dynamic(object):
                        def __init__(self):
                            self.called = 0

                        def communicate(self):
                            self.called += 1
                            if self.called == 2:
                                return (stdout, stderr)

                        def wait(self):
                            if self.called == 1:
                                return 0

                    mock_popen.return_value = Dynamic()
                    execute(args)
        self.assertEquals('svn', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    def test_mercurial_project_detected(self):
        """A .hg folder yields the hg project and the branch from .hg/branch."""
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        with mock.patch('wakatime.projects.git.Git.process') as mock_git:
            mock_git.return_value = False
            now = u(int(time.time()))
            entity = 'tests/samples/projects/hg/emptyfile.txt'
            config = 'tests/samples/configs/good_config.cfg'
            args = ['--file', entity, '--config', config, '--time', now]
            execute(args)
            self.assertEquals('hg', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
            self.assertEquals('test-hg-branch', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
def test_mercurial_project_branch_with_slash_detected(self):
    """Hg branch names containing slashes are reported unmodified."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the mercurial detector runs.
        mock_git.return_value = False
        now = u(int(time.time()))
        entity = 'tests/samples/projects/hg-branch-with-slash/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertEquals('hg-branch-with-slash', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertEquals('branch/with/slash', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
@log_capture()
def test_ioerror_when_reading_mercurial_branch(self, logs):
    """IOError while reading the hg branch file is logged; branch falls back to 'default'."""
    logging.disable(logging.NOTSET)  # re-enable logging silenced by test setup
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        mock_git.return_value = False  # disable git so the hg detector runs
        now = u(int(time.time()))
        entity = 'tests/samples/projects/hg/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        with mock.patch('wakatime.projects.mercurial.open') as mock_open:
            mock_open.side_effect = IOError('')
            execute(args)
            self.assertEquals('hg', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
            self.assertEquals('default', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
            self.assertNothingPrinted()
            actual = self.getLogOutput(logs)
            # Python 3.3+ aliases IOError to OSError, changing the logged name.
            expected = 'OSError' if self.isPy33OrNewer else 'IOError'
            self.assertIn(expected, actual)
def test_git_submodule_detected(self):
    """Project and branch come from the git submodule containing the entity."""
    # TemporaryDirectory cleans up after the test; the previous bare
    # tempfile.mkdtemp() leaked the directory.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        # Sample repos ship their git metadata as 'dot_git'; rename to '.git'
        # so detection treats the copy as a real repository.
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='asubmodule',
            expected_branch='asubbranch',
            entity=entity,
        )
def test_git_submodule_without_option(self):
    """Without a submodules option in the config, the submodule is still used."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='asubmodule',
            expected_branch='asubbranch',
            entity=entity,
            config='git-submodules-without-option.cfg',
        )
def test_git_submodule_detected_and_enabled_globally(self):
    """With submodules enabled in the config, the submodule project is used."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='asubmodule',
            expected_branch='asubbranch',
            entity=entity,
            config='git-submodules-enabled.cfg',
        )
def test_git_submodule_detected_but_disabled_globally(self):
    """With submodules disabled, the parent repo's project/branch are used."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='git',
            expected_branch='master',
            entity=entity,
            config='git-submodules-disabled.cfg',
        )
def test_git_submodule_detected_and_disabled_using_regex(self):
    """A disable-submodules regex matching this path forces the parent project."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='git',
            expected_branch='master',
            entity=entity,
            config='git-submodules-disabled-using-regex.cfg',
        )
def test_git_submodule_detected_and_enabled_using_regex(self):
    """A disable-submodules regex that does NOT match leaves the submodule in use."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='asubmodule',
            expected_branch='asubbranch',
            entity=entity,
            config='git-submodules-enabled-using-regex.cfg',
        )
@log_capture()
def test_git_submodule_detected_with_invalid_regex(self, logs):
    """An invalid disable-submodules regex logs a warning; parent repo is used."""
    logging.disable(logging.NOTSET)

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='git',
            expected_branch='master',
            entity=entity,
            config='git-submodules-invalid-regex.cfg',
        )

        self.assertNothingPrinted()
        actual = self.getLogOutput(logs)
        # re error messages gained position info in Python 3.5.
        expected = u('WakaTime WARNING Regex error (unbalanced parenthesis) for disable git submodules pattern: \\(invalid regex)')
        if self.isPy35OrNewer:
            expected = 'WakaTime WARNING Regex error (unbalanced parenthesis at position 15) for disable git submodules pattern: \\(invalid regex)'
        self.assertEquals(expected, actual)
def test_git_worktree_detected(self):
    """A git worktree resolves to the main repo's project with the worktree's branch."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-worktree', os.path.join(tempdir, 'git-wt'))
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git-wt', 'dot_git'), os.path.join(tempdir, 'git-wt', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        entity = os.path.join(tempdir, 'git-wt', 'emptyfile.txt')
        self.shared(
            expected_project='git',
            expected_branch='worktree-detection-branch',
            entity=entity,
        )
def test_git_worktree_not_detected_when_commondir_missing(self):
    """Without the worktree's 'commondir' file, no project can be resolved."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-worktree', os.path.join(tempdir, 'git-wt'))
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git-wt', 'dot_git'), os.path.join(tempdir, 'git-wt', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        # Break the worktree metadata on purpose.
        os.remove(os.path.join(tempdir, 'git', '.git', 'worktrees', 'git-worktree', 'commondir'))

        entity = os.path.join(tempdir, 'git-wt', 'emptyfile.txt')
        self.shared(
            expected_project=None,
            expected_branch='worktree-detection-branch',
            entity=entity,
        )
@log_capture()
def test_git_path_from_gitdir_link_file(self, logs):
    """_path_from_gitdir_link_file resolves a submodule's gitdir link to the real path."""
    logging.disable(logging.NOTSET)

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        path = os.path.join(tempdir, 'git', 'asubmodule')
        git = Git(None)
        result = git._path_from_gitdir_link_file(path)

        # The link inside the submodule points back into the parent repo.
        expected = os.path.realpath(os.path.join(tempdir, 'git', '.git', 'modules', 'asubmodule'))
        self.assertEquals(expected, result)

    self.assertNothingPrinted()
    self.assertNothingLogged(logs)
@log_capture()
def test_git_path_from_gitdir_link_file_handles_exceptions(self, logs):
    """_path_from_gitdir_link_file tolerates read failures on the .git link file.

    Covers three scenarios: a transient IOError on the first open() (the
    path still resolves, presumably via a retry/alternate read — nothing
    is logged), a UnicodeDecodeError (returns None, logs it), and a
    persistent IOError (returns None, logs it).
    """
    logging.disable(logging.NOTSET)

    tempdir = tempfile.mkdtemp()
    shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
    shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
    shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

    # Keep a handle on the real open() so the side effect can delegate to it.
    self.orig_open = open
    self.count = 0

    with mock.patch('wakatime.projects.git.open') as mock_open:
        # Fail only the first open() call, then behave like the real open.
        def side_effect_function(*args, **kwargs):
            self.count += 1
            if self.count <= 1:
                raise IOError('')
            return self.orig_open(*args, **kwargs)

        mock_open.side_effect = side_effect_function

        git = Git(None)
        path = os.path.join(tempdir, 'git', 'asubmodule')
        result = git._path_from_gitdir_link_file(path)
        expected = os.path.realpath(os.path.join(tempdir, 'git', '.git', 'modules', 'asubmodule'))
        self.assertEquals(expected, result)
        self.assertNothingPrinted()
        self.assertNothingLogged(logs)

    with mock.patch('wakatime.projects.git.open') as mock_open:
        mock_open.side_effect = UnicodeDecodeError('utf8', ''.encode('utf8'), 0, 0, '')

        git = Git(None)
        path = os.path.join(tempdir, 'git', 'asubmodule')
        result = git._path_from_gitdir_link_file(path)
        self.assertIsNone(result)
        self.assertNothingPrinted()
        actual = self.getLogOutput(logs)
        expected = 'UnicodeDecodeError'
        self.assertIn(expected, actual)

    with mock.patch('wakatime.projects.git.open') as mock_open:
        mock_open.side_effect = IOError('')

        git = Git(None)
        path = os.path.join(tempdir, 'git', 'asubmodule')
        result = git._path_from_gitdir_link_file(path)
        self.assertIsNone(result)
        self.assertNothingPrinted()
        actual = self.getLogOutput(logs)
        # Python 3.3+ aliases IOError to OSError, changing the logged name.
        expected = 'OSError' if self.isPy33OrNewer else 'IOError'
        self.assertIn(expected, actual)
@log_capture()
def test_git_path_from_gitdir_link_file_handles_invalid_link(self, logs):
    """A gitdir link pointing at a missing parent repo resolves to None, silently."""
    logging.disable(logging.NOTSET)

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        # Deliberately do NOT rename the parent repo's dot_git, so the
        # submodule's .git link points at a nonexistent repository.
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        path = os.path.join(tempdir, 'git', 'asubmodule')
        git = Git(None)
        result = git._path_from_gitdir_link_file(path)
        expected = None
        self.assertEquals(expected, result)

    self.assertNothingPrinted()
    self.assertNothingLogged(logs)
def test_git_branch_with_slash(self):
    """Git branch names containing slashes are reported unmodified."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-branch-with-slash', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        self.shared(
            expected_project='git',
            expected_branch='branch/with/slash',
            entity=os.path.join(tempdir, 'git', 'emptyfile.txt'),
        )
@log_capture()
def test_project_map(self, logs):
    """A [projectmap] config entry maps the detected folder to a configured name."""
    logging.disable(logging.NOTSET)

    mocked_response = Response()
    mocked_response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = mocked_response

    execute([
        '--file', 'tests/samples/projects/project_map/emptyfile.txt',
        '--config', 'tests/samples/configs/project_map.cfg',
        '--time', u(int(time.time())),
    ])

    pushed = self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]
    self.assertEquals('proj-map', pushed['project'])
    self.assertNothingPrinted()
    self.assertNothingLogged(logs)
@log_capture()
def test_project_map_group_usage(self, logs):
    """Regex capture groups in a [projectmap] key substitute into the mapped name."""
    logging.disable(logging.NOTSET)
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    now = u(int(time.time()))
    # 'project_map42' matches a pattern with a capture group, yielding
    # 'proj-map42' (see project_map.cfg).
    entity = 'tests/samples/projects/project_map42/emptyfile.txt'
    config = 'tests/samples/configs/project_map.cfg'
    args = ['--file', entity, '--config', config, '--time', now]
    execute(args)
    self.assertEquals('proj-map42', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    self.assertNothingPrinted()
    self.assertNothingLogged(logs)
@log_capture()
def test_project_map_with_invalid_regex(self, logs):
    """An invalid [projectmap] regex logs a warning but the heartbeat still succeeds."""
    logging.disable(logging.NOTSET)
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
    now = u(int(time.time()))
    entity = 'tests/samples/projects/project_map42/emptyfile.txt'
    config = 'tests/samples/configs/project_map_invalid.cfg'
    args = ['--file', entity, '--config', config, '--time', now]
    retval = execute(args)
    self.assertEquals(retval, SUCCESS)
    self.assertNothingPrinted()
    actual = self.getLogOutput(logs)
    # re error messages gained position info in Python 3.5.
    expected = u('WakaTime WARNING Regex error (unexpected end of regular expression) for projectmap pattern: invalid[({regex')
    if self.isPy35OrNewer:
        expected = u('WakaTime WARNING Regex error (unterminated character set at position 7) for projectmap pattern: invalid[({regex')
    self.assertEquals(expected, actual)
@log_capture()
def test_project_map_with_replacement_group_index_error(self, logs):
    """A [projectmap] replacement referencing a missing capture group logs a warning."""
    logging.disable(logging.NOTSET)
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    now = u(int(time.time()))
    entity = 'tests/samples/projects/project_map42/emptyfile.txt'
    config = 'tests/samples/configs/project_map_malformed.cfg'
    args = ['--file', entity, '--config', config, '--time', now]
    retval = execute(args)
    # The plain Response has status_code 0, so execute reports an API
    # error while still emitting the regex warning below.
    self.assertEquals(retval, API_ERROR)
    self.assertNothingPrinted()
    actual = self.getLogOutput(logs)
    expected = u('WakaTime WARNING Regex error (tuple index out of range) for projectmap pattern: proj-map{3}')
    self.assertEquals(expected, actual)
@log_capture()
def test_project_map_allows_duplicate_keys(self, logs):
    """Duplicate [projectmap] keys are tolerated; one entry wins deterministically.

    The winning name 'proj-map-duplicate-5' is defined in
    project_map_with_duplicate_keys.cfg.
    """
    logging.disable(logging.NOTSET)
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    now = u(int(time.time()))
    entity = 'tests/samples/projects/project_map/emptyfile.txt'
    config = 'tests/samples/configs/project_map_with_duplicate_keys.cfg'
    args = ['--file', entity, '--config', config, '--time', now]
    execute(args)
    self.assertEquals('proj-map-duplicate-5', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    self.assertNothingPrinted()
    self.assertNothingLogged(logs)
@log_capture()
def test_project_map_allows_colon_in_key(self, logs):
    """[projectmap] keys containing a colon still parse and match correctly."""
    logging.disable(logging.NOTSET)
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    now = u(int(time.time()))
    entity = 'tests/samples/projects/project_map/emptyfile.txt'
    config = 'tests/samples/configs/project_map_with_colon_in_key.cfg'
    args = ['--file', entity, '--config', config, '--time', now]
    execute(args)
    self.assertEquals('proj-map-match', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
    self.assertNothingPrinted()
    self.assertNothingLogged(logs)
@log_capture()
def test_exclude_unknown_project_when_project_detected(self, logs):
    """exclude_unknown_project must not drop heartbeats that DO carry a project."""
    logging.disable(logging.NOTSET)
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    with TemporaryDirectory() as tempdir:
        # Copy the entity into a plain temp dir so no VCS project is
        # detected; the project comes only from --project.
        entity = 'tests/samples/codefiles/emptyfile.txt'
        shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
        entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
        config = 'tests/samples/configs/exclude_unknown_project.cfg'
        args = ['--file', entity, '--project', 'proj-arg', '--config', config, '--log-file', '~/.wakatime.log']
        execute(args)
        self.assertNothingPrinted()
        self.assertNothingLogged(logs)
        self.assertEquals('proj-arg', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
def test_generate_project_name(self):
    """generate_project_name() yields a non-trivial name that varies per call."""
    first = generate_project_name()
    second = generate_project_name()
    self.assertGreater(len(first), 1)
    self.assertNotEqual(first, second)
| 41.788235 | 147 | 0.633136 |
from wakatime.main import execute
from wakatime.packages import requests
from wakatime.packages.requests.models import Response
import logging
import os
import shutil
import tempfile
import time
from testfixtures import log_capture
from wakatime.compat import u, open
from wakatime.constants import API_ERROR, SUCCESS
from wakatime.exceptions import NotYetImplemented
from wakatime.project import generate_project_name
from wakatime.projects.base import BaseProject
from wakatime.projects.git import Git
from .utils import ANY, DynamicIterable, TestCase, TemporaryDirectory, CustomResponse, mock, json
class ProjectTestCase(TestCase):
    """Tests for project and branch auto-detection (git, svn, hg, project files)."""

    # Attribute paths patched by TestCase for every test; a [target, value]
    # pair presumably supplies a fixed return value (see .utils.TestCase).
    patch_these = [
        'wakatime.packages.requests.adapters.HTTPAdapter.send',
        'wakatime.offlinequeue.Queue.push',
        ['wakatime.offlinequeue.Queue.pop', None],
        ['wakatime.offlinequeue.Queue.connect', None],
        'wakatime.session_cache.SessionCache.save',
        'wakatime.session_cache.SessionCache.delete',
        ['wakatime.session_cache.SessionCache.get', requests.session],
        ['wakatime.session_cache.SessionCache.connect', None],
    ]
def shared(self, expected_project='', expected_branch=ANY, entity='', config='good_config.cfg', extra_args=[]):
    """Send one heartbeat for *entity* and assert the detected project/branch.

    Args:
        expected_project: project name the heartbeat must carry (ANY matches anything).
        expected_branch: branch name the heartbeat must carry (ANY matches anything).
        entity: file path; if it does not exist as given, it is resolved
            relative to tests/samples.
        config: config filename inside tests/samples/configs.
        extra_args: extra CLI arguments appended to the invocation.
            (Mutable default is safe here: the list is only concatenated,
            never mutated.)
    """
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()

    config = os.path.join('tests/samples/configs', config)
    if not os.path.exists(entity):
        entity = os.path.realpath(os.path.join('tests/samples', entity))
    now = u(int(time.time()))

    args = ['--file', entity, '--config', config, '--time', now] + extra_args

    retval = execute(args)
    self.assertEquals(retval, SUCCESS)
    self.assertNothingPrinted()

    # Expected heartbeat payload; ANY fields are not compared exactly.
    heartbeat = {
        'language': ANY,
        'lines': ANY,
        'entity': os.path.realpath(entity),
        'project': expected_project,
        'branch': expected_branch,
        'dependencies': ANY,
        'time': float(now),
        'type': 'file',
        'is_write': False,
        'user_agent': ANY,
    }
    self.assertHeartbeatSent(heartbeat)
    self.assertHeartbeatNotSavedOffline()
    self.assertOfflineHeartbeatsSynced()
    self.assertSessionCacheSaved()
def test_project_base(self):
    """BaseProject is an abstract interface: all of its hooks raise NotYetImplemented."""
    project = BaseProject('tests/samples/codefiles/see.h')
    for hook in (project.process, project.name, project.branch):
        with self.assertRaises(NotYetImplemented):
            hook()
def test_project_argument_overrides_detected_project(self):
    """--project wins over any project detected from revision control."""
    resp = Response()
    resp.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = resp

    execute([
        '--project', 'forced-project',
        '--file', 'tests/samples/projects/git/emptyfile.txt',
        '--config', 'tests/samples/configs/good_config.cfg',
        '--time', u(int(time.time())),
    ])

    heartbeat = self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]
    self.assertEquals('forced-project', heartbeat['project'])
def test_alternate_project_argument_does_not_override_detected_project(self):
    """--alternate-project is ignored when a project can be auto-detected."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    now = u(int(time.time()))
    entity = 'tests/samples/projects/git/emptyfile.txt'
    config = 'tests/samples/configs/good_config.cfg'
    # The detected project is presumably this working copy's own repo,
    # i.e. the name of the current directory — TODO confirm.
    project = os.path.basename(os.path.abspath('.'))
    args = ['--alternate-project', 'alt-project', '--file', entity, '--config', config, '--time', now]
    execute(args)
    self.assertEquals(project, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
def test_alternate_project_argument_does_not_override_project_argument(self):
    """--project takes precedence over --alternate-project."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    now = u(int(time.time()))
    entity = 'tests/samples/projects/git/emptyfile.txt'
    config = 'tests/samples/configs/good_config.cfg'
    args = ['--project', 'forced-project', '--alternate-project', 'alt-project', '--file', entity, '--config', config, '--time', now]
    execute(args)
    self.assertEquals('forced-project', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
def test_alternate_project_argument_used_when_project_not_detected(self):
    """--alternate-project is used only when no project can be detected."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        entity = 'tests/samples/projects/git/emptyfile.txt'
        shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))

        now = u(int(time.time()))
        entity = os.path.join(tempdir, 'emptyfile.txt')
        config = 'tests/samples/configs/good_config.cfg'

        # First run: no alternate project given.
        args = ['--file', entity, '--config', config, '--time', now]
        execute(args)

        # Second run: alternate project supplied.
        args = ['--file', entity, '--config', config, '--time', now, '--alternate-project', 'alt-project']
        execute(args)

        calls = self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].call_args_list

        # Without --alternate-project and nothing detected, no project is sent.
        body = calls[0][0][0].body
        data = json.loads(body)[0]
        self.assertEquals(None, data.get('project'))

        # With --alternate-project, it fills the gap.
        body = calls[1][0][0].body
        data = json.loads(body)[0]
        self.assertEquals('alt-project', data['project'])
def test_wakatime_project_file(self):
    """A .wakatime-project file in the folder sets the project name directly."""
    self.shared(
        entity='projects/wakatime_project_file/emptyfile.txt',
        expected_project='waka-project-file',
    )
def test_wakatime_project_file_used_even_when_project_names_hidden(self):
    """--hide-project-names still honors an explicit .wakatime-project file."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/wakatime_project_file', os.path.join(tempdir, 'wakatime_project_file'))

        self.shared(
            expected_project='waka-project-file',
            entity=os.path.join(tempdir, 'wakatime_project_file', 'emptyfile.txt'),
            extra_args=['--hide-project-names'],
        )
def test_git_project_detected(self):
    """Project and branch are detected from a plain git repository."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        # Sample repos ship '.git' as 'dot_git'; rename so detection works.
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        self.shared(
            expected_project='git',
            expected_branch='master',
            entity=os.path.join(tempdir, 'git', 'emptyfile.txt'),
        )
def test_git_project_not_used_when_project_names_hidden(self):
    """--hide-project-names replaces the git project with a stable generated name.

    The generated name is persisted to a .wakatime-project file so a second
    run reuses the same obfuscated project; the branch is omitted entirely.
    """
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        now = u(int(time.time()))
        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--hide-project-names', '--file', entity, '--config', config, '--time', now]

        execute(args)

        self.assertHeartbeatSavedOffline()  # status_code 0 is not a success
        self.assertNotEquals('git', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertEquals(None, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])

        # The obfuscated name was written next to the repo and is reused.
        proj = open(os.path.join(tempdir, 'git', '.wakatime-project')).read()
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])

        execute(args)
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
def test_git_branch_not_used_when_branch_names_hidden(self):
    """--hide-branch-names keeps the project but drops the branch."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        self.shared(
            expected_project='git',
            expected_branch=None,
            entity=os.path.join(tempdir, 'git', 'emptyfile.txt'),
            extra_args=['--hide-branch-names'],
        )
def test_branch_used_when_project_names_hidden_but_branch_names_visible(self):
    """With hidden project names but visible branch names, the branch is kept."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        now = u(int(time.time()))
        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        config = 'tests/samples/configs/show_branch_names.cfg'
        args = ['--hide-project-names', '--file', entity, '--config', config, '--time', now]

        execute(args)

        self.assertHeartbeatSavedOffline()  # status_code 0 is not a success
        self.assertNotEquals('git', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertNotEquals(None, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])

        # The obfuscated project name is persisted and reused.
        proj = open(os.path.join(tempdir, 'git', '.wakatime-project')).read()
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])

        execute(args)
        self.assertEquals(proj, self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
@log_capture()
def test_ioerror_when_reading_git_branch(self, logs):
    """IOError while reading the git HEAD is logged; detection still succeeds."""
    logging.disable(logging.NOTSET)

    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        with mock.patch('wakatime.projects.git.open') as mock_open:
            mock_open.side_effect = IOError('')

            self.shared(
                expected_project='git',
                expected_branch='master',
                entity=entity,
            )

    self.assertNothingPrinted()
    actual = self.getLogOutput(logs)
    # Python 3.3+ aliases IOError to OSError, changing the logged name.
    expected = 'OSError' if self.isPy33OrNewer else 'IOError'
    self.assertIn(expected, actual)
def test_git_detached_head_not_used_as_branch(self):
    """A repository with a detached HEAD reports no branch."""
    # TemporaryDirectory auto-cleans; the previous mkdtemp() leaked the dir.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-detached-head', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))

        entity = os.path.join(tempdir, 'git', 'emptyfile.txt')
        self.shared(
            expected_project='git',
            expected_branch=None,
            entity=entity,
        )
def test_svn_project_detected(self):
    """Svn project detection parses canned `svn info` output for the project name."""
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the svn detector runs.
        mock_git.return_value = False

        with mock.patch('wakatime.projects.subversion.Subversion._has_xcode_tools') as mock_has_xcode:
            mock_has_xcode.return_value = True

            with mock.patch('wakatime.projects.subversion.Popen.communicate') as mock_popen:
                stdout = open('tests/samples/output/svn').read()
                stderr = ''
                # max_calls presumably caps allowed invocations (see .utils.DynamicIterable).
                mock_popen.return_value = DynamicIterable((stdout, stderr), max_calls=1)

                self.shared(
                    expected_project='svn',
                    entity='projects/svn/afolder/emptyfile.txt',
                )
@log_capture()
def test_svn_exception_handled(self, logs):
    """OSError while invoking svn is handled quietly: no project, nothing logged."""
    logging.disable(logging.NOTSET)
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the svn detector runs.
        mock_git.return_value = False

        with mock.patch('wakatime.projects.subversion.Subversion._has_xcode_tools') as mock_has_xcode:
            mock_has_xcode.return_value = True

            with mock.patch('wakatime.projects.subversion.Popen') as mock_popen:
                mock_popen.side_effect = OSError('')

                # Also patch communicate in case Popen construction itself
                # succeeds on some codepath — both ways must be handled.
                with mock.patch('wakatime.projects.subversion.Popen.communicate') as mock_communicate:
                    mock_communicate.side_effect = OSError('')

                    self.shared(
                        expected_project=None,
                        entity='projects/svn/afolder/emptyfile.txt',
                    )

                    self.assertNothingPrinted()
                    self.assertNothingLogged(logs)
def test_svn_on_mac_without_xcode_tools_installed(self):
    """On Darwin, svn detection is skipped when the xcode-tools probe fails."""
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the svn detector runs.
        mock_git.return_value = False

        with mock.patch('wakatime.projects.subversion.platform.system') as mock_system:
            mock_system.return_value = 'Darwin'

            with mock.patch('wakatime.projects.subversion.Popen.communicate') as mock_popen:
                stdout = open('tests/samples/output/svn').read()
                stderr = ''
                # raise_on_calls makes the first subprocess call raise
                # OSError, emulating missing xcode CLI tools, so no
                # project is detected.
                mock_popen.return_value = DynamicIterable((stdout, stderr), raise_on_calls=[OSError('')])

                self.shared(
                    expected_project=None,
                    entity='projects/svn/afolder/emptyfile.txt',
                )
def test_svn_on_mac_with_xcode_tools_installed(self):
    """Svn project is detected on Darwin when the xcode CLI tools are present.

    On Mac the svn detector appears to spawn two subprocesses: first a
    probe for the xcode command-line tools, then `svn info`. The Dynamic
    mock below stands in for both Popen results in sequence.
    """
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response

    now = u(int(time.time()))
    entity = 'tests/samples/projects/svn/afolder/emptyfile.txt'
    config = 'tests/samples/configs/good_config.cfg'
    args = ['--file', entity, '--config', config, '--time', now]

    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the svn detector gets a chance to run.
        mock_git.return_value = False

        with mock.patch('wakatime.projects.subversion.platform.system') as mock_system:
            mock_system.return_value = 'Darwin'

            with mock.patch('wakatime.projects.subversion.Popen') as mock_popen:
                stdout = open('tests/samples/output/svn').read()
                stderr = ''

                class Dynamic(object):
                    # Emulates two consecutive Popen objects:
                    # call 1: wait() -> 0 (xcode tools probe succeeds),
                    # call 2: communicate() -> canned `svn info` output.
                    def __init__(self):
                        self.called = 0

                    def communicate(self):
                        self.called += 1
                        if self.called == 2:
                            return (stdout, stderr)

                    def wait(self):
                        if self.called == 1:
                            return 0

                mock_popen.return_value = Dynamic()

                execute(args)

                self.assertEquals('svn', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
def test_mercurial_project_detected(self):
    """Project and branch are read from the Mercurial repo containing the file."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the mercurial detector runs.
        mock_git.return_value = False
        now = u(int(time.time()))
        entity = 'tests/samples/projects/hg/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertEquals('hg', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertEquals('test-hg-branch', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
def test_mercurial_project_branch_with_slash_detected(self):
    """Hg branch names containing slashes are reported unmodified."""
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        # Disable git detection so the mercurial detector runs.
        mock_git.return_value = False
        now = u(int(time.time()))
        entity = 'tests/samples/projects/hg-branch-with-slash/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        execute(args)
        self.assertEquals('hg-branch-with-slash', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
        self.assertEquals('branch/with/slash', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
@log_capture()
def test_ioerror_when_reading_mercurial_branch(self, logs):
    """IOError while reading the hg branch file is logged; branch falls back to 'default'."""
    logging.disable(logging.NOTSET)  # re-enable logging silenced by test setup
    response = Response()
    response.status_code = 0
    self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
    with mock.patch('wakatime.projects.git.Git.process') as mock_git:
        mock_git.return_value = False  # disable git so the hg detector runs
        now = u(int(time.time()))
        entity = 'tests/samples/projects/hg/emptyfile.txt'
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--file', entity, '--config', config, '--time', now]
        with mock.patch('wakatime.projects.mercurial.open') as mock_open:
            mock_open.side_effect = IOError('')
            execute(args)
            self.assertEquals('hg', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
            self.assertEquals('default', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['branch'])
            self.assertNothingPrinted()
            actual = self.getLogOutput(logs)
            # Python 3.3+ aliases IOError to OSError, changing the logged name.
            expected = 'OSError' if self.isPy33OrNewer else 'IOError'
            self.assertIn(expected, actual)
def test_git_submodule_detected(self):
    """Project and branch come from the git submodule containing the entity."""
    # TemporaryDirectory cleans up after the test; the previous bare
    # tempfile.mkdtemp() leaked the directory.
    with TemporaryDirectory() as tempdir:
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        # Sample repos ship their git metadata as 'dot_git'; rename to '.git'.
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))

        entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
        self.shared(
            expected_project='asubmodule',
            expected_branch='asubbranch',
            entity=entity,
        )
def test_git_submodule_without_option(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
self.shared(
expected_project='asubmodule',
expected_branch='asubbranch',
entity=entity,
config='git-submodules-without-option.cfg',
)
def test_git_submodule_detected_and_enabled_globally(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
self.shared(
expected_project='asubmodule',
expected_branch='asubbranch',
entity=entity,
config='git-submodules-enabled.cfg',
)
def test_git_submodule_detected_but_disabled_globally(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
self.shared(
expected_project='git',
expected_branch='master',
entity=entity,
config='git-submodules-disabled.cfg',
)
def test_git_submodule_detected_and_disabled_using_regex(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
self.shared(
expected_project='git',
expected_branch='master',
entity=entity,
config='git-submodules-disabled-using-regex.cfg',
)
def test_git_submodule_detected_and_enabled_using_regex(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
self.shared(
expected_project='asubmodule',
expected_branch='asubbranch',
entity=entity,
config='git-submodules-enabled-using-regex.cfg',
)
@log_capture()
def test_git_submodule_detected_with_invalid_regex(self, logs):
logging.disable(logging.NOTSET)
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
entity = os.path.join(tempdir, 'git', 'asubmodule', 'emptyfile.txt')
self.shared(
expected_project='git',
expected_branch='master',
entity=entity,
config='git-submodules-invalid-regex.cfg',
)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = u('WakaTime WARNING Regex error (unbalanced parenthesis) for disable git submodules pattern: \\(invalid regex)')
if self.isPy35OrNewer:
expected = 'WakaTime WARNING Regex error (unbalanced parenthesis at position 15) for disable git submodules pattern: \\(invalid regex)'
self.assertEquals(expected, actual)
def test_git_worktree_detected(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-worktree', os.path.join(tempdir, 'git-wt'))
shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git-wt', 'dot_git'), os.path.join(tempdir, 'git-wt', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
entity = os.path.join(tempdir, 'git-wt', 'emptyfile.txt')
self.shared(
expected_project='git',
expected_branch='worktree-detection-branch',
entity=entity,
)
def test_git_worktree_not_detected_when_commondir_missing(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-worktree', os.path.join(tempdir, 'git-wt'))
shutil.copytree('tests/samples/projects/git', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git-wt', 'dot_git'), os.path.join(tempdir, 'git-wt', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
os.remove(os.path.join(tempdir, 'git', '.git', 'worktrees', 'git-worktree', 'commondir'))
entity = os.path.join(tempdir, 'git-wt', 'emptyfile.txt')
self.shared(
expected_project=None,
expected_branch='worktree-detection-branch',
entity=entity,
)
@log_capture()
def test_git_path_from_gitdir_link_file(self, logs):
logging.disable(logging.NOTSET)
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
path = os.path.join(tempdir, 'git', 'asubmodule')
git = Git(None)
result = git._path_from_gitdir_link_file(path)
expected = os.path.realpath(os.path.join(tempdir, 'git', '.git', 'modules', 'asubmodule'))
self.assertEquals(expected, result)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
    @log_capture()
    def test_git_path_from_gitdir_link_file_handles_exceptions(self, logs):
        """Exercise _path_from_gitdir_link_file's error handling.

        Three scenarios against the same submodule fixture:
        1. a transient IOError on the first open() -> retried path still resolves;
        2. a UnicodeDecodeError -> returns None and logs the error;
        3. a persistent IOError -> returns None and logs the error.
        """
        logging.disable(logging.NOTSET)
        tempdir = tempfile.mkdtemp()
        shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
        shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
        shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
        # Keep a reference to the real open() and a call counter on self so the
        # side-effect closure below can fail only the first call.
        self.orig_open = open
        self.count = 0
        # Scenario 1: first open() raises, later calls succeed.
        with mock.patch('wakatime.projects.git.open') as mock_open:
            def side_effect_function(*args, **kwargs):
                self.count += 1
                if self.count <= 1:
                    raise IOError('')
                return self.orig_open(*args, **kwargs)
            mock_open.side_effect = side_effect_function
            git = Git(None)
            path = os.path.join(tempdir, 'git', 'asubmodule')
            result = git._path_from_gitdir_link_file(path)
            expected = os.path.realpath(os.path.join(tempdir, 'git', '.git', 'modules', 'asubmodule'))
            self.assertEquals(expected, result)
            self.assertNothingPrinted()
            self.assertNothingLogged(logs)
        # Scenario 2: decoding failure yields None and a logged UnicodeDecodeError.
        with mock.patch('wakatime.projects.git.open') as mock_open:
            mock_open.side_effect = UnicodeDecodeError('utf8', ''.encode('utf8'), 0, 0, '')
            git = Git(None)
            path = os.path.join(tempdir, 'git', 'asubmodule')
            result = git._path_from_gitdir_link_file(path)
            self.assertIsNone(result)
            self.assertNothingPrinted()
            actual = self.getLogOutput(logs)
            expected = 'UnicodeDecodeError'
            self.assertIn(expected, actual)
        # Scenario 3: persistent IOError yields None; Python 3.3+ logs it as OSError.
        with mock.patch('wakatime.projects.git.open') as mock_open:
            mock_open.side_effect = IOError('')
            git = Git(None)
            path = os.path.join(tempdir, 'git', 'asubmodule')
            result = git._path_from_gitdir_link_file(path)
            self.assertIsNone(result)
            self.assertNothingPrinted()
            actual = self.getLogOutput(logs)
            expected = 'OSError' if self.isPy33OrNewer else 'IOError'
            self.assertIn(expected, actual)
@log_capture()
def test_git_path_from_gitdir_link_file_handles_invalid_link(self, logs):
logging.disable(logging.NOTSET)
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-with-submodule', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'asubmodule', 'dot_git'), os.path.join(tempdir, 'git', 'asubmodule', '.git'))
path = os.path.join(tempdir, 'git', 'asubmodule')
git = Git(None)
result = git._path_from_gitdir_link_file(path)
expected = None
self.assertEquals(expected, result)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
def test_git_branch_with_slash(self):
tempdir = tempfile.mkdtemp()
shutil.copytree('tests/samples/projects/git-branch-with-slash', os.path.join(tempdir, 'git'))
shutil.move(os.path.join(tempdir, 'git', 'dot_git'), os.path.join(tempdir, 'git', '.git'))
self.shared(
expected_project='git',
expected_branch='branch/with/slash',
entity=os.path.join(tempdir, 'git', 'emptyfile.txt'),
)
@log_capture()
def test_project_map(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now = u(int(time.time()))
entity = 'tests/samples/projects/project_map/emptyfile.txt'
config = 'tests/samples/configs/project_map.cfg'
args = ['--file', entity, '--config', config, '--time', now]
execute(args)
self.assertEquals('proj-map', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
self.assertNothingPrinted()
self.assertNothingLogged(logs)
@log_capture()
def test_project_map_group_usage(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now = u(int(time.time()))
entity = 'tests/samples/projects/project_map42/emptyfile.txt'
config = 'tests/samples/configs/project_map.cfg'
args = ['--file', entity, '--config', config, '--time', now]
execute(args)
self.assertEquals('proj-map42', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
self.assertNothingPrinted()
self.assertNothingLogged(logs)
@log_capture()
def test_project_map_with_invalid_regex(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
now = u(int(time.time()))
entity = 'tests/samples/projects/project_map42/emptyfile.txt'
config = 'tests/samples/configs/project_map_invalid.cfg'
args = ['--file', entity, '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = u('WakaTime WARNING Regex error (unexpected end of regular expression) for projectmap pattern: invalid[({regex')
if self.isPy35OrNewer:
expected = u('WakaTime WARNING Regex error (unterminated character set at position 7) for projectmap pattern: invalid[({regex')
self.assertEquals(expected, actual)
@log_capture()
def test_project_map_with_replacement_group_index_error(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now = u(int(time.time()))
entity = 'tests/samples/projects/project_map42/emptyfile.txt'
config = 'tests/samples/configs/project_map_malformed.cfg'
args = ['--file', entity, '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, API_ERROR)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = u('WakaTime WARNING Regex error (tuple index out of range) for projectmap pattern: proj-map{3}')
self.assertEquals(expected, actual)
@log_capture()
def test_project_map_allows_duplicate_keys(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now = u(int(time.time()))
entity = 'tests/samples/projects/project_map/emptyfile.txt'
config = 'tests/samples/configs/project_map_with_duplicate_keys.cfg'
args = ['--file', entity, '--config', config, '--time', now]
execute(args)
self.assertEquals('proj-map-duplicate-5', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
self.assertNothingPrinted()
self.assertNothingLogged(logs)
@log_capture()
def test_project_map_allows_colon_in_key(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now = u(int(time.time()))
entity = 'tests/samples/projects/project_map/emptyfile.txt'
config = 'tests/samples/configs/project_map_with_colon_in_key.cfg'
args = ['--file', entity, '--config', config, '--time', now]
execute(args)
self.assertEquals('proj-map-match', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
self.assertNothingPrinted()
self.assertNothingLogged(logs)
@log_capture()
def test_exclude_unknown_project_when_project_detected(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
config = 'tests/samples/configs/exclude_unknown_project.cfg'
args = ['--file', entity, '--project', 'proj-arg', '--config', config, '--log-file', '~/.wakatime.log']
execute(args)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
self.assertEquals('proj-arg', self.patched['wakatime.offlinequeue.Queue.push'].call_args[0][0]['project'])
def test_generate_project_name(self):
self.assertGreater(len(generate_project_name()), 1)
self.assertNotEqual(generate_project_name(), generate_project_name())
| true | true |
f727a9f08929b08e1199a143d5fe11d7e1b8cf8a | 2,647 | py | Python | flask/tutorial/flaskr/blog.py | jianchengwang/todo-python | 36bdaf6fae714531946047ececca995d60f86e4a | [
"MIT"
] | null | null | null | flask/tutorial/flaskr/blog.py | jianchengwang/todo-python | 36bdaf6fae714531946047ececca995d60f86e4a | [
"MIT"
] | null | null | null | flask/tutorial/flaskr/blog.py | jianchengwang/todo-python | 36bdaf6fae714531946047ececca995d60f86e4a | [
"MIT"
] | null | null | null | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
    """Show all posts with their authors, most recent first."""
    query = (
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' ORDER BY created DESC'
    )
    posts = get_db().execute(query).fetchall()
    return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
    """Create a new post for the current user.

    GET renders the form; POST validates the title, inserts the post and
    redirects to the index, re-rendering the form with a flash on error.
    """
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']

        if not title:
            flash('Title is required.')
        else:
            db = get_db()
            db.execute(
                'INSERT INTO post (title, body, author_id)'
                ' VALUES (?, ?, ?)',
                (title, body, g.user['id'])
            )
            db.commit()
            return redirect(url_for('blog.index'))

    return render_template('blog/create.html')
def get_post(id, check_author=True):
    """Fetch a post by id, joined with its author.

    Aborts with 404 when the post does not exist and, unless *check_author*
    is False, with 403 when it is not owned by the logged-in user.
    """
    row = get_db().execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' WHERE p.id = ?',
        (id,)
    ).fetchone()

    if row is None:
        abort(404, "Post id {0} doesn't exist.".format(id))
    if check_author and row['author_id'] != g.user['id']:
        abort(403)
    return row
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
    """Edit an existing post owned by the current user (404/403 otherwise)."""
    post = get_post(id)

    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']

        if not title:
            flash('Title is required.')
        else:
            db = get_db()
            db.execute(
                'UPDATE post SET title = ?, body = ?'
                ' WHERE id = ?',
                (title, body, id)
            )
            db.commit()
            return redirect(url_for('blog.index'))

    return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
    """Delete a post and return to the index."""
    get_post(id)  # aborts with 404/403 if missing or not owned by the user
    db = get_db()
    db.execute('DELETE FROM post WHERE id = ?', (id,))
    db.commit()
    return redirect(url_for('blog.index'))
| 27.28866 | 69 | 0.530412 | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
db = get_db()
posts = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO post (title, body, author_id)'
' VALUES (?, ?, ?)',
(title, body, g.user['id'])
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
post = get_db().execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' WHERE p.id = ?',
(id,)
).fetchone()
if post is None:
abort(404, "Post id {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required.'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'UPDATE post SET title = ?, body = ?'
' WHERE id = ?',
(title, body, id)
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
get_post(id)
db = get_db()
db.execute('DELETE FROM post WHERE id = ?', (id,))
db.commit()
return redirect(url_for('blog.index'))
| true | true |
f727ad7b679d2e1f9b7546a04cf754276693bc15 | 418 | py | Python | backend/wishlist/settings/development.py | oaguy1/wishlist.vote | 3440cc05cfe039e905064fd2dfcb0ae06402e12c | [
"MIT"
] | null | null | null | backend/wishlist/settings/development.py | oaguy1/wishlist.vote | 3440cc05cfe039e905064fd2dfcb0ae06402e12c | [
"MIT"
] | null | null | null | backend/wishlist/settings/development.py | oaguy1/wishlist.vote | 3440cc05cfe039e905064fd2dfcb0ae06402e12c | [
"MIT"
] | null | null | null | from .base import *
# Development-only overrides; shared settings come from the base module's star
# import above (which presumably also provides the `os` module used below —
# confirm, since this file has no `import os` of its own).
DEBUG = True  # development convenience; must be False in production
ALLOWED_HOSTS = []  # empty is acceptable while DEBUG is True
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        # Credentials are injected through environment variables
        # (docker-compose style); missing vars resolve to None.
        'NAME': os.environ.get("POSTGRES_DB"),
        'USER': os.environ.get("POSTGRES_USER"),
        'PASSWORD': os.environ.get("POSTGRES_PASSWORD"),
        'HOST': 'db',  # database service hostname
        'PORT': 5432,
    }
}
DEBUG = True
ALLOWED_HOSTS = []
S = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get("POSTGRES_DB"),
'USER': os.environ.get("POSTGRES_USER"),
'PASSWORD': os.environ.get("POSTGRES_PASSWORD"),
'HOST': 'db',
'PORT': 5432,
}
} | true | true |
f727af1c0db0d85bc6562a4f4ff54917e782503c | 1,030 | py | Python | annotater/memetext/decorators.py | stricoff92/annotater | 8ca471477e2d567945e14f09d3d763d379e7587e | [
"MIT"
] | null | null | null | annotater/memetext/decorators.py | stricoff92/annotater | 8ca471477e2d567945e14f09d3d763d379e7587e | [
"MIT"
] | null | null | null | annotater/memetext/decorators.py | stricoff92/annotater | 8ca471477e2d567945e14f09d3d763d379e7587e | [
"MIT"
] | null | null | null |
from functools import wraps
from django.http import HttpResponse
from rest_framework.response import Response
from rest_framework import status
from website.constants import WIDGET_NAMES
def user_can_use_web_widget(function):
    """View decorator: only users with access to the memetext widget pass.

    Browser-facing variant — denials get an HTML HttpResponse.
    """
    @wraps(function)
    def decorator(request, *a, **k):
        profile = request.user.userprofile
        if not profile.can_use_widget(WIDGET_NAMES.memetext):
            return HttpResponse(
                "<h1>ERROR 401: No access to this widget.</h1>", # should be 403
                status=status.HTTP_401_UNAUTHORIZED,
            )
        return function(request, *a, **k)
    return decorator
def user_can_use_api_widget(function):
    """View decorator: only users with access to the memetext widget pass.

    API variant — denials get a plain DRF Response.
    """
    @wraps(function)
    def decorator(request, *a, **k):
        profile = request.user.userprofile
        if not profile.can_use_widget(WIDGET_NAMES.memetext):
            return Response(
                "no access to this widget",
                status.HTTP_401_UNAUTHORIZED, # should be 403
            )
        return function(request, *a, **k)
    return decorator
| 29.428571 | 80 | 0.646602 |
from functools import wraps
from django.http import HttpResponse
from rest_framework.response import Response
from rest_framework import status
from website.constants import WIDGET_NAMES
def user_can_use_web_widget(function):
@wraps(function)
def decorator(request, *a, **k):
if request.user.userprofile.can_use_widget(WIDGET_NAMES.memetext):
return function(request, *a, **k)
else:
return HttpResponse(
"<h1>ERROR 401: No access to this widget.</h1>",
status=status.HTTP_401_UNAUTHORIZED,
)
return decorator
def user_can_use_api_widget(function):
@wraps(function)
def decorator(request, *a, **k):
if request.user.userprofile.can_use_widget(WIDGET_NAMES.memetext):
return function(request, *a, **k)
else:
return Response(
"no access to this widget",
status.HTTP_401_UNAUTHORIZED,
)
return decorator
| true | true |
f727af8237ad53753e4c67cd9224b1cb5ed49267 | 4,059 | py | Python | tests/module/fetch/test_init.py | mgorny/pkgcore | ab4a718aa1626f4edeb385383f5595a1e262b0dc | [
"BSD-3-Clause"
] | null | null | null | tests/module/fetch/test_init.py | mgorny/pkgcore | ab4a718aa1626f4edeb385383f5595a1e262b0dc | [
"BSD-3-Clause"
] | null | null | null | tests/module/fetch/test_init.py | mgorny/pkgcore | ab4a718aa1626f4edeb385383f5595a1e262b0dc | [
"BSD-3-Clause"
] | null | null | null | # Copyright: 2006 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD
from snakeoil.sequences import iflatten_instance
from snakeoil.test import TestCase
from pkgcore import fetch
class base(TestCase):

    def assertUri(self, obj, uri):
        """Assert *obj* flattens to *uri* and has matching truthiness."""
        expected = list(uri)
        self.assertEqual(list(iflatten_instance(obj)), expected)
        # The container must be truthy exactly when it holds entries.
        if expected:
            self.assertTrue(obj)
        else:
            self.assertFalse(obj)
class TestFetchable(base):

    def test_init(self):
        """Constructor stores filename, uri and chksums verbatim."""
        obj = fetch.fetchable("dar", uri=["asdf"], chksums={"asdf":1})
        self.assertEqual(obj.filename, "dar")
        self.assertUri(obj.uri, ["asdf"])
        self.assertEqual(obj.chksums, {"asdf":1})

    def test_eq_ne(self):
        """Equality covers filename, uri and chksums."""
        reference = fetch.fetchable("dar", uri=["asdf"], chksums={"asdf":1})
        self.assertEqual(reference, reference)
        self.assertEqual(reference,
            fetch.fetchable("dar", uri=["asdf"], chksums={"asdf":1}))
        # Differ by filename, by uri, and by uri+chksums respectively.
        for other in (
                fetch.fetchable("dar1", uri=["asdf"], chksums={"asdf":1}),
                fetch.fetchable("dar", uri=["asdf1"], chksums={"asdf":1}),
                fetch.fetchable("dar", uri=["asdf1"], chksums={"asdf":1, "foon":1})):
            self.assertNotEqual(reference, other)
class TestMirror(base):

    kls = fetch.mirror
    default_mirrors = ["http://foon", "ftp://spoon"]

    def setUp(self):
        self.mirror = self.kls(self.default_mirrors, "fork")

    def test_init(self):
        self.assertEqual(self.mirror.mirror_name, "fork")
        # mirrors must be stored as a tuple proper, not any tuple-like sequence.
        self.assertInstance(self.mirror.mirrors, tuple)
        self.assertEqual(self.mirror.mirrors, tuple(self.default_mirrors))

    def test_iter(self):
        self.assertEqual(list(iter(self.mirror)), self.default_mirrors)

    def test_len(self):
        self.assertEqual(len(self.default_mirrors), len(self.mirror))

    def test_getitem(self):
        self.assertEqual(self.default_mirrors[1], self.mirror[1])

    def test_eq_ne(self):
        self.assertEqual(self.mirror, self.kls(self.default_mirrors, 'fork'))
        extended = self.default_mirrors + ['http://fark']
        self.assertNotEqual(self.mirror, self.kls(extended, 'fork'))
class TestDefaultMirror(TestMirror):
    # Re-run the whole TestMirror suite against the default_mirror subclass.
    kls = fetch.default_mirror
class Test_uri_list(base):
    """Tests for fetch.uri_list: uri/mirror accumulation and expansion order."""

    def setUp(self):
        self.uril = fetch.uri_list("cows")

    @staticmethod
    def mk_uri_list(*iterable, **kwds):
        """Build a uri_list, adding each arg as a mirror or plain uri."""
        filename = kwds.get("filename", "asdf")
        obj = fetch.uri_list(filename)
        for x in iterable:
            if isinstance(x, fetch.mirror):
                obj.add_mirror(x)
            else:
                obj.add_uri(x)
        return obj

    def test_mirrors(self):
        # Only fetch.mirror instances may be added as mirrors.
        self.assertRaises(TypeError, self.uril.add_mirror, "cows")
        mirror = fetch.mirror(["me", "WI"], "asdf")
        self.uril.add_mirror(mirror)
        # Each mirror base is joined with the filename.
        self.assertEqual(list(self.uril), ["me/cows", "WI/cows"])
        # An explicit sub-uri overrides the filename for that mirror entry.
        self.uril.add_mirror(mirror, "foon/boon")
        self.assertUri(self.uril,
            ["me/cows", "WI/cows", "me/foon/boon", "WI/foon/boon"])

    def test_uris(self):
        self.uril.add_uri("blar")
        self.assertUri(self.uril, ["blar"])

    def test_combined(self):
        # Expansion preserves insertion order, expanding each mirror in place.
        l = ["blarn", "me/cows", "WI/cows", "madison",
            "belleville/cows", "verona/cows"]
        self.uril.add_uri("blarn")
        self.uril.add_mirror(fetch.mirror(["me", "WI"], "asdf"))
        self.uril.add_uri("madison")
        self.uril.add_mirror(fetch.default_mirror(
            ["belleville", "verona"], "foon"))
        self.assertUri(self.uril, l)

    def test_nonzero(self):
        self.assertTrue(self.mk_uri_list("asdf"))
        self.assertFalse(self.mk_uri_list())
        # An empty mirror contributes no uris, so the list stays falsy.
        self.assertFalse(self.mk_uri_list(fetch.mirror((), "mirror")))

    def test_len(self):
        self.assertLen(self.mk_uri_list(), 0)
        self.assertLen(self.mk_uri_list("fdas"), 1)
        self.assertLen(self.mk_uri_list(fetch.mirror((), "mirror")), 0)
        self.assertLen(self.mk_uri_list(fetch.mirror(("asdf",), "mirror")), 1)
| 32.472 | 80 | 0.614437 |
from snakeoil.sequences import iflatten_instance
from snakeoil.test import TestCase
from pkgcore import fetch
class base(TestCase):
def assertUri(self, obj, uri):
uri = list(uri)
self.assertEqual(list(iflatten_instance(obj)), uri)
if uri:
self.assertTrue(obj)
else:
self.assertFalse(obj)
class TestFetchable(base):
def test_init(self):
o = fetch.fetchable("dar", uri=["asdf"], chksums={"asdf":1})
self.assertEqual(o.filename, "dar")
self.assertUri(o.uri, ["asdf"])
self.assertEqual(o.chksums, {"asdf":1})
def test_eq_ne(self):
o1 = fetch.fetchable("dar", uri=["asdf"], chksums={"asdf":1})
self.assertEqual(o1, o1)
o2 = fetch.fetchable("dar", uri=["asdf"], chksums={"asdf":1})
self.assertEqual(o1, o2)
self.assertNotEqual(o1,
fetch.fetchable("dar1", uri=["asdf"], chksums={"asdf":1}))
self.assertNotEqual(o1,
fetch.fetchable("dar", uri=["asdf1"], chksums={"asdf":1}))
self.assertNotEqual(o1,
fetch.fetchable("dar", uri=["asdf1"], chksums={"asdf":1, "foon":1}))
class TestMirror(base):
kls = fetch.mirror
default_mirrors = ["http://foon", "ftp://spoon"]
def setUp(self):
self.mirror = self.kls(self.default_mirrors, "fork")
def test_init(self):
self.assertEqual(self.mirror.mirror_name, "fork")
self.assertInstance(self.mirror.mirrors, tuple)
self.assertEqual(self.mirror.mirrors, tuple(self.default_mirrors))
def test_iter(self):
self.assertEqual(list(self.mirror), self.default_mirrors)
def test_len(self):
self.assertEqual(len(self.mirror), len(self.default_mirrors))
def test_getitem(self):
self.assertEqual(self.mirror[1], self.default_mirrors[1])
def test_eq_ne(self):
self.assertEqual(self.mirror, self.kls(self.default_mirrors, 'fork'))
self.assertNotEqual(self.mirror,
self.kls(self.default_mirrors + ['http://fark'], 'fork'))
class TestDefaultMirror(TestMirror):
kls = fetch.default_mirror
class Test_uri_list(base):
def setUp(self):
self.uril = fetch.uri_list("cows")
@staticmethod
def mk_uri_list(*iterable, **kwds):
filename = kwds.get("filename", "asdf")
obj = fetch.uri_list(filename)
for x in iterable:
if isinstance(x, fetch.mirror):
obj.add_mirror(x)
else:
obj.add_uri(x)
return obj
def test_mirrors(self):
self.assertRaises(TypeError, self.uril.add_mirror, "cows")
mirror = fetch.mirror(["me", "WI"], "asdf")
self.uril.add_mirror(mirror)
self.assertEqual(list(self.uril), ["me/cows", "WI/cows"])
self.uril.add_mirror(mirror, "foon/boon")
self.assertUri(self.uril,
["me/cows", "WI/cows", "me/foon/boon", "WI/foon/boon"])
def test_uris(self):
self.uril.add_uri("blar")
self.assertUri(self.uril, ["blar"])
def test_combined(self):
l = ["blarn", "me/cows", "WI/cows", "madison",
"belleville/cows", "verona/cows"]
self.uril.add_uri("blarn")
self.uril.add_mirror(fetch.mirror(["me", "WI"], "asdf"))
self.uril.add_uri("madison")
self.uril.add_mirror(fetch.default_mirror(
["belleville", "verona"], "foon"))
self.assertUri(self.uril, l)
def test_nonzero(self):
self.assertTrue(self.mk_uri_list("asdf"))
self.assertFalse(self.mk_uri_list())
self.assertFalse(self.mk_uri_list(fetch.mirror((), "mirror")))
def test_len(self):
self.assertLen(self.mk_uri_list(), 0)
self.assertLen(self.mk_uri_list("fdas"), 1)
self.assertLen(self.mk_uri_list(fetch.mirror((), "mirror")), 0)
self.assertLen(self.mk_uri_list(fetch.mirror(("asdf",), "mirror")), 1)
| true | true |
f727aff556ad2c85d3ab130a226c9d8be9a26bce | 5,527 | py | Python | Practice/adapter_roberta_v4/adapter_model.py | accordproject/labs-cicero-classify | 3a52ebaf45252515c417bf94a05e33fc1c2628b8 | [
"Apache-2.0"
] | 2 | 2021-07-07T01:06:18.000Z | 2021-11-12T18:54:21.000Z | Practice/adapter_roberta_v4/adapter_model.py | accordproject/labs_cicero_classify | 3a52ebaf45252515c417bf94a05e33fc1c2628b8 | [
"Apache-2.0"
] | 3 | 2021-06-25T12:40:23.000Z | 2022-02-14T13:42:30.000Z | Practice/adapter_roberta_v4/adapter_model.py | accordproject/labs_cicero_classify | 3a52ebaf45252515c417bf94a05e33fc1c2628b8 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import numpy as np
import torch
print(f"Torch Version: {torch.__version__}")
import transformers
print(f"transformers (Adapter) Version: {transformers.__version__}")
from transformers import RobertaTokenizer
import numpy as np
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
from transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
def encode_batch(batch):
    """Tokenize a batch's "text" field into fixed-length (80 tokens) inputs."""
    return tokenizer(batch["text"], padding="max_length", truncation=True, max_length=80)
# Derive the label vocabulary from the training CSV: each row's newTag is a
# '|'-separated multi-label string, so join-then-split flattens every row
# into individual tags before de-duplicating.
data_path = "./NER_multilabel_data_v4.csv"
df = pd.read_csv(data_path)
all_tags = df.newTag
all_tags = set(all_tags)
all_tags = "|".join(all_tags)
all_tags = all_tags.split("|")
all_tags = set(all_tags)
all_tags = list(all_tags)
from ner_dataset import get_trainset_data_loader
# NOTE(review): the loader's return value immediately replaces all_tags, so
# the CSV pass above appears to be dead work — confirm and remove if so.
all_tags, trainset, trainloader = get_trainset_data_loader(tokenizer, BATCH_SIZE=128)
from transformers import RobertaConfig, RobertaModelWithHeads
# Build a RoBERTa backbone that supports adapter heads, with one output
# label per tag and the dataset's label<->id mappings baked into the config.
config = RobertaConfig.from_pretrained(
    "roberta-base",
    num_labels=len(all_tags),
    label2id = trainset.label_map,
    id2label = trainset.id2label
)
model = RobertaModelWithHeads.from_pretrained(
    "roberta-base",
    config=config,
)
# Load one trained adapter (and its matching prediction head) per tag.
all_adapter_name = []
for tag in all_tags:
    adapter_name = f"{tag}_0731"
    name = model.load_adapter(f"./save_adapters/{adapter_name}")
    all_adapter_name.append(name)
    model.load_head(f"./save_heads/{adapter_name}")
from transformers.adapters.composition import Parallel
# Activate every loaded adapter in parallel.  Building the composition
# directly from the name list replaces the previous eval() on an
# interpolated string, which was unsafe and required a regex pre-check of
# the adapter names for quote/paren characters.
parallel = Parallel(*all_adapter_name)
model.set_active_adapters(parallel)
device = "cpu"
def get_adapter_mapping(model):
    """Return (label->index, index->label) dicts over the model's active heads.

    Also prints the active-head list for quick inspection.
    """
    print(model.active_head)
    id_2_label_mapping = dict(enumerate(model.active_head))
    label_2_id_mapping = {name: idx for idx, name in id_2_label_mapping.items()}
    return label_2_id_mapping, id_2_label_mapping
def model_predict(model, sentence, device = "cpu"):
    """Tag *sentence* with every active parallel adapter head.

    Returns a tuple of:
      out_sentence      -- tokenized sentence (special tokens excluded)
      out_labels        -- per-token head names picked by argmax confidence
      label_confidences -- per-token sigmoid scores, one column per head
      return_tags_order -- {column index: head name} for the score columns
    """
    tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])
    # NOTE(review): len() of this 2-D tensor is the batch size (1), so pos and
    # tags below are length-1 rather than sequence-length — looks unintended;
    # confirm against the model's expected token_type_ids/attention_mask shape.
    pos = torch.tensor([[0] * len(tokenized_sentence)])
    tags = torch.tensor([[1] * len(tokenized_sentence)])

    model = model.to(device)
    with torch.no_grad():
        outputs = model(input_ids=tokenized_sentence.to(device),
                        token_type_ids=pos.to(device),
                        attention_mask=tags.to(device))

    # Concatenate each head's per-token logits along the label axis, recording
    # which column range belongs to which head.  (Fixes: removed an unused
    # `logits` assignment, replaced `!= None` — unreliable on tensors — with
    # an identity check, and stopped shadowing the loop variable.)
    return_tags_order = {}
    all_output = None
    for i, head_output in enumerate(outputs):
        return_tags_order[i] = model.active_head[i]
        head_logits = head_output[0]
        if all_output is None:
            all_output = head_logits
        else:
            all_output = torch.cat((all_output, head_logits), dim=2)

    all_output = torch.sigmoid(all_output)
    output_array = np.array(all_output)
    output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])

    label_confidences = [list(row) for row in output_array]
    # Drop the <s> and </s> positions (start/stop tokens).
    label_confidences = label_confidences[1:-1]

    max_value = np.array(label_confidences).argmax(axis=1)
    out_labels = np.vectorize(lambda x: model.active_head[x])(max_value)
    out_sentence = tokenizer.tokenize(sentence)
    return out_sentence, out_labels, label_confidences, return_tags_order
device = "cpu"
def get_adapter_mapping(model):
print(model.active_head)
label_2_id_mapping = dict()
id_2_label_mapping = dict()
for i, head in enumerate(model.active_head):
label_2_id_mapping[head] = i
id_2_label_mapping[i] = head
return label_2_id_mapping, id_2_label_mapping
def model_predict(model, sentence, device = "cpu"):
tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])
pos = torch.tensor([[0] * len(tokenized_sentence)])
tags = torch.tensor([[1] * len(tokenized_sentence)])
model = model.to(device)
with torch.no_grad():
outputs = model(input_ids=tokenized_sentence.to(device),
token_type_ids=pos.to(device),
attention_mask=tags.to(device))
logits = outputs[1][0]
return_tags_order = {}
all_output = None
for i, output in enumerate(outputs):
return_tags_order[i] = (model.active_head[i])
output = outputs[i][0]
if all_output != None:
all_output = torch.cat((all_output, output), dim=2)
else:
all_output = output
all_output = torch.sigmoid(all_output)
output_array = np.array(all_output)
output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])
label_confidences = []
for label_confidence in list(output_array):
label_confidences.append(list(label_confidence))
#Drop Head and End since it is start/stop Token
label_confidences = label_confidences[1:-1]
max_value = np.array(label_confidences).argmax(axis=1)
trans_func = np.vectorize(lambda x: model.active_head[x])
out_labels = trans_func(max_value)
out_sentence = tokenizer.tokenize(sentence)
return out_sentence, out_labels, label_confidences, return_tags_order | 29.875676 | 87 | 0.701646 | import pandas as pd
import numpy as np
import torch
print(f"Torch Version: {torch.__version__}")
import transformers
print(f"transformers (Adapter) Version: {transformers.__version__}")
from transformers import RobertaTokenizer
import numpy as np
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
from transformers import RobertaTokenizer
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
def encode_batch(batch):
return tokenizer(batch["text"], max_length=80, truncation=True, padding="max_length")
data_path = "./NER_multilabel_data_v4.csv"
df = pd.read_csv(data_path)
all_tags = df.newTag
all_tags = set(all_tags)
all_tags = "|".join(all_tags)
all_tags = all_tags.split("|")
all_tags = set(all_tags)
all_tags = list(all_tags)
from ner_dataset import get_trainset_data_loader
all_tags, trainset, trainloader = get_trainset_data_loader(tokenizer, BATCH_SIZE=128)
from transformers import RobertaConfig, RobertaModelWithHeads
config = RobertaConfig.from_pretrained(
"roberta-base",
num_labels=len(all_tags),
label2id = trainset.label_map,
id2label = trainset.id2label
)
model = RobertaModelWithHeads.from_pretrained(
"roberta-base",
config=config,
)
all_adapter_name = []
for tag in all_tags:
adapter_name = f"{tag}_0731"
name = model.load_adapter(f"./save_adapters/{adapter_name}")
all_adapter_name.append(name)
model.load_head(f"./save_heads/{adapter_name}")
import re
parallel_text = "','".join(all_adapter_name)
result = re.findall(r'[;|(|)]',parallel_text)
if len(result) != 0:
raise(ValueError("Adapter Name must not contain \"" + '\", \"'.join(result) + '"'))
from transformers.adapters.composition import Parallel
parallel = eval("Parallel('" + "','".join(all_adapter_name) + "')")
model.set_active_adapters(parallel)
device = "cpu"
def get_adapter_mapping(model):
print(model.active_head)
label_2_id_mapping = dict()
id_2_label_mapping = dict()
for i, head in enumerate(model.active_head):
label_2_id_mapping[head] = i
id_2_label_mapping[i] = head
return label_2_id_mapping, id_2_label_mapping
def model_predict(model, sentence, device = "cpu"):
tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])
pos = torch.tensor([[0] * len(tokenized_sentence)])
tags = torch.tensor([[1] * len(tokenized_sentence)])
model = model.to(device)
with torch.no_grad():
outputs = model(input_ids=tokenized_sentence.to(device),
token_type_ids=pos.to(device),
attention_mask=tags.to(device))
logits = outputs[1][0]
return_tags_order = {}
all_output = None
for i, output in enumerate(outputs):
return_tags_order[i] = (model.active_head[i])
output = outputs[i][0]
if all_output != None:
all_output = torch.cat((all_output, output), dim=2)
else:
all_output = output
all_output = torch.sigmoid(all_output)
output_array = np.array(all_output)
output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])
label_confidences = []
for label_confidence in list(output_array):
label_confidences.append(list(label_confidence))
label_confidences = label_confidences[1:-1]
max_value = np.array(label_confidences).argmax(axis=1)
trans_func = np.vectorize(lambda x: model.active_head[x])
out_labels = trans_func(max_value)
out_sentence = tokenizer.tokenize(sentence)
return out_sentence, out_labels, label_confidences, return_tags_order
device = "cpu"
def get_adapter_mapping(model):
print(model.active_head)
label_2_id_mapping = dict()
id_2_label_mapping = dict()
for i, head in enumerate(model.active_head):
label_2_id_mapping[head] = i
id_2_label_mapping[i] = head
return label_2_id_mapping, id_2_label_mapping
def model_predict(model, sentence, device = "cpu"):
tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])
pos = torch.tensor([[0] * len(tokenized_sentence)])
tags = torch.tensor([[1] * len(tokenized_sentence)])
model = model.to(device)
with torch.no_grad():
outputs = model(input_ids=tokenized_sentence.to(device),
token_type_ids=pos.to(device),
attention_mask=tags.to(device))
logits = outputs[1][0]
return_tags_order = {}
all_output = None
for i, output in enumerate(outputs):
return_tags_order[i] = (model.active_head[i])
output = outputs[i][0]
if all_output != None:
all_output = torch.cat((all_output, output), dim=2)
else:
all_output = output
all_output = torch.sigmoid(all_output)
output_array = np.array(all_output)
output_array = output_array.reshape(output_array.shape[-2], output_array.shape[-1])
label_confidences = []
for label_confidence in list(output_array):
label_confidences.append(list(label_confidence))
label_confidences = label_confidences[1:-1]
max_value = np.array(label_confidences).argmax(axis=1)
trans_func = np.vectorize(lambda x: model.active_head[x])
out_labels = trans_func(max_value)
out_sentence = tokenizer.tokenize(sentence)
return out_sentence, out_labels, label_confidences, return_tags_order | true | true |
f727b1c8a4f7b616a8ac216e156b396852c3e415 | 23,151 | py | Python | src/python/WMCore/MicroService/MSTransferor/RequestInfo.py | vkuznet/WMCore | 001cc51651052405a7ecd811cde91da611b1dc57 | [
"Apache-2.0"
] | null | null | null | src/python/WMCore/MicroService/MSTransferor/RequestInfo.py | vkuznet/WMCore | 001cc51651052405a7ecd811cde91da611b1dc57 | [
"Apache-2.0"
] | 1 | 2018-10-30T16:23:07.000Z | 2018-10-30T16:23:07.000Z | src/python/WMCore/MicroService/MSTransferor/RequestInfo.py | vkuznet/WMCore | 001cc51651052405a7ecd811cde91da611b1dc57 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
_RequestInfo_
Class to hold and parse all information related to a given request
"""
# futures
from __future__ import division, print_function
from future.utils import viewitems
# system modules
import datetime
import time
# WMCore modules
from pprint import pformat
from copy import deepcopy
from Utils.IteratorTools import grouper
from WMCore.DataStructs.LumiList import LumiList
from WMCore.MicroService.MSTransferor.Workflow import Workflow
from WMCore.MicroService.Tools.PycurlRucio import (getRucioToken, getPileupContainerSizesRucio,
listReplicationRules, getBlocksAndSizeRucio)
from WMCore.MicroService.Tools.Common import (elapsedTime, findBlockParents,
findParent, getBlocksByDsetAndRun,
getFileLumisInBlock, getRunsInBlock)
from WMCore.MicroService.MSCore import MSCore
class RequestInfo(MSCore):
"""
RequestInfo class provides functionality to access and
manipulate requests.
"""
def __init__(self, msConfig, rucioObj, logger):
"""
Basic setup for this RequestInfo module
"""
extraArgs = {"skipReqMgr": True, "skipRucio": True}
super(RequestInfo, self).__init__(msConfig, logger=logger, **extraArgs)
self.rucio = rucioObj
self.rucioToken = None
self.tokenValidity = None
def __call__(self, reqRecords):
"""
Run the unified transferor box
:param reqRecords: input records
:return: output records
"""
# obtain new unified Configuration
uConfig = self.unifiedConfig()
if not uConfig:
self.logger.warning("Failed to fetch the latest unified config. Skipping this cycle")
return []
self.logger.info("Going to process %d requests.", len(reqRecords))
# create a Workflow object representing the request
workflows = []
for record in reqRecords:
wflow = Workflow(record['RequestName'], record, logger=self.logger)
workflows.append(wflow)
msg = "Processing request: %s, with campaigns: %s, " % (wflow.getName(),
wflow.getCampaigns())
msg += "and input data as:\n%s" % pformat(wflow.getDataCampaignMap())
self.logger.info(msg)
# setup the Rucio token
self.setupRucio()
# get complete requests information (based on Unified Transferor logic)
self.unified(workflows)
return workflows
def setupRucio(self):
"""
Check whether Rucio is enabled and create a new token, or renew it if needed
"""
if not self.tokenValidity:
# a brand new token needs to be created. To be done in the coming lines...
pass
elif self.tokenValidity:
# then check the token lifetime
dateTimeNow = int(datetime.datetime.utcnow().strftime("%s"))
timeDiff = self.tokenValidity - dateTimeNow
if timeDiff > 30 * 60: # 30min
# current token still valid for a while
return
self.rucioToken, self.tokenValidity = getRucioToken(self.msConfig['rucioAuthUrl'],
self.msConfig['rucioAccount'])
def unified(self, workflows):
"""
Unified Transferor black box
:param workflows: input workflow objects
"""
# get aux info for dataset/blocks from inputs/parents/pileups
# make subscriptions based on site white/black lists
self.logger.info("Unified method processing %d requests", len(workflows))
orig = time.time()
# start by finding what are the parent datasets for requests requiring it
time0 = time.time()
parentMap = self.getParentDatasets(workflows)
self.setParentDatasets(workflows, parentMap)
self.logger.debug(elapsedTime(time0, "### getParentDatasets"))
# then check the secondary dataset sizes and locations
time0 = time.time()
sizeByDset, locationByDset = self.getSecondaryDatasets(workflows)
locationByDset = self.resolveSecondaryRSEs(locationByDset)
self.setSecondaryDatasets(workflows, sizeByDset, locationByDset)
self.logger.debug(elapsedTime(time0, "### getSecondaryDatasets"))
# get final primary and parent list of valid blocks,
# considering run, block and lumi lists
time0 = time.time()
blocksByDset = self.getInputDataBlocks(workflows)
self.setInputDataBlocks(workflows, blocksByDset)
self.logger.debug(elapsedTime(time0, "### getInputDataBlocks"))
# get a final list of parent blocks
time0 = time.time()
parentageMap = self.getParentChildBlocks(workflows)
self.setParentChildBlocks(workflows, parentageMap)
self.logger.debug(elapsedTime(time0, "### getParentChildBlocks"))
self.logger.info(elapsedTime(orig, '### total time for unified method'))
self.logger.info("Unified method successfully processed %d requests", len(workflows))
return workflows
def _workflowRemoval(self, listOfWorkflows, workflowsToRetry):
"""
Receives the initial list of workflows and another list of workflows
that failed somewhere in the MS processing (like fetching information
from the data-services); and remove those workflows from this cycle of
the MSTransferor, such that they can be retried in the next cycle.
:param listOfWorkflows: reference to the list of the initial workflows
:param workflowsToRetry: list of workflows with missing information
:return: nothing, the workflow removal is done in place
"""
for wflow in set(workflowsToRetry):
self.logger.warning("Removing workflow that failed processing in MSTransferor: %s", wflow.getName())
listOfWorkflows.remove(wflow)
def getParentDatasets(self, workflows):
"""
Given a list of requests, find which requests need to process a parent
dataset, and discover what the parent dataset name is.
:return: dictionary with the child and the parent dataset
"""
retryWorkflows = []
retryDatasets = []
datasetByDbs = {}
parentByDset = {}
for wflow in workflows:
if wflow.hasParents():
datasetByDbs.setdefault(wflow.getDbsUrl(), set())
datasetByDbs[wflow.getDbsUrl()].add(wflow.getInputDataset())
for dbsUrl, datasets in viewitems(datasetByDbs):
self.logger.info("Resolving %d dataset parentage against DBS: %s", len(datasets), dbsUrl)
# first find out what's the parent dataset name
parentByDset.update(findParent(datasets, dbsUrl))
# now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
# FIXME: isn't there a better way to do this?!?
for dset, value in viewitems(parentByDset):
if value is None:
retryDatasets.append(dset)
if retryDatasets:
for wflow in workflows:
if wflow.hasParents() and wflow.getInputDataset() in retryDatasets:
retryWorkflows.append(wflow)
# remove workflows that failed one or more of the bulk queries to the data-service
self._workflowRemoval(workflows, retryWorkflows)
return parentByDset
def setParentDatasets(self, workflows, parentageMap):
"""
Set the parent dataset for workflows requiring parents
"""
for wflow in workflows:
if wflow.hasParents() and wflow.getInputDataset() in parentageMap:
wflow.setParentDataset(parentageMap[wflow.getInputDataset()])
def getSecondaryDatasets(self, workflows):
"""
Given a list of requests, list all the pileup datasets and, find their
total dataset sizes and which locations host completed and subscribed datasets.
NOTE it only uses valid blocks (i.e., blocks with at least one replica!)
:param workflows: a list of Workflow objects
:return: two dictionaries keyed by the dataset.
First contains dataset size as value.
Second contains a list of locations as value.
"""
retryWorkflows = []
retryDatasets = []
datasets = set()
for wflow in workflows:
datasets = datasets | wflow.getPileupDatasets()
# retrieve pileup container size and locations from Rucio
self.logger.info("Fetching pileup dataset sizes for %d datasets against Rucio: %s",
len(datasets), self.msConfig['rucioUrl'])
sizesByDset = getPileupContainerSizesRucio(datasets, self.msConfig['rucioUrl'], self.rucioToken)
# then fetch data location for locked data, under our own rucio account
self.logger.info("Fetching pileup container location for %d containers against Rucio: %s",
len(datasets), self.msConfig['rucioUrl'])
locationsByDset = listReplicationRules(datasets, self.msConfig['rucioAccount'],
grouping="A", rucioUrl=self.msConfig['rucioUrl'],
rucioToken=self.rucioToken)
# now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
# FIXME: isn't there a better way to do this?!?
for dset, value in viewitems(sizesByDset):
if value is None:
retryDatasets.append(dset)
for dset, value in viewitems(locationsByDset):
if value is None:
retryDatasets.append(dset)
if retryDatasets:
for wflow in workflows:
for pileup in wflow.getPileupDatasets():
if pileup in retryDatasets:
retryWorkflows.append(wflow)
# remove workflows that failed one or more of the bulk queries to the data-service
self._workflowRemoval(workflows, retryWorkflows)
return sizesByDset, locationsByDset
def resolveSecondaryRSEs(self, rsesByContainer):
"""
Given a dictionary with containers and their list of RSE
expressions, resolve the RSE expressions into RSE names,
dropping all the Tape RSEs.
:param rsesByContainer: dict key'ed by the container with a list of expressions
:return: a dictionary key'ed by the container name, with a flat list of unique
RSE names.
"""
self.logger.info("Resolving Rucio RSE expressions for %d containers", len(rsesByContainer))
for contName in list(rsesByContainer):
rseNames = []
for rseExpr in rsesByContainer[contName]:
rseNames.extend(self.rucio.evaluateRSEExpression(rseExpr, returnTape=False))
rsesByContainer[contName] = list(set(rseNames))
return rsesByContainer
def setSecondaryDatasets(self, workflows, sizesByDset, locationsByDset):
"""
Given dictionaries with the pileup dataset size and locations, set the
workflow object accordingly.
"""
for wflow in workflows:
for dsetName in wflow.getPileupDatasets():
wflow.setSecondarySummary(dsetName, sizesByDset[dsetName], locationsByDset[dsetName])
def getInputDataBlocks(self, workflows):
"""
Given a list of requests, list all the primary and parent datasets and, find
their block sizes and which locations host completed and subscribed blocks
NOTE it only uses valid blocks (i.e., blocks with at least one replica!)
:param workflows: a list of Workflow objects
:return: dictionary with dataset and a few block information
"""
retryWorkflows = []
retryDatasets = []
datasets = set()
for wflow in workflows:
for dataIn in wflow.getDataCampaignMap():
if dataIn['type'] in ["primary", "parent"]:
datasets.add(dataIn['name'])
# fetch all block names and their sizes from Rucio
self.logger.info("Fetching parent/primary block sizes for %d containers against Rucio: %s",
len(datasets), self.msConfig['rucioUrl'])
blocksByDset = getBlocksAndSizeRucio(datasets, self.msConfig['rucioUrl'], self.rucioToken)
# now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
# FIXME: isn't there a better way to do this?!?
for dsetName in blocksByDset:
if blocksByDset[dsetName] is None:
retryDatasets.append(dsetName)
if retryDatasets:
for wflow in workflows:
if wflow.getInputDataset() in retryDatasets or wflow.getParentDataset() in retryDatasets:
retryWorkflows.append(wflow)
# remove workflows that failed one or more of the bulk queries to the data-service
self._workflowRemoval(workflows, retryWorkflows)
return blocksByDset
def setInputDataBlocks(self, workflows, blocksByDset):
"""
Provided a dictionary structure of dictionary, block name, and a couple of
block information, set the workflow attributes accordingly.
"""
retryWorkflows = []
for wflow in workflows:
try:
for dataIn in wflow.getDataCampaignMap():
if dataIn['type'] == "primary":
newBlockDict = self._handleInputDataInfo(wflow, dataIn['name'],
blocksByDset[dataIn['name']])
wflow.setPrimaryBlocks(newBlockDict)
elif dataIn['type'] == "parent":
newBlockDict = self._handleInputDataInfo(wflow, dataIn['name'],
blocksByDset[dataIn['name']])
wflow.setParentBlocks(newBlockDict)
except Exception:
self.logger.error("Workflow: %s will be retried in the next cycle", wflow.getName())
retryWorkflows.append(wflow)
# remove workflows that failed one or more of the bulk queries to the data-service
self._workflowRemoval(workflows, retryWorkflows)
def _handleInputDataInfo(self, wflow, dset, blocksDict):
"""
Applies any run/block/lumi list on the primary and parent
blocks provided.
It's a convoluted logic, such as:
1) if there is no run/block/lumi list, just return the initial blocksDict
2) if it has lumi list, filter runs from it and run block discovery
given a dataset name and a list of runs
3) if it has RunWhitelist, run block discovery for a given dataset name
and a list of runs
4) if it has only RunBlacklist, discover the run list of all initial blocks
provided in blocksDict and remove blocks matching only the black list
5) for the steps above, always check whether the block has replicas
6) NOW that the block data discovery is completed (considering runs):
* if LumiList is not enabled, just return the current list of blocks
* else, fetch file/run/lumi information in bulk of blocks and compare it
to the LumiList, skipping blocks without a single file that matches it.
Note that the LumiList check is dealt with in a similar way
as done in the WorkQueue StartPolicyInterface/getMaskedBlocks:
:param wflow: the Workflow object
:param dset: dataset name
:param blocksDict: dictionary of blocks, their size and location
:return: dictionary of block names and block size
"""
finalBlocks = {}
dbsUrl = wflow.getDbsUrl()
runAllowedList = wflow.getRunWhitelist()
runForbiddenList = set(wflow.getRunBlacklist())
lumiList = wflow.getLumilist()
# if there is no filter on the input data, simply return it
if not (lumiList or runAllowedList or runForbiddenList):
return self._removeZeroSizeBlocks(blocksDict)
if lumiList:
# LumiList has precedence over RunWhitelist
runAllowedList = []
for run in lumiList.getRuns():
runAllowedList.append(int(run))
runAllowedList = list(set(runAllowedList))
if runAllowedList:
# Run number 1 is not supported by DBSServer
if int(runAllowedList[0]) == 1:
finalBlocks = deepcopy(blocksDict)
else:
runAllowedList = list(set(runAllowedList) - runForbiddenList)
self.logger.info("Fetching blocks matching a list of runs for %s", wflow.getName())
try:
blocks = getBlocksByDsetAndRun(dset, runAllowedList, dbsUrl)
except Exception as exc:
msg = "Failed to retrieve blocks by dataset '%s'and run: %s\n" % (dset, runAllowedList)
msg += "Error details: %s" % str(exc)
self.logger.error(msg)
raise
for block in blocks:
if block in blocksDict:
finalBlocks[block] = deepcopy(blocksDict[block])
else:
self.logger.warning("Dropping block existent in DBS but not in Rucio: %s", block)
elif runForbiddenList:
# only run blacklist set
self.logger.info("Fetching runs in blocks for RunBlacklist for %s", wflow.getName())
try:
blockRuns = getRunsInBlock(list(blocksDict), dbsUrl)
except Exception as exc:
self.logger.error("Failed to bulk retrieve runs per block. Details: %s", str(exc))
raise
for block, runs in viewitems(blockRuns):
if not set(runs).difference(runForbiddenList):
self.logger.info("Dropping block with only blacklisted runs: %s", block)
elif block in blocksDict:
finalBlocks[block] = deepcopy(blocksDict[block])
if lumiList:
self.logger.info("Fetching block/lumi information for %d blocks in %s",
len(finalBlocks), wflow.getName())
self.logger.debug("with the following run whitelist: %s", runAllowedList)
goodBlocks = set()
# now with a smaller set of blocks in hand, we collect their lumi
# information and discard any blocks not matching the lumi list
for blockSlice in grouper(finalBlocks, 10):
try:
blockFileLumis = getFileLumisInBlock(blockSlice, dbsUrl, validFileOnly=1)
except Exception as exc:
self.logger.error("Failed to bulk retrieve run/lumi per block. Details: %s", str(exc))
raise
for block, fileLumis in viewitems(blockFileLumis):
for fileLumi in fileLumis:
if int(fileLumi['run_num']) not in runAllowedList:
continue
runNumber = str(fileLumi['run_num'])
lumis = fileLumi['lumi_section_num']
fileMask = LumiList(runsAndLumis={runNumber: lumis})
if lumiList & fileMask:
# then it has lumis that we need, keep this block and move on
goodBlocks.add(block)
break
# last but not least, drop any blocks that are not in the good list
for block in list(finalBlocks):
if block not in goodBlocks:
self.logger.info("Dropping block not matching LumiList: %s", block)
finalBlocks.pop(block)
return self._removeZeroSizeBlocks(finalBlocks)
def _removeZeroSizeBlocks(self, blocksDict):
"""
Given a dictionary of blocks and their block size and location information,
return only blocks with >0 bytes of block size (Rucio blocks with no replicas/
files result in blocks with None size).
:return: dictionary of block names and block size
"""
finalBlocks = {}
for blockName in blocksDict:
if blocksDict[blockName]['blockSize']:
finalBlocks[blockName] = blocksDict[blockName]
else:
self.logger.info("Dropping block: %s with no files and size: %s",
blockName, blocksDict[blockName]['blockSize'])
return finalBlocks
def getParentChildBlocks(self, workflows):
"""
Given a list of requests, get their children block, discover their parent blocks
and finally filter out any parent blocks with only invalid files (without any replicas)
:param workflows: list of workflow objects
:return: nothing, updates the workflow attributes in place
"""
retryWorkflows = []
retryDatasets = []
blocksByDbs = {}
parentageMap = {}
for wflow in workflows:
blocksByDbs.setdefault(wflow.getDbsUrl(), set())
if wflow.getParentDataset():
blocksByDbs[wflow.getDbsUrl()] = blocksByDbs[wflow.getDbsUrl()] | set(wflow.getPrimaryBlocks().keys())
for dbsUrl, blocks in viewitems(blocksByDbs):
if not blocks:
continue
self.logger.debug("Fetching DBS parent blocks for %d children blocks...", len(blocks))
# first find out what's the parent dataset name
parentageMap.update(findBlockParents(blocks, dbsUrl))
# now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
# FIXME: isn't there a better way to do this?!?
for dset, value in viewitems(parentageMap):
if value is None:
retryDatasets.append(dset)
if retryDatasets:
for wflow in workflows:
if wflow.getParentDataset() in retryDatasets:
retryWorkflows.append(wflow)
# remove workflows that failed one or more of the bulk queries to the data-service
self._workflowRemoval(workflows, retryWorkflows)
return parentageMap
def setParentChildBlocks(self, workflows, parentageMap):
"""
Provided a dictionary with the dataset, the child block and a set
of the parent blocks, set the workflow attribute accordingly
"""
for wflow in workflows:
if wflow.getParentDataset() and wflow.getInputDataset() in parentageMap:
wflow.setChildToParentBlocks(parentageMap[wflow.getInputDataset()])
| 48.03112 | 118 | 0.619066 |
from __future__ import division, print_function
from future.utils import viewitems
import datetime
import time
from pprint import pformat
from copy import deepcopy
from Utils.IteratorTools import grouper
from WMCore.DataStructs.LumiList import LumiList
from WMCore.MicroService.MSTransferor.Workflow import Workflow
from WMCore.MicroService.Tools.PycurlRucio import (getRucioToken, getPileupContainerSizesRucio,
listReplicationRules, getBlocksAndSizeRucio)
from WMCore.MicroService.Tools.Common import (elapsedTime, findBlockParents,
findParent, getBlocksByDsetAndRun,
getFileLumisInBlock, getRunsInBlock)
from WMCore.MicroService.MSCore import MSCore
class RequestInfo(MSCore):
def __init__(self, msConfig, rucioObj, logger):
extraArgs = {"skipReqMgr": True, "skipRucio": True}
super(RequestInfo, self).__init__(msConfig, logger=logger, **extraArgs)
self.rucio = rucioObj
self.rucioToken = None
self.tokenValidity = None
def __call__(self, reqRecords):
uConfig = self.unifiedConfig()
if not uConfig:
self.logger.warning("Failed to fetch the latest unified config. Skipping this cycle")
return []
self.logger.info("Going to process %d requests.", len(reqRecords))
workflows = []
for record in reqRecords:
wflow = Workflow(record['RequestName'], record, logger=self.logger)
workflows.append(wflow)
msg = "Processing request: %s, with campaigns: %s, " % (wflow.getName(),
wflow.getCampaigns())
msg += "and input data as:\n%s" % pformat(wflow.getDataCampaignMap())
self.logger.info(msg)
self.setupRucio()
self.unified(workflows)
return workflows
def setupRucio(self):
if not self.tokenValidity:
pass
elif self.tokenValidity:
dateTimeNow = int(datetime.datetime.utcnow().strftime("%s"))
timeDiff = self.tokenValidity - dateTimeNow
if timeDiff > 30 * 60:
return
self.rucioToken, self.tokenValidity = getRucioToken(self.msConfig['rucioAuthUrl'],
self.msConfig['rucioAccount'])
def unified(self, workflows):
self.logger.info("Unified method processing %d requests", len(workflows))
orig = time.time()
time0 = time.time()
parentMap = self.getParentDatasets(workflows)
self.setParentDatasets(workflows, parentMap)
self.logger.debug(elapsedTime(time0, "### getParentDatasets"))
time0 = time.time()
sizeByDset, locationByDset = self.getSecondaryDatasets(workflows)
locationByDset = self.resolveSecondaryRSEs(locationByDset)
self.setSecondaryDatasets(workflows, sizeByDset, locationByDset)
self.logger.debug(elapsedTime(time0, "### getSecondaryDatasets"))
time0 = time.time()
blocksByDset = self.getInputDataBlocks(workflows)
self.setInputDataBlocks(workflows, blocksByDset)
self.logger.debug(elapsedTime(time0, "### getInputDataBlocks"))
time0 = time.time()
parentageMap = self.getParentChildBlocks(workflows)
self.setParentChildBlocks(workflows, parentageMap)
self.logger.debug(elapsedTime(time0, "### getParentChildBlocks"))
self.logger.info(elapsedTime(orig, '### total time for unified method'))
self.logger.info("Unified method successfully processed %d requests", len(workflows))
return workflows
def _workflowRemoval(self, listOfWorkflows, workflowsToRetry):
for wflow in set(workflowsToRetry):
self.logger.warning("Removing workflow that failed processing in MSTransferor: %s", wflow.getName())
listOfWorkflows.remove(wflow)
def getParentDatasets(self, workflows):
retryWorkflows = []
retryDatasets = []
datasetByDbs = {}
parentByDset = {}
for wflow in workflows:
if wflow.hasParents():
datasetByDbs.setdefault(wflow.getDbsUrl(), set())
datasetByDbs[wflow.getDbsUrl()].add(wflow.getInputDataset())
for dbsUrl, datasets in viewitems(datasetByDbs):
self.logger.info("Resolving %d dataset parentage against DBS: %s", len(datasets), dbsUrl)
parentByDset.update(findParent(datasets, dbsUrl))
# now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
# FIXME: isn't there a better way to do this?!?
for dset, value in viewitems(parentByDset):
if value is None:
retryDatasets.append(dset)
if retryDatasets:
for wflow in workflows:
if wflow.hasParents() and wflow.getInputDataset() in retryDatasets:
retryWorkflows.append(wflow)
self._workflowRemoval(workflows, retryWorkflows)
return parentByDset
def setParentDatasets(self, workflows, parentageMap):
for wflow in workflows:
if wflow.hasParents() and wflow.getInputDataset() in parentageMap:
wflow.setParentDataset(parentageMap[wflow.getInputDataset()])
def getSecondaryDatasets(self, workflows):
retryWorkflows = []
retryDatasets = []
datasets = set()
for wflow in workflows:
datasets = datasets | wflow.getPileupDatasets()
self.logger.info("Fetching pileup dataset sizes for %d datasets against Rucio: %s",
len(datasets), self.msConfig['rucioUrl'])
sizesByDset = getPileupContainerSizesRucio(datasets, self.msConfig['rucioUrl'], self.rucioToken)
self.logger.info("Fetching pileup container location for %d containers against Rucio: %s",
len(datasets), self.msConfig['rucioUrl'])
locationsByDset = listReplicationRules(datasets, self.msConfig['rucioAccount'],
grouping="A", rucioUrl=self.msConfig['rucioUrl'],
rucioToken=self.rucioToken)
for dset, value in viewitems(sizesByDset):
if value is None:
retryDatasets.append(dset)
for dset, value in viewitems(locationsByDset):
if value is None:
retryDatasets.append(dset)
if retryDatasets:
for wflow in workflows:
for pileup in wflow.getPileupDatasets():
if pileup in retryDatasets:
retryWorkflows.append(wflow)
# remove workflows that failed one or more of the bulk queries to the data-service
self._workflowRemoval(workflows, retryWorkflows)
return sizesByDset, locationsByDset
def resolveSecondaryRSEs(self, rsesByContainer):
self.logger.info("Resolving Rucio RSE expressions for %d containers", len(rsesByContainer))
for contName in list(rsesByContainer):
rseNames = []
for rseExpr in rsesByContainer[contName]:
rseNames.extend(self.rucio.evaluateRSEExpression(rseExpr, returnTape=False))
rsesByContainer[contName] = list(set(rseNames))
return rsesByContainer
def setSecondaryDatasets(self, workflows, sizesByDset, locationsByDset):
for wflow in workflows:
for dsetName in wflow.getPileupDatasets():
wflow.setSecondarySummary(dsetName, sizesByDset[dsetName], locationsByDset[dsetName])
    def getInputDataBlocks(self, workflows):
        """Bulk-fetch block names and sizes for all primary/parent containers.

        A single Rucio query covers the union of every workflow's primary and
        parent datasets. Workflows whose input or parent dataset query failed
        (value is ``None``) are removed from ``workflows`` in place and will be
        retried next cycle.

        :param workflows: list of workflow wrapper objects
        :return: dict of container name -> block info (or None on query failure)
        """
        retryWorkflows = []
        retryDatasets = []
        datasets = set()
        for wflow in workflows:
            for dataIn in wflow.getDataCampaignMap():
                if dataIn['type'] in ["primary", "parent"]:
                    datasets.add(dataIn['name'])
        # fetch all block names and their sizes from Rucio
        self.logger.info("Fetching parent/primary block sizes for %d containers against Rucio: %s",
                         len(datasets), self.msConfig['rucioUrl'])
        blocksByDset = getBlocksAndSizeRucio(datasets, self.msConfig['rucioUrl'], self.rucioToken)
        # now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
        # FIXME: isn't there a better way to do this?!?
        for dsetName in blocksByDset:
            if blocksByDset[dsetName] is None:
                retryDatasets.append(dsetName)
        if retryDatasets:
            for wflow in workflows:
                if wflow.getInputDataset() in retryDatasets or wflow.getParentDataset() in retryDatasets:
                    retryWorkflows.append(wflow)
            self._workflowRemoval(workflows, retryWorkflows)
        return blocksByDset
    def setInputDataBlocks(self, workflows, blocksByDset):
        """Apply run/lumi filtering to each workflow's primary/parent blocks.

        For every workflow, filters the raw Rucio block info through
        ``_handleInputDataInfo`` and stores the result on the workflow. Any
        exception while processing a workflow (e.g. a failed DBS lookup inside
        the filter) marks that workflow for retry; failed workflows are removed
        from ``workflows`` in place at the end.
        """
        retryWorkflows = []
        for wflow in workflows:
            try:
                for dataIn in wflow.getDataCampaignMap():
                    if dataIn['type'] == "primary":
                        newBlockDict = self._handleInputDataInfo(wflow, dataIn['name'],
                                                                 blocksByDset[dataIn['name']])
                        wflow.setPrimaryBlocks(newBlockDict)
                    elif dataIn['type'] == "parent":
                        newBlockDict = self._handleInputDataInfo(wflow, dataIn['name'],
                                                                 blocksByDset[dataIn['name']])
                        wflow.setParentBlocks(newBlockDict)
            except Exception:
                # broad catch on purpose: any failure defers this workflow to the next cycle
                self.logger.error("Workflow: %s will be retried in the next cycle", wflow.getName())
                retryWorkflows.append(wflow)
        self._workflowRemoval(workflows, retryWorkflows)
    def _handleInputDataInfo(self, wflow, dset, blocksDict):
        """Filter a container's blocks by the workflow's run/lumi constraints.

        Applies, in order: lumi-list -> derived run whitelist, run whitelist
        (DBS blocks-by-run lookup, unless the whitelist starts at run 1, which
        is treated as "all runs"), run blacklist (drop blocks containing only
        blacklisted runs), and finally a per-file lumi-mask intersection.
        Zero-size blocks are always removed at the end.

        :param wflow: workflow wrapper providing run/lumi lists and DBS url
        :param dset: container (dataset) name, used for DBS lookups
        :param blocksDict: block name -> block info (with 'blockSize') from Rucio
        :return: filtered block dict; raises on DBS lookup failures (caller retries)
        """
        finalBlocks = {}
        dbsUrl = wflow.getDbsUrl()
        runAllowedList = wflow.getRunWhitelist()
        runForbiddenList = set(wflow.getRunBlacklist())
        lumiList = wflow.getLumilist()
        if not (lumiList or runAllowedList or runForbiddenList):
            # no constraints at all: only drop empty blocks
            return self._removeZeroSizeBlocks(blocksDict)
        if lumiList:
            # a lumi-list supersedes the run whitelist: rebuild it from the lumi-list runs
            runAllowedList = []
            for run in lumiList.getRuns():
                runAllowedList.append(int(run))
            runAllowedList = list(set(runAllowedList))
        if runAllowedList:
            if int(runAllowedList[0]) == 1:
                # run number 1 means "MC-like, all runs": take every block
                finalBlocks = deepcopy(blocksDict)
            else:
                runAllowedList = list(set(runAllowedList) - runForbiddenList)
                self.logger.info("Fetching blocks matching a list of runs for %s", wflow.getName())
                try:
                    blocks = getBlocksByDsetAndRun(dset, runAllowedList, dbsUrl)
                except Exception as exc:
                    msg = "Failed to retrieve blocks by dataset '%s'and run: %s\n" % (dset, runAllowedList)
                    msg += "Error details: %s" % str(exc)
                    self.logger.error(msg)
                    raise
                for block in blocks:
                    if block in blocksDict:
                        finalBlocks[block] = deepcopy(blocksDict[block])
                    else:
                        self.logger.warning("Dropping block existent in DBS but not in Rucio: %s", block)
        elif runForbiddenList:
            # blacklist only: keep blocks that contain at least one non-blacklisted run
            self.logger.info("Fetching runs in blocks for RunBlacklist for %s", wflow.getName())
            try:
                blockRuns = getRunsInBlock(list(blocksDict), dbsUrl)
            except Exception as exc:
                self.logger.error("Failed to bulk retrieve runs per block. Details: %s", str(exc))
                raise
            for block, runs in viewitems(blockRuns):
                if not set(runs).difference(runForbiddenList):
                    self.logger.info("Dropping block with only blacklisted runs: %s", block)
                elif block in blocksDict:
                    finalBlocks[block] = deepcopy(blocksDict[block])
        if lumiList:
            # final pass: keep only blocks with at least one file overlapping the lumi mask
            self.logger.info("Fetching block/lumi information for %d blocks in %s",
                             len(finalBlocks), wflow.getName())
            self.logger.debug("with the following run whitelist: %s", runAllowedList)
            goodBlocks = set()
            # query DBS in slices of 10 blocks to keep requests bounded
            for blockSlice in grouper(finalBlocks, 10):
                try:
                    blockFileLumis = getFileLumisInBlock(blockSlice, dbsUrl, validFileOnly=1)
                except Exception as exc:
                    self.logger.error("Failed to bulk retrieve run/lumi per block. Details: %s", str(exc))
                    raise
                for block, fileLumis in viewitems(blockFileLumis):
                    for fileLumi in fileLumis:
                        if int(fileLumi['run_num']) not in runAllowedList:
                            continue
                        runNumber = str(fileLumi['run_num'])
                        lumis = fileLumi['lumi_section_num']
                        fileMask = LumiList(runsAndLumis={runNumber: lumis})
                        if lumiList & fileMask:
                            # one overlapping file is enough to keep the block
                            goodBlocks.add(block)
                            break
            for block in list(finalBlocks):
                if block not in goodBlocks:
                    self.logger.info("Dropping block not matching LumiList: %s", block)
                    finalBlocks.pop(block)
        return self._removeZeroSizeBlocks(finalBlocks)
def _removeZeroSizeBlocks(self, blocksDict):
finalBlocks = {}
for blockName in blocksDict:
if blocksDict[blockName]['blockSize']:
finalBlocks[blockName] = blocksDict[blockName]
else:
self.logger.info("Dropping block: %s with no files and size: %s",
blockName, blocksDict[blockName]['blockSize'])
return finalBlocks
    def getParentChildBlocks(self, workflows):
        """Resolve DBS parent blocks for every workflow that has a parent dataset.

        Child (primary) blocks are grouped per DBS url and resolved in bulk via
        ``findBlockParents``. Entries that came back as ``None`` mark failed
        lookups; affected workflows are removed from ``workflows`` in place to
        be retried next cycle.

        :param workflows: list of workflow wrapper objects
        :return: merged child->parent mapping as returned by findBlockParents
        """
        retryWorkflows = []
        retryDatasets = []
        blocksByDbs = {}
        parentageMap = {}
        for wflow in workflows:
            blocksByDbs.setdefault(wflow.getDbsUrl(), set())
            if wflow.getParentDataset():
                blocksByDbs[wflow.getDbsUrl()] = blocksByDbs[wflow.getDbsUrl()] | set(wflow.getPrimaryBlocks().keys())
        for dbsUrl, blocks in viewitems(blocksByDbs):
            if not blocks:
                continue
            self.logger.debug("Fetching DBS parent blocks for %d children blocks...", len(blocks))
            parentageMap.update(findBlockParents(blocks, dbsUrl))
        # now check if any of our calls failed; if so, workflow needs to be skipped from this cycle
        # FIXME: isn't there a better way to do this?!?
        for dset, value in viewitems(parentageMap):
            if value is None:
                retryDatasets.append(dset)
        if retryDatasets:
            for wflow in workflows:
                if wflow.getParentDataset() in retryDatasets:
                    retryWorkflows.append(wflow)
            self._workflowRemoval(workflows, retryWorkflows)
        return parentageMap
def setParentChildBlocks(self, workflows, parentageMap):
for wflow in workflows:
if wflow.getParentDataset() and wflow.getInputDataset() in parentageMap:
wflow.setChildToParentBlocks(parentageMap[wflow.getInputDataset()])
| true | true |
f727b49174b3a60786564bef9812ead45c308098 | 1,363 | py | Python | database/migrations/0016_auto_20190113_0449.py | ccraddock/beiwe-backend-cc | b37c2604800aafcf81c93bc14673ada6aed17a39 | [
"BSD-3-Clause"
] | null | null | null | database/migrations/0016_auto_20190113_0449.py | ccraddock/beiwe-backend-cc | b37c2604800aafcf81c93bc14673ada6aed17a39 | [
"BSD-3-Clause"
] | null | null | null | database/migrations/0016_auto_20190113_0449.py | ccraddock/beiwe-backend-cc | b37c2604800aafcf81c93bc14673ada6aed17a39 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2019-01-13 04:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add the ReceivedDataStats model.

    One row per (participant, data_type) pair — enforced by the
    unique_together constraint — tracking the last upload time and the
    cumulative number of uploads and bytes received for that data stream.
    """
    dependencies = [
        ('database', '0015_auto_20190112_0251'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReceivedDataStats',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('deleted', models.BooleanField(default=False)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('last_updated', models.DateTimeField(auto_now=True)),
                ('data_type', models.CharField(max_length=256)),
                ('last_upload_timestamp', models.DateTimeField()),
                ('number_of_uploads', models.PositiveIntegerField()),
                ('number_bytes_uploaded', models.PositiveIntegerField()),
                # PROTECT: a participant with stats rows cannot be deleted
                ('participant', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='received_data_stats', to='database.Participant')),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='receiveddatastats',
            unique_together=set([('participant', 'data_type')]),
        ),
    ]
| 38.942857 | 159 | 0.623624 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('database', '0015_auto_20190112_0251'),
]
operations = [
migrations.CreateModel(
name='ReceivedDataStats',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('deleted', models.BooleanField(default=False)),
('created_on', models.DateTimeField(auto_now_add=True)),
('last_updated', models.DateTimeField(auto_now=True)),
('data_type', models.CharField(max_length=256)),
('last_upload_timestamp', models.DateTimeField()),
('number_of_uploads', models.PositiveIntegerField()),
('number_bytes_uploaded', models.PositiveIntegerField()),
('participant', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='received_data_stats', to='database.Participant')),
],
),
migrations.AlterUniqueTogether(
name='receiveddatastats',
unique_together=set([('participant', 'data_type')]),
),
]
| true | true |
f727b5c41eac8de45cd42818ceedba94bf6fbdec | 427 | py | Python | Extra Exercises/artbyraza.py | luizpavanello/python_udacity | 6411af82db8123e5b6b731c5a3bced2c31dc2c57 | [
"MIT"
] | null | null | null | Extra Exercises/artbyraza.py | luizpavanello/python_udacity | 6411af82db8123e5b6b731c5a3bced2c31dc2c57 | [
"MIT"
] | null | null | null | Extra Exercises/artbyraza.py | luizpavanello/python_udacity | 6411af82db8123e5b6b731c5a3bced2c31dc2c57 | [
"MIT"
] | null | null | null | import turtle
# Module-level turtle setup: a single pen drawing on a black canvas.
t = turtle.Turtle()
turtle.bgcolor('black')
def triangle():
    """Draw one triangle (side 100) and advance to the next position.

    Three forward/left(120) pairs close the triangle; the trailing
    forward(100) moves the pen along the row so repeated calls produce a
    horizontal strip of triangles.
    """
    t.forward(100)
    t.left(120)
    t.forward(100)
    t.left(120)
    t.forward(100)
    t.left(120)
    t.forward(100)
# Fastest drawing speed, red pen, start at the top-left of the canvas.
t.speed(0)
t.pencolor('red')
t.penup()
t.goto(-350,250)
t.pendown()
# Draw a 7x7 grid: each outer iteration starts a new row 86 units lower.
for numbers in range(7):
    x = 250 - (86*numbers)
    t.penup()
    t.goto(-350,x)
    t.pendown()
    for number in range(7):
        triangle()
turtle.done()
t = turtle.Turtle()
turtle.bgcolor('black')
def triangle():
t.forward(100)
t.left(120)
t.forward(100)
t.left(120)
t.forward(100)
t.left(120)
t.forward(100)
t.speed(0)
t.pencolor('red')
t.penup()
t.goto(-350,250)
t.pendown()
for numbers in range(7):
x = 250 - (86*numbers)
t.penup()
t.goto(-350,x)
t.pendown()
for number in range(7):
triangle()
turtle.done() | true | true |
f727b654f4653c3f574ae05040ca03e4ed0356f1 | 1,821 | py | Python | setup.py | pauleveritt/wired | 629f950176a9682a7ccb68efbb27cb2e23b4e93e | [
"MIT"
] | 1 | 2021-01-09T00:05:54.000Z | 2021-01-09T00:05:54.000Z | setup.py | pauleveritt/wired | 629f950176a9682a7ccb68efbb27cb2e23b4e93e | [
"MIT"
] | null | null | null | setup.py | pauleveritt/wired | 629f950176a9682a7ccb68efbb27cb2e23b4e93e | [
"MIT"
] | 1 | 2019-04-22T14:22:39.000Z | 2019-04-22T14:22:39.000Z | from setuptools import setup, find_packages
def readfile(name):
    """Return the full text of *name*, decoded as UTF-8.

    The explicit encoding makes the build reproducible: without it, open()
    uses the locale's preferred encoding, so reading a UTF-8 README on an
    ASCII/POSIX-locale system (common in CI containers) would raise
    UnicodeDecodeError.
    """
    with open(name, encoding='utf-8') as f:
        return f.read()
# Long description and changelog are read from the repo's reST files at build time.
readme = readfile('README.rst')
changes = readfile('CHANGES.rst')
# Runtime dependencies plus opt-in extras for building docs and running tests.
requires = ['zope.interface']
docs_require = ['Sphinx', 'sphinx_rtd_theme']
tests_require = ['pytest', 'pytest-cov', 'venusian', 'sybil']
setup(
    name='wired',
    description=(
        'An inversion-of-control (IoC) container for building decoupled, '
        'configurable, pluggable applications.'
    ),
    version='0.2',
    long_description=readme + '\n\n' + changes,
    long_description_content_type='text/x-rst',
    author='Michael Merickel',
    author_email='pylons-discuss@googlegroups.com',
    url='https://wired.readthedocs.io',
    # src/ layout: packages live under src/, tests are excluded from the wheel
    packages=find_packages('src', exclude=['tests']),
    package_dir={'': 'src'},
    include_package_data=True,
    python_requires='>=3.4',
    install_requires=requires,
    extras_require={'docs': docs_require, 'testing': tests_require},
    zip_safe=False,
    keywords=','.join(
        [
            'ioc container',
            'inversion of control',
            'dependency injection',
            'service locator',
            'singleton',
            'service factory',
        ]
    ),
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
)
| 29.852459 | 74 | 0.606809 | from setuptools import setup, find_packages
def readfile(name):
with open(name) as f:
return f.read()
readme = readfile('README.rst')
changes = readfile('CHANGES.rst')
requires = ['zope.interface']
docs_require = ['Sphinx', 'sphinx_rtd_theme']
tests_require = ['pytest', 'pytest-cov', 'venusian', 'sybil']
setup(
name='wired',
description=(
'An inversion-of-control (IoC) container for building decoupled, '
'configurable, pluggable applications.'
),
version='0.2',
long_description=readme + '\n\n' + changes,
long_description_content_type='text/x-rst',
author='Michael Merickel',
author_email='pylons-discuss@googlegroups.com',
url='https://wired.readthedocs.io',
packages=find_packages('src', exclude=['tests']),
package_dir={'': 'src'},
include_package_data=True,
python_requires='>=3.4',
install_requires=requires,
extras_require={'docs': docs_require, 'testing': tests_require},
zip_safe=False,
keywords=','.join(
[
'ioc container',
'inversion of control',
'dependency injection',
'service locator',
'singleton',
'service factory',
]
),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| true | true |
f727b7bdfdf05c7eb3a30b160d05ef9a20097bfc | 227,887 | py | Python | src/borg/testsuite/archiver.py | adrian5/borg | 6f371d5522e515c738340c2cd6dc2473b644c4d2 | [
"BSD-3-Clause"
] | 1 | 2021-03-20T15:13:11.000Z | 2021-03-20T15:13:11.000Z | src/borg/testsuite/archiver.py | adrian5/borg | 6f371d5522e515c738340c2cd6dc2473b644c4d2 | [
"BSD-3-Clause"
] | 9 | 2020-12-05T01:08:44.000Z | 2021-04-19T23:06:12.000Z | src/borg/testsuite/archiver.py | adrian5/borg | 6f371d5522e515c738340c2cd6dc2473b644c4d2 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import dateutil.tz
import errno
import io
import json
import logging
import os
import pstats
import random
import re
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from binascii import unhexlify, b2a_base64
from configparser import ConfigParser
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from hashlib import sha256
from io import BytesIO, StringIO
from unittest.mock import patch
import pytest
import borg
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer
from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING
from ..cache import Cache, LocalCache
from ..chunker import has_seek_hole
from ..constants import * # NOQA
from ..crypto.low_level import bytes_to_long, num_cipher_blocks
from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError
from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile
from ..crypto.file_integrity import FileIntegrityError
from ..helpers import Location, get_security_dir
from ..helpers import Manifest, MandatoryFeatureUnsupported
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from ..helpers import bin_to_hex
from ..helpers import MAX_S
from ..helpers import msgpack
from ..helpers import flags_noatime, flags_normal
from ..nanorst import RstToTextLazy, rst_to_terminal
from ..patterns import IECommand, PatternMatcher, parse_pattern
from ..item import Item, ItemDiff
from ..locking import LockFailed
from ..logger import setup_logging
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, llfuse
from . import BaseTestCase, changedir, environment_variable, no_selinux
from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported
from .platform import fakeroot_detected
from .upgrader import make_attic_repo
from . import key
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw):
    """Run a borg command and return ``(exit_code, output)``.

    With ``fork=True`` the command runs as a subprocess: either
    ``python -m borg.archiver`` (exe is None), the given executable (exe is a
    str) or an explicit argv prefix (exe is a tuple). Otherwise the command
    runs in-process through an ``Archiver`` instance, with stdin/stdout/stderr
    temporarily swapped so output can be captured.

    ``input`` is fed to the command's stdin; ``binary_output`` selects raw
    bytes instead of decoded text for the returned output.
    """
    if fork:
        try:
            if exe is None:
                borg = (sys.executable, '-m', 'borg.archiver')
            elif isinstance(exe, str):
                borg = (exe, )
            elif not isinstance(exe, tuple):
                raise ValueError('exe must be None, a tuple or a str')
            output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input)
            ret = 0
        except subprocess.CalledProcessError as e:
            output = e.output
            ret = e.returncode
        except SystemExit as e:  # possibly raised by argparse
            output = ''
            ret = e.code
        if binary_output:
            return ret, output
        else:
            return ret, os.fsdecode(output)
    else:
        # in-process execution: swap the standard streams and restore them in finally
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            sys.stdin = StringIO(input.decode())
            sys.stdin.buffer = BytesIO(input)
            output = BytesIO()
            # Always use utf-8 here, to simply .decode() below
            output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8')
            if archiver is None:
                archiver = Archiver()
            archiver.prerun_checks = lambda *args: None
            archiver.exit_code = EXIT_SUCCESS
            helpers.exit_code = EXIT_SUCCESS
            try:
                args = archiver.parse_args(list(args))
                # argparse parsing may raise SystemExit when the command line is bad or
                # actions that abort early (eg. --help) where given. Catch this and return
                # the error code as-if we invoked a Borg binary.
            except SystemExit as e:
                output_text.flush()
                return e.code, output.getvalue() if binary_output else output.getvalue().decode()
            ret = archiver.run(args)
            output_text.flush()
            return ret, output.getvalue() if binary_output else output.getvalue().decode()
        finally:
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
def have_gnutar():
    """Return True if a GNU tar binary is available on PATH.

    'tar --version' prints the implementation name on its first line; only
    GNU tar is accepted (BSD tar lacks options some tests rely on).
    """
    if not shutil.which('tar'):
        return False
    # subprocess.run waits for the process and closes the stdout pipe for us,
    # unlike the previous bare Popen/communicate pair.
    result = subprocess.run(['tar', '--version'], stdout=subprocess.PIPE)
    return b'GNU tar' in result.stdout
# check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do)
try:
    exec_cmd('help', exe='borg.exe', fork=True)
    # binary found: parametrize tests over both the python source and the binary
    BORG_EXES = ['python', 'binary', ]
except FileNotFoundError:
    BORG_EXES = ['python', ]
@pytest.fixture(params=BORG_EXES)
def cmd(request):
    """Fixture returning a borg-command runner, parametrized over BORG_EXES.

    Each test using this fixture runs once per available executable flavor
    ('python' source, and 'binary' if borg.exe was detected above).
    """
    if request.param == 'python':
        exe = None
    elif request.param == 'binary':
        exe = 'borg.exe'
    else:
        raise ValueError("param must be 'python' or 'binary'")
    def exec_fn(*args, **kw):
        # always fork so the binary/source flavors behave the same way
        return exec_cmd(*args, exe=exe, fork=True, **kw)
    return exec_fn
def test_return_codes(cmd, tmpdir):
    """Check borg's exit codes: success, warning (no match), error (duplicate)."""
    repo = tmpdir.mkdir('repo')
    input = tmpdir.mkdir('input')
    output = tmpdir.mkdir('output')
    input.join('test_file').write('content')
    rc, out = cmd('init', '--encryption=none', '%s' % str(repo))
    assert rc == EXIT_SUCCESS
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_SUCCESS
    with changedir(str(output)):
        rc, out = cmd('extract', '%s::archive' % repo)
        assert rc == EXIT_SUCCESS
    rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match')
    assert rc == EXIT_WARNING  # pattern did not match
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_ERROR  # duplicate archive name
"""
test_disk_full is very slow and not recommended to be included in daily testing.
for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required.
for speed and other reasons, it is recommended that the underlying block device is
in RAM, not a magnetic or flash disk.
assuming /tmp is a tmpfs (in memory filesystem), one can use this:
dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1
mkfs.ext4 /tmp/borg-disk
mkdir /tmp/borg-mount
sudo mount /tmp/borg-disk /tmp/borg-mount
if the directory does not exist, the test will be skipped.
"""
DF_MOUNT = '/tmp/borg-mount'  # see the setup instructions in the string above
@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT)
def test_disk_full(cmd):
    """Stress test: repeatedly fill the tiny DF_MOUNT fs until borg hits ENOSPC,
    then free reserved space and verify the repo is still usable/repairable."""
    def make_files(dir, count, size, rnd=True):
        # (re)create *dir* with up to *count* files of up to *size* random bytes
        shutil.rmtree(dir, ignore_errors=True)
        os.mkdir(dir)
        if rnd:
            count = random.randint(1, count)
            if size > 1:
                size = random.randint(1, size)
        for i in range(count):
            fn = os.path.join(dir, "file%03d" % i)
            with open(fn, 'wb') as f:
                data = os.urandom(size)
                f.write(data)
    with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'):
        mount = DF_MOUNT
        assert os.path.exists(mount)
        repo = os.path.join(mount, 'repo')
        input = os.path.join(mount, 'input')
        reserve = os.path.join(mount, 'reserve')
        for j in range(100):
            shutil.rmtree(repo, ignore_errors=True)
            shutil.rmtree(input, ignore_errors=True)
            # keep some space and some inodes in reserve that we can free up later:
            make_files(reserve, 80, 100000, rnd=False)
            rc, out = cmd('init', repo)
            if rc != EXIT_SUCCESS:
                print('init', rc, out)
            assert rc == EXIT_SUCCESS
            try:
                success, i = True, 0
                while success:
                    i += 1
                    try:
                        make_files(input, 20, 200000)
                    except OSError as err:
                        if err.errno == errno.ENOSPC:
                            # already out of space
                            break
                        raise
                    try:
                        rc, out = cmd('create', '%s::test%03d' % (repo, i), input)
                        success = rc == EXIT_SUCCESS
                        if not success:
                            print('create', rc, out)
                    finally:
                        # make sure repo is not locked
                        shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True)
                        os.remove(os.path.join(repo, 'lock.roster'))
            finally:
                # now some error happened, likely we are out of disk space.
                # free some space so we can expect borg to be able to work normally:
                shutil.rmtree(reserve, ignore_errors=True)
                rc, out = cmd('list', repo)
                if rc != EXIT_SUCCESS:
                    print('list', rc, out)
                rc, out = cmd('check', '--repair', repo)
                if rc != EXIT_SUCCESS:
                    print('check', rc, out)
                assert rc == EXIT_SUCCESS
class ArchiverTestCaseBase(BaseTestCase):
    """Shared fixture for archiver tests.

    setUp builds a throwaway tmpdir with repository/input/output/keys/cache
    subdirs and points the relevant BORG_* environment variables at it.
    Subclasses can override EXE / FORK_DEFAULT / prefix to run the same tests
    against a forked executable or a differently-prefixed repository location.
    """
    EXE = None  # python source based
    FORK_DEFAULT = False
    prefix = ''
    def setUp(self):
        """Create the per-test directory tree and environment."""
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests'
        self.archiver = not self.FORK_DEFAULT and Archiver() or None
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        self.patterns_file_path = os.path.join(self.tmpdir, 'patterns')
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.chmod(self.input_path, 0o777)  # avoid troubles with fakeroot / FUSE
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        with open(self.patterns_file_path, 'wb') as fd:
            fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)
    def tearDown(self):
        """Restore the cwd and remove the test tree."""
        os.chdir(self._old_wd)
        # note: ignore_errors=True as workaround for issue #862
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        setup_logging()
    def cmd(self, *args, **kw):
        """Run a borg command via exec_cmd and assert its exit code.

        Keyword-only helpers: exit_code (expected rc, default 0), fork
        (defaults to FORK_DEFAULT), binary_output (bytes instead of text).
        Returns the command's output with pure-python-msgpack warnings
        filtered out.
        """
        exit_code = kw.pop('exit_code', 0)
        fork = kw.pop('fork', None)
        binary_output = kw.get('binary_output', False)
        if fork is None:
            fork = self.FORK_DEFAULT
        ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw)
        if ret != exit_code:
            print(output)
        self.assert_equal(ret, exit_code)
        # if tests are run with the pure-python msgpack, there will be warnings about
        # this in the output, which would make a lot of tests fail.
        pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING
        empty = b'' if binary_output else ''
        output = empty.join(line for line in output.splitlines(keepends=True)
                            if pp_msg not in line)
        return output
    def create_src_archive(self, name):
        """Create archive *name* from the borg source tree (quick lz4 compression)."""
        self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir)
    def open_archive(self, name):
        """Open archive *name* directly; returns (archive, repository)."""
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, name)
        return archive, repository
    def open_repository(self):
        """Open the test repository with an exclusive lock."""
        return Repository(self.repository_path, exclusive=True)
    def create_regular_file(self, name, size=0, contents=None):
        """Create input/*name* with *contents*, or *size* bytes of 'X' filler."""
        assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match'
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)
    def create_test_files(self):
        """Create a minimal test case including all supported file types
        """
        # File
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File mode
        os.chmod('input/file1', 0o4755)
        # Hard link
        if are_hardlinks_supported():
            os.link(os.path.join(self.input_path, 'file1'),
                    os.path.join(self.input_path, 'hardlink'))
        # Symlink
        if are_symlinks_supported():
            os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        self.create_regular_file('fusexattr', size=1)
        if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
            fn = os.fsencode(os.path.join(self.input_path, 'fusexattr'))
            # ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs
            # will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False.
            # This is because fakeroot with xattr-support does not propagate xattrs of the underlying file
            # into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file
            # (from fakeroots point of view) they are invisible to the test process inside the fakeroot.
            xattr.setxattr(fn, b'user.foo', b'bar')
            xattr.setxattr(fn, b'user.empty', b'')
            # XXX this always fails for me
            # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
            # same for newer ubuntu and centos.
            # if this is supported just on specific platform, platform should be checked first,
            # so that the test setup for all tests using it does not fail here always for others.
            # xattr.setxattr(os.path.join(self.input_path, 'link1'), b'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        if are_fifos_supported():
            os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
        try:
            # Block device
            os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
            # Char device
            os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
            # File mode
            os.chmod('input/dir2', 0o555)  # if we take away write perms, we need root to remove contents
            # File owner
            os.chown('input/file1', 100, 200)  # raises OSError invalid argument on cygwin
            have_root = True  # we have (fake)root
        except PermissionError:
            have_root = False
        except OSError as e:
            # Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem.
            if e.errno not in (errno.EINVAL, errno.ENOSYS):
                raise
            have_root = False
        time.sleep(1)  # "empty" must have newer timestamp than other files
        self.create_regular_file('empty', size=0)
        return have_root
class ArchiverTestCase(ArchiverTestCaseBase):
requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    def test_basic_functionality(self):
        """End-to-end smoke test: init, create twice, extract, list, info,
        and verify info output is stable across a cache rebuild."""
        have_root = self.create_test_files()
        # fork required to test show-rc output
        output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True)
        self.assert_in('borgbackup version', output)
        self.assert_in('terminating with success status, rc 0', output)
        self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input')
        output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input')
        self.assert_in('Archive name: test.2', output)
        self.assert_in('This archive: ', output)
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        list_output = self.cmd('list', '--short', self.repository_location)
        self.assert_in('test', list_output)
        self.assert_in('test.2', list_output)
        expected = [
            'input',
            'input/bdev',
            'input/cdev',
            'input/dir2',
            'input/dir2/file2',
            'input/empty',
            'input/file1',
            'input/flagfile',
        ]
        # optional items depend on what the test filesystem supports
        if are_fifos_supported():
            expected.append('input/fifo1')
        if are_symlinks_supported():
            expected.append('input/link1')
        if are_hardlinks_supported():
            expected.append('input/hardlink')
        if not have_root:
            # we could not create these device files without (fake)root
            expected.remove('input/bdev')
            expected.remove('input/cdev')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            expected.remove('input/flagfile')  # this file is UF_NODUMP
            os.remove(os.path.join('input', 'flagfile'))
        list_output = self.cmd('list', '--short', self.repository_location + '::test')
        for name in expected:
            self.assert_in(name, list_output)
        self.assert_dirs_equal('input', 'output/input')
        info_output = self.cmd('info', self.repository_location + '::test')
        item_count = 4 if has_lchflags else 5  # one file is UF_NODUMP
        self.assert_in('Number of files: %d' % item_count, info_output)
        # force a local cache rebuild, then compare the interesting info lines
        shutil.rmtree(self.cache_path)
        info_output2 = self.cmd('info', self.repository_location + '::test')
        def filter(output):
            # filter for interesting "info" output, ignore cache rebuilding related stuff
            prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:',
                        'All archives:', 'Chunk index:', ]
            result = []
            for line in output.splitlines():
                for prefix in prefixes:
                    if line.startswith(prefix):
                        result.append(line)
            return '\n'.join(result)
        # the interesting parts of info_output2 and info_output should be same
        self.assert_equal(filter(info_output), filter(info_output2))
    @requires_hardlinks
    def test_create_duplicate_root(self):
        """Passing the same root twice must not duplicate items (issue #5603)."""
        # setup for #5603
        path_a = os.path.join(self.input_path, 'a')
        path_b = os.path.join(self.input_path, 'b')
        os.mkdir(path_a)
        os.mkdir(path_b)
        hl_a = os.path.join(path_a, 'hardlink')
        hl_b = os.path.join(path_b, 'hardlink')
        self.create_regular_file(hl_a, contents=b'123456')
        os.link(hl_a, hl_b)
        self.cmd('init', '--encryption=none', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')  # give input twice!
        # test if created archive has 'input' contents twice:
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        # we have all fs items exactly once!
        assert sorted(paths) == ['input', 'input/a', 'input/a/hardlink', 'input/b', 'input/b/hardlink']
    def test_init_parent_dirs(self):
        """init fails on missing parent dirs unless --make-parent-dirs is given."""
        parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2')
        repository_path = os.path.join(parent_path, 'repository')
        repository_location = self.prefix + repository_path
        with pytest.raises(Repository.ParentPathDoesNotExist):
            # normal borg init does NOT create missing parent dirs
            self.cmd('init', '--encryption=none', repository_location)
        # but if told so, it does:
        self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location)
        assert os.path.exists(parent_path)
    def test_unix_socket(self):
        """A unix socket in the input tree is skipped (not archived, not extracted)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        try:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.bind(os.path.join(self.input_path, 'unix-socket'))
        except PermissionError as err:
            if err.errno == errno.EPERM:
                pytest.skip('unix sockets disabled or not supported')
            elif err.errno == errno.EACCES:
                pytest.skip('permission denied to create unix sockets')
        self.cmd('create', self.repository_location + '::test', 'input')
        sock.close()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            # the socket must not have been stored in the archive
            assert not os.path.exists('input/unix-socket')
    @pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported')
    def test_symlink_extract(self):
        """A symlink round-trips through create/extract with its target intact."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.readlink('input/link1') == 'somewhere'
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    def test_atime(self):
        """with --atime, access times are stored and restored on extract."""
        def has_noatime(some_file):
            # detect whether O_NOATIME is available and effective on this platform
            atime_before = os.stat(some_file).st_atime_ns
            try:
                with open(os.open(some_file, flags_noatime)) as file:
                    file.read()
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after

        self.create_test_files()
        atime, mtime = 123456780, 234567890
        have_noatime = has_noatime('input/file1')
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--atime', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
        if have_noatime:
            # backup did not disturb atime, so both sides must match the set value
            assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9
        else:
            # it touched the input file's atime while backing it up
            assert sto.st_atime_ns == atime * 1e9
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
    def test_birthtime(self):
        """birthtime is stored by default and restored on extract."""
        self.create_test_files()
        birthtime, mtime, atime = 946598400, 946684800, 946771200
        # first utime sets mtime == birthtime (birthtime is the earliest mtime seen),
        # second utime moves mtime forward, leaving birthtime behind
        os.utime('input/file1', (atime, birthtime))
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
    def test_nobirthtime(self):
        """with --nobirthtime, birthtime is not archived; extracted file's birthtime falls back to mtime."""
        self.create_test_files()
        birthtime, mtime, atime = 946598400, 946684800, 946771200
        os.utime('input/file1', (atime, birthtime))
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        # input keeps its real birthtime, the extracted copy has no stored birthtime
        # so its birthtime equals its (restored) mtime
        assert int(sti.st_birthtime * 1e9) == birthtime * 1e9
        assert int(sto.st_birthtime * 1e9) == mtime * 1e9
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
def _extract_repository_id(self, path):
with Repository(self.repository_path) as repository:
return repository.id
def _set_repository_id(self, path, id):
config = ConfigParser(interpolation=None)
config.read(os.path.join(path, 'config'))
config.set('repository', 'id', bin_to_hex(id))
with open(os.path.join(path, 'config'), 'w') as fd:
config.write(fd)
with Repository(self.repository_path) as repository:
return repository.id
    def test_sparse_file(self):
        """sparse files round-trip through create/extract --sparse, staying sparse if the FS supports it."""
        def is_sparse(fn, total_size, hole_size):
            # heuristic: file is sparse if its block usage is below its size and,
            # where available, SEEK_HOLE/SEEK_DATA confirm the expected leading hole
            st = os.stat(fn)
            assert st.st_size == total_size
            sparse = True
            if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
                sparse = False
            if sparse and has_seek_hole:
                with open(fn, 'rb') as fd:
                    # only check if the first hole is as expected, because the 2nd hole check
                    # is problematic on xfs due to its "dynamic speculative EOF preallocation"
                    try:
                        if fd.seek(0, os.SEEK_HOLE) != 0:
                            sparse = False
                        if fd.seek(0, os.SEEK_DATA) != hole_size:
                            sparse = False
                    except OSError:
                        # OS/FS does not really support SEEK_HOLE/SEEK_DATA
                        sparse = False
            return sparse

        filename = os.path.join(self.input_path, 'sparse')
        content = b'foobar'
        hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
        total_size = hole_size + len(content) + hole_size
        with open(filename, 'wb') as fd:
            # create a file that has a hole at the beginning and end (if the
            # OS and filesystem supports sparse files)
            fd.seek(hole_size, 1)
            fd.write(content)
            fd.seek(hole_size, 1)
            pos = fd.tell()
            fd.truncate(pos)
        # we first check if we could create a sparse input file:
        sparse_support = is_sparse(filename, total_size, hole_size)
        if sparse_support:
            # we could create a sparse input file, so creating a backup of it and
            # extracting it again (as sparse) should also work:
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.cmd('create', self.repository_location + '::test', 'input')
            with changedir(self.output_path):
                self.cmd('extract', '--sparse', self.repository_location + '::test')
            self.assert_dirs_equal('input', 'output/input')
            filename = os.path.join(self.output_path, 'input', 'sparse')
            with open(filename, 'rb') as fd:
                # check if file contents are as expected
                self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
                self.assert_equal(fd.read(len(content)), content)
                self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
            self.assert_true(is_sparse(filename, total_size, hole_size))
def test_unusual_filenames(self):
filenames = ['normal', 'with some blanks', '(with_parens)', ]
for filename in filenames:
filename = os.path.join(self.input_path, filename)
with open(filename, 'wb'):
pass
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
for filename in filenames:
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename))
assert os.path.exists(os.path.join('output', 'input', filename))
    def test_repository_swap_detection(self):
        """swapping an encrypted repo for an unencrypted one with the same id must be detected (via cache)."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        # attacker scenario: replace the repo with an unencrypted one, keeping the old id
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2(self):
        """renaming an unencrypted repo over an encrypted one's path must abort access (via cache)."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        # swap the unencrypted repo into the encrypted repo's place
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_no_cache(self):
        """like test_repository_swap_detection, but the swap must be detected even without a local cache."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        # drop the cache so detection must work via the security dir instead
        self.cmd('delete', '--cache-only', self.repository_location)
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2_no_cache(self):
        """like test_repository_swap_detection2, but detection must also work without local caches."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        # drop both caches, then swap the unencrypted repo into the encrypted one's place
        self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted')
        self.cmd('delete', '--cache-only', self.repository_location + '_encrypted')
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_repokey_blank_passphrase(self):
        # Check that a repokey repo with a blank passphrase is considered like a plaintext repo.
        self.create_test_files()
        # User initializes her repository with her passphrase
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Attacker replaces it with her own repository, which is encrypted but has no passphrase set
        shutil.rmtree(self.repository_path)
        with environment_variable(BORG_PASSPHRASE=''):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # Delete cache & security database, AKA switch to user perspective
        self.cmd('delete', '--cache-only', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        shutil.rmtree(get_security_dir(repository_id))
        with environment_variable(BORG_PASSPHRASE=None):
            # This is the part were the user would be tricked, e.g. she assumes that BORG_PASSPHRASE
            # is set, while it isn't. Previously this raised no warning,
            # since the repository is, technically, encrypted.
            if self.FORK_DEFAULT:
                self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
            else:
                with pytest.raises(Cache.CacheInitAbortedError):
                    self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_move(self):
        """a moved repository needs one confirmation, then its new location is remembered."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        os.rename(self.repository_path, self.repository_path + '_new')
        # first access at the new location requires explicit approval
        with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location + '_new')
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location')) as fd:
            location = fd.read()
            assert location == Location(self.repository_location + '_new').canonical_path()
        # Needs no confirmation anymore
        self.cmd('info', self.repository_location + '_new')
        # still fine without the cache (security dir suffices) ...
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location + '_new')
        # ... and without the security dir (cache was recreated); accessing recreates the security files
        shutil.rmtree(security_dir)
        self.cmd('info', self.repository_location + '_new')
        for file in ('location', 'key-type', 'manifest-timestamp'):
            assert os.path.exists(os.path.join(security_dir, file))
    def test_security_dir_compat(self):
        """an outdated security-dir 'location' entry must not break access while the cache is intact."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location'), 'w') as fd:
            fd.write('something outdated')
        # This is fine, because the cache still has the correct information. security_dir and cache can disagree
        # if older versions are used to confirm a renamed repository.
        self.cmd('info', self.repository_location)
    def test_unknown_unencrypted(self):
        """an unencrypted repo unknown to both cache and security dir needs explicit approval."""
        self.cmd('init', '--encryption=none', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        # Ok: repository is known
        self.cmd('info', self.repository_location)
        # Ok: repository is still known (through security_dir)
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location)
        # Needs confirmation: cache and security dir both gone (eg. another host or rm -rf ~)
        shutil.rmtree(self.cache_path)
        shutil.rmtree(security_dir)
        if self.FORK_DEFAULT:
            self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.CacheInitAbortedError):
                self.cmd('info', self.repository_location)
        with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location)
    def test_strip_components(self):
        """extract --strip-components removes the given number of leading path elements."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file')
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            # stripping more components than the path has drops the item entirely
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(not os.path.exists('file'))
            with self.assert_creates_file('file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            with self.assert_creates_file('dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '1')
            with self.assert_creates_file('input/dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '0')
def _extract_hardlinks_setup(self):
os.mkdir(os.path.join(self.input_path, 'dir1'))
os.mkdir(os.path.join(self.input_path, 'dir1/subdir'))
self.create_regular_file('source', contents=b'123456')
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'abba'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/hardlink'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/subdir/hardlink'))
self.create_regular_file('dir1/source2')
os.link(os.path.join(self.input_path, 'dir1/source2'),
os.path.join(self.input_path, 'dir1/aaaa'))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
    @requires_hardlinks
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse_mount_hardlinks(self):
        """hardlink counts and contents are correct in a fuse mount, with and without strip/subset options."""
        self._extract_hardlinks_setup()
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # we need to get rid of permissions checking because fakeroot causes issues with it.
        # On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions".
        # On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse.
        if sys.platform == 'darwin':
            ignore_perms = ['-o', 'ignore_permissions,defer_permissions']
        else:
            ignore_perms = ['-o', 'ignore_permissions']
        # mount with --strip-components: only links inside the stripped subtree are visible (2 each)
        with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert open('subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
        # mount a subset path: same reduced link counts
        with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
        # full mount: the first hardlink group shows all 4 links
        with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('input/source').st_nlink == 4
            assert os.stat('input/abba').st_nlink == 4
            assert os.stat('input/dir1/hardlink').st_nlink == 4
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
    @requires_hardlinks
    def test_extract_hardlinks1(self):
        """a full extract restores all members of a hardlink group (4 links)."""
        self._extract_hardlinks_setup()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.stat('input/source').st_nlink == 4
            assert os.stat('input/abba').st_nlink == 4
            assert os.stat('input/dir1/hardlink').st_nlink == 4
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
    @requires_hardlinks
    def test_extract_hardlinks2(self):
        """partial extracts (--strip-components / subset path) still link the members that ARE extracted."""
        self._extract_hardlinks_setup()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            # only 2 of the 4 group members survive the stripping, so nlink == 2
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert open('subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/dir1')
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
    @requires_hardlinks
    def test_extract_hardlinks_twice(self):
        """regression test for #5603: extracting an archive that saw 'input' twice must restore hardlinks."""
        # setup for #5603
        path_a = os.path.join(self.input_path, 'a')
        path_b = os.path.join(self.input_path, 'b')
        os.mkdir(path_a)
        os.mkdir(path_b)
        hl_a = os.path.join(path_a, 'hardlink')
        hl_b = os.path.join(path_b, 'hardlink')
        self.create_regular_file(hl_a, contents=b'123456')
        os.link(hl_a, hl_b)
        self.cmd('init', '--encryption=none', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')  # give input twice!
        # now test extraction
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            # if issue #5603 happens, extraction gives rc == 1 (triggering AssertionError) and warnings like:
            # input/a/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/a/hardlink'
            # input/b/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/b/hardlink'
            # otherwise, when fixed, the hardlinks should be there and have a link count of 2
            assert os.stat('input/a/hardlink').st_nlink == 2
            assert os.stat('input/b/hardlink').st_nlink == 2
    def test_extract_include_exclude(self):
        """extract honours positional include paths, --exclude and --exclude-from."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        # file4 is excluded at create time, so it is never in the archive
        self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/file1', )
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
        with changedir('output'):
            # self.exclude_file_path contains patterns excluding file2 (see test setup)
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
    def test_extract_include_exclude_regex(self):
        """regex (re:) exclusions work at create and extract time, alone and combined with fnmatch."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.create_regular_file('file333', size=1024 * 80)

        # Create with regular expression exclusion for file4
        self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
        shutil.rmtree('output/input')

        # Extract with regular expression exclusion
        with changedir('output'):
            self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
        shutil.rmtree('output/input')

        # Combine --exclude with fnmatch and regular expression
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333'])
        shutil.rmtree('output/input')

        # Combine --exclude-from and regular expression exclusion
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1',
                     '--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
    def test_extract_include_exclude_regex_from_file(self):
        """--exclude-from files may mix re:, fm: and plain shell patterns."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.create_regular_file('file333', size=1024 * 80)
        self.create_regular_file('aa:something', size=1024 * 80)

        # Create while excluding using mixed pattern styles
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b're:input/file4$\n')
            fd.write(b'fm:*aa:*thing\n')
        # excludes file4 (regex) and aa:something (fnmatch)
        self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
        shutil.rmtree('output/input')

        # Exclude using regular expression
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b're:file3+\n')
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
        shutil.rmtree('output/input')

        # Mixed exclude pattern styles
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b're:file(\\d)\\1\\1$\n')
            fd.write(b'fm:nothingwillmatchthis\n')
            fd.write(b'*/file1\n')
            fd.write(b're:file2$\n')
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
    def test_extract_with_pattern(self):
        """positional extract arguments accept pattern syntax (re:, fm:, plain paths), combinable with --exclude."""
        self.cmd("init", '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("file2", size=1024 * 80)
        self.create_regular_file("file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)
        self.create_regular_file("file333", size=1024 * 80)

        self.cmd("create", self.repository_location + "::test", "input")

        # Extract everything with regular expression
        with changedir("output"):
            self.cmd("extract", self.repository_location + "::test", "re:.*")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"])
        shutil.rmtree("output/input")

        # Extract with pattern while also excluding files
        with changedir("output"):
            self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"])
        shutil.rmtree("output/input")

        # Combine --exclude with pattern for extraction
        with changedir("output"):
            self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$")
        self.assert_equal(sorted(os.listdir("output/input")), ["file2"])
        shutil.rmtree("output/input")

        # Multiple pattern
        with changedir("output"):
            self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"])
    def test_extract_list_output(self):
        """extract prints file names only when --list is given, independent of --info."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file', size=1024 * 80)

        self.cmd('create', self.repository_location + '::test', 'input')

        # default: silent about individual files
        with changedir('output'):
            output = self.cmd('extract', self.repository_location + '::test')
        self.assert_not_in("input/file", output)
        shutil.rmtree('output/input')

        # --info alone still does not list files
        with changedir('output'):
            output = self.cmd('extract', '--info', self.repository_location + '::test')
        self.assert_not_in("input/file", output)
        shutil.rmtree('output/input')

        # --list enables per-file output
        with changedir('output'):
            output = self.cmd('extract', '--list', self.repository_location + '::test')
        self.assert_in("input/file", output)
        shutil.rmtree('output/input')

        with changedir('output'):
            output = self.cmd('extract', '--list', '--info', self.repository_location + '::test')
        self.assert_in("input/file", output)
    def test_extract_progress(self):
        """extract --progress emits 'Extracting:' progress output."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file', size=1024 * 80)
        self.cmd('create', self.repository_location + '::test', 'input')

        with changedir('output'):
            output = self.cmd('extract', self.repository_location + '::test', '--progress')
            assert 'Extracting:' in output
    def _create_test_caches(self):
        """Prepare input dirs for CACHEDIR.TAG tests: a valid tag, an invalid one, and a hardlinked copy."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        # cache1: valid CACHEDIR.TAG signature (extra trailing data is allowed)
        self.create_regular_file('cache1/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        # cache2: wrong signature, must NOT be treated as a cache dir
        self.create_regular_file('cache2/%s' % CACHE_TAG_NAME,
                                 contents=b'invalid signature')
        os.mkdir('input/cache3')
        if are_hardlinks_supported():
            # cache3: hardlink to cache1's tag file (same inode, valid signature)
            os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME)
        else:
            self.create_regular_file('cache3/%s' % CACHE_TAG_NAME,
                                     contents=CACHE_TAG_CONTENTS + b' extra stuff')
    def test_create_stdin(self):
        """create from '-' archives stdin as item 'stdin' (uid/gid 0) and round-trips exactly."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        input_data = b'\x00foo\n\nbar\n   \n'
        self.cmd('create', self.repository_location + '::test', '-', input=input_data)
        item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
        assert item['uid'] == 0
        assert item['gid'] == 0
        assert item['size'] == len(input_data)
        assert item['path'] == 'stdin'
        extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True)
        assert extracted_data == input_data
    def test_create_content_from_command(self):
        """--content-from-command archives the command's stdout under the --stdin-name path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        input_data = 'some test content'
        name = 'a/b/c'
        self.cmd('create', '--stdin-name', name, '--content-from-command',
                 self.repository_location + '::test', '--', 'echo', input_data)
        item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
        assert item['uid'] == 0
        assert item['gid'] == 0
        assert item['size'] == len(input_data) + 1  # `echo` adds newline
        assert item['path'] == name
        extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test')
        assert extracted_data == input_data + '\n'
    def test_create_content_from_command_with_failed_command(self):
        """a failing --content-from-command makes create fail and leaves no archive behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--content-from-command', self.repository_location + '::test',
                          '--', 'sh', '-c', 'exit 73;', exit_code=2)
        assert output.endswith("Command 'sh' exited with status 73\n")
        archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
        assert archive_list['archives'] == []
    def test_create_content_from_command_missing_command(self):
        """--content-from-command without a command is rejected with a clear error."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--content-from-command', self.repository_location + '::test', exit_code=2)
        assert output.endswith('No command given.\n')
    def test_create_paths_from_stdin(self):
        """--paths-from-stdin archives exactly the listed paths (NUL-delimited, non-recursive for dirs)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("dir1/file2", size=1024 * 80)
        self.create_regular_file("dir1/file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)

        input_data = b'input/file1\0input/dir1\0input/file4'
        self.cmd('create', '--paths-from-stdin', '--paths-delimiter', '\\0',
                 self.repository_location + '::test', input=input_data)
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        # dir1's children (file2/file3) were NOT listed, so they are not archived
        assert paths == ['input/file1', 'input/dir1', 'input/file4']
    def test_create_paths_from_command(self):
        """--paths-from-command archives the newline-separated paths the command prints."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("file2", size=1024 * 80)
        self.create_regular_file("file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)

        input_data = 'input/file1\ninput/file2\ninput/file3'
        self.cmd('create', '--paths-from-command',
                 self.repository_location + '::test', '--', 'echo', input_data)
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        # file4 was not printed by the command, so it is not archived
        assert paths == ['input/file1', 'input/file2', 'input/file3']
    def test_create_paths_from_command_with_failed_command(self):
        """a failing --paths-from-command makes create fail and leaves no archive behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--paths-from-command', self.repository_location + '::test',
                          '--', 'sh', '-c', 'exit 73;', exit_code=2)
        assert output.endswith("Command 'sh' exited with status 73\n")
        archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
        assert archive_list['archives'] == []
    def test_create_paths_from_command_missing_command(self):
        """--paths-from-command without a command is rejected with a clear error."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--paths-from-command', self.repository_location + '::test', exit_code=2)
        assert output.endswith('No command given.\n')
    def test_create_without_root(self):
        """test create without a root: must fail with exit code 2."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', exit_code=2)
    def test_create_pattern_root(self):
        """test create with only a root pattern ('R input') instead of a positional path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
        # 'A' prefix in --list output means the file was added
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
    def test_create_pattern(self):
        """test file patterns during create: a '+' include overrides a later '-' exclude."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=+input/file_important', '--pattern=-input/file*',
                          self.repository_location + '::test', 'input')
        # 'A' = added, 'x' = excluded in --list output
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
    def test_create_pattern_file(self):
        """test file patterns during create: --patterns-from combines with command-line --pattern."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('otherfile', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
                          self.repository_location + '::test', 'input')
        # 'A' = added, 'x' = excluded in --list output
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
        self.assert_in('x input/otherfile', output)
    def test_create_pattern_exclude_folder_but_recurse(self):
        """test when patterns exclude a parent folder, but include a child"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            # '-' excludes input/x* but still allows recursion, so the '+' include can match below it
            fd.write(b'+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        # x/a is excluded ('x'), the explicitly included x/b and untouched y are added ('A')
        self.assert_in('x input/x/a/foo_a', output)
        self.assert_in("A input/x/b/foo_b", output)
        self.assert_in('A input/y/foo_y', output)
    def test_create_pattern_exclude_folder_no_recurse(self):
        """test when patterns exclude a parent folder without recursing into it, even if a child is included"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            # '!' (unlike '-') excludes input/x* without recursing into it,
            # so the '+ input/x/b' include never gets a chance to match
            fd.write(b'+ input/x/b\n! input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        # nothing below input/x may appear in the output at all
        self.assert_not_in('input/x/a/foo_a', output)
        self.assert_not_in('input/x/a', output)
        self.assert_in('A input/y/foo_y', output)
    def test_create_pattern_intermediate_folders_first(self):
        """test that intermediate folders appear first when patterns exclude a parent folder but include a child"""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        with changedir('input'):
            self.cmd('create', '--patterns-from=' + self.patterns_file_path2,
                     self.repository_location + '::test', '.')
        # list the archive and verify that the "intermediate" folders appear before
        # their contents
        out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test')
        out_list = out.splitlines()
        # in the {type} column, 'd' marks directories and '-' regular files
        self.assert_in('d x/a', out_list)
        self.assert_in('d x/b', out_list)
        assert out_list.index('d x/a') < out_list.index('- x/a/foo_a')
        assert out_list.index('d x/b') < out_list.index('- x/b/foo_b')
    def test_create_no_cache_sync(self):
        """create with --no-cache-sync must produce the same cache stats as a synced cache"""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # drop the local cache so create has to work without a synced cache
        self.cmd('delete', '--cache-only', self.repository_location)
        create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input',
                                          '--json', '--error'))  # ignore experimental warning
        info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json'))
        create_stats = create_json['cache']['stats']
        info_stats = info_json['cache']['stats']
        assert create_stats == info_stats
        # a second --no-cache-sync create must still leave a consistent repository
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input')
        self.cmd('info', self.repository_location)
        self.cmd('check', self.repository_location)
def test_extract_pattern_opt(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=1024 * 80)
self.create_regular_file('file2', size=1024 * 80)
self.create_regular_file('file_important', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
self.cmd('extract',
'--pattern=+input/file_important', '--pattern=-input/file*',
self.repository_location + '::test')
self.assert_equal(sorted(os.listdir('output/input')), ['file_important'])
    def _assert_test_caches(self):
        """Extract 'test' and verify only file1 plus cache2's tag file were archived."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
        # of the cache dir, only the CACHEDIR.TAG marker itself may remain
        self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME])
def test_exclude_caches(self):
self._create_test_caches()
self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
self._assert_test_caches()
def test_recreate_exclude_caches(self):
self._create_test_caches()
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('recreate', '--exclude-caches', self.repository_location + '::test')
self._assert_test_caches()
    def _create_test_tagged(self):
        """Set up a repo and input dirs carrying .NOBACKUP / 00-NOBACKUP markers."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('tagged1/.NOBACKUP')
        self.create_regular_file('tagged2/00-NOBACKUP')
        # here the .NOBACKUP marker is a *directory* (containing file2) --
        # tagged3 is expected to be excluded as well (see _assert_test_tagged)
        self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024)
    def _assert_test_tagged(self):
        """Extract 'test' and verify all tagged directories were excluded entirely."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
def test_exclude_tagged(self):
self._create_test_tagged()
self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input')
self._assert_test_tagged()
    def test_recreate_exclude_tagged(self):
        """recreate with --exclude-if-present must drop tagged dirs from an existing archive"""
        self._create_test_tagged()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP',
                 self.repository_location + '::test')
        self._assert_test_tagged()
    def _create_test_keep_tagged(self):
        """Set up input dirs tagged by marker files, marker dirs and CACHEDIR.TAG."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file0', size=1024)
        self.create_regular_file('tagged1/.NOBACKUP1')
        self.create_regular_file('tagged1/file1', size=1024)
        # .NOBACKUP2 is a directory marker here
        self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('tagged2/file2', size=1024)
        # a valid CACHEDIR.TAG signature with extra data appended still counts as a tag
        self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('tagged3/file3', size=1024)
        # taggedall carries all three kinds of markers at once
        self.create_regular_file('taggedall/.NOBACKUP1')
        self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('taggedall/file4', size=1024)
    def _assert_test_keep_tagged(self):
        """Extract 'test': tagged dirs must contain only their marker entries."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall'])
        self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1'])
        self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2'])
        self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME])
        self.assert_equal(sorted(os.listdir('output/input/taggedall')),
                          ['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ])
    def test_exclude_keep_tagged(self):
        """--keep-exclude-tags must archive the tag entries themselves while excluding siblings"""
        self._create_test_keep_tagged()
        self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
                 '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input')
        self._assert_test_keep_tagged()
    def test_recreate_exclude_keep_tagged(self):
        """recreate with --keep-exclude-tags must keep tag entries but drop siblings"""
        self._create_test_keep_tagged()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
                 '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
        self._assert_test_keep_tagged()
    @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    def test_recreate_hardlinked_tags(self):  # test for issue #4911
        """recreate --exclude-caches must not crash when the tag file is a hardlink."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_regular_file('file1', contents=CACHE_TAG_CONTENTS)  # "wrong" filename, but correct tag contents
        os.mkdir(os.path.join(self.input_path, 'subdir'))  # to make sure the tag is encountered *after* file1
        os.link(os.path.join(self.input_path, 'file1'),
                os.path.join(self.input_path, 'subdir', CACHE_TAG_NAME))  # correct tag name, hardlink to file1
        self.cmd('create', self.repository_location + '::test', 'input')
        # in the "test" archive, we now have, in this order:
        # - a regular file item for "file1"
        # - a hardlink item for "CACHEDIR.TAG" referring back to file1 for its contents
        self.cmd('recreate', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
        # if issue #4911 is present, the recreate will crash with a KeyError for "input/file1"
    @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2')
    def test_extract_capabilities(self):
        """extract must restore the security.capability xattr despite the chown done during extraction."""
        fchown = os.fchown
        # We need to manually patch chown to get the behaviour Linux has, since fakeroot does not
        # accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them.
        def patched_fchown(fd, uid, gid):
            # emulate real chown(2): clear the capability xattr before changing ownership
            xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False)
            fchown(fd, uid, gid)
        # The capability descriptor used here is valid and taken from a /usr/bin/ping
        capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.create_regular_file('file')
        xattr.setxattr(b'input/file', b'security.capability', capabilities)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            with patch.object(os, 'fchown', patched_fchown):
                self.cmd('extract', self.repository_location + '::test')
            # 'input/file' is relative to 'output' here, i.e. the extracted copy
            assert xattr.getxattr(b'input/file', b'security.capability') == capabilities
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of'
'fakeroot')
def test_extract_xattrs_errors(self):
def patched_setxattr_E2BIG(*args, **kwargs):
raise OSError(errno.E2BIG, 'E2BIG')
def patched_setxattr_ENOTSUP(*args, **kwargs):
raise OSError(errno.ENOTSUP, 'ENOTSUP')
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, 'EACCES')
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'user.attribute', b'value')
self.cmd('init', self.repository_location, '-e' 'none')
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
input_abspath = os.path.abspath('input/file')
with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: too big for this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: xattrs not supported on this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_EACCES):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: Permission denied\n' in out
assert os.path.isfile(input_abspath)
    def test_path_normalization(self):
        """paths given with '..' components must be stored normalized in the archive"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir1/dir2/file', size=1024 * 80)
        with changedir('input/dir1/dir2'):
            # deliberately convoluted relative path; it resolves to input/dir1/dir2
            self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
        output = self.cmd('list', self.repository_location + '::test')
        self.assert_not_in('..', output)
        self.assert_in(' input/dir1/dir2/file', output)
    def test_exclude_normalization(self):
        """--exclude patterns must match regardless of how the source path is spelled"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        # exclude without './' prefix while archiving '.'
        with changedir('input'):
            self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test1')
        self.assert_equal(sorted(os.listdir('output')), ['file2'])
        # exclude with './' prefix
        with changedir('input'):
            self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test2')
        self.assert_equal(sorted(os.listdir('output')), ['file2'])
        # exclude with an inner './' component
        self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test3')
        self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
def test_repeated_files(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input', 'input')
    def test_overwrite(self):
        """extract must overwrite existing files/dirs, but refuse non-empty dirs"""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Overwriting regular files and directories should be supported
        os.mkdir('output/input')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/dir2')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        # But non-empty dirs should fail
        os.unlink('output/input/file1')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/file1/dir')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', exit_code=1)
    def test_rename(self):
        """'rename' must rename archives, leaving them extractable under the new name"""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('rename', self.repository_location + '::test', 'test.3')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('rename', self.repository_location + '::test.2', 'test.4')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
        # Make sure both archives have been renamed
        with Repository(self.repository_path) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        self.assert_equal(len(manifest.archives), 2)
        self.assert_in('test.3', manifest.archives)
        self.assert_in('test.4', manifest.archives)
def test_info(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
info_repo = self.cmd('info', self.repository_location)
assert 'All archives:' in info_repo
info_archive = self.cmd('info', self.repository_location + '::test')
assert 'Archive name: test\n' in info_archive
info_archive = self.cmd('info', '--first', '1', self.repository_location)
assert 'Archive name: test\n' in info_archive
    def test_info_json(self):
        """'info --json' must emit well-formed repository, cache and archive metadata"""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_repo = json.loads(self.cmd('info', '--json', self.repository_location))
        repository = info_repo['repository']
        assert len(repository['id']) == 64  # hex-encoded repository id
        assert 'last_modified' in repository
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert info_repo['encryption']['mode'] == 'repokey'
        assert 'keyfile' not in info_repo['encryption']  # no key file path for repokey mode
        cache = info_repo['cache']
        stats = cache['stats']
        assert all(isinstance(o, int) for o in stats.values())
        assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size'))
        info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test'))
        # repository/cache sections must be identical whether or not an archive is given
        assert info_repo['repository'] == info_archive['repository']
        assert info_repo['cache'] == info_archive['cache']
        archives = info_archive['archives']
        assert len(archives) == 1
        archive = archives[0]
        assert archive['name'] == 'test'
        assert isinstance(archive['command_line'], list)
        assert isinstance(archive['duration'], float)
        assert len(archive['id']) == 64
        assert 'stats' in archive
        assert datetime.strptime(archive['start'], ISO_FORMAT)
        assert datetime.strptime(archive['end'], ISO_FORMAT)
    def test_comment(self):
        """archive comments: set at create time, then added/modified/cleared/kept by recreate"""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', 'input')
        self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input')
        self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input')
        self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input')
        assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1')
        assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2')
        self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment')
        self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment')
        self.cmd('recreate', self.repository_location + '::test3', '--comment', '')
        # recreate without --comment ('12345' is just a path argument) must keep the comment
        self.cmd('recreate', self.repository_location + '::test4', '12345')
        assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1')
        assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2')
        assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3')
        assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4')
    def test_delete(self):
        """delete archives by --prefix, by --last and by name; deleted data must be freed"""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        self.cmd('create', self.repository_location + '::test.3', 'input')
        self.cmd('create', self.repository_location + '::another_test.1', 'input')
        self.cmd('create', self.repository_location + '::another_test.2', 'input')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('delete', '--prefix', 'another_', self.repository_location)  # removes both another_test.* archives
        self.cmd('delete', '--last', '1', self.repository_location)  # removes the most recent remaining archive
        self.cmd('delete', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
        self.assert_in('Deleted data:', output)
        # Make sure all data except the manifest has been deleted
        with Repository(self.repository_path) as repository:
            self.assert_equal(len(repository), 1)
def test_delete_multiple(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input')
self.cmd('create', self.repository_location + '::test3', 'input')
self.cmd('delete', self.repository_location + '::test1', 'test2')
self.cmd('extract', '--dry-run', self.repository_location + '::test3')
self.cmd('delete', self.repository_location, 'test3')
assert not self.cmd('list', self.repository_location)
    def test_delete_repo(self):
        """whole-repository delete must honour BORG_DELETE_I_KNOW_WHAT_I_AM_DOING"""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        # answering 'no' must abort (exit code 2) and leave the repo in place
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no'
        self.cmd('delete', self.repository_location, exit_code=2)
        assert os.path.exists(self.repository_path)
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        self.cmd('delete', self.repository_location)
        # Make sure the repo is gone
        self.assertFalse(os.path.exists(self.repository_path))
    def test_delete_force(self):
        """'delete --force' must remove an archive even when its data is corrupted"""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # corrupt the archive: drop the last data chunk of one known file
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    repository.delete(item.chunks[-1].id)
                    break
            else:
                assert False  # missed the file
            repository.commit(compact=False)
        output = self.cmd('delete', '--force', self.repository_location + '::test')
        self.assert_in('deleted archive was corrupted', output)
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
    def test_delete_double_force(self):
        """'delete --force --force' must cope with a corrupted item metadata stream"""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # overwrite the first items metadata chunk with garbage
            id = archive.metadata.items[0]
            repository.put(id, b'corrupted items metadata stream chunk')
            repository.commit(compact=False)
        self.cmd('delete', '--force', '--force', self.repository_location + '::test')
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
    def test_corrupted_repository(self):
        """'check' must detect corruption in a repository segment file"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        output = self.cmd('check', '--show-version', self.repository_location)
        self.assert_in('borgbackup version', output)  # implied output even without --info given
        self.assert_not_in('Starting repository check', output)  # --info not given for root logger
        # overwrite 4 bytes in the second-newest segment file
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
        with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
            fd.seek(100)
            fd.write(b'XXXX')
        # check must now fail (exit code 1)
        output = self.cmd('check', '--info', self.repository_location, exit_code=1)
        self.assert_in('Starting repository check', output)  # --info given for root logger
    def test_readonly_check(self):
        """'check' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('check', '--verify-data', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('check', '--verify-data', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('check', '--verify-data', self.repository_location, '--bypass-lock')
    def test_readonly_diff(self):
        """'diff' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('a')
        self.create_src_archive('b')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('diff', '%s::a' % self.repository_location, 'b', exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('diff', '%s::a' % self.repository_location, 'b')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('diff', '%s::a' % self.repository_location, 'b', '--bypass-lock')
    def test_readonly_export_tar(self):
        """'export-tar' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', '--bypass-lock')
    def test_readonly_extract(self):
        """'extract' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('extract', '%s::test' % self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('extract', '%s::test' % self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('extract', '%s::test' % self.repository_location, '--bypass-lock')
    def test_readonly_info(self):
        """'info' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('info', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('info', self.repository_location, '--bypass-lock')
    def test_readonly_list(self):
        """'list' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('list', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('list', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('list', self.repository_location, '--bypass-lock')
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_readonly_mount(self):
        """'mount' on a read-only repo: locking fails normally, works with --bypass-lock"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                with self.fuse_mount(self.repository_location, exit_code=EXIT_ERROR):
                    pass
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    # self.fuse_mount always assumes fork=True, so for this test we have to manually set fork=False
                    with self.fuse_mount(self.repository_location, fork=False):
                        pass
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            with self.fuse_mount(self.repository_location, None, '--bypass-lock'):
                pass
    @pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
    def test_umask(self):
        """a freshly created repository directory must have mode 0700"""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        mode = os.stat(self.repository_path).st_mode
        self.assertEqual(stat.S_IMODE(mode), 0o700)
def test_create_dry_run(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
# Make sure no archive has been created
with Repository(self.repository_path) as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
self.assert_equal(len(manifest.archives), 0)
    def add_unknown_feature(self, operation):
        """Mark `operation` in the manifest as requiring a feature no client implements."""
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
            manifest.write()
            repository.commit(compact=False)
    def cmd_raises_unknown_feature(self, args):
        """Run `args` and verify it fails because of the unknown mandatory feature."""
        if self.FORK_DEFAULT:
            # in a forked borg, the exception only shows up as a generic error exit code
            self.cmd(*args, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
                self.cmd(*args)
            assert excinfo.value.args == (['unknown-feature'],)
    def test_unknown_feature_on_create(self):
        """'create' must refuse when WRITE carries an unknown mandatory feature"""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.WRITE)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
    def test_unknown_feature_on_cache_sync(self):
        """cache sync must refuse when READ carries an unknown mandatory feature"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # drop the local cache so the next create has to sync (and hit the feature check)
        self.cmd('delete', '--cache-only', self.repository_location)
        self.add_unknown_feature(Manifest.Operation.READ)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
    def test_unknown_feature_on_change_passphrase(self):
        """'key change-passphrase' must refuse when CHECK carries an unknown feature"""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])
    def test_unknown_feature_on_read(self):
        """extract/list/info must refuse when READ carries an unknown mandatory feature"""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        with changedir('output'):
            self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['list', self.repository_location])
        self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])
    def test_unknown_feature_on_rename(self):
        """'rename' must refuse when CHECK carries an unknown mandatory feature"""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])
    def test_unknown_feature_on_delete(self):
        """archive delete/prune must refuse; whole-repo delete ignores feature flags"""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.DELETE)
        # delete of an archive raises
        self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
        # delete of the whole repository ignores features
        self.cmd('delete', self.repository_location)
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_unknown_feature_on_mount(self):
        """'mount' must refuse when READ carries an unknown mandatory feature"""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        # XXX this might hang if it doesn't raise an error
        self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])
    @pytest.mark.allow_cache_wipe
    def test_unknown_mandatory_feature_in_cache(self):
        """An unknown mandatory feature recorded in the *local cache* config must
        cause borg to wipe and rebuild the cache rather than trust it."""
        if self.prefix:
            # remote (ssh://) test variant — repo location needs the test prefix
            path_prefix = 'ssh://__testsuite__'
        else:
            path_prefix = ''
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        # inject a bogus mandatory feature directly into the cache config
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                cache.begin_txn()
                cache.cache_config.mandatory_features = set(['unknown-feature'])
                cache.commit()
        if self.FORK_DEFAULT:
            # forked borg: we can only check the command succeeds end-to-end
            self.cmd('create', self.repository_location + '::test', 'input')
        else:
            # in-process: additionally verify LocalCache.wipe_cache was invoked
            called = False
            wipe_cache_safe = LocalCache.wipe_cache
            def wipe_wrapper(*args):
                nonlocal called
                called = True
                wipe_cache_safe(*args)
            with patch.object(LocalCache, 'wipe_cache', wipe_wrapper):
                self.cmd('create', self.repository_location + '::test', 'input')
            assert called
        # after the create, the bogus feature must have been cleared from the cache
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                assert cache.cache_config.mandatory_features == set([])
def test_progress_on(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input')
self.assert_in("\r", output)
def test_progress_off(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', self.repository_location + '::test5', 'input')
self.assert_not_in("\r", output)
    def test_file_status(self):
        """test that various file status show expected results
        clearly incomplete: only tests for the weird "unchanged" status for now"""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1) # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # first backup: both files are new -> status "A"
        output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        # should find first file as unmodified
        output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
        self.assert_in("U input/file1", output)
        # this is expected, although surprising, for why, see:
        # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
        self.assert_in("A input/file2", output)
    def test_file_status_cs_cache_mode(self):
        """test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode"""
        self.create_regular_file('file1', contents=b'123')
        time.sleep(1) # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # first backup: populates the files cache with ctime,size entries
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input')
        # modify file1, but cheat with the mtime (and atime) and also keep same size:
        st = os.stat('input/file1')
        self.create_regular_file('file1', contents=b'321')
        os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns))
        # this mode uses ctime for change detection, so it should find file1 as modified
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input')
        self.assert_in("M input/file1", output)
    def test_file_status_ms_cache_mode(self):
        """test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode"""
        self.create_regular_file('file1', size=10)
        time.sleep(1) # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # first backup: populates the files cache with mtime,size entries
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input')
        # change mode of file1, no content change:
        st = os.stat('input/file1')
        os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO) # this triggers a ctime change, but mtime is unchanged
        # this mode uses mtime for change detection, so it should find file1 as unmodified
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input')
        self.assert_in("U input/file1", output)
def test_file_status_rc_cache_mode(self):
"""test that files get rechunked unconditionally in rechunk,ctime cache mode"""
self.create_regular_file('file1', size=10)
time.sleep(1) # file2 must have newer timestamps than file1
self.create_regular_file('file2', size=10)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input')
# no changes here, but this mode rechunks unconditionally
output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input')
self.assert_in("A input/file1", output)
    def test_file_status_excluded(self):
        """test that excluded paths are listed"""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1) # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        if has_lchflags:
            # file3 carries the NODUMP flag, so --exclude-nodump should exclude it ("x")
            self.create_regular_file('file3', size=1024 * 80)
            platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
        # should find second file as excluded
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
        self.assert_in("U input/file1", output)
        self.assert_in("x input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
def test_create_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
# The usual keys
assert 'encryption' in create_info
assert 'repository' in create_info
assert 'cache' in create_info
assert 'last_modified' in create_info['repository']
archive = create_info['archive']
assert archive['name'] == 'test'
assert isinstance(archive['command_line'], list)
assert isinstance(archive['duration'], float)
assert len(archive['id']) == 64
assert 'stats' in archive
    def test_create_topical(self):
        """Check create --list/--filter behavior: silent by default, and the
        status filter (A/M/U) selects which items get listed."""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1) # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # no listing by default
        output = self.cmd('create', self.repository_location + '::test', 'input')
        self.assert_not_in('file1', output)
        # shouldn't be listed even if unchanged
        output = self.cmd('create', self.repository_location + '::test0', 'input')
        self.assert_not_in('file1', output)
        # should list the file as unchanged
        output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        self.assert_in('file1', output)
        # should *not* list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
        self.assert_not_in('file1', output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)
    @pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
    def test_create_read_special_symlink(self):
        """--read-special on a symlink to a FIFO must archive the data streamed
        through the FIFO (not the link itself)."""
        from threading import Thread
        def fifo_feeder(fifo_fn, data):
            # writer side: feed the FIFO so the borg reader does not block forever
            fd = os.open(fifo_fn, os.O_WRONLY)
            try:
                os.write(fd, data)
            finally:
                os.close(fd)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        data = b'foobar' * 1000
        fifo_fn = os.path.join(self.input_path, 'fifo')
        link_fn = os.path.join(self.input_path, 'link_fifo')
        os.mkfifo(fifo_fn)
        os.symlink(fifo_fn, link_fn)
        t = Thread(target=fifo_feeder, args=(fifo_fn, data))
        t.start()
        try:
            self.cmd('create', '--read-special', archive, 'input/link_fifo')
        finally:
            # always join so a failed create does not leave the feeder thread hanging
            t.join()
        with changedir('output'):
            self.cmd('extract', archive)
            fifo_fn = 'input/link_fifo'
            with open(fifo_fn, 'rb') as f:
                extracted_data = f.read()
        # the extracted entry must contain exactly what was streamed through the FIFO
        assert extracted_data == data
def test_create_read_special_broken_symlink(self):
os.symlink('somewhere does not exist', os.path.join(self.input_path, 'link'))
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '--read-special', archive, 'input')
output = self.cmd('list', archive)
assert 'input/link -> somewhere does not exist' in output
# def test_cmdline_compatibility(self):
# self.create_regular_file('file1', size=1024 * 80)
# self.cmd('init', '--encryption=repokey', self.repository_location)
# self.cmd('create', self.repository_location + '::test', 'input')
# output = self.cmd('foo', self.repository_location, '--old')
# self.assert_in('"--old" has been deprecated. Use "--new" instead', output)
def test_prune_repository(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
# these are not really a checkpoints, but they look like some:
self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
assert re.search(r'Would prune:\s+test1', output)
# must keep the latest non-checkpoint archive:
assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
# must keep the latest checkpoint archive:
assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.assert_in('test3.checkpoint', output)
self.assert_in('test3.checkpoint.1', output)
self.assert_in('test4.checkpoint', output)
self.cmd('prune', self.repository_location, '--keep-daily=1')
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
self.assert_not_in('test1', output)
# the latest non-checkpoint archive must be still there:
self.assert_in('test2', output)
# only the latest checkpoint archive must still be there:
self.assert_not_in('test3.checkpoint', output)
self.assert_not_in('test3.checkpoint.1', output)
self.assert_in('test4.checkpoint', output)
# now we supercede the latest checkpoint by a successful backup:
self.cmd('create', self.repository_location + '::test5', src_dir)
self.cmd('prune', self.repository_location, '--keep-daily=2')
output = self.cmd('list', '--consider-checkpoints', self.repository_location)
# all checkpoints should be gone now:
self.assert_not_in('checkpoint', output)
# the latest archive must be still there
self.assert_in('test5', output)
# Given a date and time in local tz, create a UTC timestamp string suitable
# for create --timestamp command line option
def _to_utc_timestamp(self, year, month, day, hour, minute, second):
dtime = datetime(year, month, day, hour, minute, second, 0, dateutil.tz.gettz())
return dtime.astimezone(dateutil.tz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
    def _create_archive_ts(self, name, y, m, d, H=0, M=0, S=0):
        """Create an archive of src_dir named *name* with a forged local-time
        timestamp (y, m, d[, H, M, S]) — helper for the prune tests."""
        loc = self.repository_location + '::' + name
        self.cmd('create', '--timestamp', self._to_utc_timestamp(y, m, d, H, M, S), loc, src_dir)
    # This test must match docs/misc/prune-example.txt
    def test_prune_repository_example(self):
        """Reproduce the retention example from docs/misc/prune-example.txt:
        --keep-daily=14 --keep-monthly=6 --keep-yearly=1 over 24 archives."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Archives that will be kept, per the example
        # Oldest archive
        self._create_archive_ts('test01', 2015, 1, 1)
        # 6 monthly archives
        self._create_archive_ts('test02', 2015, 6, 30)
        self._create_archive_ts('test03', 2015, 7, 31)
        self._create_archive_ts('test04', 2015, 8, 31)
        self._create_archive_ts('test05', 2015, 9, 30)
        self._create_archive_ts('test06', 2015, 10, 31)
        self._create_archive_ts('test07', 2015, 11, 30)
        # 14 daily archives
        self._create_archive_ts('test08', 2015, 12, 17)
        self._create_archive_ts('test09', 2015, 12, 18)
        self._create_archive_ts('test10', 2015, 12, 20)
        self._create_archive_ts('test11', 2015, 12, 21)
        self._create_archive_ts('test12', 2015, 12, 22)
        self._create_archive_ts('test13', 2015, 12, 23)
        self._create_archive_ts('test14', 2015, 12, 24)
        self._create_archive_ts('test15', 2015, 12, 25)
        self._create_archive_ts('test16', 2015, 12, 26)
        self._create_archive_ts('test17', 2015, 12, 27)
        self._create_archive_ts('test18', 2015, 12, 28)
        self._create_archive_ts('test19', 2015, 12, 29)
        self._create_archive_ts('test20', 2015, 12, 30)
        self._create_archive_ts('test21', 2015, 12, 31)
        # Additional archives that would be pruned
        # The second backup of the year
        self._create_archive_ts('test22', 2015, 1, 2)
        # The next older monthly backup
        self._create_archive_ts('test23', 2015, 5, 31)
        # The next older daily backup
        self._create_archive_ts('test24', 2015, 12, 16)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
        # Prune second backup of the year
        assert re.search(r'Would prune:\s+test22', output)
        # Prune next older monthly and daily backups
        assert re.search(r'Would prune:\s+test23', output)
        assert re.search(r'Would prune:\s+test24', output)
        # Must keep the other 21 backups
        # Yearly is kept as oldest archive
        assert re.search(r'Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01', output)
        # monthly #1..#6 map to test07..test02 (newest first)
        for i in range(1, 7):
            assert re.search(r'Keeping archive \(rule: monthly #' + str(i) + r'\):\s+test' + ("%02d" % (8-i)), output)
        # daily #1..#14 map to test21..test08 (newest first)
        for i in range(1, 15):
            assert re.search(r'Keeping archive \(rule: daily #' + str(i) + r'\):\s+test' + ("%02d" % (22-i)), output)
        output = self.cmd('list', self.repository_location)
        # Nothing pruned after dry run
        for i in range(1, 25):
            self.assert_in('test%02d' % i, output)
        self.cmd('prune', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
        output = self.cmd('list', self.repository_location)
        # All matching backups plus oldest kept
        for i in range(1, 22):
            self.assert_in('test%02d' % i, output)
        # Other backups have been pruned
        for i in range(22, 25):
            self.assert_not_in('test%02d' % i, output)
    # With an initial and daily backup, prune daily until oldest is replaced by a monthly backup
    def test_prune_retain_and_expire_oldest(self):
        """The oldest archive must be kept via the [oldest] fallback until a
        regular monthly rule match exists; then it gets expired."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Initial backup
        self._create_archive_ts('original_archive', 2020, 9, 1, 11, 15)
        # Archive and prune daily for 30 days
        for i in range(1, 31):
            self._create_archive_ts('september%02d' % i, 2020, 9, i, 12)
            self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        # Archive and prune 6 days into the next month
        for i in range(1, 7):
            self._create_archive_ts('october%02d' % i, 2020, 10, i, 12)
            self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        # Oldest backup is still retained
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        assert re.search(r'Keeping archive \(rule: monthly\[oldest\] #1' + r'\):\s+original_archive', output)
        # Archive one more day and prune.
        self._create_archive_ts('october07', 2020, 10, 7, 12)
        self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        # Last day of previous month is retained as monthly, and oldest is expired.
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        assert re.search(r'Keeping archive \(rule: monthly #1\):\s+september30', output)
        self.assert_not_in('original_archive', output)
    def test_prune_repository_save_space(self):
        """prune --save-space must delete the same archives as a normal prune."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        # dry run: test2 kept (daily #1), test1 would be pruned
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
        assert re.search(r'Would prune:\s+test1', output)
        output = self.cmd('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=1')
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test1', output)
        self.assert_in('test2', output)
    def test_prune_repository_prefix(self):
        """prune --prefix must only consider archives matching the prefix;
        other archives stay untouched."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--prefix=foo-')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output)
        assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output)
        # dry run must not have deleted anything
        output = self.cmd('list', self.repository_location)
        self.assert_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
        self.cmd('prune', self.repository_location, '--keep-daily=1', '--prefix=foo-')
        # only the older foo- archive is pruned; bar- archives are untouched
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
    def test_prune_repository_glob(self):
        """prune --glob-archives must only consider archives matching the glob;
        other archives stay untouched."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output)
        assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output)
        # dry run must not have deleted anything
        output = self.cmd('list', self.repository_location)
        self.assert_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
        self.cmd('prune', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
        # only the older -foo archive is pruned; -bar archives are untouched
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
def test_list_prefix(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test-1', src_dir)
self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir)
self.cmd('create', self.repository_location + '::test-2', src_dir)
output = self.cmd('list', '--prefix=test-', self.repository_location)
self.assert_in('test-1', output)
self.assert_in('test-2', output)
self.assert_not_in('something-else', output)
    def test_list_format(self):
        """A --format equal to the built-in default must reproduce the default
        listing; a different format must produce different output."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', test_archive, src_dir)
        output_1 = self.cmd('list', test_archive)
        output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive)
        output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive)
        self.assertEqual(output_1, output_2)
        self.assertNotEqual(output_1, output_3)
    def test_list_repository_format(self):
        """Repository-level list formats: explicit default, --short, {barchive}
        and {name}/{comment} placeholders."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir)
        self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir)
        # spelling out the default format must equal the default output
        output_1 = self.cmd('list', self.repository_location)
        output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location)
        self.assertEqual(output_1, output_2)
        output_1 = self.cmd('list', '--short', self.repository_location)
        self.assertEqual(output_1, 'test-1\ntest-2\n')
        output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location)
        self.assertEqual(output_1, 'test-1/test-2/')
        output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location)
        self.assert_in('test-1 comment 1\n', output_3)
        self.assert_in('test-2 comment 2\n', output_3)
    def test_list_hash(self):
        """The {sha256} list placeholder must yield correct content digests."""
        self.create_regular_file('empty_file', size=0)
        self.create_regular_file('amb', contents=b'a' * 1000000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', test_archive, 'input')
        output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive)
        # SHA-256 of one million 'a' characters (well-known test vector)
        assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output
        # SHA-256 of the empty string
        assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output
    def test_list_consider_checkpoints(self):
        """Checkpoint-like archives are hidden from 'list' unless
        --consider-checkpoints is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        # these are not really a checkpoints, but they look like some:
        self.cmd('create', self.repository_location + '::test2.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        output = self.cmd('list', self.repository_location)
        assert "test1" in output
        assert "test2.checkpoint" not in output
        assert "test3.checkpoint.1" not in output
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        assert "test1" in output
        assert "test2.checkpoint" in output
        assert "test3.checkpoint.1" in output
    def test_list_chunk_counts(self):
        """{num_chunks}/{unique_chunks} placeholders must reflect how files
        were chunked (0 for empty, 2 for a file made of two distinct halves)."""
        self.create_regular_file('empty_file', size=0)
        self.create_regular_file('two_chunks')
        with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd:
            # two large, distinct runs of data
            fd.write(b'abba' * 2000000)
            fd.write(b'baab' * 2000000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', test_archive, 'input')
        output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive)
        assert "0 0 input/empty_file" in output
        assert "2 2 input/two_chunks" in output
    def test_list_size(self):
        """Relations between size/csize/dsize/dcsize for a compressible file."""
        self.create_regular_file('compressible_file', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', '-C', 'lz4', test_archive, 'input')
        output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive)
        # [1]: skip the first listed entry (presumably the 'input' dir) and take the file
        size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ")
        assert int(csize) < int(size)
        assert int(dcsize) < int(dsize)
        assert int(dsize) <= int(size)
        assert int(dcsize) <= int(csize)
    def test_list_json(self):
        """list --json (repo level) and --json-lines (archive level) must emit
        valid JSON with the expected metadata fields."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        list_repo = json.loads(self.cmd('list', '--json', self.repository_location))
        repository = list_repo['repository']
        assert len(repository['id']) == 64
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert list_repo['encryption']['mode'] == 'repokey'
        assert 'keyfile' not in list_repo['encryption']
        archive0 = list_repo['archives'][0]
        assert datetime.strptime(archive0['time'], ISO_FORMAT)  # must not raise
        list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        # two items: the 'input' directory and the file
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['size'] == 81920
        assert datetime.strptime(file1['mtime'], ISO_FORMAT)  # must not raise
        # a custom --format adds its placeholders to the JSON lines
        list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b'
    def test_list_json_args(self):
        """Invalid combinations must exit with code 2: --json-lines on a
        repository, --json on an archive."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('list', '--json-lines', self.repository_location, exit_code=2)
        self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2)
    def test_log_json(self):
        """--log-json must emit one parseable JSON object per log line, with
        'file_status' and 'log_message' record types present."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug')
        messages = {}  # type -> message, one of each kind
        for line in log.splitlines():
            msg = json.loads(line)
            messages[msg['type']] = msg
        file_status = messages['file_status']
        assert 'status' in file_status
        assert file_status['path'].startswith('input')
        log_message = messages['log_message']
        assert isinstance(log_message['time'], float)
        assert log_message['levelname'] == 'DEBUG' # there should only be DEBUG messages
        assert isinstance(log_message['message'], str)
    def test_debug_profile(self):
        """--debug-profile must write a profile that 'debug convert-profile'
        turns into something pstats can load."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof')
        self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof')
        stats = pstats.Stats('create.pyprof')
        stats.strip_dirs()
        stats.sort_stats('cumtime')
        # presumably a .pyprof target makes borg emit pstats format directly — TODO confirm
        self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof')
        stats = pstats.Stats('create.pyprof') # Only do this on trusted data!
        stats.strip_dirs()
        stats.sort_stats('cumtime')
    def test_common_options(self):
        """Common options (--debug) given before the subcommand must apply."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input')
        # this message is only emitted at debug level
        assert 'security: read previous location' in log
def _get_sizes(self, compression, compressible, size=10000):
if compressible:
contents = b'X' * size
else:
contents = os.urandom(size)
self.create_regular_file('file', contents=contents)
self.cmd('init', '--encryption=none', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '-C', compression, archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive)
size, csize, path = output.split("\n")[1].split(" ")
return int(size), int(csize)
    def test_compression_none_compressible(self):
        """'none': stored size is input size plus a constant 3 bytes of overhead."""
        size, csize = self._get_sizes('none', compressible=True)
        assert csize == size + 3
    def test_compression_none_uncompressible(self):
        """'none' on random data: same constant 3-byte overhead."""
        size, csize = self._get_sizes('none', compressible=False)
        assert csize == size + 3
    def test_compression_zlib_compressible(self):
        """zlib shrinks 10000 x 'X' below 10% — to exactly 35 bytes here."""
        size, csize = self._get_sizes('zlib', compressible=True)
        assert csize < size * 0.1
        assert csize == 35
    def test_compression_zlib_uncompressible(self):
        """zlib cannot shrink random data (csize >= size)."""
        size, csize = self._get_sizes('zlib', compressible=False)
        assert csize >= size
    def test_compression_auto_compressible(self):
        """auto,zlib behaves like plain zlib for compressible data."""
        size, csize = self._get_sizes('auto,zlib', compressible=True)
        assert csize < size * 0.1
        assert csize == 35  # same as compression 'zlib'
    def test_compression_auto_uncompressible(self):
        """auto,zlib falls back to 'none' (+3 bytes) for incompressible data."""
        size, csize = self._get_sizes('auto,zlib', compressible=False)
        assert csize == size + 3  # same as compression 'none'
    def test_compression_lz4_compressible(self):
        """lz4 shrinks highly repetitive data below 10%."""
        size, csize = self._get_sizes('lz4', compressible=True)
        assert csize < size * 0.1
    def test_compression_lz4_uncompressible(self):
        """lz4 stores incompressible data uncompressed (+3 bytes)."""
        size, csize = self._get_sizes('lz4', compressible=False)
        assert csize == size + 3  # same as compression 'none'
    def test_compression_lzma_compressible(self):
        """lzma shrinks highly repetitive data below 10%."""
        size, csize = self._get_sizes('lzma', compressible=True)
        assert csize < size * 0.1
    def test_compression_lzma_uncompressible(self):
        """lzma stores incompressible data uncompressed (+3 bytes)."""
        size, csize = self._get_sizes('lzma', compressible=False)
        assert csize == size + 3  # same as compression 'none'
    def test_compression_zstd_compressible(self):
        """zstd shrinks highly repetitive data below 10%."""
        size, csize = self._get_sizes('zstd', compressible=True)
        assert csize < size * 0.1
    def test_compression_zstd_uncompressible(self):
        """zstd stores incompressible data uncompressed (+3 bytes)."""
        size, csize = self._get_sizes('zstd', compressible=False)
        assert csize == size + 3  # same as compression 'none'
    def test_change_passphrase(self):
        """key change-passphrase must take BORG_NEW_PASSPHRASE; the repo must
        then be accessible with the new passphrase."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase'
        # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set:
        self.cmd('key', 'change-passphrase', self.repository_location)
        os.environ['BORG_PASSPHRASE'] = 'newpassphrase'
        # list must succeed with the new passphrase
        self.cmd('list', self.repository_location)
    def test_break_lock(self):
        """break-lock on an unlocked repository must succeed."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('break-lock', self.repository_location)
    def test_usage(self):
        """Invoking borg with no arguments or with -h must not crash."""
        self.cmd()
        self.cmd('-h')
    def test_help(self):
        """'help' must work for topics and commands and must honor the
        --epilog-only / --usage-only output restrictions."""
        assert 'Borg' in self.cmd('help')
        assert 'patterns' in self.cmd('help', 'patterns')
        assert 'Initialize' in self.cmd('help', 'init')
        # --epilog-only omits the argparse usage/arguments section
        assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only')
        # --usage-only omits the descriptive epilog text
        assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only')
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse(self):
        """Mount a whole repository and a single archive via FUSE and verify file
        metadata, contents, hardlinks, symlinks, FIFOs and xattrs against the
        original input files."""
        def has_noatime(some_file):
            # Detect whether O_NOATIME is available AND actually effective here,
            # so we know whether atime comparisons below are meaningful.
            atime_before = os.stat(some_file).st_atime_ns
            try:
                os.close(os.open(some_file, flags_noatime))
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_test_files()
        have_noatime = has_noatime('input/file1')
        self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive', 'input')
        self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive2', 'input')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            os.remove(os.path.join('input', 'flagfile'))
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint:
        with self.fuse_mount(self.repository_location, mountpoint):
            # flags are not supported by the FUSE mount
            # we also ignore xattrs here, they are tested separately
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'),
                                   ignore_flags=True, ignore_xattrs=True)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'),
                                   ignore_flags=True, ignore_xattrs=True)
        # mount only 1 archive, its contents shall show up directly in mountpoint:
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'),
                                   ignore_flags=True, ignore_xattrs=True)
            # regular file
            in_fn = 'input/file1'
            out_fn = os.path.join(mountpoint, 'input', 'file1')
            # stat
            sti1 = os.stat(in_fn)
            sto1 = os.stat(out_fn)
            assert sti1.st_mode == sto1.st_mode
            assert sti1.st_uid == sto1.st_uid
            assert sti1.st_gid == sto1.st_gid
            assert sti1.st_size == sto1.st_size
            if have_noatime:
                assert sti1.st_atime == sto1.st_atime
            assert sti1.st_ctime == sto1.st_ctime
            assert sti1.st_mtime == sto1.st_mtime
            if are_hardlinks_supported():
                # note: there is another hardlink to this, see below
                assert sti1.st_nlink == sto1.st_nlink == 2
            # read
            with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f:
                assert in_f.read() == out_f.read()
            # hardlink (to 'input/file1')
            if are_hardlinks_supported():
                in_fn = 'input/hardlink'
                out_fn = os.path.join(mountpoint, 'input', 'hardlink')
                sti2 = os.stat(in_fn)
                sto2 = os.stat(out_fn)
                assert sti2.st_nlink == sto2.st_nlink == 2
                assert sto1.st_ino == sto2.st_ino
            # symlink
            if are_symlinks_supported():
                in_fn = 'input/link1'
                out_fn = os.path.join(mountpoint, 'input', 'link1')
                sti = os.stat(in_fn, follow_symlinks=False)
                sto = os.stat(out_fn, follow_symlinks=False)
                assert sti.st_size == len('somewhere')
                assert sto.st_size == len('somewhere')
                assert stat.S_ISLNK(sti.st_mode)
                assert stat.S_ISLNK(sto.st_mode)
                assert os.readlink(in_fn) == os.readlink(out_fn)
            # FIFO
            if are_fifos_supported():
                out_fn = os.path.join(mountpoint, 'input', 'fifo1')
                sto = os.stat(out_fn)
                assert stat.S_ISFIFO(sto.st_mode)
            # list/read xattrs
            try:
                in_fn = 'input/fusexattr'
                out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr'))
                if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
                    assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ]
                    assert xattr.getxattr(out_fn, b'user.foo') == b'bar'
                    assert xattr.getxattr(out_fn, b'user.empty') == b''
                else:
                    assert no_selinux(xattr.listxattr(out_fn)) == []
                    try:
                        xattr.getxattr(out_fn, b'user.foo')
                    except OSError as e:
                        assert e.errno == llfuse.ENOATTR
                    else:
                        assert False, "expected OSError(ENOATTR), but no error was raised"
            except OSError as err:
                if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP:
                    # some systems have no xattr support on FUSE
                    pass
                else:
                    raise
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_versions_view(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('test', contents=b'first')
if are_hardlinks_supported():
self.create_regular_file('hardlink1', contents=b'123456')
os.link('input/hardlink1', 'input/hardlink2')
os.link('input/hardlink1', 'input/hardlink3')
self.cmd('create', self.repository_location + '::archive1', 'input')
self.create_regular_file('test', contents=b'second')
self.cmd('create', self.repository_location + '::archive2', 'input')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# mount the whole repository, archive contents shall show up in versioned view:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'):
path = os.path.join(mountpoint, 'input', 'test') # filename shows up as directory ...
files = os.listdir(path)
assert all(f.startswith('test.') for f in files) # ... with files test.xxxxx in there
assert {b'first', b'second'} == {open(os.path.join(path, f), 'rb').read() for f in files}
if are_hardlinks_supported():
hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001')
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
# similar again, but exclude the hardlink master:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'):
if are_hardlinks_supported():
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse_allow_damaged_files(self):
        """Files marked damaged by 'check --repair' must raise EIO on open,
        unless the archive is mounted with -o allow_damaged_files."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive')
        # Get rid of a chunk and repair it
        archive, repository = self.open_archive('archive')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    repository.delete(item.chunks[-1].id)
                    path = item.path  # store full path for later
                    break
            else:
                assert False  # missed the file
            repository.commit(compact=False)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # default mount: opening the damaged file must fail with EIO
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            with pytest.raises(OSError) as excinfo:
                open(os.path.join(mountpoint, path))
            assert excinfo.value.errno == errno.EIO
        # with allow_damaged_files, the open must succeed
        with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'):
            open(os.path.join(mountpoint, path)).close()
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_mount_options(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_src_archive('arch11')
self.create_src_archive('arch12')
self.create_src_archive('arch21')
self.create_src_archive('arch22')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'):
assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22']
with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'):
assert sorted(os.listdir(os.path.join(mountpoint))) == []
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_migrate_lock_alive(self):
        """Both old_id and new_id must not be stale during lock migration / daemonization."""
        from functools import wraps
        import pickle
        import traceback
        # Check results are communicated from the borg mount background process
        # to the pytest process by means of a serialized dict object stored in this file.
        assert_data_file = os.path.join(self.tmpdir, 'migrate_lock_assert_data.pickle')
        # Decorates Lock.migrate_lock() with process_alive() checks before and after.
        # (We don't want to mix testing code into runtime.)
        def write_assert_data(migrate_lock):
            @wraps(migrate_lock)
            def wrapper(self, old_id, new_id):
                wrapper.num_calls += 1
                assert_data = {
                    'num_calls': wrapper.num_calls,
                    'old_id': old_id,
                    'new_id': new_id,
                    'before': {
                        'old_id_alive': platform.process_alive(*old_id),
                        'new_id_alive': platform.process_alive(*new_id)},
                    'exception': None,
                    'exception.extr_tb': None,
                    'after': {
                        'old_id_alive': None,
                        'new_id_alive': None}}
                try:
                    # write the pre-call state first, in case the wrapped call kills the process
                    with open(assert_data_file, 'wb') as _out:
                        pickle.dump(assert_data, _out)
                except:
                    pass
                try:
                    return migrate_lock(self, old_id, new_id)
                except BaseException as e:
                    assert_data['exception'] = e
                    assert_data['exception.extr_tb'] = traceback.extract_tb(e.__traceback__)
                finally:
                    # record the post-call liveness and overwrite the dump with the full picture
                    assert_data['after'].update({
                        'old_id_alive': platform.process_alive(*old_id),
                        'new_id_alive': platform.process_alive(*new_id)})
                    try:
                        with open(assert_data_file, 'wb') as _out:
                            pickle.dump(assert_data, _out)
                    except:
                        pass
            wrapper.num_calls = 0
            return wrapper
        # Decorate
        borg.locking.Lock.migrate_lock = write_assert_data(borg.locking.Lock.migrate_lock)
        try:
            self.cmd('init', '--encryption=none', self.repository_location)
            self.create_src_archive('arch')
            mountpoint = os.path.join(self.tmpdir, 'mountpoint')
            # In order that the decoration is kept for the borg mount process, we must not spawn, but actually fork;
            # not to be confused with the forking in borg.helpers.daemonize() which is done as well.
            with self.fuse_mount(self.repository_location, mountpoint, os_fork=True):
                pass
            with open(assert_data_file, 'rb') as _in:
                assert_data = pickle.load(_in)
            print('\nLock.migrate_lock(): assert_data = %r.' % (assert_data, ), file=sys.stderr, flush=True)
            exception = assert_data['exception']
            if exception is not None:
                extracted_tb = assert_data['exception.extr_tb']
                print(
                    'Lock.migrate_lock() raised an exception:\n',
                    'Traceback (most recent call last):\n',
                    *traceback.format_list(extracted_tb),
                    *traceback.format_exception(exception.__class__, exception, None),
                    sep='', end='', file=sys.stderr, flush=True)
            assert assert_data['num_calls'] == 1, "Lock.migrate_lock() must be called exactly once."
            assert exception is None, "Lock.migrate_lock() may not raise an exception."
            assert_data_before = assert_data['before']
            assert assert_data_before['old_id_alive'], "old_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
            assert assert_data_before['new_id_alive'], "new_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
            assert_data_after = assert_data['after']
            assert assert_data_after['old_id_alive'], "old_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
            assert assert_data_after['new_id_alive'], "new_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
        finally:
            # Undecorate
            borg.locking.Lock.migrate_lock = borg.locking.Lock.migrate_lock.__wrapped__
def verify_aes_counter_uniqueness(self, method):
seen = set() # Chunks already seen
used = set() # counter values already used
def verify_uniqueness():
with Repository(self.repository_path) as repository:
for id, _ in repository.open_index(repository.get_transaction_id()).iteritems():
data = repository.get(id)
hash = sha256(data).digest()
if hash not in seen:
seen.add(hash)
num_blocks = num_cipher_blocks(len(data) - 41)
nonce = bytes_to_long(data[33:41])
for counter in range(nonce, nonce + num_blocks):
self.assert_not_in(counter, used)
used.add(counter)
self.create_test_files()
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=' + method, self.repository_location)
verify_uniqueness()
self.cmd('create', self.repository_location + '::test', 'input')
verify_uniqueness()
self.cmd('create', self.repository_location + '::test.2', 'input')
verify_uniqueness()
self.cmd('delete', self.repository_location + '::test.2')
verify_uniqueness()
    def test_aes_counter_uniqueness_keyfile(self):
        """AES CTR counter values must be unique with keyfile encryption."""
        self.verify_aes_counter_uniqueness('keyfile')
    def test_aes_counter_uniqueness_passphrase(self):
        """AES CTR counter values must be unique with repokey encryption."""
        self.verify_aes_counter_uniqueness('repokey')
    def test_debug_dump_archive_items(self):
        """'borg debug dump-archive-items' must write one numbered file per item metadata chunk."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test')
        output_dir = sorted(os.listdir('output'))
        assert len(output_dir) > 0 and output_dir[0].startswith('000000_')
        assert 'Done.' in output
    def test_debug_dump_repo_objs(self):
        """'borg debug dump-repo-objs' must write one numbered file per repository object."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            output = self.cmd('debug', 'dump-repo-objs', self.repository_location)
        output_dir = sorted(os.listdir('output'))
        assert len(output_dir) > 0 and output_dir[0].startswith('00000000_')
        assert 'Done.' in output
    def test_debug_put_get_delete_obj(self):
        """Round-trip an object through 'borg debug put-obj / get-obj / delete-obj'."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        data = b'some data'
        # put-obj stores under the sha256 of the content, so we can predict the key
        hexkey = sha256(data).hexdigest()
        self.create_regular_file('file', contents=data)
        output = self.cmd('debug', 'put-obj', self.repository_location, 'input/file')
        assert hexkey in output
        output = self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file')
        assert hexkey in output
        with open('output/file', 'rb') as f:
            data_read = f.read()
        assert data == data_read
        output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
        assert "deleted" in output
        # deleting the same object again must report it as missing, not fail
        output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
        assert "not found" in output
        output = self.cmd('debug', 'delete-obj', self.repository_location, 'invalid')
        assert "is invalid" in output
    def test_init_interrupt(self):
        """An EOF (user abort) during key creation must not leave a repository behind."""
        def raise_eof(*args):
            # simulates the user aborting the passphrase prompt
            raise EOFError
        with patch.object(KeyfileKeyBase, 'create', raise_eof):
            self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1)
        assert not os.path.exists(self.repository_location)
    def test_init_requires_encryption_option(self):
        """'borg init' without --encryption must fail with a usage error (rc 2)."""
        self.cmd('init', self.repository_location, exit_code=2)
    def test_init_nested_repositories(self):
        """Initializing a repository inside an existing repository must be rejected."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        if self.FORK_DEFAULT:
            self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2)
        else:
            with pytest.raises(Repository.AlreadyExists):
                self.cmd('init', '--encryption=repokey', self.repository_location + '/nested')
def check_cache(self):
# First run a regular borg check
self.cmd('check', self.repository_location)
# Then check that the cache on disk matches exactly what's in the repo.
with self.open_repository() as repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest, sync=False) as cache:
original_chunks = cache.chunks
Cache.destroy(repository)
with Cache(repository, key, manifest) as cache:
correct_chunks = cache.chunks
assert original_chunks is not correct_chunks
seen = set()
for id, (refcount, size, csize) in correct_chunks.iteritems():
o_refcount, o_size, o_csize = original_chunks[id]
assert refcount == o_refcount
assert size == o_size
assert csize == o_csize
seen.add(id)
for id, (refcount, size, csize) in original_chunks.iteritems():
assert id in seen
    def test_check_cache(self):
        """check_cache() must detect a chunks cache that is out of sync with the repository."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with self.open_repository() as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest, sync=False) as cache:
                cache.begin_txn()
                # corrupt the cache: bump a refcount without changing the repository
                cache.chunks.incref(list(cache.chunks.iteritems())[0][0])
                cache.commit()
        with pytest.raises(AssertionError):
            self.check_cache()
    def test_recreate_target_rc(self):
        """'borg recreate --target' on a whole repository must be rejected (rc 2)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2)
        assert 'Need to specify single archive' in output
    def test_recreate_target(self):
        """'borg recreate --target' must create a new, filtered archive and keep the original."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.check_cache()
        archive = self.repository_location + '::test0'
        self.cmd('create', archive, 'input')
        self.check_cache()
        original_archive = self.cmd('list', self.repository_location)
        self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive')
        self.check_cache()
        # the original archive must still be there, plus the new one
        archives = self.cmd('list', self.repository_location)
        assert original_archive in archives
        assert 'new-archive' in archives
        # the new archive must only contain the selected (non-excluded) paths
        archive = self.repository_location + '::new-archive'
        listing = self.cmd('list', '--short', archive)
        assert 'file1' not in listing
        assert 'dir2/file2' in listing
        assert 'dir2/file3' not in listing
    def test_recreate_basic(self):
        """'borg recreate' with path selection and exclusion must narrow the archive in place."""
        self.create_test_files()
        self.create_regular_file('dir2/file3', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test0'
        self.cmd('create', archive, 'input')
        self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3')
        self.check_cache()
        listing = self.cmd('list', '--short', archive)
        assert 'file1' not in listing
        assert 'dir2/file2' in listing
        assert 'dir2/file3' not in listing
    @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    def test_recreate_subtree_hardlinks(self):
        """Hardlink identity must survive 'borg recreate' of a subtree."""
        # This is essentially the same problem set as in test_extract_hardlinks
        self._extract_hardlinks_setup()
        self.cmd('create', self.repository_location + '::test2', 'input')
        self.cmd('recreate', self.repository_location + '::test', 'input/dir1')
        self.check_cache()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
        # the untouched archive must still have all 4 links
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test2')
            assert os.stat('input/dir1/hardlink').st_nlink == 4
def test_recreate_rechunkify(self):
with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd:
fd.write(b'a' * 280)
fd.write(b'b' * 280)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled')
list = self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{num_chunks} {unique_chunks}')
num_chunks, unique_chunks = map(int, list.split(' '))
# test1 and test2 do not deduplicate
assert num_chunks == unique_chunks
self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
self.check_cache()
# test1 and test2 do deduplicate after recreate
assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}'))
assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{unique_chunks}'))
    def test_recreate_recompress(self):
        """'borg recreate -C ... --recompress' must recompress chunks stored uncompressed,
        without changing the file content hash."""
        self.create_regular_file('compressible', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none')
        file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                             '--format', '{size} {csize} {sha256}')
        size, csize, sha256_before = file_list.split(' ')
        assert int(csize) >= int(size)  # >= due to metadata overhead
        self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress')
        self.check_cache()
        file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                             '--format', '{size} {csize} {sha256}')
        size, csize, sha256_after = file_list.split(' ')
        assert int(csize) < int(size)
        assert sha256_before == sha256_after
    def test_recreate_timestamp(self):
        """'borg recreate --timestamp' must rewrite the archive's start/end times."""
        local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test0'
        self.cmd('create', archive, 'input')
        self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment',
                 'test', archive)
        info = self.cmd('info', archive).splitlines()
        # 'info' prints local time, so shift the expected date by the local UTC offset
        dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None)
        s_time = dtime.strftime("%Y-%m-%d")
        assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info])
        assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info])
    def test_recreate_dry_run(self):
        """'borg recreate -n' (dry-run) must not modify the archive."""
        self.create_regular_file('compressible', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        archives_before = self.cmd('list', self.repository_location + '::test')
        self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible')
        self.check_cache()
        archives_after = self.cmd('list', self.repository_location + '::test')
        assert archives_after == archives_before
    def test_recreate_skips_nothing_to_do(self):
        """'borg recreate' with nothing to change must leave the archive untouched."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_before = self.cmd('info', self.repository_location + '::test')
        self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
        self.check_cache()
        info_after = self.cmd('info', self.repository_location + '::test')
        assert info_before == info_after  # includes archive ID
    def test_with_lock(self):
        """'borg with-lock' must hold the exclusive repo lock while the command runs."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        lock_path = os.path.join(self.repository_path, 'lock.exclusive')
        # the subcommand exits 42 iff the exclusive lock exists while it runs
        cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
        self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42)
def test_recreate_list_output(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file1', size=0)
self.create_regular_file('file2', size=0)
self.create_regular_file('file3', size=0)
self.create_regular_file('file4', size=0)
self.create_regular_file('file5', size=0)
self.cmd('create', self.repository_location + '::test', 'input')
output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2')
self.check_cache()
self.assert_in("input/file1", output)
self.assert_in("x input/file2", output)
output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3')
self.check_cache()
self.assert_in("input/file1", output)
self.assert_in("x input/file3", output)
output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4')
self.check_cache()
self.assert_not_in("input/file1", output)
self.assert_not_in("x input/file4", output)
output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5')
self.check_cache()
self.assert_not_in("input/file1", output)
self.assert_not_in("x input/file5", output)
    def test_bad_filters(self):
        """Combining --first and --last in 'borg delete' must be rejected (rc 2)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2)
    def test_key_export_keyfile(self):
        """'borg key export' of a keyfile repo must match the local keyfile;
        re-import must restore it byte-identically."""
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        repo_id = self._extract_repository_id(self.repository_path)
        self.cmd('key', 'export', self.repository_location, export_file)
        with open(export_file, 'r') as fd:
            export_contents = fd.read()
        assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        with open(key_file, 'r') as fd:
            key_contents = fd.read()
        assert key_contents == export_contents
        # delete the keyfile, then re-import it from the export
        os.unlink(key_file)
        self.cmd('key', 'import', self.repository_location, export_file)
        with open(key_file, 'r') as fd:
            key_contents2 = fd.read()
        assert key_contents2 == key_contents
    def test_key_import_keyfile_with_borg_key_file(self):
        """'borg key import' must honor BORG_KEY_FILE as the import destination."""
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        exported_key_file = os.path.join(self.output_path, 'exported')
        self.cmd('key', 'export', self.repository_location, exported_key_file)
        key_file = os.path.join(self.keys_path, os.listdir(self.keys_path)[0])
        with open(key_file, 'r') as fd:
            key_contents = fd.read()
        os.unlink(key_file)
        imported_key_file = os.path.join(self.output_path, 'imported')
        with environment_variable(BORG_KEY_FILE=imported_key_file):
            self.cmd('key', 'import', self.repository_location, exported_key_file)
        assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE'
        with open(imported_key_file, 'r') as fd:
            imported_key_contents = fd.read()
        assert imported_key_contents == key_contents
def test_key_export_repokey(self):
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'repokey')
repo_id = self._extract_repository_id(self.repository_path)
self.cmd('key', 'export', self.repository_location, export_file)
with open(export_file, 'r') as fd:
export_contents = fd.read()
assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
with Repository(self.repository_path) as repository:
repo_key = RepoKey(repository)
repo_key.load(None, Passphrase.env_passphrase())
backup_key = KeyfileKey(key.TestKey.MockRepository())
backup_key.load(export_file, Passphrase.env_passphrase())
assert repo_key.enc_key == backup_key.enc_key
with Repository(self.repository_path) as repository:
repository.save_key(b'')
self.cmd('key', 'import', self.repository_location, export_file)
with Repository(self.repository_path) as repository:
repo_key2 = RepoKey(repository)
repo_key2.load(None, Passphrase.env_passphrase())
assert repo_key2.enc_key == repo_key2.enc_key
    def test_key_export_qr(self):
        """'borg key export --qr-html' must produce an HTML page containing the repo id."""
        export_file = self.output_path + '/exported.html'
        self.cmd('init', self.repository_location, '--encryption', 'repokey')
        repo_id = self._extract_repository_id(self.repository_path)
        self.cmd('key', 'export', '--qr-html', self.repository_location, export_file)
        with open(export_file, 'r', encoding='utf-8') as fd:
            export_contents = fd.read()
        assert bin_to_hex(repo_id) in export_contents
        assert export_contents.startswith('<!doctype html>')
        assert export_contents.endswith('</html>\n')
    def test_key_export_directory(self):
        """'borg key export' to an existing directory must fail with an error."""
        export_directory = self.output_path + '/exported'
        os.mkdir(export_directory)
        self.cmd('init', self.repository_location, '--encryption', 'repokey')
        self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR)
    def test_key_import_errors(self):
        """'borg key import' must reject missing, malformed and repo-mismatching key files."""
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        # import of a non-existent file must fail
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR)
        # a file that is not a borg key must be rejected
        with open(export_file, 'w') as fd:
            fd.write('something not a key\n')
        if self.FORK_DEFAULT:
            self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
        else:
            with pytest.raises(NotABorgKeyFile):
                self.cmd('key', 'import', self.repository_location, export_file)
        # a key belonging to a different repository id must be rejected
        with open(export_file, 'w') as fd:
            fd.write('BORG_KEY a0a0a0\n')
        if self.FORK_DEFAULT:
            self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
        else:
            with pytest.raises(RepoIdMismatch):
                self.cmd('key', 'import', self.repository_location, export_file)
    def test_key_export_paperkey(self):
        """'borg key export --paper' must emit the expected human-typable text format."""
        repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        self._set_repository_id(self.repository_path, unhexlify(repo_id))
        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        # replace the generated key with fixed, known key material so the
        # paper key output below is deterministic
        with open(key_file, 'w') as fd:
            fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
            fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
        self.cmd('key', 'export', '--paper', self.repository_location, export_file)
        with open(export_file, 'r') as fd:
            export_contents = fd.read()
        assert export_contents == """To restore key use borg key import --paper /path/to/repo
BORG PAPER KEY v1
id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d
2: 737475 - 88
"""
    def test_key_import_paperkey(self):
        """'borg key import --paper' must tolerate typos, checksum errors and
        restart-after-abort while typing in a paper key."""
        repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        self._set_repository_id(self.repository_path, unhexlify(repo_id))
        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        # fixed, known key material so the typed-in lines below match
        with open(key_file, 'w') as fd:
            fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
            fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
        typed_input = (
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n'  # Forgot to type "-"
            b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n'  # Forgot to type second "/"
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n'  # Typo (..42 not ..41)
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'  # Correct! Congratulations
            b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
            b'\n\n'  # Abort [yN] => N
            b'737475 88\n'  # missing "-"
            b'73747i - 88\n'  # typo
            b'73747 - 88\n'  # missing nibble
            b'73 74 75 - 89\n'  # line checksum mismatch
            b'00a1 - 88\n'  # line hash collision - overall hash mismatch, have to start over
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'
            b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
            b'73 74 75 - 88\n'
        )
        # In case that this has to change, here is a quick way to find a colliding line hash:
        #
        # from hashlib import sha256
        # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2]
        # for i in range(1000):
        #     if hash_fn(i.to_bytes(2, byteorder='big')) == '88':  # 88 = line hash
        #         print(i.to_bytes(2, 'big'))
        #         break
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
        # Test abort paths
        typed_input = b'\ny\n'
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
        typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n'
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
def test_debug_dump_manifest(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
dump_file = self.output_path + '/dump'
output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file)
assert output == ""
with open(dump_file, "r") as f:
result = json.load(f)
assert 'archives' in result
assert 'config' in result
assert 'item_keys' in result
assert 'timestamp' in result
assert 'version' in result
def test_debug_dump_archive(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
dump_file = self.output_path + '/dump'
output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file)
assert output == ""
with open(dump_file, "r") as f:
result = json.load(f)
assert '_name' in result
assert '_manifest_entry' in result
assert '_meta' in result
assert '_items' in result
def test_debug_refcount_obj(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip()
assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].'
create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
archive_id = create_json['archive']['id']
output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip()
assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].'
# Invalid IDs do not abort or return an error
output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip()
assert output == 'object id 124 is invalid.\nobject id xyza is invalid.'
def test_debug_info(self):
output = self.cmd('debug', 'info')
assert 'CRC implementation' in output
assert 'Python' in output
def test_benchmark_crud(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'):
self.cmd('benchmark', 'crud', self.repository_location, self.input_path)
def test_config(self):
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('config', '--list', self.repository_location)
self.assert_in('[repository]', output)
self.assert_in('version', output)
self.assert_in('segments_per_dir', output)
self.assert_in('storage_quota', output)
self.assert_in('append_only', output)
self.assert_in('additional_free_space', output)
self.assert_in('id', output)
self.assert_not_in('last_segment_checked', output)
output = self.cmd('config', self.repository_location, 'last_segment_checked', exit_code=1)
self.assert_in('No option ', output)
self.cmd('config', self.repository_location, 'last_segment_checked', '123')
output = self.cmd('config', self.repository_location, 'last_segment_checked')
assert output == '123' + '\n'
output = self.cmd('config', '--list', self.repository_location)
self.assert_in('last_segment_checked', output)
self.cmd('config', '--delete', self.repository_location, 'last_segment_checked')
for cfg_key, cfg_value in [
('additional_free_space', '2G'),
('repository.append_only', '1'),
]:
output = self.cmd('config', self.repository_location, cfg_key)
assert output == '0' + '\n'
self.cmd('config', self.repository_location, cfg_key, cfg_value)
output = self.cmd('config', self.repository_location, cfg_key)
assert output == cfg_value + '\n'
self.cmd('config', '--delete', self.repository_location, cfg_key)
self.cmd('config', self.repository_location, cfg_key, exit_code=1)
self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2)
self.cmd('config', self.repository_location, exit_code=2)
self.cmd('config', self.repository_location, 'invalid-option', exit_code=1)
requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.')
requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.')
@requires_gnutar
def test_export_tar(self):
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress')
with changedir('output'):
# This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask.
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
@requires_gzip
def test_export_tar_gz(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list')
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
def test_export_tar_strip_components(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list')
# --list's path are those before processing with --strip-components
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_hardlinks
@requires_gnutar
def test_export_tar_strip_components_links(self):
self._extract_hardlinks_setup()
self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2')
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
@requires_hardlinks
@requires_gnutar
def test_extract_hardlinks_tar(self):
self._extract_hardlinks_setup()
self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1')
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
def test_detect_attic_repo(self):
path = make_attic_repo(self.repository_path)
cmds = [
['create', path + '::test', self.tmpdir],
['extract', path + '::test'],
['check', path],
['rename', path + '::test', 'newname'],
['list', path],
['delete', path],
['prune', path],
['info', path + '::test'],
['key', 'export', path, 'exported'],
['key', 'import', path, 'import'],
['key', 'change-passphrase', path],
['break-lock', path],
]
for args in cmds:
output = self.cmd(*args, fork=True, exit_code=2)
assert 'Attic repository detected.' in output
@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):
    """Run the archiver test suite against the frozen borg.exe binary.

    Tests that monkeypatch objects inside the test process cannot affect a
    separate binary and are therefore skipped.
    """
    EXE = 'borg.exe'
    FORK_DEFAULT = True

    @unittest.skip('does not raise Exception, but sets rc==2')
    def test_init_parent_dirs(self):
        pass

    @unittest.skip('patches objects')
    def test_init_interrupt(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_capabilities(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_xattrs_errors(self):
        pass

    @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.')
    def test_basic_functionality(self):
        pass

    @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.')
    def test_overwrite(self):
        pass

    def test_fuse(self):
        if fakeroot_detected():
            # Bug fix: the previous bare unittest.skip(...) call only built a
            # decorator and discarded it, so under fakeroot this test "passed"
            # without running anything. Raising SkipTest actually skips it.
            raise unittest.SkipTest('test_fuse with the binary is not compatible with fakeroot')
        super().test_fuse()
class ArchiverCheckTestCase(ArchiverTestCaseBase):
    """Tests for 'borg check' and 'borg check --repair' on deliberately damaged repositories."""
    def setUp(self):
        # Every test starts from a repo with two archives; the tiny ChunkBuffer
        # forces the archive item metadata to be split over many chunks, so
        # metadata damage can be simulated per chunk.
        super().setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')
    def test_check_usage(self):
        """Exercise the --repository-only/--archives-only/--prefix/--first/--last filters."""
        output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        self.assert_in('Checking segments', output)
        # reset logging to new process default to avoid need for fork=True on next check
        logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
        output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        self.assert_not_in('Checking segments', output)
        output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_not_in('archive2', output)
        output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        self.assert_in('archive2', output)
    def test_missing_file_chunk(self):
        """Delete one file chunk; check must flag it, repair must replace it and later heal it."""
        archive, repository = self.open_archive('archive1')
        with repository:
            # kill the last chunk of a known file in archive1
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    valid_chunks = item.chunks
                    killed_chunk = valid_chunks[-1]
                    repository.delete(killed_chunk.id)
                    break
            else:
                self.fail('should not happen')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('New missing file chunk detected', output)
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_in('broken#', output)
        # check that the file in the old archives has now a different chunk list without the killed chunk
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_not_equal(valid_chunks, item.chunks)
                        self.assert_not_in(killed_chunk, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # do a fresh backup (that will include the killed chunk)
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.create_src_archive('archive3')
        # check should be able to heal the file now:
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('Healed previously missing file chunk', output)
        self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output)
        # check that the file in the old archives has the correct chunks again
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_equal(valid_chunks, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # list is also all-healthy again
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_not_in('broken#', output)
    def test_missing_archive_item_chunk(self):
        """Delete one item-metadata chunk of an archive; repair must recover."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.metadata.items[0])
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_archive_metadata(self):
        """Delete the archive metadata object itself; repair must recover."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.id)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_manifest(self):
        """Delete the manifest; repair must rebuild it, finding both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(Manifest.MANIFEST_ID)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_corrupted_manifest(self):
        """Append garbage to the manifest; repair must rebuild it, finding both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_corrupted_chunk(self):
        """Corrupt manifest AND one archive chunk; rebuilt manifest keeps the intact archive."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            chunk = repository.get(archive.id)
            corrupted_chunk = chunk + b'corrupted!'
            repository.put(archive.id, corrupted_chunk)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_duplicate_archive(self):
        """Manifest rebuild must rename duplicate archive names (archive1 -> archive1.1)."""
        archive, repository = self.open_archive('archive1')
        key = archive.key
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            # inject a second archive metadata object that reuses the name 'archive1'
            archive = msgpack.packb({
                'cmdline': [],
                'items': [],
                'hostname': 'foo',
                'username': 'bar',
                'name': 'archive1',
                'time': '2016-12-15T18:49:51.849711',
                'version': 1,
            })
            archive_id = key.id_hash(archive)
            repository.put(archive_id, key.encrypt(archive))
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        output = self.cmd('list', self.repository_location)
        self.assert_in('archive1', output)
        self.assert_in('archive1.1', output)
        self.assert_in('archive2', output)
    def test_extra_chunks(self):
        """An orphaned chunk makes check fail until --repair removes it."""
        self.cmd('check', self.repository_location, exit_code=0)
        with Repository(self.repository_location, exclusive=True) as repository:
            repository.put(b'01234567890123456789012345678901', b'xxxx')
            repository.commit(compact=False)
        # check fails repeatedly (the orphan stays) until repaired
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
    def _test_verify_data(self, *init_args):
        """Shared body: corrupt a data chunk in place; only --verify-data detects it."""
        shutil.rmtree(self.repository_path)
        self.cmd('init', self.repository_location, *init_args)
        self.create_src_archive('archive1')
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    chunk = item.chunks[-1]
                    data = repository.get(chunk.id) + b'1234'
                    repository.put(chunk.id, data)
                    break
            repository.commit(compact=False)
        # plain check does not decrypt/verify chunk contents, so it passes
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        # repair (heal is tested in another test)
        output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        assert 'testsuite/archiver.py: New missing file chunk detected' in output
    def test_verify_data(self):
        """--verify-data with encrypted (repokey) repo."""
        self._test_verify_data('--encryption', 'repokey')
    def test_verify_data_unencrypted(self):
        """--verify-data with unencrypted repo."""
        self._test_verify_data('--encryption', 'none')
    def test_empty_repository(self):
        """A repository emptied of all objects (no manifest) must fail check."""
        with Repository(self.repository_location, exclusive=True) as repository:
            for id_ in repository.list():
                repository.delete(id_)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
    def test_attic013_acl_bug(self):
        # Attic up to release 0.13 contained a bug where every item unintentionally received
        # a b'acl'=None key-value pair.
        # This bug can still live on in Borg repositories (through borg upgrade).
        class Attic013Item:
            # minimal fake item emulating what buggy Attic 0.13 wrote
            def as_dict(self):
                return {
                    # These are required
                    b'path': '1234',
                    b'mtime': 0,
                    b'mode': 0,
                    b'user': b'0',
                    b'group': b'0',
                    b'uid': 0,
                    b'gid': 0,
                    # acl is the offending key.
                    b'acl': None,
                }
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True)
                archive.items_buffer.add(Attic013Item())
                archive.save()
        # check and list must tolerate the bogus b'acl' key
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('list', self.repository_location + '::0.13', exit_code=0)
class ManifestAuthenticationTest(ArchiverTestCaseBase):
    """Tests for TAM (tamper authentication) of the repository manifest."""
    def spoof_manifest(self, repository):
        # Overwrite the manifest with an unauthenticated (no 'tam') one,
        # like a pre-1.0.9 borg (or an attacker) would write.
        with repository:
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'config': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
    def test_fresh_init_tam_required(self):
        """A freshly initialized repo must reject an unauthenticated manifest."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
    def test_not_required(self):
        """TAM-less manifests are accepted until 'borg upgrade --tam' makes TAM mandatory."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            # emulate a pre-TAM key/manifest state
            shutil.rmtree(get_security_dir(bin_to_hex(repository.id)))
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            key.tam_required = False
            key.change_passphrase(key._passphrase)
            manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID)))
            del manifest[b'tam']
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest)))
            repository.commit(compact=False)
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM not found and not required' in output
        # Run upgrade
        self.cmd('upgrade', '--tam', self.repository_location)
        # Manifest must be authenticated now
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM-verified manifest' in output
        # Try to spoof / modify pre-1.0.9
        self.spoof_manifest(repository)
        # Fails
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
        # Force upgrade
        self.cmd('upgrade', '--tam', '--force', self.repository_location)
        self.cmd('list', self.repository_location)
    def test_disable(self):
        """'upgrade --disable-tam' makes the repo accept a spoofed (TAM-less) manifest."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        assert not self.cmd('list', self.repository_location)
    def test_disable2(self):
        """Disabling TAM after the manifest was already spoofed also works."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        assert not self.cmd('list', self.repository_location)
class RemoteArchiverTestCase(ArchiverTestCase):
    """Re-run the archiver tests against a repository accessed through RemoteRepository."""
    # makes self.repository_location a remote-style location
    prefix = '__testsuite__:'
    def open_repository(self):
        return RemoteRepository(Location(self.repository_location))
    def test_remote_repo_restrict_to_path(self):
        """borg serve --restrict-to-path only allows repos under the given path(s)."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # restricted to repo directory itself, fail for other directories with same prefix:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_0')
        # restricted to a completely different path:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_1')
        path_prefix = os.path.dirname(self.repository_path)
        # restrict to repo directory's parent directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_2')
        # restrict to repo directory's parent directory and another directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_3')
    def test_remote_repo_restrict_to_repository(self):
        """borg serve --restrict-to-repository only allows that exact repo directory."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        parent_path = os.path.join(self.repository_path, '..')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location)
    @unittest.skip('only works locally')
    def test_debug_put_get_delete_obj(self):
        pass
    @unittest.skip('only works locally')
    def test_config(self):
        pass
    @unittest.skip('only works locally')
    def test_migrate_lock_alive(self):
        pass
    def test_strip_components_doesnt_leak(self):
        """extract --strip-components must not leave cached responses in RemoteRepository."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file', contents=b"test file contents 1")
        self.create_regular_file('dir/file2', contents=b"test file contents 2")
        self.create_regular_file('skipped-file1', contents=b"test file contents 3")
        self.create_regular_file('skipped-file2', contents=b"test file contents 4")
        self.create_regular_file('skipped-file3', contents=b"test file contents 5")
        self.cmd('create', self.repository_location + '::test', 'input')
        marker = 'cached responses left in RemoteRepository'
        with changedir('output'):
            # extract with varying --strip-components; the leak marker must never appear
            res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(marker not in res)
            with self.assert_creates_file('file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2')
                self.assert_true(marker not in res)
            with self.assert_creates_file('dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1')
                self.assert_true(marker not in res)
            with self.assert_creates_file('input/dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0')
                self.assert_true(marker not in res)
class ArchiverCorruptionTestCase(ArchiverTestCaseBase):
    """Tests for detection and handling of corrupted local cache files."""
    def setUp(self):
        super().setUp()
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # path of this repository's local cache directory
        self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path']
    def corrupt(self, file, amount=1):
        """Bit-flip (complement) the last `amount` bytes of `file` in place."""
        with open(file, 'r+b') as fd:
            fd.seek(-amount, io.SEEK_END)
            corrupted = bytes(255-c for c in fd.read(amount))
            fd.seek(-amount, io.SEEK_END)
            fd.write(corrupted)
    def test_cache_chunks(self):
        """A corrupted chunks cache must raise/report a FileIntegrityError."""
        self.corrupt(os.path.join(self.cache_path, 'chunks'))
        if self.FORK_DEFAULT:
            out = self.cmd('info', self.repository_location, exit_code=2)
            assert 'failed integrity check' in out
        else:
            with pytest.raises(FileIntegrityError):
                self.cmd('info', self.repository_location)
    def test_cache_files(self):
        """A corrupted files cache is only a warning; borg continues without it."""
        self.cmd('create', self.repository_location + '::test', 'input')
        self.corrupt(os.path.join(self.cache_path, 'files'))
        out = self.cmd('create', self.repository_location + '::test1', 'input')
        # borg warns about the corrupt files cache, but then continues without files cache.
        assert 'files cache is corrupted' in out
    def test_chunks_archive(self):
        """Cache sync must detect a corrupted chunks.archive.d entry and rebuild it."""
        self.cmd('create', self.repository_location + '::test1', 'input')
        # Find ID of test1 so we can corrupt it later :)
        target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip()
        self.cmd('create', self.repository_location + '::test2', 'input')
        # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('info', self.repository_location, '--json')
        chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d')
        assert len(os.listdir(chunks_archive)) == 4  # two archives, one chunks cache and one .integrity file each
        self.corrupt(os.path.join(chunks_archive, target_id + '.compact'))
        # Trigger cache sync by changing the manifest ID in the cache config
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        # Cache sync notices corrupted archive chunks, but automatically recovers.
        out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1)
        assert 'Reading cached archive chunk index for test1' in out
        assert 'Cached archive chunk index of test1 is corrupted' in out
        assert 'Fetching and building archive index for test1' in out
    def test_old_version_interfered(self):
        """Detect that an old (pre-integrity) borg version modified the cache."""
        # Modify the main manifest ID without touching the manifest ID in the integrity section.
        # This happens if a version without integrity checking modifies the cache.
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        out = self.cmd('info', self.repository_location)
        assert 'Cache integrity data not available: old Borg version modified the cache.' in out
class DiffArchiverTestCase(ArchiverTestCaseBase):
def test_basic_functionality(self):
# Setup files for the first snapshot
self.create_regular_file('empty', size=0)
self.create_regular_file('file_unchanged', size=128)
self.create_regular_file('file_removed', size=256)
self.create_regular_file('file_removed2', size=512)
self.create_regular_file('file_replaced', size=1024)
os.mkdir('input/dir_replaced_with_file')
os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755)
os.mkdir('input/dir_removed')
if are_symlinks_supported():
os.mkdir('input/dir_replaced_with_link')
os.symlink('input/dir_replaced_with_file', 'input/link_changed')
os.symlink('input/file_unchanged', 'input/link_removed')
os.symlink('input/file_removed2', 'input/link_target_removed')
os.symlink('input/empty', 'input/link_target_contents_changed')
os.symlink('input/empty', 'input/link_replaced_by_file')
if are_hardlinks_supported():
os.link('input/file_replaced', 'input/hardlink_target_replaced')
os.link('input/empty', 'input/hardlink_contents_changed')
os.link('input/file_removed', 'input/hardlink_removed')
os.link('input/file_removed2', 'input/hardlink_target_removed')
self.cmd('init', '--encryption=repokey', self.repository_location)
# Create the first snapshot
self.cmd('create', self.repository_location + '::test0', 'input')
# Setup files for the second snapshot
self.create_regular_file('file_added', size=2048)
self.create_regular_file('file_empty_added', size=0)
os.unlink('input/file_replaced')
self.create_regular_file('file_replaced', contents=b'0' * 4096)
os.unlink('input/file_removed')
os.unlink('input/file_removed2')
os.rmdir('input/dir_replaced_with_file')
self.create_regular_file('dir_replaced_with_file', size=8192)
os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755)
os.mkdir('input/dir_added')
os.rmdir('input/dir_removed')
if are_symlinks_supported():
os.rmdir('input/dir_replaced_with_link')
os.symlink('input/dir_added', 'input/dir_replaced_with_link')
os.unlink('input/link_changed')
os.symlink('input/dir_added', 'input/link_changed')
os.symlink('input/dir_added', 'input/link_added')
os.unlink('input/link_replaced_by_file')
self.create_regular_file('link_replaced_by_file', size=16384)
os.unlink('input/link_removed')
if are_hardlinks_supported():
os.unlink('input/hardlink_removed')
os.link('input/file_added', 'input/hardlink_added')
with open('input/empty', 'ab') as fd:
fd.write(b'appended_data')
# Create the second snapshot
self.cmd('create', self.repository_location + '::test1a', 'input')
self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input')
def do_asserts(output, can_compare_ids):
# File contents changed (deleted and replaced with a new file)
change = 'B' if can_compare_ids else '{:<19}'.format('modified')
assert 'file_replaced' in output # added to debug #3494
assert '{} input/file_replaced'.format(change) in output
# File unchanged
assert 'input/file_unchanged' not in output
# Directory replaced with a regular file
if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output
# Basic directory cases
assert 'added directory input/dir_added' in output
assert 'removed directory input/dir_removed' in output
if are_symlinks_supported():
# Basic symlink cases
assert 'changed link input/link_changed' in output
assert 'added link input/link_added' in output
assert 'removed link input/link_removed' in output
# Symlink replacing or being replaced
assert '] input/dir_replaced_with_link' in output
assert '] input/link_replaced_by_file' in output
# Symlink target removed. Should not affect the symlink at all.
assert 'input/link_target_removed' not in output
# The inode has two links and the file contents changed. Borg
# should notice the changes in both links. However, the symlink
# pointing to the file is not changed.
change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
assert '{} input/empty'.format(change) in output
if are_hardlinks_supported():
assert '{} input/hardlink_contents_changed'.format(change) in output
if are_symlinks_supported():
assert 'input/link_target_contents_changed' not in output
# Added a new file and a hard link to it. Both links to the same
# inode should appear as separate files.
assert 'added 2.05 kB input/file_added' in output
if are_hardlinks_supported():
assert 'added 2.05 kB input/hardlink_added' in output
# check if a diff between non-existent and empty new file is found
assert 'added 0 B input/file_empty_added' in output
# The inode has two links and both of them are deleted. They should
# appear as two deleted files.
assert 'removed 256 B input/file_removed' in output
if are_hardlinks_supported():
assert 'removed 256 B input/hardlink_removed' in output
# Another link (marked previously as the source in borg) to the
# same inode was removed. This should not change this link at all.
if are_hardlinks_supported():
assert 'input/hardlink_target_removed' not in output
# Another link (marked previously as the source in borg) to the
# same inode was replaced with a new regular file. This should not
# change this link at all.
if are_hardlinks_supported():
assert 'input/hardlink_target_replaced' not in output
def do_json_asserts(output, can_compare_ids):
    """Check the ``diff --json-lines`` output for the same change set as do_asserts.

    :param output: newline-separated JSON objects, one per changed path
    :param can_compare_ids: True when both archives used identical chunker params,
                            so byte-exact added/removed sizes can be asserted
    """
    def get_changes(filename, data):
        # collect the 'changes' lists of all JSON records for this path
        chgsets = [j['changes'] for j in data if j['path'] == filename]
        # each path must appear at most once in the diff output
        assert len(chgsets) < 2
        # return a flattened list of changes for given filename
        return [chg for chgset in chgsets for chg in chgset]

    # convert output to list of dicts
    joutput = [json.loads(line) for line in output.split('\n') if line]

    # File contents changed (deleted and replaced with a new file)
    expected = {'type': 'modified', 'added': 4096, 'removed': 1024} if can_compare_ids else {'type': 'modified'}
    assert expected in get_changes('input/file_replaced', joutput)

    # File unchanged
    assert not any(get_changes('input/file_unchanged', joutput))

    # Directory replaced with a regular file
    if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
        assert {'type': 'mode', 'old_mode': 'drwxr-xr-x', 'new_mode': '-rwxr-xr-x'} in \
            get_changes('input/dir_replaced_with_file', joutput)

    # Basic directory cases
    assert {'type': 'added directory'} in get_changes('input/dir_added', joutput)
    assert {'type': 'removed directory'} in get_changes('input/dir_removed', joutput)

    if are_symlinks_supported():
        # Basic symlink cases
        assert {'type': 'changed link'} in get_changes('input/link_changed', joutput)
        assert {'type': 'added link'} in get_changes('input/link_added', joutput)
        assert {'type': 'removed link'} in get_changes('input/link_removed', joutput)

        # Symlink replacing or being replaced
        assert any(chg['type'] == 'mode' and chg['new_mode'].startswith('l') for chg in
                   get_changes('input/dir_replaced_with_link', joutput))
        assert any(chg['type'] == 'mode' and chg['old_mode'].startswith('l') for chg in
                   get_changes('input/link_replaced_by_file', joutput))

        # Symlink target removed. Should not affect the symlink at all.
        assert not any(get_changes('input/link_target_removed', joutput))

    # The inode has two links and the file contents changed. Borg
    # should notice the changes in both links. However, the symlink
    # pointing to the file is not changed.
    expected = {'type': 'modified', 'added': 13, 'removed': 0} if can_compare_ids else {'type': 'modified'}
    assert expected in get_changes('input/empty', joutput)
    if are_hardlinks_supported():
        assert expected in get_changes('input/hardlink_contents_changed', joutput)
    if are_symlinks_supported():
        assert not any(get_changes('input/link_target_contents_changed', joutput))

    # Added a new file and a hard link to it. Both links to the same
    # inode should appear as separate files.
    assert {'type': 'added', 'size': 2048} in get_changes('input/file_added', joutput)
    if are_hardlinks_supported():
        assert {'type': 'added', 'size': 2048} in get_changes('input/hardlink_added', joutput)

    # check if a diff between non-existent and empty new file is found
    assert {'type': 'added', 'size': 0} in get_changes('input/file_empty_added', joutput)

    # The inode has two links and both of them are deleted. They should
    # appear as two deleted files.
    assert {'type': 'removed', 'size': 256} in get_changes('input/file_removed', joutput)
    if are_hardlinks_supported():
        assert {'type': 'removed', 'size': 256} in get_changes('input/hardlink_removed', joutput)

    # Another link (marked previously as the source in borg) to the
    # same inode was removed. This should not change this link at all.
    if are_hardlinks_supported():
        assert not any(get_changes('input/hardlink_target_removed', joutput))

    # Another link (marked previously as the source in borg) to the
    # same inode was replaced with a new regular file. This should not
    # change this link at all.
    if are_hardlinks_supported():
        assert not any(get_changes('input/hardlink_target_replaced', joutput))
do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True)
# We expect exit_code=1 due to the chunker params warning
do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False)
do_json_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a', '--json-lines'), True)
def test_sort_option(self):
    """`borg diff --sort` must list changed paths in alphabetical order."""
    self.cmd('init', '--encryption=repokey', self.repository_location)

    # first archive: two files that will be removed, two that will change
    first_generation = [('a_file_removed', 8), ('f_file_removed', 16),
                        ('c_file_changed', 32), ('e_file_changed', 64)]
    for name, size in first_generation:
        self.create_regular_file(name, size=size)
    self.cmd('create', self.repository_location + '::test0', 'input')

    # drop everything, then recreate the changed files with new sizes
    # and add two new files
    for name, _ in first_generation:
        os.unlink('input/' + name)
    second_generation = [('c_file_changed', 512), ('e_file_changed', 1024),
                         ('b_file_added', 128), ('d_file_added', 256)]
    for name, size in second_generation:
        self.create_regular_file(name, size=size)
    self.cmd('create', self.repository_location + '::test1', 'input')

    output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1')
    expected_order = [
        'a_file_removed',
        'b_file_added',
        'c_file_changed',
        'd_file_added',
        'e_file_changed',
        'f_file_removed',
    ]
    # each output line must mention the expected path, in order
    assert all(name in line for name, line in zip(expected_order, output.splitlines()))
def test_get_args():
    """get_args() merges an SSH forced command line with SSH_ORIGINAL_COMMAND.

    The first argument is argv as produced by the ssh forced command, the
    second is the client's verbatim command line (SSH_ORIGINAL_COMMAND).
    Restrictions from the forced command must always win over whatever the
    client asks for.
    """
    archiver = Archiver()

    # everything normal: forced restrictions applied, client only adds --info
    parsed = archiver.get_args(
        ['borg', 'serve', '--umask=0027', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
        'borg serve --info')
    assert parsed.func == archiver.do_serve
    assert parsed.restrict_to_paths == ['/p1', '/p2']
    assert parsed.umask == 0o027
    assert parsed.log_level == 'info'

    # similar, but with --restrict-to-repository
    parsed = archiver.get_args(
        ['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
        'borg serve --info --umask=0027')
    assert parsed.restrict_to_repositories == ['/r1', '/r2']

    # a client must not be able to break out of the path restriction ...
    parsed = archiver.get_args(
        ['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
        'borg serve --restrict-to-path=/')
    assert parsed.restrict_to_paths == ['/p1', '/p2']

    # ... nor out of the repository restriction ...
    parsed = archiver.get_args(
        ['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
        'borg serve --restrict-to-repository=/')
    assert parsed.restrict_to_repositories == ['/r1', '/r2']

    # ... nor below a restricted repository ...
    parsed = archiver.get_args(
        ['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ],
        'borg serve --restrict-to-repository=/r1/below')
    assert parsed.restrict_to_repositories == ['/r1', '/r2']

    # ... nor execute a different subcommand than the forced 'serve'
    parsed = archiver.get_args(
        ['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
        'borg init --encryption=repokey /')
    assert parsed.func == archiver.do_serve

    # Check that environment variables in the forced command don't cause issues. If the command
    # were not forced, environment variables would be interpreted by the shell, but this does not
    # happen for forced commands - we get the verbatim command line and need to deal with env vars.
    parsed = archiver.get_args(['borg', 'serve', ],
                               'BORG_FOO=bar borg serve --info')
    assert parsed.func == archiver.do_serve
def test_chunk_content_equal():
    """ItemDiff._chunk_content_equal must compare byte streams independently
    of how they are split into chunks, and must be symmetric in its arguments.
    """
    def ccc(a, b):
        # materialize both sides once so each comparison gets a fresh iterator
        # (idiom fix: list(x) instead of the manual [data for data in x] copy)
        chunks_a = list(a)
        chunks_b = list(b)
        compare1 = ItemDiff._chunk_content_equal(iter(chunks_a), iter(chunks_b))
        compare2 = ItemDiff._chunk_content_equal(iter(chunks_b), iter(chunks_a))
        # the comparison must not depend on argument order
        assert compare1 == compare2
        return compare1

    # same bytes, different chunk boundaries -> equal
    assert ccc([
        b'1234', b'567A', b'bC'
    ], [
        b'1', b'23', b'4567A', b'b', b'C'
    ])
    # one iterator exhausted before the other
    assert not ccc([
        b'12345',
    ], [
        b'1234', b'56'
    ])
    # content mismatch
    assert not ccc([
        b'1234', b'65'
    ], [
        b'1234', b'56'
    ])
    # first is the prefix of second
    assert not ccc([
        b'1234', b'56'
    ], [
        b'1234', b'565'
    ])
class TestBuildFilter:
    """Unit tests for Archiver.build_filter (item selection during extract)."""

    @staticmethod
    def peek_and_store_hardlink_masters(item, matched):
        # build_filter needs this callback; these tests do not exercise hardlinks
        pass

    def test_basic(self):
        """An include pattern matches the path itself and everything below it."""
        matcher = PatternMatcher()
        matcher.add([parse_pattern('included')], IECommand.Include)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert item_filter(Item(path='included'))
        assert item_filter(Item(path='included/file'))
        assert not item_filter(Item(path='something else'))

    def test_empty(self):
        """With no patterns and a truthy fallback, every item is accepted."""
        matcher = PatternMatcher(fallback=True)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert item_filter(Item(path='anything'))

    def test_strip_components(self):
        """Items with no path left after stripping components are rejected."""
        matcher = PatternMatcher(fallback=True)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1)
        assert not item_filter(Item(path='shallow'))
        assert not item_filter(Item(path='shallow/'))  # can this even happen? paths are normalized...
        assert item_filter(Item(path='deep enough/file'))
        assert item_filter(Item(path='something/dir/file'))
class TestCommonOptions:
    """Tests for Archiver.CommonOptions: common flags must be accepted both
    before and after the subcommand, with the later occurrence winning."""

    @staticmethod
    def define_common_options(add_common_option):
        # a miniature replica of borg's real common options, covering the
        # interesting argparse actions: help, store_const, append, store_true,
        # and a typed option with a default
        add_common_option('-h', '--help', action='help', help='show this help message and exit')
        add_common_option('--critical', dest='log_level', help='foo',
                          action='store_const', const='critical', default='warning')
        add_common_option('--error', dest='log_level', help='foo',
                          action='store_const', const='error', default='warning')
        add_common_option('--append', dest='append', help='foo',
                          action='append', metavar='TOPIC', default=[])
        add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo')
        add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
                          help='(default: %(default)d).')

    @pytest.fixture
    def basic_parser(self):
        """Top-level parser with a CommonOptions instance attached (no groups yet)."""
        parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False)
        parser.common_options = Archiver.CommonOptions(self.define_common_options,
                                                       suffix_precedence=('_level0', '_level1'))
        return parser

    @pytest.fixture
    def subparsers(self, basic_parser):
        if sys.version_info >= (3, 7):
            # py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False)
        else:
            # py36 does not support required=... argument (but behaves like required=False).
            # note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more.
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>')

    @pytest.fixture
    def parser(self, basic_parser):
        """basic_parser with the level-0 (pre-subcommand) common group added."""
        basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True)
        return basic_parser

    @pytest.fixture
    def common_parser(self, parser):
        """Parent parser carrying the level-1 (post-subcommand) common group."""
        common_parser = argparse.ArgumentParser(add_help=False, prog='test')
        parser.common_options.add_common_group(common_parser, '_level1')
        return common_parser

    @pytest.fixture
    def parse_vars_from_line(self, parser, subparsers, common_parser):
        """Return a helper that parses a command line and returns vars(args)
        after CommonOptions.resolve() merged the level-0/level-1 values."""
        subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False,
                                          description='foo', epilog='bar', help='baz',
                                          formatter_class=argparse.RawDescriptionHelpFormatter)
        subparser.set_defaults(func=1234)
        subparser.add_argument('--append-only', dest='append_only', action='store_true')

        def parse_vars_from_line(*line):
            print(line)
            args = parser.parse_args(line)
            parser.common_options.resolve(args)
            return vars(args)
        return parse_vars_from_line

    def test_simple(self, parse_vars_from_line):
        assert parse_vars_from_line('--error') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'error',
            'progress': False
        }

        # the value given after the subcommand wins over the one before it
        assert parse_vars_from_line('--error', 'subcommand', '--critical') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'critical',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

        # subcommand-only options are rejected before the subcommand
        with pytest.raises(SystemExit):
            parse_vars_from_line('--append-only', 'subcommand')

        # 'append' actions accumulate across both positions
        assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == {
            'append': ['foo', 'bar', 'baz'],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

    @pytest.mark.parametrize('position', ('before', 'after', 'both'))
    @pytest.mark.parametrize('flag,args_key,args_value', (
        ('-p', 'progress', True),
        ('--lock-wait=3', 'lock_wait', 3),
    ))
    def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value):
        """A common flag must have the same effect before, after, or on both
        sides of the subcommand."""
        line = []
        if position in ('before', 'both'):
            line.append(flag)
        line.append('subcommand')
        if position in ('after', 'both'):
            line.append(flag)

        result = {
            'append': [],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        result[args_key] = args_value

        assert parse_vars_from_line(*line) == result
def test_parse_storage_quota():
    """parse_storage_quota() parses size suffixes and rejects too-small quotas."""
    fifty_mb = 50 * 1000 ** 2
    assert parse_storage_quota('50M') == fifty_mb
    # quotas below the minimum are rejected with an argparse error
    with pytest.raises(argparse.ArgumentTypeError):
        parse_storage_quota('5M')
def get_all_parsers():
    """
    Return dict mapping command to parser.

    Walks the borg / borgfs argparse trees recursively, flattening nested
    subcommands into space-joined names (e.g. 'key export').
    """
    root_parser = Archiver(prog='borg').build_parser()
    borgfs_parser = Archiver(prog='borgfs').build_parser()
    parsers = {}

    def discover_level(prefix, parser, archiver_cls, extra_choices=None):
        # collect all subcommand parsers one level below `parser`
        choices = {}
        for action in parser._actions:
            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
                for command_name, subparser in action.choices.items():
                    choices[prefix + command_name] = subparser
        if extra_choices is not None:
            choices.update(extra_choices)
        # a nested level with no subcommands terminates the recursion
        if prefix and not choices:
            return

        for command, subparser in sorted(choices.items()):
            discover_level(command + " ", subparser, archiver_cls)
            parsers[command] = subparser

    discover_level("", root_parser, Archiver, {'borgfs': borgfs_parser})
    return parsers
@pytest.mark.parametrize('command, parser', list(get_all_parsers().items()))
def test_help_formatting(command, parser):
    """Every command's rst epilog must be non-empty (i.e. help text exists)."""
    if isinstance(parser.epilog, RstToTextLazy):
        assert parser.epilog.rst
@pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items()))
def test_help_formatting_helptexts(topic, helptext):
    """Every 'borg help <topic>' text must render to non-empty terminal output."""
    assert str(rst_to_terminal(helptext))
| 51.418547 | 148 | 0.630304 | import argparse
import dateutil.tz
import errno
import io
import json
import logging
import os
import pstats
import random
import re
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from binascii import unhexlify, b2a_base64
from configparser import ConfigParser
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from hashlib import sha256
from io import BytesIO, StringIO
from unittest.mock import patch
import pytest
import borg
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer
from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING
from ..cache import Cache, LocalCache
from ..chunker import has_seek_hole
from ..constants import *
from ..crypto.low_level import bytes_to_long, num_cipher_blocks
from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError
from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile
from ..crypto.file_integrity import FileIntegrityError
from ..helpers import Location, get_security_dir
from ..helpers import Manifest, MandatoryFeatureUnsupported
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from ..helpers import bin_to_hex
from ..helpers import MAX_S
from ..helpers import msgpack
from ..helpers import flags_noatime, flags_normal
from ..nanorst import RstToTextLazy, rst_to_terminal
from ..patterns import IECommand, PatternMatcher, parse_pattern
from ..item import Item, ItemDiff
from ..locking import LockFailed
from ..logger import setup_logging
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, llfuse
from . import BaseTestCase, changedir, environment_variable, no_selinux
from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported
from .platform import fakeroot_detected
from .upgrader import make_attic_repo
from . import key
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw):
    """Run a borg command line and return a ``(exit_code, output)`` tuple.

    :param args: the borg command line, e.g. ``('create', repo_spec, 'input')``
    :param archiver: Archiver instance to reuse (in-process mode only);
                     a fresh one is created when None
    :param fork: run borg in a subprocess if True, else in-process
    :param exe: subprocess mode only: None = this interpreter with
                ``-m borg.archiver``; str = path to a borg binary;
                tuple = complete argv prefix
    :param input: bytes fed to borg's stdin
    :param binary_output: return output as bytes instead of decoded str
    """
    if fork:
        try:
            if exe is None:
                borg = (sys.executable, '-m', 'borg.archiver')
            elif isinstance(exe, str):
                borg = (exe, )
            elif not isinstance(exe, tuple):
                raise ValueError('exe must be None, a tuple or a str')
            output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input)
            ret = 0
        except subprocess.CalledProcessError as e:
            output = e.output
            ret = e.returncode
        except SystemExit as e:
            output = ''
            ret = e.code
        if binary_output:
            return ret, output
        else:
            return ret, os.fsdecode(output)
    else:
        # in-process mode: temporarily replace the process-wide std streams,
        # always restoring them in the finally clause below
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            sys.stdin = StringIO(input.decode())
            sys.stdin.buffer = BytesIO(input)
            output = BytesIO()
            # stdout and stderr share one buffer so output interleaves like a tty
            output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8')
            if archiver is None:
                archiver = Archiver()
            archiver.prerun_checks = lambda *args: None
            archiver.exit_code = EXIT_SUCCESS
            helpers.exit_code = EXIT_SUCCESS
            try:
                args = archiver.parse_args(list(args))
            except SystemExit as e:
                # argparse exits on errors and on --help; capture instead of dying
                output_text.flush()
                return e.code, output.getvalue() if binary_output else output.getvalue().decode()
            ret = archiver.run(args)
            output_text.flush()
            return ret, output.getvalue() if binary_output else output.getvalue().decode()
        finally:
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
def have_gnutar():
    """Return True if a GNU tar binary is available on PATH.

    Looks for 'GNU tar' in the output of ``tar --version``; other tar
    implementations identify themselves differently there.
    """
    if not shutil.which('tar'):
        return False
    # subprocess.run replaces the older Popen + communicate() dance
    result = subprocess.run(['tar', '--version'], stdout=subprocess.PIPE)
    return b'GNU tar' in result.stdout
# Probe for a frozen 'borg.exe' binary: if it can run 'borg help', the test
# suite is additionally parametrized against the binary; otherwise only the
# in-tree Python code is tested.
try:
    exec_cmd('help', exe='borg.exe', fork=True)
    BORG_EXES = ['python', 'binary', ]
except FileNotFoundError:
    BORG_EXES = ['python', ]
@pytest.fixture(params=BORG_EXES)
def cmd(request):
    """Fixture returning a function that runs borg in a subprocess.

    Parametrized over BORG_EXES, so each test using it runs once per
    available executable flavor ('python' and, if present, 'binary').
    """
    if request.param == 'python':
        exe = None  # exec_cmd falls back to `sys.executable -m borg.archiver`
    elif request.param == 'binary':
        exe = 'borg.exe'
    else:
        raise ValueError("param must be 'python' or 'binary'")

    def exec_fn(*args, **kw):
        return exec_cmd(*args, exe=exe, fork=True, **kw)
    return exec_fn
def test_return_codes(cmd, tmpdir):
    """Exit codes: success (0), warning (1, unmatched extract pattern),
    error (2, creating an archive name that already exists)."""
    repo = tmpdir.mkdir('repo')
    input = tmpdir.mkdir('input')
    output = tmpdir.mkdir('output')
    input.join('test_file').write('content')
    rc, out = cmd('init', '--encryption=none', '%s' % str(repo))
    assert rc == EXIT_SUCCESS
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_SUCCESS
    with changedir(str(output)):
        rc, out = cmd('extract', '%s::archive' % repo)
        assert rc == EXIT_SUCCESS
    # extracting a pattern that matches nothing is only a warning
    rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match')
    assert rc == EXIT_WARNING
    # creating an archive under an already-used name is an error
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_ERROR
# Mount point of a deliberately tiny filesystem used for out-of-space testing.
DF_MOUNT = '/tmp/borg-mount'


@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT)
def test_disk_full(cmd):
    """Stress test: repeatedly fill the tiny filesystem until ENOSPC while
    backing up, then verify the repository survives (list / check --repair)."""
    def make_files(dir, count, size, rnd=True):
        # (re)populate `dir` with `count` files of `size` random bytes;
        # with rnd=True, count and size are randomized up to those maxima
        shutil.rmtree(dir, ignore_errors=True)
        os.mkdir(dir)
        if rnd:
            count = random.randint(1, count)
            if size > 1:
                size = random.randint(1, size)
        for i in range(count):
            fn = os.path.join(dir, "file%03d" % i)
            with open(fn, 'wb') as f:
                data = os.urandom(size)
                f.write(data)

    with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'):
        mount = DF_MOUNT
        assert os.path.exists(mount)
        repo = os.path.join(mount, 'repo')
        input = os.path.join(mount, 'input')
        reserve = os.path.join(mount, 'reserve')
        for j in range(100):
            shutil.rmtree(repo, ignore_errors=True)
            shutil.rmtree(input, ignore_errors=True)
            # keep some space (and inodes) in reserve that we can free up later
            make_files(reserve, 80, 100000, rnd=False)
            rc, out = cmd('init', repo)
            if rc != EXIT_SUCCESS:
                print('init', rc, out)
            assert rc == EXIT_SUCCESS
            try:
                success, i = True, 0
                while success:
                    i += 1
                    try:
                        make_files(input, 20, 200000)
                    except OSError as err:
                        if err.errno == errno.ENOSPC:
                            # already out of space before we could even create input
                            break
                        raise
                    try:
                        rc, out = cmd('create', '%s::test%03d' % (repo, i), input)
                        success = rc == EXIT_SUCCESS
                        if not success:
                            print('create', rc, out)
                    finally:
                        # make sure the repo is not left locked by a failed create
                        shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True)
                        os.remove(os.path.join(repo, 'lock.roster'))
            finally:
                # free the reserved space so borg can work normally again
                shutil.rmtree(reserve, ignore_errors=True)
            rc, out = cmd('list', repo)
            if rc != EXIT_SUCCESS:
                print('list', rc, out)
            rc, out = cmd('check', '--repair', repo)
            if rc != EXIT_SUCCESS:
                print('check', rc, out)
            assert rc == EXIT_SUCCESS
class ArchiverTestCaseBase(BaseTestCase):
    """Base class for archiver tests: provides a temp repository/input/output
    layout, a cmd() helper around exec_cmd(), and test-tree builders."""
    EXE = None            # overridden by binary-mode subclasses (path to borg.exe)
    FORK_DEFAULT = False  # run borg in-process by default
    prefix = ''           # prepended to repository_path, e.g. an ssh:// prefix

    def setUp(self):
        # bypass interactive safety confirmations in borg
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests'
        # idiom fix: conditional expression instead of the `and/or` trick
        self.archiver = Archiver() if not self.FORK_DEFAULT else None
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        self.patterns_file_path = os.path.join(self.tmpdir, 'patterns')
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.chmod(self.input_path, 0o777)  # avoid troubles with fakeroot / FUSE
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        with open(self.patterns_file_path, 'wb') as fd:
            fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        os.chdir(self.tmpdir)

    def tearDown(self):
        os.chdir(self._old_wd)
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        # reset the logging machinery tests may have reconfigured
        setup_logging()

    def cmd(self, *args, **kw):
        """Run borg with *args*, assert its exit code, and return its output
        with the pure-python msgpack warning filtered out.

        Keyword-only extras: exit_code (expected rc, default 0), fork
        (override FORK_DEFAULT), binary_output (bytes instead of str).
        """
        exit_code = kw.pop('exit_code', 0)
        fork = kw.pop('fork', None)
        # note: get(), not pop() - binary_output is also passed on to exec_cmd
        binary_output = kw.get('binary_output', False)
        if fork is None:
            fork = self.FORK_DEFAULT
        ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw)
        if ret != exit_code:
            print(output)
        self.assert_equal(ret, exit_code)
        # strip the pure-python msgpack warning so assertions on output are stable
        pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING
        empty = b'' if binary_output else ''
        output = empty.join(line for line in output.splitlines(keepends=True)
                            if pp_msg not in line)
        return output

    def create_src_archive(self, name):
        """Create archive *name* from the borg source tree (a large real tree)."""
        self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir)

    def open_archive(self, name):
        """Open archive *name* directly via the library API; returns (archive, repository)."""
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, name)
        return archive, repository

    def open_repository(self):
        return Repository(self.repository_path, exclusive=True)

    def create_regular_file(self, name, size=0, contents=None):
        """Create input/<name> with either *size* 'X' bytes or the given *contents*."""
        assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match'
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)

    def create_test_files(self):
        """Populate input/ with one of every supported file type.

        Returns True if device nodes / chown succeeded (i.e. running as
        (fake)root), False otherwise.
        """
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # set mode to setuid to check that it survives backup/restore
        os.chmod('input/file1', 0o4755)
        # hardlink
        if are_hardlinks_supported():
            os.link(os.path.join(self.input_path, 'file1'),
                    os.path.join(self.input_path, 'hardlink'))
        # symlink
        if are_symlinks_supported():
            os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        self.create_regular_file('fusexattr', size=1)
        if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
            fn = os.fsencode(os.path.join(self.input_path, 'fusexattr'))
            xattr.setxattr(fn, b'user.foo', b'bar')
            xattr.setxattr(fn, b'user.empty', b'')
        # FIFO node
        if are_fifos_supported():
            os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
        try:
            # block and char device and operations requiring (fake)root
            os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
            os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
            # read-only directory
            os.chmod('input/dir2', 0o555)
            os.chown('input/file1', 100, 200)
            have_root = True
        except PermissionError:
            have_root = False
        except OSError as e:
            # e.g. mknod refused inside some containers / filesystems
            if e.errno not in (errno.EINVAL, errno.ENOSYS):
                raise
            have_root = False
        time.sleep(1)  # "empty" must have newer timestamp than other files
        self.create_regular_file('empty', size=0)
        return have_root
class ArchiverTestCase(ArchiverTestCaseBase):
# skip-decorator for tests that need working hardlinks on the test filesystem
requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_basic_functionality(self):
    """End-to-end smoke test: init, create (twice), extract, list, info.

    Backs up one of every supported file type, restores and compares the
    trees, and checks that `info` is stable across a cache rebuild.
    """
    have_root = self.create_test_files()
    # fork=True so --show-version / --show-rc output of the real entry point is captured
    output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True)
    self.assert_in('borgbackup version', output)
    self.assert_in('terminating with success status, rc 0', output)
    self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input')
    output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input')
    self.assert_in('Archive name: test.2', output)
    self.assert_in('This archive: ', output)
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    list_output = self.cmd('list', '--short', self.repository_location)
    self.assert_in('test', list_output)
    self.assert_in('test.2', list_output)
    expected = [
        'input',
        'input/bdev',
        'input/cdev',
        'input/dir2',
        'input/dir2/file2',
        'input/empty',
        'input/file1',
        'input/flagfile',
    ]
    if are_fifos_supported():
        expected.append('input/fifo1')
    if are_symlinks_supported():
        expected.append('input/link1')
    if are_hardlinks_supported():
        expected.append('input/hardlink')
    if not have_root:
        # device nodes could not be created without (fake)root
        expected.remove('input/bdev')
        expected.remove('input/cdev')
    if has_lchflags:
        # flagfile carries UF_NODUMP and was excluded via --exclude-nodump;
        # remove it from input too so input and output compare equal below
        expected.remove('input/flagfile')
        os.remove(os.path.join('input', 'flagfile'))
    list_output = self.cmd('list', '--short', self.repository_location + '::test')
    for name in expected:
        self.assert_in(name, list_output)
    self.assert_dirs_equal('input', 'output/input')
    info_output = self.cmd('info', self.repository_location + '::test')
    item_count = 4 if has_lchflags else 5  # one file fewer when flagfile was excluded
    self.assert_in('Number of files: %d' % item_count, info_output)
    # rebuild the cache from scratch and re-run info
    shutil.rmtree(self.cache_path)
    info_output2 = self.cmd('info', self.repository_location + '::test')

    def filter(output):
        # keep only the lines that must be identical across both info runs
        # NOTE(review): local name shadows the builtin `filter`
        prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:',
                    'All archives:', 'Chunk index:', ]
        result = []
        for line in output.splitlines():
            for prefix in prefixes:
                if line.startswith(prefix):
                    result.append(line)
        return '\n'.join(result)

    self.assert_equal(filter(info_output), filter(info_output2))
@requires_hardlinks
def test_create_duplicate_root(self):
    """Giving the same root path twice must not duplicate items in the archive."""
    path_a = os.path.join(self.input_path, 'a')
    path_b = os.path.join(self.input_path, 'b')
    os.mkdir(path_a)
    os.mkdir(path_b)
    hl_a = os.path.join(path_a, 'hardlink')
    hl_b = os.path.join(path_b, 'hardlink')
    self.create_regular_file(hl_a, contents=b'123456')
    os.link(hl_a, hl_b)
    self.cmd('init', '--encryption=none', self.repository_location)
    # deliberately pass 'input' twice
    self.cmd('create', self.repository_location + '::test', 'input', 'input')
    archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
    paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
    # every filesystem item appears exactly once
    assert sorted(paths) == ['input', 'input/a', 'input/a/hardlink', 'input/b', 'input/b/hardlink']
def test_init_parent_dirs(self):
    """init fails on a missing parent path unless --make-parent-dirs is given."""
    parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2')
    repository_path = os.path.join(parent_path, 'repository')
    repository_location = self.prefix + repository_path
    with pytest.raises(Repository.ParentPathDoesNotExist):
        # normal borg init does not create missing parent dirs
        self.cmd('init', '--encryption=none', repository_location)
    # with --make-parent-dirs, the missing parents are created
    self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location)
    assert os.path.exists(parent_path)
def test_unix_socket(self):
    """Unix domain sockets are silently skipped on create and not extracted."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    try:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.bind(os.path.join(self.input_path, 'unix-socket'))
    except PermissionError as err:
        if err.errno == errno.EPERM:
            pytest.skip('unix sockets disabled or not supported')
        elif err.errno == errno.EACCES:
            pytest.skip('permission denied to create unix sockets')
    self.cmd('create', self.repository_location + '::test', 'input')
    sock.close()
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
        # the socket must not have been stored in the archive
        assert not os.path.exists('input/unix-socket')
@pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported')
def test_symlink_extract(self):
    """A symlink's target string must survive backup and extract unchanged."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
        assert os.readlink('input/link1') == 'somewhere'
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
def test_atime(self):
    """With --atime, access times are stored and restored on extract."""
    def has_noatime(some_file):
        # detect whether O_NOATIME actually works here: read the file with
        # flags_noatime and check the atime did not move
        atime_before = os.stat(some_file).st_atime_ns
        try:
            with open(os.open(some_file, flags_noatime)) as file:
                file.read()
        except PermissionError:
            return False
        else:
            atime_after = os.stat(some_file).st_atime_ns
            noatime_used = flags_noatime != flags_normal
            return noatime_used and atime_before == atime_after

    self.create_test_files()
    atime, mtime = 123456780, 234567890
    have_noatime = has_noatime('input/file1')
    os.utime('input/file1', (atime, mtime))
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', '--atime', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    sti = os.stat('input/file1')
    sto = os.stat('output/input/file1')
    assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
    if have_noatime:
        # borg could read the input without touching its atime, so both sides match
        assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9
    else:
        # input atime was disturbed by reading; only the restored copy can be checked
        assert sto.st_atime_ns == atime * 1e9
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
@pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
def test_birthtime(self):
    """File creation time (birthtime) must be stored and restored on extract."""
    self.create_test_files()
    birthtime, mtime, atime = 946598400, 946684800, 946771200
    # NOTE(review): the double utime presumably first pushes the birthtime
    # down to `birthtime` (setting mtime below the current birthtime), then
    # sets the real mtime - confirm against the platform's semantics
    os.utime('input/file1', (atime, birthtime))
    os.utime('input/file1', (atime, mtime))
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    sti = os.stat('input/file1')
    sto = os.stat('output/input/file1')
    assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9
    assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
@pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
@pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
def test_nobirthtime(self):
    """With --nobirthtime, the extracted file's birthtime is NOT the input's:
    it equals the restored mtime instead."""
    self.create_test_files()
    birthtime, mtime, atime = 946598400, 946684800, 946771200
    os.utime('input/file1', (atime, birthtime))
    os.utime('input/file1', (atime, mtime))
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    sti = os.stat('input/file1')
    sto = os.stat('output/input/file1')
    # input keeps its original birthtime ...
    assert int(sti.st_birthtime * 1e9) == birthtime * 1e9
    # ... but the restored file's birthtime follows its mtime, since none was stored
    assert int(sto.st_birthtime * 1e9) == mtime * 1e9
    assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
def _extract_repository_id(self, path):
    """Return the id of the repository located at *path*.

    Fix: the original ignored *path* and always opened
    self.repository_path, leaving the parameter dead.
    """
    with Repository(path) as repository:
        return repository.id
def _set_repository_id(self, path, id):
    """Overwrite the repository id in the config of the repo at *path*.

    Returns the id that the repository at self.repository_path reports
    afterwards.
    NOTE(review): the read-back uses self.repository_path, not *path* -
    callers in these tests always pass their own repo path so it matches,
    but confirm before reusing with a different *path*.
    """
    config = ConfigParser(interpolation=None)
    config.read(os.path.join(path, 'config'))
    config.set('repository', 'id', bin_to_hex(id))
    with open(os.path.join(path, 'config'), 'w') as fd:
        config.write(fd)
    with Repository(self.repository_path) as repository:
        return repository.id
def test_sparse_file(self):
    """If the filesystem supports sparse files, backing up a sparse file and
    extracting it with --sparse must reproduce contents AND sparseness."""
    def is_sparse(fn, total_size, hole_size):
        st = os.stat(fn)
        assert st.st_size == total_size
        sparse = True
        # a file whose allocated blocks cover its size is not sparse
        if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
            sparse = False
        if sparse and has_seek_hole:
            with open(fn, 'rb') as fd:
                # only check if the first hole is as expected, because the 2nd hole check
                # is problematic on xfs due to its "dynamic speculative EOF preallocation"
                try:
                    if fd.seek(0, os.SEEK_HOLE) != 0:
                        sparse = False
                    if fd.seek(0, os.SEEK_DATA) != hole_size:
                        sparse = False
                except OSError:
                    # OS/FS does not really support SEEK_HOLE/SEEK_DATA
                    sparse = False
        return sparse

    filename = os.path.join(self.input_path, 'sparse')
    content = b'foobar'
    hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
    total_size = hole_size + len(content) + hole_size
    with open(filename, 'wb') as fd:
        # create a file that has a hole at the beginning and end (if the
        # OS and filesystem supports sparse files)
        fd.seek(hole_size, 1)
        fd.write(content)
        fd.seek(hole_size, 1)
        pos = fd.tell()
        fd.truncate(pos)
    # we first check if we could create a sparse input file:
    sparse_support = is_sparse(filename, total_size, hole_size)
    if sparse_support:
        # we could create a sparse input file, so creating a backup of it and
        # extracting it again (as sparse) should also work:
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir(self.output_path):
            self.cmd('extract', '--sparse', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        filename = os.path.join(self.output_path, 'input', 'sparse')
        with open(filename, 'rb') as fd:
            # check if file contents are as expected
            self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
            self.assert_equal(fd.read(len(content)), content)
            self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
        self.assert_true(is_sparse(filename, total_size, hole_size))
def test_unusual_filenames(self):
filenames = ['normal', 'with some blanks', '(with_parens)', ]
for filename in filenames:
filename = os.path.join(self.input_path, filename)
with open(filename, 'wb'):
pass
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
for filename in filenames:
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename))
assert os.path.exists(os.path.join('output', 'input', filename))
    def test_repository_swap_detection(self):
        """Replacing an encrypted repo with an unencrypted one (same id) must be detected via the cache."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        # simulate an attacker swapping in an unencrypted repo with the same id
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2(self):
        """Swapping an encrypted repo for an unencrypted one (different id) must abort access."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        # replace the encrypted repo with the unencrypted one under the same path
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_no_cache(self):
        """Swap detection must still work when the local cache was deleted (via the security dir)."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        # swap in an unencrypted repo with the original id ...
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        # ... and remove the cache, so only the security dir can detect the swap
        self.cmd('delete', '--cache-only', self.repository_location)
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2_no_cache(self):
        """Like test_repository_swap_detection2, but with both repos' caches deleted beforehand."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted')
        self.cmd('delete', '--cache-only', self.repository_location + '_encrypted')
        # replace the encrypted repo with the unencrypted one under the same path
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_repokey_blank_passphrase(self):
        """A repokey repo with a blank passphrase must be treated like a plaintext repo (swap detected)."""
        # Check that a repokey repo with a blank passphrase is considered like a plaintext repo.
        self.create_test_files()
        # User initializes her repository with her passphrase
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Attacker replaces it with her own repository, which is encrypted but has no passphrase set
        shutil.rmtree(self.repository_path)
        with environment_variable(BORG_PASSPHRASE=''):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # Delete cache & security database, AKA switch to user perspective
        self.cmd('delete', '--cache-only', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        shutil.rmtree(get_security_dir(repository_id))
        with environment_variable(BORG_PASSPHRASE=None):
            # This is the part were the user would be tricked, e.g. she assumes that BORG_PASSPHRASE
            # is set, while it isn't. Previously this raised no warning,
            # since the repository is, technically, encrypted.
            if self.FORK_DEFAULT:
                self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
            else:
                with pytest.raises(Cache.CacheInitAbortedError):
                    self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_move(self):
        """A relocated repo needs one confirmation; afterwards the security dir tracks the new location."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        os.rename(self.repository_path, self.repository_path + '_new')
        # first access after the move needs explicit confirmation
        with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location + '_new')
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location')) as fd:
            location = fd.read()
        assert location == Location(self.repository_location + '_new').canonical_path()
        # Needs no confirmation anymore
        self.cmd('info', self.repository_location + '_new')
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location + '_new')
        shutil.rmtree(security_dir)
        self.cmd('info', self.repository_location + '_new')
        # accessing the repo re-creates the security dir entries
        for file in ('location', 'key-type', 'manifest-timestamp'):
            assert os.path.exists(os.path.join(security_dir, file))
def test_security_dir_compat(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
security_dir = get_security_dir(repository_id)
with open(os.path.join(security_dir, 'location'), 'w') as fd:
fd.write('something outdated')
# This is fine, because the cache still has the correct information. security_dir and cache can disagree
# if older versions are used to confirm a renamed repository.
self.cmd('info', self.repository_location)
    def test_unknown_unencrypted(self):
        """Access to an unencrypted repo unknown to both cache and security dir needs confirmation."""
        self.cmd('init', '--encryption=none', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        # Ok: repository is known
        self.cmd('info', self.repository_location)
        # Ok: repository is still known (through security_dir)
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location)
        # Needs confirmation: cache and security dir both gone (eg. another host or rm -rf ~)
        shutil.rmtree(self.cache_path)
        shutil.rmtree(security_dir)
        if self.FORK_DEFAULT:
            self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.CacheInitAbortedError):
                self.cmd('info', self.repository_location)
        with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location)
    def test_strip_components(self):
        """--strip-components N drops the first N path components on extract (too many -> nothing)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file')
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            # stripping more components than the path has extracts nothing
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(not os.path.exists('file'))
            with self.assert_creates_file('file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            with self.assert_creates_file('dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '1')
            with self.assert_creates_file('input/dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '0')
def _extract_hardlinks_setup(self):
os.mkdir(os.path.join(self.input_path, 'dir1'))
os.mkdir(os.path.join(self.input_path, 'dir1/subdir'))
self.create_regular_file('source', contents=b'123456')
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'abba'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/hardlink'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/subdir/hardlink'))
self.create_regular_file('dir1/source2')
os.link(os.path.join(self.input_path, 'dir1/source2'),
os.path.join(self.input_path, 'dir1/aaaa'))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
@requires_hardlinks
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_mount_hardlinks(self):
self._extract_hardlinks_setup()
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# we need to get rid of permissions checking because fakeroot causes issues with it.
# On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions".
# On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse.
if sys.platform == 'darwin':
ignore_perms = ['-o', 'ignore_permissions,defer_permissions']
else:
ignore_perms = ['-o', 'ignore_permissions']
with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \
changedir(mountpoint):
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert open('subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \
changedir(mountpoint):
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \
changedir(mountpoint):
assert os.stat('input/source').st_nlink == 4
assert os.stat('input/abba').st_nlink == 4
assert os.stat('input/dir1/hardlink').st_nlink == 4
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
@requires_hardlinks
def test_extract_hardlinks1(self):
self._extract_hardlinks_setup()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test')
assert os.stat('input/source').st_nlink == 4
assert os.stat('input/abba').st_nlink == 4
assert os.stat('input/dir1/hardlink').st_nlink == 4
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
@requires_hardlinks
def test_extract_hardlinks2(self):
self._extract_hardlinks_setup()
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
assert os.stat('hardlink').st_nlink == 2
assert os.stat('subdir/hardlink').st_nlink == 2
assert open('subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('aaaa').st_nlink == 2
assert os.stat('source2').st_nlink == 2
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', 'input/dir1')
assert os.stat('input/dir1/hardlink').st_nlink == 2
assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
assert os.stat('input/dir1/aaaa').st_nlink == 2
assert os.stat('input/dir1/source2').st_nlink == 2
    @requires_hardlinks
    def test_extract_hardlinks_twice(self):
        """Regression test for #5603: extracting an archive that contains 'input' twice must restore hardlinks."""
        # setup for #5603
        path_a = os.path.join(self.input_path, 'a')
        path_b = os.path.join(self.input_path, 'b')
        os.mkdir(path_a)
        os.mkdir(path_b)
        hl_a = os.path.join(path_a, 'hardlink')
        hl_b = os.path.join(path_b, 'hardlink')
        self.create_regular_file(hl_a, contents=b'123456')
        os.link(hl_a, hl_b)
        self.cmd('init', '--encryption=none', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')  # give input twice!
        # now test extraction
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            # if issue #5603 happens, extraction gives rc == 1 (triggering AssertionError) and warnings like:
            # input/a/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/a/hardlink'
            # input/b/hardlink: link: [Errno 2] No such file or directory: 'input/a/hardlink' -> 'input/b/hardlink'
            # otherwise, when fixed, the hardlinks should be there and have a link count of 2
            assert os.stat('input/a/hardlink').st_nlink == 2
            assert os.stat('input/b/hardlink').st_nlink == 2
    def test_extract_include_exclude(self):
        """Extract with a path argument, --exclude, and --exclude-from; file4 was excluded at create time."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/file1', )
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
    def test_extract_include_exclude_regex(self):
        """Exclusion patterns in 're:' syntax work at create and extract time, also mixed with fnmatch."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.create_regular_file('file333', size=1024 * 80)

        # Create with regular expression exclusion for file4
        self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
        shutil.rmtree('output/input')

        # Extract with regular expression exclusion
        with changedir('output'):
            self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
        shutil.rmtree('output/input')

        # Combine --exclude with fnmatch and regular expression
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333'])
        shutil.rmtree('output/input')

        # Combine --exclude-from and regular expression exclusion
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1',
                     '--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
    def test_extract_include_exclude_regex_from_file(self):
        """--exclude-from files may mix 're:', 'fm:' and plain fnmatch pattern styles."""
        self.cmd('init', '--encryption=repokey', self.repository_location)

        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.create_regular_file('file333', size=1024 * 80)
        self.create_regular_file('aa:something', size=1024 * 80)

        # Create while excluding using mixed pattern styles
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b're:input/file4$\n')
            fd.write(b'fm:*aa:*thing\n')
        self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
        shutil.rmtree('output/input')

        # Exclude using regular expression
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b're:file3+\n')
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
        shutil.rmtree('output/input')

        # Mixed exclude pattern styles
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b're:file(\\d)\\1\\1$\n')
            fd.write(b'fm:nothingwillmatchthis\n')
            fd.write(b'*/file1\n')
            fd.write(b're:file2$\n')
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
    def test_extract_with_pattern(self):
        """Positional extract arguments accept pattern syntax ('re:', 'fm:') and combine with --exclude."""
        self.cmd("init", '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("file2", size=1024 * 80)
        self.create_regular_file("file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)
        self.create_regular_file("file333", size=1024 * 80)

        self.cmd("create", self.repository_location + "::test", "input")

        # Extract everything with regular expression
        with changedir("output"):
            self.cmd("extract", self.repository_location + "::test", "re:.*")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"])
        shutil.rmtree("output/input")

        # Extract with pattern while also excluding files
        with changedir("output"):
            self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"])
        shutil.rmtree("output/input")

        # Combine --exclude with pattern for extraction
        with changedir("output"):
            self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$")
        self.assert_equal(sorted(os.listdir("output/input")), ["file2"])
        shutil.rmtree("output/input")

        # Multiple pattern
        with changedir("output"):
            self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2")
        self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"])
    def test_extract_list_output(self):
        """extract only lists the processed files when --list is given (with or without --info)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file', size=1024 * 80)
        self.cmd('create', self.repository_location + '::test', 'input')

        with changedir('output'):
            output = self.cmd('extract', self.repository_location + '::test')
        self.assert_not_in("input/file", output)
        shutil.rmtree('output/input')

        with changedir('output'):
            output = self.cmd('extract', '--info', self.repository_location + '::test')
        self.assert_not_in("input/file", output)
        shutil.rmtree('output/input')

        with changedir('output'):
            output = self.cmd('extract', '--list', self.repository_location + '::test')
        self.assert_in("input/file", output)
        shutil.rmtree('output/input')

        with changedir('output'):
            output = self.cmd('extract', '--list', '--info', self.repository_location + '::test')
        self.assert_in("input/file", output)
def test_extract_progress(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('file', size=1024 * 80)
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
output = self.cmd('extract', self.repository_location + '::test', '--progress')
assert 'Extracting:' in output
    def _create_test_caches(self):
        """Create an input tree with one valid (cache1), one invalid (cache2) and one hardlinked (cache3) CACHEDIR.TAG."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('cache1/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        # cache2's tag file has the right name but an invalid signature
        self.create_regular_file('cache2/%s' % CACHE_TAG_NAME,
                                 contents=b'invalid signature')
        os.mkdir('input/cache3')
        if are_hardlinks_supported():
            os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME)
        else:
            self.create_regular_file('cache3/%s' % CACHE_TAG_NAME,
                                     contents=CACHE_TAG_CONTENTS + b' extra stuff')
    def test_create_stdin(self):
        """'create ... -' archives stdin as a file named 'stdin' owned by uid/gid 0."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        input_data = b'\x00foo\n\nbar\n   \n'
        self.cmd('create', self.repository_location + '::test', '-', input=input_data)
        item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
        assert item['uid'] == 0
        assert item['gid'] == 0
        assert item['size'] == len(input_data)
        assert item['path'] == 'stdin'
        extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True)
        assert extracted_data == input_data
    def test_create_content_from_command(self):
        """--content-from-command archives a command's stdout under the --stdin-name path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        input_data = 'some test content'
        name = 'a/b/c'
        self.cmd('create', '--stdin-name', name, '--content-from-command',
                 self.repository_location + '::test', '--', 'echo', input_data)
        item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
        assert item['uid'] == 0
        assert item['gid'] == 0
        assert item['size'] == len(input_data) + 1  # `echo` adds newline
        assert item['path'] == name
        extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test')
        assert extracted_data == input_data + '\n'
    def test_create_content_from_command_with_failed_command(self):
        """If the content command fails, create exits with rc 2 and no archive is left behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--content-from-command', self.repository_location + '::test',
                          '--', 'sh', '-c', 'exit 73;', exit_code=2)
        assert output.endswith("Command 'sh' exited with status 73\n")
        archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
        assert archive_list['archives'] == []
    def test_create_content_from_command_missing_command(self):
        """--content-from-command without a command is rejected with rc 2."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--content-from-command', self.repository_location + '::test', exit_code=2)
        assert output.endswith('No command given.\n')
    def test_create_paths_from_stdin(self):
        """--paths-from-stdin reads NUL-delimited paths (with --paths-delimiter '\\0') and archives exactly those."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("dir1/file2", size=1024 * 80)
        self.create_regular_file("dir1/file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)

        input_data = b'input/file1\0input/dir1\0input/file4'
        self.cmd('create', '--paths-from-stdin', '--paths-delimiter', '\\0',
                 self.repository_location + '::test', input=input_data)
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        assert paths == ['input/file1', 'input/dir1', 'input/file4']
    def test_create_paths_from_command(self):
        """--paths-from-command archives exactly the newline-separated paths printed by the command."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file("file1", size=1024 * 80)
        self.create_regular_file("file2", size=1024 * 80)
        self.create_regular_file("file3", size=1024 * 80)
        self.create_regular_file("file4", size=1024 * 80)

        input_data = 'input/file1\ninput/file2\ninput/file3'
        self.cmd('create', '--paths-from-command',
                 self.repository_location + '::test', '--', 'echo', input_data)
        archive_list = self.cmd('list', '--json-lines', self.repository_location + '::test')
        paths = [json.loads(line)['path'] for line in archive_list.split('\n') if line]
        assert paths == ['input/file1', 'input/file2', 'input/file3']
    def test_create_paths_from_command_with_failed_command(self):
        """If the paths command fails, create exits with rc 2 and no archive is left behind."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--paths-from-command', self.repository_location + '::test',
                          '--', 'sh', '-c', 'exit 73;', exit_code=2)
        assert output.endswith("Command 'sh' exited with status 73\n")
        archive_list = json.loads(self.cmd('list', '--json', self.repository_location))
        assert archive_list['archives'] == []
    def test_create_paths_from_command_missing_command(self):
        """--paths-from-command without a command is rejected with rc 2."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--paths-from-command', self.repository_location + '::test', exit_code=2)
        assert output.endswith('No command given.\n')
    def test_create_without_root(self):
        """create without any path argument is an error (rc 2)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', exit_code=2)
    def test_create_pattern_root(self):
        """A root ('R') pattern can supply the backup root instead of a positional path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
    def test_create_pattern(self):
        """Include ('+') patterns override a later exclude ('-') pattern; excluded files are listed as 'x'."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=+input/file_important', '--pattern=-input/file*',
                          self.repository_location + '::test', 'input')
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
    def test_create_pattern_file(self):
        """--patterns-from combines with command-line --pattern options; excludes show as 'x' in the listing."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('otherfile', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
                          self.repository_location + '::test', 'input')
        self.assert_in("A input/file_important", output)
        self.assert_in('x input/file1', output)
        self.assert_in('x input/file2', output)
        self.assert_in('x input/otherfile', output)
    def test_create_pattern_exclude_folder_but_recurse(self):
        """A '-' exclude still recurses into the folder, so an inner '+' include can rescue files."""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        self.assert_in('x input/x/a/foo_a', output)
        self.assert_in("A input/x/b/foo_b", output)
        self.assert_in('A input/y/foo_y', output)
    def test_create_pattern_exclude_folder_no_recurse(self):
        """A '!' exclude does not recurse, so even a matching '+' include inside cannot rescue files."""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/b\n! input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        self.create_regular_file('y/foo_y', size=1024 * 80)
        output = self.cmd('create', '-v', '--list',
                          '--patterns-from=' + self.patterns_file_path2,
                          self.repository_location + '::test', 'input')
        self.assert_not_in('input/x/a/foo_a', output)
        self.assert_not_in('input/x/a', output)
        self.assert_in('A input/y/foo_y', output)
    def test_create_pattern_intermediate_folders_first(self):
        """Intermediate directories of included paths are archived before their contents."""
        self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
        with open(self.patterns_file_path2, 'wb') as fd:
            fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n')
        self.cmd('init', '--encryption=repokey', self.repository_location)

        self.create_regular_file('x/a/foo_a', size=1024 * 80)
        self.create_regular_file('x/b/foo_b', size=1024 * 80)
        with changedir('input'):
            self.cmd('create', '--patterns-from=' + self.patterns_file_path2,
                     self.repository_location + '::test', '.')

        # list the archive and verify that the "intermediate" folders appear before
        # their contents
        out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test')
        out_list = out.splitlines()

        self.assert_in('d x/a', out_list)
        self.assert_in('d x/b', out_list)

        assert out_list.index('d x/a') < out_list.index('- x/a/foo_a')
        assert out_list.index('d x/b') < out_list.index('- x/b/foo_b')
    def test_create_no_cache_sync(self):
        """create --no-cache-sync yields the same cache stats as info and a repo that passes check."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('delete', '--cache-only', self.repository_location)
        create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input',
                                          '--json', '--error'))  # ignore experimental warning
        info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json'))
        create_stats = create_json['cache']['stats']
        info_stats = info_json['cache']['stats']
        assert create_stats == info_stats
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input')
        self.cmd('info', self.repository_location)
        self.cmd('check', self.repository_location)
    def test_extract_pattern_opt(self):
        """extract --pattern: a '+' include overrides a later '-' exclude, like at create time."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file_important', size=1024 * 80)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract',
                     '--pattern=+input/file_important', '--pattern=-input/file*',
                     self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file_important'])
    def _assert_test_caches(self):
        """Extract ::test and verify a cache-tagged dir kept only its CACHEDIR.TAG (helper for the cache tests)."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
        # only the tag file itself survives inside the tagged directory
        self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME])
    def test_exclude_caches(self):
        """create --exclude-caches must skip CACHEDIR.TAG-tagged directory contents."""
        self._create_test_caches()
        self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
        self._assert_test_caches()
    def test_recreate_exclude_caches(self):
        """recreate --exclude-caches must remove cache-tagged contents from an existing archive."""
        self._create_test_caches()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-caches', self.repository_location + '::test')
        self._assert_test_caches()
    def _create_test_tagged(self):
        """Set up a repo plus input dirs carrying tag files for the --exclude-if-present tests."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('tagged1/.NOBACKUP')
        self.create_regular_file('tagged2/00-NOBACKUP')
        # tag file nested one level deeper: must NOT tag tagged3 itself
        self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024)
    def _assert_test_tagged(self):
        """Extract ::test and verify only the untagged file1 remains (helper for the tagged tests)."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
    def test_exclude_tagged(self):
        """create --exclude-if-present must skip directories containing a given tag file."""
        self._create_test_tagged()
        self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input')
        self._assert_test_tagged()
    def test_recreate_exclude_tagged(self):
        """recreate --exclude-if-present must drop tagged directories from an existing archive."""
        self._create_test_tagged()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP',
                 self.repository_location + '::test')
        self._assert_test_tagged()
    def _create_test_keep_tagged(self):
        """Set up dirs tagged via .NOBACKUP1/.NOBACKUP2/CACHEDIR.TAG (and one with all three) for --keep-exclude-tags tests."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file0', size=1024)
        self.create_regular_file('tagged1/.NOBACKUP1')
        self.create_regular_file('tagged1/file1', size=1024)
        self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('tagged2/file2', size=1024)
        # valid CACHEDIR.TAG signature, trailing payload is allowed by the spec
        self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('tagged3/file3', size=1024)
        self.create_regular_file('taggedall/.NOBACKUP1')
        self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024)
        self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME,
                                 contents=CACHE_TAG_CONTENTS + b' extra stuff')
        self.create_regular_file('taggedall/file4', size=1024)
    def _assert_test_keep_tagged(self):
        """Extract ::test and verify tagged dirs were kept but contain only their tag files (helper)."""
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall'])
        self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1'])
        self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2'])
        self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME])
        self.assert_equal(sorted(os.listdir('output/input/taggedall')),
                          ['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ])
    def test_exclude_keep_tagged(self):
        """create --keep-exclude-tags must archive the tag files themselves while excluding the rest."""
        self._create_test_keep_tagged()
        self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
                 '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input')
        self._assert_test_keep_tagged()
    def test_recreate_exclude_keep_tagged(self):
        """recreate --keep-exclude-tags must keep tag files while excluding tagged dir contents."""
        self._create_test_keep_tagged()
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
                 '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
        self._assert_test_keep_tagged()
    @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    def test_recreate_hardlinked_tags(self):  # test for issue #4911
        """recreate must not crash when a CACHEDIR.TAG is a hardlink to an already-seen file."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_regular_file('file1', contents=CACHE_TAG_CONTENTS)  # "wrong" filename, but correct tag contents
        os.mkdir(os.path.join(self.input_path, 'subdir'))  # to make sure the tag is encountered *after* file1
        os.link(os.path.join(self.input_path, 'file1'),
                os.path.join(self.input_path, 'subdir', CACHE_TAG_NAME))  # correct tag name, hardlink to file1
        self.cmd('create', self.repository_location + '::test', 'input')
        # in the "test" archive, we now have, in this order:
        # - a regular file item for "file1"
        # - a hardlink item for "CACHEDIR.TAG" referring back to file1 for its contents
        self.cmd('recreate', '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
        # if issue #4911 is present, the recreate will crash with a KeyError for "input/file1"
    @pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2')
    def test_extract_capabilities(self):
        """extract must restore the security.capability xattr even though chown() normally clears it."""
        fchown = os.fchown

        # We need to manually patch chown to get the behaviour Linux has, since fakeroot does not
        # accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them.
        def patched_fchown(fd, uid, gid):
            # emulate the kernel: chown drops the capability xattr ...
            xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False)
            # ... then perform the real (fakeroot-intercepted) chown
            fchown(fd, uid, gid)

        # The capability descriptor used here is valid and taken from a /usr/bin/ping
        capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
        self.create_regular_file('file')
        xattr.setxattr(b'input/file', b'security.capability', capabilities)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            with patch.object(os, 'fchown', patched_fchown):
                self.cmd('extract', self.repository_location + '::test')
            # capabilities must have been re-applied after the chown that cleared them
            assert xattr.getxattr(b'input/file', b'security.capability') == capabilities
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of'
'fakeroot')
def test_extract_xattrs_errors(self):
def patched_setxattr_E2BIG(*args, **kwargs):
raise OSError(errno.E2BIG, 'E2BIG')
def patched_setxattr_ENOTSUP(*args, **kwargs):
raise OSError(errno.ENOTSUP, 'ENOTSUP')
def patched_setxattr_EACCES(*args, **kwargs):
raise OSError(errno.EACCES, 'EACCES')
self.create_regular_file('file')
xattr.setxattr(b'input/file', b'user.attribute', b'value')
self.cmd('init', self.repository_location, '-e' 'none')
self.cmd('create', self.repository_location + '::test', 'input')
with changedir('output'):
input_abspath = os.path.abspath('input/file')
with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: too big for this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: xattrs not supported on this filesystem\n' in out
os.remove(input_abspath)
with patch.object(xattr, 'setxattr', patched_setxattr_EACCES):
out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
assert ': when setting extended attribute user.attribute: Permission denied\n' in out
assert os.path.isfile(input_abspath)
    def test_path_normalization(self):
        """Archived paths must be normalized: no '..' segments survive in the stored item paths."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir1/dir2/file', size=1024 * 80)
        with changedir('input/dir1/dir2'):
            # deliberately convoluted relative path containing '..' and repeated segments
            self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
        output = self.cmd('list', self.repository_location + '::test')
        self.assert_not_in('..', output)
        self.assert_in(' input/dir1/dir2/file', output)
    def test_exclude_normalization(self):
        """Exclude patterns must match regardless of './' prefixes in pattern or source path."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        # plain pattern against '.' source
        with changedir('input'):
            self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test1')
        self.assert_equal(sorted(os.listdir('output')), ['file2'])
        # './'-prefixed pattern must normalize to the same exclusion
        with changedir('input'):
            self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test2')
        self.assert_equal(sorted(os.listdir('output')), ['file2'])
        # embedded '/./' in the pattern must also normalize away
        self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test3')
        self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
    def test_repeated_files(self):
        """Passing the same source path twice to create must not error out."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', 'input')
    def test_overwrite(self):
        """extract may overwrite existing files/empty dirs, but must fail on a non-empty dir in the way."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Overwriting regular files and directories should be supported
        os.mkdir('output/input')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/dir2')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        self.assert_dirs_equal('input', 'output/input')
        # But non-empty dirs should fail
        os.unlink('output/input/file1')
        os.mkdir('output/input/file1')
        os.mkdir('output/input/file1/dir')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', exit_code=1)
    def test_rename(self):
        """rename must update the manifest and leave archives extractable under their new names."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('rename', self.repository_location + '::test', 'test.3')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('rename', self.repository_location + '::test.2', 'test.4')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
        # Make sure both archives have been renamed
        with Repository(self.repository_path) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        self.assert_equal(len(manifest.archives), 2)
        self.assert_in('test.3', manifest.archives)
        self.assert_in('test.4', manifest.archives)
    def test_info(self):
        """info must work on a repository, on an archive, and with --first N archive selection."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_repo = self.cmd('info', self.repository_location)
        assert 'All archives:' in info_repo
        info_archive = self.cmd('info', self.repository_location + '::test')
        assert 'Archive name: test\n' in info_archive
        # --first 1 on the repo must select the same (only) archive
        info_archive = self.cmd('info', '--first', '1', self.repository_location)
        assert 'Archive name: test\n' in info_archive
    def test_info_json(self):
        """info --json must emit well-formed repository, encryption, cache and archive metadata."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_repo = json.loads(self.cmd('info', '--json', self.repository_location))
        repository = info_repo['repository']
        assert len(repository['id']) == 64  # hex repo id
        assert 'last_modified' in repository
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert info_repo['encryption']['mode'] == 'repokey'
        # 'keyfile' path only present for keyfile-mode repos
        assert 'keyfile' not in info_repo['encryption']
        cache = info_repo['cache']
        stats = cache['stats']
        assert all(isinstance(o, int) for o in stats.values())
        assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size'))
        info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test'))
        # repo/cache sections must be identical whether queried via repo or via archive
        assert info_repo['repository'] == info_archive['repository']
        assert info_repo['cache'] == info_archive['cache']
        archives = info_archive['archives']
        assert len(archives) == 1
        archive = archives[0]
        assert archive['name'] == 'test'
        assert isinstance(archive['command_line'], list)
        assert isinstance(archive['duration'], float)
        assert len(archive['id']) == 64  # hex archive id
        assert 'stats' in archive
        assert datetime.strptime(archive['start'], ISO_FORMAT)
        assert datetime.strptime(archive['end'], ISO_FORMAT)
    def test_comment(self):
        """Archive comments: set at create time, then added/modified/deleted/preserved via recreate."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', 'input')
        self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input')
        self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input')
        self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input')
        assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1')
        assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2')

        self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment')
        self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment')
        self.cmd('recreate', self.repository_location + '::test3', '--comment', '')
        # recreate without --comment (extra positional only) must keep the existing comment
        self.cmd('recreate', self.repository_location + '::test4', '12345')
        assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1')
        assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2')
        assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3')
        assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4')
    def test_delete(self):
        """delete must support --prefix, --last, single archives, and report stats with --stats."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        self.cmd('create', self.repository_location + '::test.3', 'input')
        self.cmd('create', self.repository_location + '::another_test.1', 'input')
        self.cmd('create', self.repository_location + '::another_test.2', 'input')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        self.cmd('delete', '--prefix', 'another_', self.repository_location)  # removes another_test.*
        self.cmd('delete', '--last', '1', self.repository_location)  # removes test.3
        self.cmd('delete', self.repository_location + '::test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
        output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
        self.assert_in('Deleted data:', output)
        # Make sure all data except the manifest has been deleted
        with Repository(self.repository_path) as repository:
            self.assert_equal(len(repository), 1)
    def test_delete_multiple(self):
        """delete must accept multiple archive names in one invocation."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', 'input')
        self.cmd('create', self.repository_location + '::test2', 'input')
        self.cmd('create', self.repository_location + '::test3', 'input')
        # two archives named in one call
        self.cmd('delete', self.repository_location + '::test1', 'test2')
        self.cmd('extract', '--dry-run', self.repository_location + '::test3')
        # archive name given as a separate positional (no '::' in the location)
        self.cmd('delete', self.repository_location, 'test3')
        assert not self.cmd('list', self.repository_location)
    def test_delete_repo(self):
        """Whole-repo delete must honor the BORG_DELETE_I_KNOW_WHAT_I_AM_DOING confirmation env var."""
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('dir2/file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('create', self.repository_location + '::test.2', 'input')
        # confirmation denied -> command fails, repo stays
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no'
        self.cmd('delete', self.repository_location, exit_code=2)
        assert os.path.exists(self.repository_path)
        # confirmation granted -> repo is removed
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        self.cmd('delete', self.repository_location)
        # Make sure the repo is gone
        self.assertFalse(os.path.exists(self.repository_path))
    def test_delete_force(self):
        """delete --force must succeed on an archive with a missing chunk and note the corruption."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # corrupt the archive: remove one data chunk of a known file
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    repository.delete(item.chunks[-1].id)
                    break
            else:
                assert False  # missed the file
            repository.commit(compact=False)
        output = self.cmd('delete', '--force', self.repository_location + '::test')
        self.assert_in('deleted archive was corrupted', output)
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
    def test_delete_double_force(self):
        """delete --force --force must succeed even when the archive metadata stream is corrupted."""
        self.cmd('init', '--encryption=none', self.repository_location)
        self.create_src_archive('test')
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, 'test')
            # overwrite the first items-metadata chunk with garbage
            id = archive.metadata.items[0]
            repository.put(id, b'corrupted items metadata stream chunk')
            repository.commit(compact=False)
        self.cmd('delete', '--force', '--force', self.repository_location + '::test')
        self.cmd('check', '--repair', self.repository_location)
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test', output)
    def test_corrupted_repository(self):
        """check must detect on-disk segment corruption and return a warning exit code."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
        output = self.cmd('check', '--show-version', self.repository_location)
        self.assert_in('borgbackup version', output)  # implied output even without --info given
        self.assert_not_in('Starting repository check', output)  # --info not given for root logger

        # flip 4 bytes in the second-newest segment file
        name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
        with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
            fd.seek(100)
            fd.write(b'XXXX')
        output = self.cmd('check', '--info', self.repository_location, exit_code=1)
        self.assert_in('Starting repository check', output)  # --info given for root logger
    def test_readonly_check(self):
        """check on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('check', '--verify-data', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('check', '--verify-data', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('check', '--verify-data', self.repository_location, '--bypass-lock')
    def test_readonly_diff(self):
        """diff on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('a')
        self.create_src_archive('b')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('diff', '%s::a' % self.repository_location, 'b', exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('diff', '%s::a' % self.repository_location, 'b')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('diff', '%s::a' % self.repository_location, 'b', '--bypass-lock')
    def test_readonly_export_tar(self):
        """export-tar on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar')
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('export-tar', '%s::test' % self.repository_location, 'test.tar', '--bypass-lock')
    def test_readonly_extract(self):
        """extract on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('extract', '%s::test' % self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('extract', '%s::test' % self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('extract', '%s::test' % self.repository_location, '--bypass-lock')
    def test_readonly_info(self):
        """info on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('info', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('info', self.repository_location, '--bypass-lock')
    def test_readonly_list(self):
        """list on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                self.cmd('list', self.repository_location, exit_code=EXIT_ERROR)
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    self.cmd('list', self.repository_location)
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            self.cmd('list', self.repository_location, '--bypass-lock')
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_readonly_mount(self):
        """mount on a read-only repo must fail with LockFailed unless --bypass-lock is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('test')
        with self.read_only(self.repository_path):
            # verify that command normally doesn't work with read-only repo
            if self.FORK_DEFAULT:
                with self.fuse_mount(self.repository_location, exit_code=EXIT_ERROR):
                    pass
            else:
                with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo:
                    # self.fuse_mount always assumes fork=True, so for this test we have to manually set fork=False
                    with self.fuse_mount(self.repository_location, fork=False):
                        pass
                if isinstance(excinfo.value, RemoteRepository.RPCError):
                    assert excinfo.value.exception_class == 'LockFailed'
            # verify that command works with read-only repo when using --bypass-lock
            with self.fuse_mount(self.repository_location, None, '--bypass-lock'):
                pass
    @pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
    def test_umask(self):
        """A freshly used repository directory must end up with 0700 permissions."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        mode = os.stat(self.repository_path).st_mode
        self.assertEqual(stat.S_IMODE(mode), 0o700)
    def test_create_dry_run(self):
        """create --dry-run must not add any archive to the manifest."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
        # Make sure no archive has been created
        with Repository(self.repository_path) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        self.assert_equal(len(manifest.archives), 0)
    def add_unknown_feature(self, operation):
        """Mark the manifest with a mandatory 'unknown-feature' flag for *operation* (a Manifest.Operation)."""
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            # declare a feature this borg version does not implement as mandatory for the operation
            manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
            manifest.write()
            repository.commit(compact=False)
    def cmd_raises_unknown_feature(self, args):
        """Run *args* (a list of cmd arguments) and assert it fails due to the 'unknown-feature' flag."""
        if self.FORK_DEFAULT:
            # forked execution: only the exit code is observable
            self.cmd(*args, exit_code=EXIT_ERROR)
        else:
            # in-process execution: the exact exception and its payload can be checked
            with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
                self.cmd(*args)
            assert excinfo.value.args == (['unknown-feature'],)
    def test_unknown_feature_on_create(self):
        """create must refuse to run when the WRITE operation requires an unknown mandatory feature."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.WRITE)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
    def test_unknown_feature_on_cache_sync(self):
        """The cache sync triggered by create must also honor an unknown mandatory READ feature."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # force a cache sync on the next create by dropping the local cache
        self.cmd('delete', '--cache-only', self.repository_location)
        self.add_unknown_feature(Manifest.Operation.READ)
        self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
    def test_unknown_feature_on_change_passphrase(self):
        """key change-passphrase must refuse to run with an unknown mandatory CHECK feature."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])
    def test_unknown_feature_on_read(self):
        """extract, list and info must all refuse to run with an unknown mandatory READ feature."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        with changedir('output'):
            self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['list', self.repository_location])
        self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])
    def test_unknown_feature_on_rename(self):
        """rename must refuse to run with an unknown mandatory CHECK feature."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.CHECK)
        self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])
    def test_unknown_feature_on_delete(self):
        """Archive delete/prune must honor an unknown mandatory DELETE feature; whole-repo delete must not."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.DELETE)
        # delete of an archive raises
        self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
        # delete of the whole repository ignores features
        self.cmd('delete', self.repository_location)
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_unknown_feature_on_mount(self):
        """mount must refuse to run with an unknown mandatory READ feature."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        # XXX this might hang if it doesn't raise an error
        self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])
    @pytest.mark.allow_cache_wipe
    def test_unknown_mandatory_feature_in_cache(self):
        """A cache carrying unknown mandatory features must be wiped and rebuilt on the next create."""
        if self.prefix:
            path_prefix = 'ssh://__testsuite__'
        else:
            path_prefix = ''

        print(self.cmd('init', '--encryption=repokey', self.repository_location))

        # poison the local cache config with a feature this borg does not know
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                cache.begin_txn()
                cache.cache_config.mandatory_features = set(['unknown-feature'])
                cache.commit()

        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test', 'input')
        else:
            # in-process: verify the wipe actually happens by wrapping LocalCache.wipe_cache
            called = False
            wipe_cache_safe = LocalCache.wipe_cache

            def wipe_wrapper(*args):
                nonlocal called
                called = True
                wipe_cache_safe(*args)

            with patch.object(LocalCache, 'wipe_cache', wipe_wrapper):
                self.cmd('create', self.repository_location + '::test', 'input')

            assert called

        # after the wipe/rebuild, the bogus mandatory feature must be gone
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                assert cache.cache_config.mandatory_features == set([])
    def test_progress_on(self):
        """create --progress must emit carriage-return progress output."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input')
        self.assert_in("\r", output)
    def test_progress_off(self):
        """create without --progress must not emit carriage-return progress output."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', self.repository_location + '::test5', 'input')
        self.assert_not_in("\r", output)
    def test_file_status(self):
        """create --list must flag files A (added) on first backup and U (unmodified) on the second."""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        # should find first file as unmodified
        output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
        self.assert_in("U input/file1", output)
        # this is expected, although surprising, for why, see:
        # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
        self.assert_in("A input/file2", output)
    def test_file_status_cs_cache_mode(self):
        """--files-cache=ctime,size must detect a content change even when mtime and size are unchanged."""
        self.create_regular_file('file1', contents=b'123')
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input')
        # modify file1, but cheat with the mtime (and atime) and also keep same size:
        st = os.stat('input/file1')
        self.create_regular_file('file1', contents=b'321')
        os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns))
        # this mode uses ctime for change detection, so it should find file1 as modified
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input')
        self.assert_in("M input/file1", output)
    def test_file_status_ms_cache_mode(self):
        """--files-cache=mtime,size must treat a ctime-only change (chmod) as unmodified."""
        self.create_regular_file('file1', size=10)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input')
        # change mode of file1, no content change:
        st = os.stat('input/file1')
        os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO)  # this triggers a ctime change, but mtime is unchanged
        # this mode uses mtime for change detection, so it should find file1 as unmodified
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input')
        self.assert_in("U input/file1", output)
    def test_file_status_rc_cache_mode(self):
        """--files-cache=rechunk,ctime must re-read ('A') even completely unchanged files."""
        self.create_regular_file('file1', size=10)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input')
        # no changes here, but this mode rechunks unconditionally
        output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input')
        self.assert_in("A input/file1", output)
    def test_file_status_excluded(self):
        """Excluded files (pattern or nodump flag) must be listed with 'x' status."""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        if has_lchflags:
            # file3 carries the UF_NODUMP flag, so --exclude-nodump must skip it
            self.create_regular_file('file3', size=1024 * 80)
            platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
        # should find second file as excluded
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
        self.assert_in("U input/file1", output)
        self.assert_in("x input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
    def test_create_json(self):
        """'create --json' must emit the documented top-level keys and archive metadata."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
        # The usual keys
        assert 'encryption' in create_info
        assert 'repository' in create_info
        assert 'cache' in create_info
        assert 'last_modified' in create_info['repository']
        archive = create_info['archive']
        assert archive['name'] == 'test'
        assert isinstance(archive['command_line'], list)
        assert isinstance(archive['duration'], float)
        assert len(archive['id']) == 64  # hex-encoded archive id
        assert 'stats' in archive
    def test_create_topical(self):
        """'create --list --filter=...' must restrict output to the requested statuses."""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # no listing by default
        output = self.cmd('create', self.repository_location + '::test', 'input')
        self.assert_not_in('file1', output)
        # shouldn't be listed even if unchanged
        output = self.cmd('create', self.repository_location + '::test0', 'input')
        self.assert_not_in('file1', output)
        # should list the file as unchanged
        output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        self.assert_in('file1', output)
        # should *not* list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
        self.assert_not_in('file1', output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)
    @pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
    def test_create_read_special_symlink(self):
        """--read-special must follow a symlink to a FIFO and archive the streamed data."""
        from threading import Thread

        def fifo_feeder(fifo_fn, data):
            # runs in a background thread: opening the FIFO write-end blocks
            # until borg opens the read-end, then we feed it the payload
            fd = os.open(fifo_fn, os.O_WRONLY)
            try:
                os.write(fd, data)
            finally:
                os.close(fd)

        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        data = b'foobar' * 1000
        fifo_fn = os.path.join(self.input_path, 'fifo')
        link_fn = os.path.join(self.input_path, 'link_fifo')
        os.mkfifo(fifo_fn)
        os.symlink(fifo_fn, link_fn)
        t = Thread(target=fifo_feeder, args=(fifo_fn, data))
        t.start()
        try:
            self.cmd('create', '--read-special', archive, 'input/link_fifo')
        finally:
            t.join()
        with changedir('output'):
            self.cmd('extract', archive)
            fifo_fn = 'input/link_fifo'
            # the extracted entry must be a regular file holding the streamed bytes
            with open(fifo_fn, 'rb') as f:
                extracted_data = f.read()
        assert extracted_data == data
def test_create_read_special_broken_symlink(self):
os.symlink('somewhere does not exist', os.path.join(self.input_path, 'link'))
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '--read-special', archive, 'input')
output = self.cmd('list', archive)
assert 'input/link -> somewhere does not exist' in output
# def test_cmdline_compatibility(self):
# self.create_regular_file('file1', size=1024 * 80)
# self.cmd('init', '--encryption=repokey', self.repository_location)
# self.cmd('create', self.repository_location + '::test', 'input')
# output = self.cmd('foo', self.repository_location, '--old')
# self.assert_in('"--old" has been deprecated. Use "--new" instead', output)
    def test_prune_repository(self):
        """prune must keep archives per the --keep rules plus the newest checkpoint archive."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        # these are not really checkpoints, but they look like some:
        self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
        assert re.search(r'Would prune:\s+test1', output)
        # must keep the latest non-checkpoint archive:
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
        # must keep the latest checkpoint archive:
        assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        # dry-run must not have removed anything:
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.assert_in('test3.checkpoint', output)
        self.assert_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        self.cmd('prune', self.repository_location, '--keep-daily=1')
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        self.assert_not_in('test1', output)
        # the latest non-checkpoint archive must be still there:
        self.assert_in('test2', output)
        # only the latest checkpoint archive must still be there:
        self.assert_not_in('test3.checkpoint', output)
        self.assert_not_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        # now we supersede the latest checkpoint by a successful backup:
        self.cmd('create', self.repository_location + '::test5', src_dir)
        self.cmd('prune', self.repository_location, '--keep-daily=2')
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        # all checkpoints should be gone now:
        self.assert_not_in('checkpoint', output)
        # the latest archive must be still there
        self.assert_in('test5', output)
# Given a date and time in local tz, create a UTC timestamp string suitable
# for create --timestamp command line option
def _to_utc_timestamp(self, year, month, day, hour, minute, second):
dtime = datetime(year, month, day, hour, minute, second, 0, dateutil.tz.gettz())
return dtime.astimezone(dateutil.tz.UTC).strftime("%Y-%m-%dT%H:%M:%S")
def _create_archive_ts(self, name, y, m, d, H=0, M=0, S=0):
loc = self.repository_location + '::' + name
self.cmd('create', '--timestamp', self._to_utc_timestamp(y, m, d, H, M, S), loc, src_dir)
    # This test must match docs/misc/prune-example.txt
    def test_prune_repository_example(self):
        """Reproduce the documented prune example: 1 yearly + 6 monthly + 14 daily kept."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Archives that will be kept, per the example
        # Oldest archive
        self._create_archive_ts('test01', 2015, 1, 1)
        # 6 monthly archives
        self._create_archive_ts('test02', 2015, 6, 30)
        self._create_archive_ts('test03', 2015, 7, 31)
        self._create_archive_ts('test04', 2015, 8, 31)
        self._create_archive_ts('test05', 2015, 9, 30)
        self._create_archive_ts('test06', 2015, 10, 31)
        self._create_archive_ts('test07', 2015, 11, 30)
        # 14 daily archives
        self._create_archive_ts('test08', 2015, 12, 17)
        self._create_archive_ts('test09', 2015, 12, 18)
        self._create_archive_ts('test10', 2015, 12, 20)
        self._create_archive_ts('test11', 2015, 12, 21)
        self._create_archive_ts('test12', 2015, 12, 22)
        self._create_archive_ts('test13', 2015, 12, 23)
        self._create_archive_ts('test14', 2015, 12, 24)
        self._create_archive_ts('test15', 2015, 12, 25)
        self._create_archive_ts('test16', 2015, 12, 26)
        self._create_archive_ts('test17', 2015, 12, 27)
        self._create_archive_ts('test18', 2015, 12, 28)
        self._create_archive_ts('test19', 2015, 12, 29)
        self._create_archive_ts('test20', 2015, 12, 30)
        self._create_archive_ts('test21', 2015, 12, 31)
        # Additional archives that would be pruned
        # The second backup of the year
        self._create_archive_ts('test22', 2015, 1, 2)
        # The next older monthly backup
        self._create_archive_ts('test23', 2015, 5, 31)
        # The next older daily backup
        self._create_archive_ts('test24', 2015, 12, 16)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
        # Prune second backup of the year
        assert re.search(r'Would prune:\s+test22', output)
        # Prune next older monthly and daily backups
        assert re.search(r'Would prune:\s+test23', output)
        assert re.search(r'Would prune:\s+test24', output)
        # Must keep the other 21 backups
        # Yearly is kept as oldest archive
        assert re.search(r'Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01', output)
        for i in range(1, 7):
            # monthly rule matches test07 down to test02, newest first
            assert re.search(r'Keeping archive \(rule: monthly #' + str(i) + r'\):\s+test' + ("%02d" % (8-i)), output)
        for i in range(1, 15):
            # daily rule matches test21 down to test08, newest first
            assert re.search(r'Keeping archive \(rule: daily #' + str(i) + r'\):\s+test' + ("%02d" % (22-i)), output)
        output = self.cmd('list', self.repository_location)
        # Nothing pruned after dry run
        for i in range(1, 25):
            self.assert_in('test%02d' % i, output)
        self.cmd('prune', self.repository_location, '--keep-daily=14', '--keep-monthly=6', '--keep-yearly=1')
        output = self.cmd('list', self.repository_location)
        # All matching backups plus oldest kept
        for i in range(1, 22):
            self.assert_in('test%02d' % i, output)
        # Other backups have been pruned
        for i in range(22, 25):
            self.assert_not_in('test%02d' % i, output)
    # With an initial and daily backup, prune daily until oldest is replaced by a monthly backup
    def test_prune_retain_and_expire_oldest(self):
        """The [oldest] fallback keeps the first archive only until a monthly rule match exists."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Initial backup
        self._create_archive_ts('original_archive', 2020, 9, 1, 11, 15)
        # Archive and prune daily for 30 days
        for i in range(1, 31):
            self._create_archive_ts('september%02d' % i, 2020, 9, i, 12)
            self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        # Archive and prune 6 days into the next month
        for i in range(1, 7):
            self._create_archive_ts('october%02d' % i, 2020, 10, i, 12)
            self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        # Oldest backup is still retained
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        assert re.search(r'Keeping archive \(rule: monthly\[oldest\] #1' + r'\):\s+original_archive', output)
        # Archive one more day and prune.
        self._create_archive_ts('october07', 2020, 10, 7, 12)
        self.cmd('prune', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        # Last day of previous month is retained as monthly, and oldest is expired.
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=7', '--keep-monthly=1')
        assert re.search(r'Keeping archive \(rule: monthly #1\):\s+september30', output)
        self.assert_not_in('original_archive', output)
    def test_prune_repository_save_space(self):
        """prune --save-space must produce the same result as a normal prune."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
        assert re.search(r'Would prune:\s+test1', output)
        # dry-run must not have removed anything:
        output = self.cmd('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=1')
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test1', output)
        self.assert_in('test2', output)
    def test_prune_repository_prefix(self):
        """prune --prefix must only consider (and delete) archives matching the prefix."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--prefix=foo-')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output)
        assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output)
        # dry-run must not have removed anything:
        output = self.cmd('list', self.repository_location)
        self.assert_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
        self.cmd('prune', self.repository_location, '--keep-daily=1', '--prefix=foo-')
        # only the older foo- archive is pruned; bar- archives are untouched
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
    def test_prune_repository_glob(self):
        """prune --glob-archives must only consider (and delete) archives matching the glob."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output)
        assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output)
        # dry-run must not have removed anything:
        output = self.cmd('list', self.repository_location)
        self.assert_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
        self.cmd('prune', self.repository_location, '--keep-daily=1', '--glob-archives=2015-*-foo')
        # only the older -foo archive is pruned; -bar archives are untouched
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
def test_list_prefix(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test-1', src_dir)
self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir)
self.cmd('create', self.repository_location + '::test-2', src_dir)
output = self.cmd('list', '--prefix=test-', self.repository_location)
self.assert_in('test-1', output)
self.assert_in('test-2', output)
self.assert_not_in('something-else', output)
def test_list_format(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, src_dir)
output_1 = self.cmd('list', test_archive)
output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive)
output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive)
self.assertEqual(output_1, output_2)
self.assertNotEqual(output_1, output_3)
    def test_list_repository_format(self):
        """Repository-level 'list' must honor --format, --short and placeholder keys."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir)
        self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir)
        output_1 = self.cmd('list', self.repository_location)
        # default output equals the explicit default format
        output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location)
        self.assertEqual(output_1, output_2)
        output_1 = self.cmd('list', '--short', self.repository_location)
        self.assertEqual(output_1, 'test-1\ntest-2\n')
        # custom format without {NL}: names are concatenated on one line
        output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location)
        self.assertEqual(output_1, 'test-1/test-2/')
        # {comment} placeholder must expand to the archive comment
        output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location)
        self.assert_in('test-1 comment 1\n', output_3)
        self.assert_in('test-2 comment 2\n', output_3)
    def test_list_hash(self):
        """The {sha256} format key must print the content hash of each file."""
        self.create_regular_file('empty_file', size=0)
        self.create_regular_file('amb', contents=b'a' * 1000000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', test_archive, 'input')
        output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive)
        # expected digests: sha256 of one million 'a' bytes and of the empty string
        assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output
        assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output
    def test_list_consider_checkpoints(self):
        """'list' must hide *.checkpoint archives unless --consider-checkpoints is given."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        # these are not really checkpoints, but they look like some:
        self.cmd('create', self.repository_location + '::test2.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        output = self.cmd('list', self.repository_location)
        assert "test1" in output
        assert "test2.checkpoint" not in output
        assert "test3.checkpoint.1" not in output
        output = self.cmd('list', '--consider-checkpoints', self.repository_location)
        assert "test1" in output
        assert "test2.checkpoint" in output
        assert "test3.checkpoint.1" in output
    def test_list_chunk_counts(self):
        """{num_chunks} and {unique_chunks} format keys must report per-file chunk counts."""
        self.create_regular_file('empty_file', size=0)
        self.create_regular_file('two_chunks')
        with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd:
            # two large, distinct runs -> expected to chunk into two unique chunks
            fd.write(b'abba' * 2000000)
            fd.write(b'baab' * 2000000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', test_archive, 'input')
        output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive)
        assert "0 0 input/empty_file" in output
        assert "2 2 input/two_chunks" in output
    def test_list_size(self):
        """size/csize/dsize/dcsize format keys must be consistent for a compressible file."""
        self.create_regular_file('compressible_file', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', '-C', 'lz4', test_archive, 'input')
        output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive)
        # second line is the file entry (first line: the 'input' directory)
        size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ")
        # compressed sizes must be smaller, deduplicated sizes must not exceed raw sizes
        assert int(csize) < int(size)
        assert int(dcsize) < int(dsize)
        assert int(dsize) <= int(size)
        assert int(dcsize) <= int(csize)
    def test_list_json(self):
        """'list --json' (repo) and '--json-lines' (archive) must emit well-formed metadata."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        list_repo = json.loads(self.cmd('list', '--json', self.repository_location))
        repository = list_repo['repository']
        assert len(repository['id']) == 64  # hex-encoded repository id
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert list_repo['encryption']['mode'] == 'repokey'
        assert 'keyfile' not in list_repo['encryption']  # keyfile path only for keyfile mode
        archive0 = list_repo['archives'][0]
        assert datetime.strptime(archive0['time'], ISO_FORMAT)  # must not raise
        # archive listing: one JSON document per line, item 0 is the 'input' dir
        list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['size'] == 81920
        assert datetime.strptime(file1['mtime'], ISO_FORMAT)  # must not raise
        # a custom --format adds its keys to each JSON line
        list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b'
def test_list_json_args(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('list', '--json-lines', self.repository_location, exit_code=2)
self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2)
    def test_log_json(self):
        """--log-json must turn every log line into a typed JSON object."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug')
        messages = {}  # type -> message, one of each kind
        for line in log.splitlines():
            msg = json.loads(line)
            messages[msg['type']] = msg

        # file_status objects come from --list
        file_status = messages['file_status']
        assert 'status' in file_status
        assert file_status['path'].startswith('input')

        # log_message objects come from ordinary logging
        log_message = messages['log_message']
        assert isinstance(log_message['time'], float)
        assert log_message['levelname'] == 'DEBUG'  # there should only be DEBUG messages
        assert isinstance(log_message['message'], str)
    def test_debug_profile(self):
        """--debug-profile output must be convertible to / loadable as a pstats profile."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # borg's own (msgpack-based) profile format, converted to pyprof:
        self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof')
        self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof')
        stats = pstats.Stats('create.pyprof')
        stats.strip_dirs()
        stats.sort_stats('cumtime')

        # a .pyprof filename makes borg write native pstats format directly:
        self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof')
        stats = pstats.Stats('create.pyprof')  # Only do this on trusted data!
        stats.strip_dirs()
        stats.sort_stats('cumtime')
def test_common_options(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input')
assert 'security: read previous location' in log
def _get_sizes(self, compression, compressible, size=10000):
if compressible:
contents = b'X' * size
else:
contents = os.urandom(size)
self.create_regular_file('file', contents=contents)
self.cmd('init', '--encryption=none', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '-C', compression, archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive)
size, csize, path = output.split("\n")[1].split(" ")
return int(size), int(csize)
def test_compression_none_compressible(self):
size, csize = self._get_sizes('none', compressible=True)
assert csize == size + 3
def test_compression_none_uncompressible(self):
size, csize = self._get_sizes('none', compressible=False)
assert csize == size + 3
def test_compression_zlib_compressible(self):
size, csize = self._get_sizes('zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35
def test_compression_zlib_uncompressible(self):
size, csize = self._get_sizes('zlib', compressible=False)
assert csize >= size
def test_compression_auto_compressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35 # same as compression 'zlib'
def test_compression_auto_uncompressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_compression_lz4_compressible(self):
size, csize = self._get_sizes('lz4', compressible=True)
assert csize < size * 0.1
def test_compression_lz4_uncompressible(self):
size, csize = self._get_sizes('lz4', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_compression_lzma_compressible(self):
size, csize = self._get_sizes('lzma', compressible=True)
assert csize < size * 0.1
def test_compression_lzma_uncompressible(self):
size, csize = self._get_sizes('lzma', compressible=False)
assert csize == size + 3 # same as compression 'none'
def test_compression_zstd_compressible(self):
size, csize = self._get_sizes('zstd', compressible=True)
assert csize < size * 0.1
def test_compression_zstd_uncompressible(self):
size, csize = self._get_sizes('zstd', compressible=False)
assert csize == size + 3 # same as compression 'none'
    def test_change_passphrase(self):
        """'key change-passphrase' must re-encrypt the key; old passphrase no longer needed."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase'
        # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set:
        self.cmd('key', 'change-passphrase', self.repository_location)
        os.environ['BORG_PASSPHRASE'] = 'newpassphrase'
        # accessing the repo with the new passphrase must work:
        self.cmd('list', self.repository_location)
def test_break_lock(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('break-lock', self.repository_location)
    def test_usage(self):
        """Invoking borg with no arguments or with -h must not fail."""
        self.cmd()
        self.cmd('-h')
    def test_help(self):
        """'help' must show general, topic and command help; --epilog/--usage-only trim output."""
        assert 'Borg' in self.cmd('help')
        assert 'patterns' in self.cmd('help', 'patterns')
        assert 'Initialize' in self.cmd('help', 'init')
        assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only')
        assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only')
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse(self):
        """Mount repo and single archive via FUSE; verify metadata, content, links, xattrs."""

        def has_noatime(some_file):
            # determine whether opening with O_NOATIME actually preserves atime here
            atime_before = os.stat(some_file).st_atime_ns
            try:
                os.close(os.open(some_file, flags_noatime))
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after

        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_test_files()
        have_noatime = has_noatime('input/file1')
        self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive', 'input')
        self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive2', 'input')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            os.remove(os.path.join('input', 'flagfile'))
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint:
        with self.fuse_mount(self.repository_location, mountpoint):
            # flags are not supported by the FUSE mount
            # we also ignore xattrs here, they are tested separately
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'),
                                   ignore_flags=True, ignore_xattrs=True)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'),
                                   ignore_flags=True, ignore_xattrs=True)
        # mount only 1 archive, its contents shall show up directly in mountpoint:
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'),
                                   ignore_flags=True, ignore_xattrs=True)
            # regular file
            in_fn = 'input/file1'
            out_fn = os.path.join(mountpoint, 'input', 'file1')
            # stat: all metadata must match the original file
            sti1 = os.stat(in_fn)
            sto1 = os.stat(out_fn)
            assert sti1.st_mode == sto1.st_mode
            assert sti1.st_uid == sto1.st_uid
            assert sti1.st_gid == sto1.st_gid
            assert sti1.st_size == sto1.st_size
            if have_noatime:
                assert sti1.st_atime == sto1.st_atime
            assert sti1.st_ctime == sto1.st_ctime
            assert sti1.st_mtime == sto1.st_mtime
            if are_hardlinks_supported():
                # note: there is another hardlink to this, see below
                assert sti1.st_nlink == sto1.st_nlink == 2
            # read: content must round-trip
            with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f:
                assert in_f.read() == out_f.read()
            # hardlink (to 'input/file1'): must share an inode in the mount
            if are_hardlinks_supported():
                in_fn = 'input/hardlink'
                out_fn = os.path.join(mountpoint, 'input', 'hardlink')
                sti2 = os.stat(in_fn)
                sto2 = os.stat(out_fn)
                assert sti2.st_nlink == sto2.st_nlink == 2
                assert sto1.st_ino == sto2.st_ino
            # symlink: must be exposed as a symlink with the same target
            if are_symlinks_supported():
                in_fn = 'input/link1'
                out_fn = os.path.join(mountpoint, 'input', 'link1')
                sti = os.stat(in_fn, follow_symlinks=False)
                sto = os.stat(out_fn, follow_symlinks=False)
                assert sti.st_size == len('somewhere')
                assert sto.st_size == len('somewhere')
                assert stat.S_ISLNK(sti.st_mode)
                assert stat.S_ISLNK(sto.st_mode)
                assert os.readlink(in_fn) == os.readlink(out_fn)
            # FIFO: node type must be preserved
            if are_fifos_supported():
                out_fn = os.path.join(mountpoint, 'input', 'fifo1')
                sto = os.stat(out_fn)
                assert stat.S_ISFIFO(sto.st_mode)
            # list/read xattrs
            try:
                in_fn = 'input/fusexattr'
                out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr'))
                if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
                    assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ]
                    assert xattr.getxattr(out_fn, b'user.foo') == b'bar'
                    assert xattr.getxattr(out_fn, b'user.empty') == b''
                else:
                    # xattrs unavailable: listing must be empty, reads must fail with ENOATTR
                    assert no_selinux(xattr.listxattr(out_fn)) == []
                    try:
                        xattr.getxattr(out_fn, b'user.foo')
                    except OSError as e:
                        assert e.errno == llfuse.ENOATTR
                    else:
                        assert False, "expected OSError(ENOATTR), but no error was raised"
            except OSError as err:
                # NOTE(review): the platform tuple looks like a deliberate placeholder
                # that disables this fallback — confirm before relying on it
                if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP:
                    # some systems have no xattr support on FUSE
                    pass
                else:
                    raise
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_fuse_versions_view(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.create_regular_file('test', contents=b'first')
if are_hardlinks_supported():
self.create_regular_file('hardlink1', contents=b'123456')
os.link('input/hardlink1', 'input/hardlink2')
os.link('input/hardlink1', 'input/hardlink3')
self.cmd('create', self.repository_location + '::archive1', 'input')
self.create_regular_file('test', contents=b'second')
self.cmd('create', self.repository_location + '::archive2', 'input')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# mount the whole repository, archive contents shall show up in versioned view:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'):
path = os.path.join(mountpoint, 'input', 'test') # filename shows up as directory ...
files = os.listdir(path)
assert all(f.startswith('test.') for f in files) # ... with files test.xxxxx in there
assert {b'first', b'second'} == {open(os.path.join(path, f), 'rb').read() for f in files}
if are_hardlinks_supported():
hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001')
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
# similar again, but exclude the hardlink master:
with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'):
if are_hardlinks_supported():
hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
assert os.stat(hl2).st_ino == os.stat(hl3).st_ino
assert open(hl3, 'rb').read() == b'123456'
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse_allow_damaged_files(self):
        """Damaged files must raise EIO in a FUSE mount unless allow_damaged_files is set."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive')
        # Get rid of a chunk and repair it
        archive, repository = self.open_archive('archive')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    repository.delete(item.chunks[-1].id)
                    path = item.path  # store full path for later
                    break
            else:
                assert False  # missed the file
            repository.commit(compact=False)
        # repair marks the file as damaged (replaces the lost chunk)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)

        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            # default mount: opening the damaged file must fail with EIO
            with pytest.raises(OSError) as excinfo:
                open(os.path.join(mountpoint, path))
            assert excinfo.value.errno == errno.EIO
        with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'):
            # with allow_damaged_files, the open must succeed
            open(os.path.join(mountpoint, path)).close()
    @unittest.skipUnless(llfuse, 'llfuse not installed')
    def test_fuse_mount_options(self):
        """Archive-filter options (--first/--last/--prefix) must limit which
        archives appear at the top level of a repository FUSE mount."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('arch11')
        self.create_src_archive('arch12')
        self.create_src_archive('arch21')
        self.create_src_archive('arch22')
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        with self.fuse_mount(self.repository_location, mountpoint, '--first=2', '--sort=name'):
            assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
        with self.fuse_mount(self.repository_location, mountpoint, '--last=2', '--sort=name'):
            assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
        with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch1'):
            assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12']
        with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch2'):
            assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch21', 'arch22']
        with self.fuse_mount(self.repository_location, mountpoint, '--prefix=arch'):
            assert sorted(os.listdir(os.path.join(mountpoint))) == ['arch11', 'arch12', 'arch21', 'arch22']
        with self.fuse_mount(self.repository_location, mountpoint, '--prefix=nope'):
            # non-matching prefix: mount is empty
            assert sorted(os.listdir(os.path.join(mountpoint))) == []
@unittest.skipUnless(llfuse, 'llfuse not installed')
def test_migrate_lock_alive(self):
from functools import wraps
import pickle
import traceback
# Check results are communicated from the borg mount background process
# to the pytest process by means of a serialized dict object stored in this file.
assert_data_file = os.path.join(self.tmpdir, 'migrate_lock_assert_data.pickle')
# Decorates Lock.migrate_lock() with process_alive() checks before and after.
# (We don't want to mix testing code into runtime.)
def write_assert_data(migrate_lock):
@wraps(migrate_lock)
def wrapper(self, old_id, new_id):
wrapper.num_calls += 1
assert_data = {
'num_calls': wrapper.num_calls,
'old_id': old_id,
'new_id': new_id,
'before': {
'old_id_alive': platform.process_alive(*old_id),
'new_id_alive': platform.process_alive(*new_id)},
'exception': None,
'exception.extr_tb': None,
'after': {
'old_id_alive': None,
'new_id_alive': None}}
try:
with open(assert_data_file, 'wb') as _out:
pickle.dump(assert_data, _out)
except:
pass
try:
return migrate_lock(self, old_id, new_id)
except BaseException as e:
assert_data['exception'] = e
assert_data['exception.extr_tb'] = traceback.extract_tb(e.__traceback__)
finally:
assert_data['after'].update({
'old_id_alive': platform.process_alive(*old_id),
'new_id_alive': platform.process_alive(*new_id)})
try:
with open(assert_data_file, 'wb') as _out:
pickle.dump(assert_data, _out)
except:
pass
wrapper.num_calls = 0
return wrapper
# Decorate
borg.locking.Lock.migrate_lock = write_assert_data(borg.locking.Lock.migrate_lock)
try:
self.cmd('init', '--encryption=none', self.repository_location)
self.create_src_archive('arch')
mountpoint = os.path.join(self.tmpdir, 'mountpoint')
# In order that the decoration is kept for the borg mount process, we must not spawn, but actually fork;
# not to be confused with the forking in borg.helpers.daemonize() which is done as well.
with self.fuse_mount(self.repository_location, mountpoint, os_fork=True):
pass
with open(assert_data_file, 'rb') as _in:
assert_data = pickle.load(_in)
print('\nLock.migrate_lock(): assert_data = %r.' % (assert_data, ), file=sys.stderr, flush=True)
exception = assert_data['exception']
if exception is not None:
extracted_tb = assert_data['exception.extr_tb']
print(
'Lock.migrate_lock() raised an exception:\n',
'Traceback (most recent call last):\n',
*traceback.format_list(extracted_tb),
*traceback.format_exception(exception.__class__, exception, None),
sep='', end='', file=sys.stderr, flush=True)
assert assert_data['num_calls'] == 1, "Lock.migrate_lock() must be called exactly once."
assert exception is None, "Lock.migrate_lock() may not raise an exception."
assert_data_before = assert_data['before']
assert assert_data_before['old_id_alive'], "old_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
assert assert_data_before['new_id_alive'], "new_id must be alive (=must not be stale) when calling Lock.migrate_lock()."
assert_data_after = assert_data['after']
assert assert_data_after['old_id_alive'], "old_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
assert assert_data_after['new_id_alive'], "new_id must be alive (=must not be stale) when Lock.migrate_lock() has returned."
finally:
# Undecorate
borg.locking.Lock.migrate_lock = borg.locking.Lock.migrate_lock.__wrapped__
def verify_aes_counter_uniqueness(self, method):
seen = set() # Chunks already seen
used = set() # counter values already used
def verify_uniqueness():
with Repository(self.repository_path) as repository:
for id, _ in repository.open_index(repository.get_transaction_id()).iteritems():
data = repository.get(id)
hash = sha256(data).digest()
if hash not in seen:
seen.add(hash)
num_blocks = num_cipher_blocks(len(data) - 41)
nonce = bytes_to_long(data[33:41])
for counter in range(nonce, nonce + num_blocks):
self.assert_not_in(counter, used)
used.add(counter)
self.create_test_files()
os.environ['BORG_PASSPHRASE'] = 'passphrase'
self.cmd('init', '--encryption=' + method, self.repository_location)
verify_uniqueness()
self.cmd('create', self.repository_location + '::test', 'input')
verify_uniqueness()
self.cmd('create', self.repository_location + '::test.2', 'input')
verify_uniqueness()
self.cmd('delete', self.repository_location + '::test.2')
verify_uniqueness()
    def test_aes_counter_uniqueness_keyfile(self):
        """AES counters must be unique when using keyfile encryption."""
        self.verify_aes_counter_uniqueness('keyfile')
    def test_aes_counter_uniqueness_passphrase(self):
        """AES counters must be unique when using repokey encryption."""
        self.verify_aes_counter_uniqueness('repokey')
    def test_debug_dump_archive_items(self):
        """`debug dump-archive-items` must write numbered item files and report 'Done.'."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test')
        output_dir = sorted(os.listdir('output'))
        # dump files are named with a 6-digit sequence prefix
        assert len(output_dir) > 0 and output_dir[0].startswith('000000_')
        assert 'Done.' in output
    def test_debug_dump_repo_objs(self):
        """`debug dump-repo-objs` must write numbered object files and report 'Done.'."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            output = self.cmd('debug', 'dump-repo-objs', self.repository_location)
        output_dir = sorted(os.listdir('output'))
        # dump files are named with an 8-digit sequence prefix
        assert len(output_dir) > 0 and output_dir[0].startswith('00000000_')
        assert 'Done.' in output
    def test_debug_put_get_delete_obj(self):
        """Round-trip an object through `debug put-obj` / `get-obj` / `delete-obj`,
        including the not-found and invalid-id cases."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        data = b'some data'
        hexkey = sha256(data).hexdigest()
        self.create_regular_file('file', contents=data)
        output = self.cmd('debug', 'put-obj', self.repository_location, 'input/file')
        assert hexkey in output
        output = self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file')
        assert hexkey in output
        with open('output/file', 'rb') as f:
            data_read = f.read()
        assert data == data_read
        output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
        assert "deleted" in output
        # deleting again: object is gone
        output = self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
        assert "not found" in output
        output = self.cmd('debug', 'delete-obj', self.repository_location, 'invalid')
        assert "is invalid" in output
    def test_init_interrupt(self):
        """An EOF (interrupted passphrase input) during init must not leave a repository behind."""
        def raise_eof(*args):
            raise EOFError

        with patch.object(KeyfileKeyBase, 'create', raise_eof):
            self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1)
        assert not os.path.exists(self.repository_location)
    def test_init_requires_encryption_option(self):
        """`init` without --encryption must fail with a usage error (rc 2)."""
        self.cmd('init', self.repository_location, exit_code=2)
    def test_init_nested_repositories(self):
        """Creating a repository inside an existing repository must be rejected."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        if self.FORK_DEFAULT:
            self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2)
        else:
            # in-process: the error surfaces as an exception instead of an exit code
            with pytest.raises(Repository.AlreadyExists):
                self.cmd('init', '--encryption=repokey', self.repository_location + '/nested')
    def check_cache(self):
        """Run `borg check` and verify the on-disk chunks cache exactly matches a
        cache freshly rebuilt from the repository (refcount, size, csize per chunk)."""
        # First run a regular borg check
        self.cmd('check', self.repository_location)
        # Then check that the cache on disk matches exactly what's in the repo.
        with self.open_repository() as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest, sync=False) as cache:
                original_chunks = cache.chunks
            Cache.destroy(repository)
            with Cache(repository, key, manifest) as cache:
                correct_chunks = cache.chunks
        assert original_chunks is not correct_chunks
        seen = set()
        # every chunk in the rebuilt cache must match the original cache entry
        for id, (refcount, size, csize) in correct_chunks.iteritems():
            o_refcount, o_size, o_csize = original_chunks[id]
            assert refcount == o_refcount
            assert size == o_size
            assert csize == o_csize
            seen.add(id)
        # ... and the original cache must not contain extra chunks
        for id, (refcount, size, csize) in original_chunks.iteritems():
            assert id in seen
    def test_check_cache(self):
        """check_cache() must detect a deliberately corrupted (over-increffed) chunks cache."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with self.open_repository() as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest, sync=False) as cache:
                cache.begin_txn()
                # corrupt the cache: bump a refcount without a matching repo change
                cache.chunks.incref(list(cache.chunks.iteritems())[0][0])
                cache.commit()
        with pytest.raises(AssertionError):
            self.check_cache()
    def test_recreate_target_rc(self):
        """`recreate --target` on a whole repository (not a single archive) must fail."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2)
        assert 'Need to specify single archive' in output
    def test_recreate_target(self):
        """`recreate --target` must keep the original archive and create a filtered copy."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.check_cache()
        archive = self.repository_location + '::test0'
        self.cmd('create', archive, 'input')
        self.check_cache()
        original_archive = self.cmd('list', self.repository_location)
        self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive')
        self.check_cache()
        archives = self.cmd('list', self.repository_location)
        # original archive is untouched, new one is added alongside it
        assert original_archive in archives
        assert 'new-archive' in archives

        archive = self.repository_location + '::new-archive'
        listing = self.cmd('list', '--short', archive)
        assert 'file1' not in listing      # outside the selected path
        assert 'dir2/file2' in listing
        assert 'dir2/file3' not in listing  # explicitly excluded
    def test_recreate_basic(self):
        """In-place `recreate` with a path selection and an exclude must rewrite the archive."""
        self.create_test_files()
        self.create_regular_file('dir2/file3', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test0'
        self.cmd('create', archive, 'input')
        self.cmd('recreate', archive, 'input/dir2', '-e', 'input/dir2/file3')
        self.check_cache()
        listing = self.cmd('list', '--short', archive)
        assert 'file1' not in listing      # outside the selected path
        assert 'dir2/file2' in listing
        assert 'dir2/file3' not in listing  # explicitly excluded
    @pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    def test_recreate_subtree_hardlinks(self):
        """Hardlink groups must survive `recreate` restricted to a subtree."""
        # This is essentially the same problem set as in test_extract_hardlinks
        self._extract_hardlinks_setup()
        self.cmd('create', self.repository_location + '::test2', 'input')
        self.cmd('recreate', self.repository_location + '::test', 'input/dir1')
        self.check_cache()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
        with changedir('output'):
            # the untouched archive still carries the full hardlink group
            self.cmd('extract', self.repository_location + '::test2')
            assert os.stat('input/dir1/hardlink').st_nlink == 4
def test_recreate_rechunkify(self):
with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd:
fd.write(b'a' * 280)
fd.write(b'b' * 280)
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input')
self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled')
list = self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{num_chunks} {unique_chunks}')
num_chunks, unique_chunks = map(int, list.split(' '))
# test1 and test2 do not deduplicate
assert num_chunks == unique_chunks
self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
self.check_cache()
# test1 and test2 do deduplicate after recreate
assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}'))
assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file',
'--format', '{unique_chunks}'))
    def test_recreate_recompress(self):
        """`recreate --recompress` must shrink compressible data without changing content."""
        self.create_regular_file('compressible', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none')
        file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                             '--format', '{size} {csize} {sha256}')
        size, csize, sha256_before = file_list.split(' ')
        assert int(csize) >= int(size)  # >= due to metadata overhead
        self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress')
        self.check_cache()
        file_list = self.cmd('list', self.repository_location + '::test', 'input/compressible',
                             '--format', '{size} {csize} {sha256}')
        size, csize, sha256_after = file_list.split(' ')
        # compressed now, but file content (sha256) unchanged
        assert int(csize) < int(size)
        assert sha256_before == sha256_after
    def test_recreate_timestamp(self):
        """`recreate --timestamp` must rewrite the archive's start/end timestamps."""
        local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test0'
        self.cmd('create', archive, 'input')
        self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment',
                 'test', archive)
        info = self.cmd('info', archive).splitlines()
        # `info` prints local time; convert the given UTC timestamp accordingly
        dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None)
        s_time = dtime.strftime("%Y-%m-%d")
        assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info])
        assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info])
    def test_recreate_dry_run(self):
        """`recreate -n` (dry run) must leave the archive contents unchanged."""
        self.create_regular_file('compressible', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        archives_before = self.cmd('list', self.repository_location + '::test')
        self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible')
        self.check_cache()
        archives_after = self.cmd('list', self.repository_location + '::test')
        assert archives_after == archives_before
    def test_recreate_skips_nothing_to_do(self):
        """A no-op `recreate` must leave the archive untouched (same archive ID)."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        info_before = self.cmd('info', self.repository_location + '::test')
        self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
        self.check_cache()
        info_after = self.cmd('info', self.repository_location + '::test')
        assert info_before == info_after  # includes archive ID
    def test_with_lock(self):
        """`with-lock` must hold the exclusive repo lock while running the given command."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        lock_path = os.path.join(self.repository_path, 'lock.exclusive')
        # the subcommand exits 42 iff the lock file exists while it runs
        cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
        self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42)
    def test_recreate_list_output(self):
        """`recreate --list` must print per-file status lines ('x' marks excluded files);
        without --list/--info no file lines are printed."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=0)
        self.create_regular_file('file2', size=0)
        self.create_regular_file('file3', size=0)
        self.create_regular_file('file4', size=0)
        self.create_regular_file('file5', size=0)

        self.cmd('create', self.repository_location + '::test', 'input')

        output = self.cmd('recreate', '--list', '--info', self.repository_location + '::test', '-e', 'input/file2')
        self.check_cache()
        self.assert_in("input/file1", output)
        self.assert_in("x input/file2", output)

        output = self.cmd('recreate', '--list', self.repository_location + '::test', '-e', 'input/file3')
        self.check_cache()
        self.assert_in("input/file1", output)
        self.assert_in("x input/file3", output)

        output = self.cmd('recreate', self.repository_location + '::test', '-e', 'input/file4')
        self.check_cache()
        self.assert_not_in("input/file1", output)
        self.assert_not_in("x input/file4", output)

        output = self.cmd('recreate', '--info', self.repository_location + '::test', '-e', 'input/file5')
        self.check_cache()
        self.assert_not_in("input/file1", output)
        self.assert_not_in("x input/file5", output)
    def test_bad_filters(self):
        """`delete` with both --first and --last must be rejected as a usage error."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2)
    def test_key_export_keyfile(self):
        """`key export` of a keyfile repo must reproduce the key file, and
        `key import` must restore it after deletion."""
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        repo_id = self._extract_repository_id(self.repository_path)
        self.cmd('key', 'export', self.repository_location, export_file)

        with open(export_file, 'r') as fd:
            export_contents = fd.read()

        assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')

        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]

        with open(key_file, 'r') as fd:
            key_contents = fd.read()

        assert key_contents == export_contents

        os.unlink(key_file)

        self.cmd('key', 'import', self.repository_location, export_file)

        with open(key_file, 'r') as fd:
            key_contents2 = fd.read()

        # import recreates the key file with identical contents
        assert key_contents2 == key_contents
    def test_key_import_keyfile_with_borg_key_file(self):
        """`key import` must write to the path given by BORG_KEY_FILE instead of the default keys dir."""
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')

        exported_key_file = os.path.join(self.output_path, 'exported')
        self.cmd('key', 'export', self.repository_location, exported_key_file)

        key_file = os.path.join(self.keys_path, os.listdir(self.keys_path)[0])
        with open(key_file, 'r') as fd:
            key_contents = fd.read()
        os.unlink(key_file)

        imported_key_file = os.path.join(self.output_path, 'imported')
        with environment_variable(BORG_KEY_FILE=imported_key_file):
            self.cmd('key', 'import', self.repository_location, exported_key_file)
        assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE'

        with open(imported_key_file, 'r') as fd:
            imported_key_contents = fd.read()
        assert imported_key_contents == key_contents
def test_key_export_repokey(self):
export_file = self.output_path + '/exported'
self.cmd('init', self.repository_location, '--encryption', 'repokey')
repo_id = self._extract_repository_id(self.repository_path)
self.cmd('key', 'export', self.repository_location, export_file)
with open(export_file, 'r') as fd:
export_contents = fd.read()
assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
with Repository(self.repository_path) as repository:
repo_key = RepoKey(repository)
repo_key.load(None, Passphrase.env_passphrase())
backup_key = KeyfileKey(key.TestKey.MockRepository())
backup_key.load(export_file, Passphrase.env_passphrase())
assert repo_key.enc_key == backup_key.enc_key
with Repository(self.repository_path) as repository:
repository.save_key(b'')
self.cmd('key', 'import', self.repository_location, export_file)
with Repository(self.repository_path) as repository:
repo_key2 = RepoKey(repository)
repo_key2.load(None, Passphrase.env_passphrase())
assert repo_key2.enc_key == repo_key2.enc_key
    def test_key_export_qr(self):
        """`key export --qr-html` must emit a complete HTML document containing the repo id."""
        export_file = self.output_path + '/exported.html'
        self.cmd('init', self.repository_location, '--encryption', 'repokey')
        repo_id = self._extract_repository_id(self.repository_path)
        self.cmd('key', 'export', '--qr-html', self.repository_location, export_file)

        with open(export_file, 'r', encoding='utf-8') as fd:
            export_contents = fd.read()

        assert bin_to_hex(repo_id) in export_contents
        assert export_contents.startswith('<!doctype html>')
        assert export_contents.endswith('</html>\n')
    def test_key_export_directory(self):
        """`key export` to an existing directory (not a file path) must fail with an error."""
        export_directory = self.output_path + '/exported'
        os.mkdir(export_directory)
        self.cmd('init', self.repository_location, '--encryption', 'repokey')
        self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR)
    def test_key_import_errors(self):
        """`key import` must reject a missing file, a non-key file, and a key for another repo."""
        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')

        # file does not exist yet
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR)

        with open(export_file, 'w') as fd:
            fd.write('something not a key\n')

        if self.FORK_DEFAULT:
            self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
        else:
            with pytest.raises(NotABorgKeyFile):
                self.cmd('key', 'import', self.repository_location, export_file)

        with open(export_file, 'w') as fd:
            # valid header, but repo id does not match this repository
            fd.write('BORG_KEY a0a0a0\n')

        if self.FORK_DEFAULT:
            self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
        else:
            with pytest.raises(RepoIdMismatch):
                self.cmd('key', 'import', self.repository_location, export_file)
    def test_key_export_paperkey(self):
        """`key export --paper` must produce the exact expected paper-key text
        for a known repo id and key material."""
        repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'

        export_file = self.output_path + '/exported'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        self._set_repository_id(self.repository_path, unhexlify(repo_id))

        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]

        with open(key_file, 'w') as fd:
            fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
            # fixed key material so the paper-key output is deterministic
            fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())

        self.cmd('key', 'export', '--paper', self.repository_location, export_file)

        with open(export_file, 'r') as fd:
            export_contents = fd.read()

        assert export_contents == """To restore key use borg key import --paper /path/to/repo

BORG PAPER KEY v1
id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
 1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d
 2: 737475                                    - 88
"""
    def test_key_import_paperkey(self):
        """`key import --paper` must tolerate typos (re-prompting line by line) and
        support aborting; the scripted stdin below exercises each error path."""
        repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
        self.cmd('init', self.repository_location, '--encryption', 'keyfile')
        self._set_repository_id(self.repository_path, unhexlify(repo_id))

        key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
        with open(key_file, 'w') as fd:
            fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
            fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())

        typed_input = (
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41  02\n'   # Forgot to type "-"
            b'2 / e29442 3506da 4e1ea7  25f62a 5a3d41 - 02\n'   # Forgot to type second "/"
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n'  # Typo (..42 not ..41)
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'  # Correct! Congratulations
            b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
            b'\n\n'  # Abort [yN] => N
            b'737475 88\n'  # missing "-"
            b'73747i - 88\n'  # typo
            b'73747 - 88\n'  # missing nibble
            b'73 74 75  -  89\n'  # line checksum mismatch
            b'00a1 - 88\n'  # line hash collision - overall hash mismatch, have to start over
            b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'
            b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
            b'73 74 75 - 88\n'
        )

        # In case that this has to change, here is a quick way to find a colliding line hash:
        #
        # from hashlib import sha256
        # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2]
        # for i in range(1000):
        #     if hash_fn(i.to_bytes(2, byteorder='big')) == '88':  # 88 = line hash
        #         print(i.to_bytes(2, 'big'))
        #         break

        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)

        # Test abort paths
        typed_input = b'\ny\n'
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
        typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n'
        self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
    def test_debug_dump_manifest(self):
        """`debug dump-manifest` must write a JSON file with the expected top-level keys."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        dump_file = self.output_path + '/dump'
        output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file)
        assert output == ""
        with open(dump_file, "r") as f:
            result = json.load(f)
        assert 'archives' in result
        assert 'config' in result
        assert 'item_keys' in result
        assert 'timestamp' in result
        assert 'version' in result
    def test_debug_dump_archive(self):
        """`debug dump-archive` must write a JSON file with the expected top-level keys."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        dump_file = self.output_path + '/dump'
        output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file)
        assert output == ""
        with open(dump_file, "r") as f:
            result = json.load(f)
        assert '_name' in result
        assert '_manifest_entry' in result
        assert '_meta' in result
        assert '_items' in result
    def test_debug_refcount_obj(self):
        """`debug refcount-obj` must report missing, referenced, and invalid object ids."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip()
        assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].'

        create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
        archive_id = create_json['archive']['id']
        output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip()
        assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].'

        # Invalid IDs do not abort or return an error
        output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip()
        assert output == 'object id 124 is invalid.\nobject id xyza is invalid.'
    def test_debug_info(self):
        """`debug info` must mention the CRC implementation and the Python version."""
        output = self.cmd('debug', 'info')
        assert 'CRC implementation' in output
        assert 'Python' in output
    def test_benchmark_crud(self):
        """Smoke-test `benchmark crud` (test-mode via _BORG_BENCHMARK_CRUD_TEST)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'):
            self.cmd('benchmark', 'crud', self.repository_location, self.input_path)
    def test_config(self):
        """Exercise `config`: --list, get/set/delete of known options, and error paths."""
        self.create_test_files()
        os.unlink('input/flagfile')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('config', '--list', self.repository_location)
        self.assert_in('[repository]', output)
        self.assert_in('version', output)
        self.assert_in('segments_per_dir', output)
        self.assert_in('storage_quota', output)
        self.assert_in('append_only', output)
        self.assert_in('additional_free_space', output)
        self.assert_in('id', output)
        self.assert_not_in('last_segment_checked', output)

        # last_segment_checked: not set initially, settable, listable, deletable
        output = self.cmd('config', self.repository_location, 'last_segment_checked', exit_code=1)
        self.assert_in('No option ', output)
        self.cmd('config', self.repository_location, 'last_segment_checked', '123')
        output = self.cmd('config', self.repository_location, 'last_segment_checked')
        assert output == '123' + '\n'
        output = self.cmd('config', '--list', self.repository_location)
        self.assert_in('last_segment_checked', output)
        self.cmd('config', '--delete', self.repository_location, 'last_segment_checked')

        # set/get/delete round-trip for options with and without section prefix
        for cfg_key, cfg_value in [
            ('additional_free_space', '2G'),
            ('repository.append_only', '1'),
        ]:
            output = self.cmd('config', self.repository_location, cfg_key)
            assert output == '0' + '\n'
            self.cmd('config', self.repository_location, cfg_key, cfg_value)
            output = self.cmd('config', self.repository_location, cfg_key)
            assert output == cfg_value + '\n'
            self.cmd('config', '--delete', self.repository_location, cfg_key)
            self.cmd('config', self.repository_location, cfg_key, exit_code=1)

        # error paths: conflicting flags, missing option, unknown option
        self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2)
        self.cmd('config', self.repository_location, exit_code=2)
        self.cmd('config', self.repository_location, 'invalid-option', exit_code=1)
requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.')
requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.')
    @requires_gnutar
    def test_export_tar(self):
        """`export-tar` output extracted with GNU tar must reproduce the input tree."""
        self.create_test_files()
        os.unlink('input/flagfile')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress')
        with changedir('output'):
            # This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask.
            subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
        self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
@requires_gzip
def test_export_tar_gz(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list')
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/input', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
def test_export_tar_strip_components(self):
if not shutil.which('gzip'):
pytest.skip('gzip is not installed')
self.create_test_files()
os.unlink('input/flagfile')
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
list = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list')
# --list's path are those before processing with --strip-components
assert 'input/file1\n' in list
assert 'input/dir2\n' in list
with changedir('output'):
subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
self.assert_dirs_equal('input', 'output/', ignore_flags=True, ignore_xattrs=True, ignore_ns=True)
    @requires_hardlinks
    @requires_gnutar
    def test_export_tar_strip_components_links(self):
        """Hardlink groups must survive `export-tar --strip-components`."""
        self._extract_hardlinks_setup()
        self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2')
        with changedir('output'):
            subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
    @requires_hardlinks
    @requires_gnutar
    def test_extract_hardlinks_tar(self):
        """Hardlink groups must survive a path-restricted `export-tar`."""
        self._extract_hardlinks_setup()
        self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1')
        with changedir('output'):
            subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
def test_detect_attic_repo(self):
path = make_attic_repo(self.repository_path)
cmds = [
['create', path + '::test', self.tmpdir],
['extract', path + '::test'],
['check', path],
['rename', path + '::test', 'newname'],
['list', path],
['delete', path],
['prune', path],
['info', path + '::test'],
['key', 'export', path, 'exported'],
['key', 'import', path, 'import'],
['key', 'change-passphrase', path],
['break-lock', path],
]
for args in cmds:
output = self.cmd(*args, fork=True, exit_code=2)
assert 'Attic repository detected.' in output
@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):
    """Run the archiver test suite against the standalone borg binary, forking a
    subprocess per command, with tests that patch in-process objects skipped."""
    EXE = 'borg.exe'
    FORK_DEFAULT = True

    @unittest.skip('does not raise Exception, but sets rc==2')
    def test_init_parent_dirs(self):
        pass

    @unittest.skip('patches objects')
    def test_init_interrupt(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_capabilities(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_xattrs_errors(self):
        pass

    @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.')
    def test_basic_functionality(self):
        pass

    @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.')
    def test_overwrite(self):
        pass

    def test_fuse(self):
        if fakeroot_detected():
            # bug fix: calling unittest.skip(...) here only *built* a decorator and
            # discarded it - a silent no-op, so under fakeroot the test was reported
            # as passed without running or being recorded as skipped.
            # self.skipTest() raises SkipTest and records the skip properly.
            self.skipTest('test_fuse with the binary is not compatible with fakeroot')
        else:
            super().test_fuse()
class ArchiverCheckTestCase(ArchiverTestCaseBase):
    """Tests for ``borg check`` and ``borg check --repair``.

    setUp creates a repokey repository holding two archives made from the
    same source tree; each test then damages repository or archive data in
    a specific way and verifies that check detects the damage (exit code 1)
    and that --repair brings the repository back to a passing state.
    """
    def setUp(self):
        super().setUp()
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            # tiny item buffer -> the archive item metadata stream is split
            # over many chunks, exercising the metadata-following code paths
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.create_src_archive('archive1')
            self.create_src_archive('archive2')
    def test_check_usage(self):
        """Exercise the main option combinations (--repository-only,
        --archives-only, --prefix, --first/--last) on an intact repository."""
        output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        self.assert_in('Checking segments', output)
        # reset logging to new process default to avoid need for fork=True on next check
        logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
        output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
        self.assert_in('Starting repository check', output)
        self.assert_not_in('Starting archive consistency check', output)
        self.assert_not_in('Checking segments', output)
        output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
        self.assert_not_in('Starting repository check', output)
        self.assert_in('Starting archive consistency check', output)
        output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_not_in('archive2', output)
        output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0)
        self.assert_not_in('archive1', output)
        self.assert_in('archive2', output)
    def test_missing_file_chunk(self):
        """Delete one file-content chunk, verify the file is reported broken,
        then back up the source again (restoring the chunk to the repo) and
        verify --repair completely heals the file in the old archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    valid_chunks = item.chunks
                    killed_chunk = valid_chunks[-1]
                    repository.delete(killed_chunk.id)
                    break
            else:
                self.fail('should not happen')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.assert_in('New missing file chunk detected', output)
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_in('broken#', output)
        # check that the file in the old archives has now a different chunk list without the killed chunk
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_not_equal(valid_chunks, item.chunks)
                        self.assert_not_in(killed_chunk, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # do a fresh backup (that will include the killed chunk)
        with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
            self.create_src_archive('archive3')
        # check should be able to heal the file now:
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('Healed previously missing file chunk', output)
        self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output)
        # check that the file in the old archives has the correct chunks again
        for archive_name in ('archive1', 'archive2'):
            archive, repository = self.open_archive(archive_name)
            with repository:
                for item in archive.iter_items():
                    if item.path.endswith('testsuite/archiver.py'):
                        self.assert_equal(valid_chunks, item.chunks)
                        break
                else:
                    self.fail('should not happen')
        # list is also all-healthy again
        output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
        self.assert_not_in('broken#', output)
    def test_missing_archive_item_chunk(self):
        """Deleting one chunk of an archive's item metadata stream must be
        detected and repaired."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.metadata.items[0])
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_archive_metadata(self):
        """Deleting an archive's top-level metadata chunk must be detected and
        repaired."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(archive.id)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_missing_manifest(self):
        """With the manifest deleted, --repair must rebuild it and re-discover
        both archives (their names appear in the verbose repair output)."""
        archive, repository = self.open_archive('archive1')
        with repository:
            repository.delete(Manifest.MANIFEST_ID)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_corrupted_manifest(self):
        """A manifest with garbage appended must be rebuilt by --repair,
        recovering both archives."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive1', output)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_corrupted_chunk(self):
        """Manifest rebuild must tolerate a corrupted archive metadata chunk
        and still recover the remaining intact archive."""
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            chunk = repository.get(archive.id)
            corrupted_chunk = chunk + b'corrupted!'
            repository.put(archive.id, corrupted_chunk)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
        self.assert_in('archive2', output)
        self.cmd('check', self.repository_location, exit_code=0)
    def test_manifest_rebuild_duplicate_archive(self):
        """If manifest rebuild finds two archives with the same name, one must
        be listed under a deduplicated name (archive1 and archive1.1)."""
        archive, repository = self.open_archive('archive1')
        key = archive.key
        with repository:
            manifest = repository.get(Manifest.MANIFEST_ID)
            corrupted_manifest = manifest + b'corrupted!'
            repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
            # hand-craft a second archive metadata object also named 'archive1'
            archive = msgpack.packb({
                'cmdline': [],
                'items': [],
                'hostname': 'foo',
                'username': 'bar',
                'name': 'archive1',
                'time': '2016-12-15T18:49:51.849711',
                'version': 1,
            })
            archive_id = key.id_hash(archive)
            repository.put(archive_id, key.encrypt(archive))
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        output = self.cmd('list', self.repository_location)
        self.assert_in('archive1', output)
        self.assert_in('archive1.1', output)
        self.assert_in('archive2', output)
    def test_extra_chunks(self):
        """An orphaned chunk (present in the repo but referenced by no
        archive) must make check fail; after --repair, check passes again and
        extraction still works."""
        self.cmd('check', self.repository_location, exit_code=0)
        with Repository(self.repository_location, exclusive=True) as repository:
            repository.put(b'01234567890123456789012345678901', b'xxxx')
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', self.repository_location, exit_code=1)
        self.cmd('check', '--repair', self.repository_location, exit_code=0)
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
    def _test_verify_data(self, *init_args):
        """Shared body for the --verify-data tests.

        Appends garbage to one stored file chunk (its id stays the same, so a
        plain check does not notice), then verifies that only check
        --verify-data reports the integrity error and that --repair flags the
        chunk as missing.
        """
        shutil.rmtree(self.repository_path)
        self.cmd('init', self.repository_location, *init_args)
        self.create_src_archive('archive1')
        archive, repository = self.open_archive('archive1')
        with repository:
            for item in archive.iter_items():
                if item.path.endswith('testsuite/archiver.py'):
                    chunk = item.chunks[-1]
                    data = repository.get(chunk.id) + b'1234'
                    repository.put(chunk.id, data)
                    break
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=0)
        output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        # repair (heal is tested in another test)
        output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0)
        assert bin_to_hex(chunk.id) + ', integrity error' in output
        assert 'testsuite/archiver.py: New missing file chunk detected' in output
    def test_verify_data(self):
        self._test_verify_data('--encryption', 'repokey')
    def test_verify_data_unencrypted(self):
        self._test_verify_data('--encryption', 'none')
    def test_empty_repository(self):
        """A repository stripped of all objects (manifest included) must fail
        the check."""
        with Repository(self.repository_location, exclusive=True) as repository:
            for id_ in repository.list():
                repository.delete(id_)
            repository.commit(compact=False)
        self.cmd('check', self.repository_location, exit_code=1)
    def test_attic013_acl_bug(self):
        """An item carrying the spurious b'acl'=None pair (Attic <= 0.13 bug)
        must not break check or list."""
        # Attic up to release 0.13 contained a bug where every item unintentionally received
        # a b'acl'=None key-value pair.
        # This bug can still live on in Borg repositories (through borg upgrade).
        class Attic013Item:
            def as_dict(self):
                return {
                    # These are required
                    b'path': '1234',
                    b'mtime': 0,
                    b'mode': 0,
                    b'user': b'0',
                    b'group': b'0',
                    b'uid': 0,
                    b'gid': 0,
                    # acl is the offending key.
                    b'acl': None,
                }
        archive, repository = self.open_archive('archive1')
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True)
                archive.items_buffer.add(Attic013Item())
                archive.save()
        self.cmd('check', self.repository_location, exit_code=0)
        self.cmd('list', self.repository_location + '::0.13', exit_code=0)
class ManifestAuthenticationTest(ArchiverTestCaseBase):
    """Tests for TAM (tamper-evident) authentication of the manifest."""
    def spoof_manifest(self, repository):
        """Overwrite the manifest with a forged, unauthenticated one.

        The forged manifest is empty (no archives) and carries a timestamp
        one day in the future; it is encrypted with the repo key but has no
        TAM, emulating a pre-TAM writer or an attacker without the TAM key.
        """
        with repository:
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'config': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
    def test_fresh_init_tam_required(self):
        """A freshly initialized repository requires a TAM: replacing the
        manifest with an unauthenticated one must make 'borg list' raise
        TAMRequiredError."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
    def test_not_required(self):
        """A legacy repository (key with tam_required=False, manifest without
        a TAM) keeps working; 'borg upgrade --tam' then enforces
        authentication so a spoofed manifest is rejected."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            # emulate a pre-TAM repository: drop the security dir, clear the
            # key's tam_required flag and strip the TAM from the manifest
            shutil.rmtree(get_security_dir(bin_to_hex(repository.id)))
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            key.tam_required = False
            key.change_passphrase(key._passphrase)
            manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID)))
            del manifest[b'tam']
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest)))
            repository.commit(compact=False)
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM not found and not required' in output
        # Run upgrade
        self.cmd('upgrade', '--tam', self.repository_location)
        # Manifest must be authenticated now
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM-verified manifest' in output
        # Try to spoof / modify pre-1.0.9
        self.spoof_manifest(repository)
        # Fails
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
        # Force upgrade
        self.cmd('upgrade', '--tam', '--force', self.repository_location)
        self.cmd('list', self.repository_location)
    def test_disable(self):
        """upgrade --disable-tam turns authentication off: a subsequently
        spoofed (empty) manifest is accepted and list prints nothing."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        assert not self.cmd('list', self.repository_location)
    def test_disable2(self):
        """Like test_disable, but the manifest is spoofed before the
        --disable-tam upgrade runs."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        assert not self.cmd('list', self.repository_location)
class RemoteArchiverTestCase(ArchiverTestCase):
    """Re-run the archiver suite through the remote (ssh://) transport, plus
    remote-only tests for 'borg serve' path/repository restrictions."""
    # prefix routes all repository locations through the remote code path
    prefix = '__testsuite__:'
    def open_repository(self):
        """Open the test repository via RemoteRepository instead of directly."""
        return RemoteRepository(Location(self.repository_location))
    def test_remote_repo_restrict_to_path(self):
        """'borg serve --restrict-to-path' must allow exactly the given paths
        (including their subtrees) and reject all others with PathNotAllowed."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # restricted to repo directory itself, fail for other directories with same prefix:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_0')
        # restricted to a completely different path:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_1')
        path_prefix = os.path.dirname(self.repository_path)
        # restrict to repo directory's parent directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_2')
        # restrict to repo directory's parent directory and another directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_3')
    def test_remote_repo_restrict_to_repository(self):
        """'--restrict-to-repository' allows only the repository directory
        itself -- unlike --restrict-to-path, a parent directory is rejected."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        parent_path = os.path.join(self.repository_path, '..')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location)
    @unittest.skip('only works locally')
    def test_debug_put_get_delete_obj(self):
        pass
    @unittest.skip('only works locally')
    def test_config(self):
        pass
    @unittest.skip('only works locally')
    def test_migrate_lock_alive(self):
        pass
    def test_strip_components_doesnt_leak(self):
        """extract --strip-components must not leave cached responses behind
        in the RemoteRepository (checked via the --debug output marker)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file', contents=b"test file contents 1")
        self.create_regular_file('dir/file2', contents=b"test file contents 2")
        self.create_regular_file('skipped-file1', contents=b"test file contents 3")
        self.create_regular_file('skipped-file2', contents=b"test file contents 4")
        self.create_regular_file('skipped-file3', contents=b"test file contents 5")
        self.cmd('create', self.repository_location + '::test', 'input')
        marker = 'cached responses left in RemoteRepository'
        with changedir('output'):
            # try all strip depths; none may report leftover cached responses
            res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(marker not in res)
            with self.assert_creates_file('file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2')
                self.assert_true(marker not in res)
            with self.assert_creates_file('dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1')
                self.assert_true(marker not in res)
            with self.assert_creates_file('input/dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0')
                self.assert_true(marker not in res)
class ArchiverCorruptionTestCase(ArchiverTestCaseBase):
    """Tests for detection of (and recovery from) corruption in the local
    cache files (chunks index, files cache, cached archive indexes)."""
    def setUp(self):
        super().setUp()
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # cache directory of this repository, as reported by 'borg info --json'
        self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path']
    def corrupt(self, file, amount=1):
        """Flip (bitwise-invert) the last *amount* bytes of *file* in place."""
        with open(file, 'r+b') as fd:
            fd.seek(-amount, io.SEEK_END)
            corrupted = bytes(255-c for c in fd.read(amount))
            fd.seek(-amount, io.SEEK_END)
            fd.write(corrupted)
    def test_cache_chunks(self):
        """A corrupted chunks index must make borg fail with a
        FileIntegrityError (rc=2 when run in a subprocess)."""
        self.corrupt(os.path.join(self.cache_path, 'chunks'))
        if self.FORK_DEFAULT:
            out = self.cmd('info', self.repository_location, exit_code=2)
            assert 'failed integrity check' in out
        else:
            with pytest.raises(FileIntegrityError):
                self.cmd('info', self.repository_location)
    def test_cache_files(self):
        """A corrupted files cache is survivable: borg warns and continues."""
        self.cmd('create', self.repository_location + '::test', 'input')
        self.corrupt(os.path.join(self.cache_path, 'files'))
        out = self.cmd('create', self.repository_location + '::test1', 'input')
        # borg warns about the corrupt files cache, but then continues without files cache.
        assert 'files cache is corrupted' in out
    def test_chunks_archive(self):
        """A corrupted per-archive chunk index in chunks.archive.d must be
        detected during cache sync and rebuilt automatically."""
        self.cmd('create', self.repository_location + '::test1', 'input')
        # Find ID of test1 so we can corrupt it later :)
        target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip()
        self.cmd('create', self.repository_location + '::test2', 'input')
        # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('info', self.repository_location, '--json')
        chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d')
        assert len(os.listdir(chunks_archive)) == 4  # two archives, one chunks cache and one .integrity file each
        self.corrupt(os.path.join(chunks_archive, target_id + '.compact'))
        # Trigger cache sync by changing the manifest ID in the cache config
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        # Cache sync notices corrupted archive chunks, but automatically recovers.
        out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1)
        assert 'Reading cached archive chunk index for test1' in out
        assert 'Cached archive chunk index of test1 is corrupted' in out
        assert 'Fetching and building archive index for test1' in out
    def test_old_version_interfered(self):
        """A manifest ID changed without matching integrity data (as an old
        borg version would leave behind) must be reported."""
        # Modify the main manifest ID without touching the manifest ID in the integrity section.
        # This happens if a version without integrity checking modifies the cache.
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        out = self.cmd('info', self.repository_location)
        assert 'Cache integrity data not available: old Borg version modified the cache.' in out
class DiffArchiverTestCase(ArchiverTestCaseBase):
    """Tests for 'borg diff' between two archives."""
    def test_basic_functionality(self):
        """Build two snapshots covering add/remove/modify of files, dirs,
        symlinks and hardlinks, then verify 'borg diff' output in both the
        plain-text and --json-lines formats, with and without comparable
        chunk ids (test1b uses different chunker params, so ids differ)."""
        # Setup files for the first snapshot
        self.create_regular_file('empty', size=0)
        self.create_regular_file('file_unchanged', size=128)
        self.create_regular_file('file_removed', size=256)
        self.create_regular_file('file_removed2', size=512)
        self.create_regular_file('file_replaced', size=1024)
        os.mkdir('input/dir_replaced_with_file')
        os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755)
        os.mkdir('input/dir_removed')
        if are_symlinks_supported():
            os.mkdir('input/dir_replaced_with_link')
            os.symlink('input/dir_replaced_with_file', 'input/link_changed')
            os.symlink('input/file_unchanged', 'input/link_removed')
            os.symlink('input/file_removed2', 'input/link_target_removed')
            os.symlink('input/empty', 'input/link_target_contents_changed')
            os.symlink('input/empty', 'input/link_replaced_by_file')
        if are_hardlinks_supported():
            os.link('input/file_replaced', 'input/hardlink_target_replaced')
            os.link('input/empty', 'input/hardlink_contents_changed')
            os.link('input/file_removed', 'input/hardlink_removed')
            os.link('input/file_removed2', 'input/hardlink_target_removed')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Create the first snapshot
        self.cmd('create', self.repository_location + '::test0', 'input')
        # Setup files for the second snapshot
        self.create_regular_file('file_added', size=2048)
        self.create_regular_file('file_empty_added', size=0)
        os.unlink('input/file_replaced')
        self.create_regular_file('file_replaced', contents=b'0' * 4096)
        os.unlink('input/file_removed')
        os.unlink('input/file_removed2')
        os.rmdir('input/dir_replaced_with_file')
        self.create_regular_file('dir_replaced_with_file', size=8192)
        os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755)
        os.mkdir('input/dir_added')
        os.rmdir('input/dir_removed')
        if are_symlinks_supported():
            os.rmdir('input/dir_replaced_with_link')
            os.symlink('input/dir_added', 'input/dir_replaced_with_link')
            os.unlink('input/link_changed')
            os.symlink('input/dir_added', 'input/link_changed')
            os.symlink('input/dir_added', 'input/link_added')
            os.unlink('input/link_replaced_by_file')
            self.create_regular_file('link_replaced_by_file', size=16384)
            os.unlink('input/link_removed')
        if are_hardlinks_supported():
            os.unlink('input/hardlink_removed')
            os.link('input/file_added', 'input/hardlink_added')
        with open('input/empty', 'ab') as fd:
            fd.write(b'appended_data')
        # Create the second snapshot
        self.cmd('create', self.repository_location + '::test1a', 'input')
        self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input')
        def do_asserts(output, can_compare_ids):
            """Check the plain-text diff output; can_compare_ids=False means
            chunker params differ, so sizes cannot be computed and items show
            up as 'modified' without byte counts."""
            # File contents changed (deleted and replaced with a new file)
            change = 'B' if can_compare_ids else '{:<19}'.format('modified')
            assert 'file_replaced' in output  # added to debug #3494
            assert '{} input/file_replaced'.format(change) in output
            # File unchanged
            assert 'input/file_unchanged' not in output
            # Directory replaced with a regular file
            if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
                assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output
            # Basic directory cases
            assert 'added directory input/dir_added' in output
            assert 'removed directory input/dir_removed' in output
            if are_symlinks_supported():
                # Basic symlink cases
                assert 'changed link input/link_changed' in output
                assert 'added link input/link_added' in output
                assert 'removed link input/link_removed' in output
                # Symlink replacing or being replaced
                assert '] input/dir_replaced_with_link' in output
                assert '] input/link_replaced_by_file' in output
                # Symlink target removed. Should not affect the symlink at all.
                assert 'input/link_target_removed' not in output
            # The inode has two links and the file contents changed. Borg
            # should notice the changes in both links. However, the symlink
            # pointing to the file is not changed.
            change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
            assert '{} input/empty'.format(change) in output
            if are_hardlinks_supported():
                assert '{} input/hardlink_contents_changed'.format(change) in output
            if are_symlinks_supported():
                assert 'input/link_target_contents_changed' not in output
            # Added a new file and a hard link to it. Both links to the same
            # inode should appear as separate files.
            assert 'added 2.05 kB input/file_added' in output
            if are_hardlinks_supported():
                assert 'added 2.05 kB input/hardlink_added' in output
            # check if a diff between non-existent and empty new file is found
            assert 'added 0 B input/file_empty_added' in output
            # The inode has two links and both of them are deleted. They should
            # appear as two deleted files.
            assert 'removed 256 B input/file_removed' in output
            if are_hardlinks_supported():
                assert 'removed 256 B input/hardlink_removed' in output
            # Another link (marked previously as the source in borg) to the
            # same inode was removed. This should not change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_removed' not in output
            # Another link (marked previously as the source in borg) to the
            # same inode was replaced with a new regular file. This should not
            # change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_replaced' not in output
        def do_json_asserts(output, can_compare_ids):
            """Check the --json-lines diff output (one JSON object per line)."""
            def get_changes(filename, data):
                """Return the flattened change list for *filename* (each path
                must occur at most once in the diff output)."""
                chgsets = [j['changes'] for j in data if j['path'] == filename]
                assert len(chgsets) < 2
                # return a flattened list of changes for given filename
                return [chg for chgset in chgsets for chg in chgset]
            # convert output to list of dicts
            joutput = [json.loads(line) for line in output.split('\n') if line]
            # File contents changed (deleted and replaced with a new file)
            expected = {'type': 'modified', 'added': 4096, 'removed': 1024} if can_compare_ids else {'type': 'modified'}
            assert expected in get_changes('input/file_replaced', joutput)
            # File unchanged
            assert not any(get_changes('input/file_unchanged', joutput))
            # Directory replaced with a regular file
            if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
                assert {'type': 'mode', 'old_mode': 'drwxr-xr-x', 'new_mode': '-rwxr-xr-x'} in \
                    get_changes('input/dir_replaced_with_file', joutput)
            # Basic directory cases
            assert {'type': 'added directory'} in get_changes('input/dir_added', joutput)
            assert {'type': 'removed directory'} in get_changes('input/dir_removed', joutput)
            if are_symlinks_supported():
                # Basic symlink cases
                assert {'type': 'changed link'} in get_changes('input/link_changed', joutput)
                assert {'type': 'added link'} in get_changes('input/link_added', joutput)
                assert {'type': 'removed link'} in get_changes('input/link_removed', joutput)
                # Symlink replacing or being replaced
                assert any(chg['type'] == 'mode' and chg['new_mode'].startswith('l') for chg in
                           get_changes('input/dir_replaced_with_link', joutput))
                assert any(chg['type'] == 'mode' and chg['old_mode'].startswith('l') for chg in
                           get_changes('input/link_replaced_by_file', joutput))
                # Symlink target removed. Should not affect the symlink at all.
                assert not any(get_changes('input/link_target_removed', joutput))
            # The inode has two links and the file contents changed. Borg
            # should notice the changes in both links. However, the symlink
            # pointing to the file is not changed.
            expected = {'type': 'modified', 'added': 13, 'removed': 0} if can_compare_ids else {'type': 'modified'}
            assert expected in get_changes('input/empty', joutput)
            if are_hardlinks_supported():
                assert expected in get_changes('input/hardlink_contents_changed', joutput)
            if are_symlinks_supported():
                assert not any(get_changes('input/link_target_contents_changed', joutput))
            # Added a new file and a hard link to it. Both links to the same
            # inode should appear as separate files.
            assert {'type': 'added', 'size': 2048} in get_changes('input/file_added', joutput)
            if are_hardlinks_supported():
                assert {'type': 'added', 'size': 2048} in get_changes('input/hardlink_added', joutput)
            # check if a diff between non-existent and empty new file is found
            assert {'type': 'added', 'size': 0} in get_changes('input/file_empty_added', joutput)
            # The inode has two links and both of them are deleted. They should
            # appear as two deleted files.
            assert {'type': 'removed', 'size': 256} in get_changes('input/file_removed', joutput)
            if are_hardlinks_supported():
                assert {'type': 'removed', 'size': 256} in get_changes('input/hardlink_removed', joutput)
            # Another link (marked previously as the source in borg) to the
            # same inode was removed. This should not change this link at all.
            if are_hardlinks_supported():
                assert not any(get_changes('input/hardlink_target_removed', joutput))
            # Another link (marked previously as the source in borg) to the
            # same inode was replaced with a new regular file. This should not
            # change this link at all.
            if are_hardlinks_supported():
                assert not any(get_changes('input/hardlink_target_replaced', joutput))
        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True)
        # We expect exit_code=1 due to the chunker params warning
        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False)
        do_json_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a', '--json-lines'), True)
    def test_sort_option(self):
        """'borg diff --sort' must order the output lines by path name."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('a_file_removed', size=8)
        self.create_regular_file('f_file_removed', size=16)
        self.create_regular_file('c_file_changed', size=32)
        self.create_regular_file('e_file_changed', size=64)
        self.cmd('create', self.repository_location + '::test0', 'input')
        os.unlink('input/a_file_removed')
        os.unlink('input/f_file_removed')
        os.unlink('input/c_file_changed')
        os.unlink('input/e_file_changed')
        self.create_regular_file('c_file_changed', size=512)
        self.create_regular_file('e_file_changed', size=1024)
        self.create_regular_file('b_file_added', size=128)
        self.create_regular_file('d_file_added', size=256)
        self.cmd('create', self.repository_location + '::test1', 'input')
        output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1')
        expected = [
            'a_file_removed',
            'b_file_added',
            'c_file_changed',
            'd_file_added',
            'e_file_changed',
            'f_file_removed',
        ]
        # each output line (in order) must mention the correspondingly-named file
        assert all(x in line for x, line in zip(expected, output.splitlines()))
def test_get_args():
    """Check that Archiver.get_args() keeps server-enforced restrictions.

    The first argument is argv as produced by an ssh forced command, the
    second is the client-controlled SSH_ORIGINAL_COMMAND string; a client
    must not be able to weaken --restrict-to-* options or swap in another
    subcommand.
    """
    archiver = Archiver()
    forced_paths = ['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ]
    forced_repos = ['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ]
    # benign client command: options from both sides get merged
    parsed = archiver.get_args(['borg', 'serve', '--umask=0027', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ],
                               'borg serve --info')
    assert parsed.func == archiver.do_serve
    assert parsed.restrict_to_paths == ['/p1', '/p2']
    assert parsed.umask == 0o027
    assert parsed.log_level == 'info'
    # same, but restricting to repositories instead of paths
    parsed = archiver.get_args(forced_repos, 'borg serve --info --umask=0027')
    assert parsed.restrict_to_repositories == ['/r1', '/r2']
    # client tries to break out of the path restriction
    parsed = archiver.get_args(forced_paths, 'borg serve --restrict-to-path=/')
    assert parsed.restrict_to_paths == ['/p1', '/p2']
    # client tries to break out of the repository restriction
    parsed = archiver.get_args(forced_repos, 'borg serve --restrict-to-repository=/')
    assert parsed.restrict_to_repositories == ['/r1', '/r2']
    # client tries to narrow to a directory below the allowed repository
    parsed = archiver.get_args(forced_repos, 'borg serve --restrict-to-repository=/r1/below')
    assert parsed.restrict_to_repositories == ['/r1', '/r2']
    # client tries to execute a different subcommand entirely
    parsed = archiver.get_args(forced_paths, 'borg init --encryption=repokey /')
    assert parsed.func == archiver.do_serve
    # Check that environment variables in the forced command don't cause issues. If the command
    # were not forced, environment variables would be interpreted by the shell, but this does not
    # happen for forced commands - we get the verbatim command line and need to deal with env vars.
    parsed = archiver.get_args(['borg', 'serve', ], 'BORG_FOO=bar borg serve --info')
    assert parsed.func == archiver.do_serve
def test_chunk_content_equal():
    """ItemDiff._chunk_content_equal must compare chunk streams by their
    byte content, independent of how the bytes are split into chunks,
    and must be symmetric in its two arguments."""
    def ccc(a, b):
        # Run the comparison in both argument orders and require the same
        # answer, so asymmetry bugs are caught by every case below.
        chunks_a = [data for data in a]
        chunks_b = [data for data in b]
        compare1 = ItemDiff._chunk_content_equal(iter(chunks_a), iter(chunks_b))
        compare2 = ItemDiff._chunk_content_equal(iter(chunks_b), iter(chunks_a))
        assert compare1 == compare2
        return compare1

    # same bytes, different chunk boundaries -> equal
    assert ccc([
        b'1234', b'567A', b'bC'
    ], [
        b'1', b'23', b'4567A', b'b', b'C'
    ])
    # one iterator exhausted before the other
    assert not ccc([
        b'12345',
    ], [
        b'1234', b'56'
    ])
    # content mismatch
    assert not ccc([
        b'1234', b'65'
    ], [
        b'1234', b'56'
    ])
    # first is the prefix of second
    assert not ccc([
        b'1234', b'56'
    ], [
        b'1234', b'565'
    ])
class TestBuildFilter:
    """Tests for Archiver.build_filter (pattern matching + strip_components)."""

    @staticmethod
    def peek_and_store_hardlink_masters(item, matched):
        # No-op stand-in for the archiver callback; these tests only care
        # about the boolean filter result.
        pass

    def test_basic(self):
        """An include pattern matches the path itself and everything below it."""
        matcher = PatternMatcher()
        matcher.add([parse_pattern('included')], IECommand.Include)
        filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert filter(Item(path='included'))
        assert filter(Item(path='included/file'))
        assert not filter(Item(path='something else'))

    def test_empty(self):
        """With a fallback-True matcher and no patterns, everything passes."""
        matcher = PatternMatcher(fallback=True)
        filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert filter(Item(path='anything'))

    def test_strip_components(self):
        """Items with no more path components than strip_components are dropped."""
        matcher = PatternMatcher(fallback=True)
        filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1)
        assert not filter(Item(path='shallow'))
        assert not filter(Item(path='shallow/'))  # can this even happen? paths are normalized...
        assert filter(Item(path='deep enough/file'))
        assert filter(Item(path='something/dir/file'))
class TestCommonOptions:
    """Tests for Archiver.CommonOptions: common flags must be accepted both
    before and after the subcommand, and the subcommand-level occurrence
    must take precedence when resolve() merges the two levels."""

    @staticmethod
    def define_common_options(add_common_option):
        """Register a small, representative set of common options
        (store_const, append, store_true, typed value)."""
        add_common_option('-h', '--help', action='help', help='show this help message and exit')
        add_common_option('--critical', dest='log_level', help='foo',
                          action='store_const', const='critical', default='warning')
        add_common_option('--error', dest='log_level', help='foo',
                          action='store_const', const='error', default='warning')
        add_common_option('--append', dest='append', help='foo',
                          action='append', metavar='TOPIC', default=[])
        add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo')
        add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
                          help='(default: %(default)d).')

    @pytest.fixture
    def basic_parser(self):
        """Top-level parser wired with CommonOptions; level0 (top level)
        takes precedence over level1 (subcommand) in suffix resolution."""
        parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False)
        parser.common_options = Archiver.CommonOptions(self.define_common_options,
                                                       suffix_precedence=('_level0', '_level1'))
        return parser

    @pytest.fixture
    def subparsers(self, basic_parser):
        """Subparsers action on the basic parser (version-dependent call)."""
        if sys.version_info >= (3, 7):
            # py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False)
        else:
            # py36 does not support required=... argument (but behaves like required=False).
            # note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more.
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>')

    @pytest.fixture
    def parser(self, basic_parser):
        """basic_parser with the level0 common group attached (provides defaults)."""
        basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True)
        return basic_parser

    @pytest.fixture
    def common_parser(self, parser):
        """Parent parser carrying the level1 common group for subcommands."""
        common_parser = argparse.ArgumentParser(add_help=False, prog='test')
        parser.common_options.add_common_group(common_parser, '_level1')
        return common_parser

    @pytest.fixture
    def parse_vars_from_line(self, parser, subparsers, common_parser):
        """Return a helper that parses an argv-style line, resolves common
        options across levels, and returns the resulting namespace as a dict."""
        subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False,
                                          description='foo', epilog='bar', help='baz',
                                          formatter_class=argparse.RawDescriptionHelpFormatter)
        subparser.set_defaults(func=1234)
        subparser.add_argument('--append-only', dest='append_only', action='store_true')

        def parse_vars_from_line(*line):
            print(line)
            args = parser.parse_args(line)
            parser.common_options.resolve(args)
            return vars(args)
        return parse_vars_from_line

    def test_simple(self, parse_vars_from_line):
        """Subcommand-level value wins; append accumulates across levels;
        subcommand-only options are rejected before the subcommand."""
        assert parse_vars_from_line('--error') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'error',
            'progress': False
        }

        assert parse_vars_from_line('--error', 'subcommand', '--critical') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'critical',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

        with pytest.raises(SystemExit):
            parse_vars_from_line('--append-only', 'subcommand')

        assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == {
            'append': ['foo', 'bar', 'baz'],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

    @pytest.mark.parametrize('position', ('before', 'after', 'both'))
    @pytest.mark.parametrize('flag,args_key,args_value', (
        ('-p', 'progress', True),
        ('--lock-wait=3', 'lock_wait', 3),
    ))
    def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value):
        """A common flag must produce the same result whether it appears
        before the subcommand, after it, or in both places."""
        line = []
        if position in ('before', 'both'):
            line.append(flag)
        line.append('subcommand')
        if position in ('after', 'both'):
            line.append(flag)

        result = {
            'append': [],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        result[args_key] = args_value

        assert parse_vars_from_line(*line) == result
def test_parse_storage_quota():
    """parse_storage_quota accepts human-readable sizes (decimal units,
    so '50M' is 50 * 1000**2) and rejects quotas below the allowed minimum."""
    assert parse_storage_quota('50M') == 50 * 1000**2
    with pytest.raises(argparse.ArgumentTypeError):
        parse_storage_quota('5M')
def get_all_parsers():
    """Collect every borg/borgfs (sub)command parser, keyed by its full
    command path (e.g. 'key export'), by walking the subparser tree."""
    root = Archiver(prog='borg').build_parser()
    borgfs_parser = Archiver(prog='borgfs').build_parser()
    parsers = {}

    def walk(prefix, node, extra_choices=None):
        # Gather the subcommands reachable directly from *node*.
        subcommands = {}
        for action in node._actions:
            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
                for name, sub in action.choices.items():
                    subcommands[prefix + name] = sub
        if extra_choices is not None:
            subcommands.update(extra_choices)
        # A non-root node without subcommands is a leaf; its parser was
        # already recorded by the caller.
        if prefix and not subcommands:
            return
        for command, sub in sorted(subcommands.items()):
            walk(command + " ", sub)
            parsers[command] = sub

    walk("", root, {'borgfs': borgfs_parser})
    return parsers
@pytest.mark.parametrize('command, parser', list(get_all_parsers().items()))
def test_help_formatting(command, parser):
    """Every command with a lazily-rendered rst epilog must have non-empty rst."""
    if isinstance(parser.epilog, RstToTextLazy):
        assert parser.epilog.rst
@pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items()))
def test_help_formatting_helptexts(topic, helptext):
    """Every help topic must render to a non-empty terminal string."""
    assert str(rst_to_terminal(helptext))
| true | true |
f727b88af81bc3a82c519d895f95d0e68a9f59ab | 1,583 | py | Python | sandbox/lib/jumpscale/JumpScale9Lib/clients/atyourservice/ays/ClientFactory.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 2 | 2017-06-07T08:11:47.000Z | 2017-11-10T02:19:48.000Z | JumpScale9Lib/clients/atyourservice/ays/ClientFactory.py | Jumpscale/lib9 | 82224784ef2a7071faeb48349007211c367bc673 | [
"Apache-2.0"
] | 188 | 2017-06-21T06:16:13.000Z | 2020-06-17T14:20:24.000Z | sandbox/lib/jumpscale/JumpScale9Lib/clients/atyourservice/ays/ClientFactory.py | Jumpscale/sandbox_linux | 2aacd36b467ef30ac83718abfa82c6883b67a02f | [
"Apache-2.0"
] | 3 | 2018-06-12T05:18:28.000Z | 2019-09-24T06:49:17.000Z | from js9 import j
from .client import Client
JSConfigBase = j.tools.configmanager.base_class_configs
JSBASE = j.application.jsbase_get_class()
class ClientFactory(JSConfigBase):
    """Config-managed JumpScale factory producing AYS REST API clients.

    Registered at ``j.clients.ays``; instances of ``Client`` are created
    and cached by the JSConfigBase machinery.
    """

    def __init__(self):
        # Location under which this factory is exposed in the jumpscale tree.
        self.__jslocation__ = 'j.clients.ays'
        JSConfigBase.__init__(self, Client)

    # NOTE(review): the constructors below were superseded by the config
    # manager; kept for reference.
    # def get(self, url=DEFAULT_URL, client_id=None, client_secret=None, validity=3600):
    #     """
    #     Get an AYS client to interact with a local or remote AYS server.
    #     Args:
    #         client_id: client ID of the API access key of the ItsYou.online organization protecting the AYS RESTful API; defaults to None
    #         client_secret: secret of the API access key of the ItsYou.online organization protecting the AYS RESTful API; defaults to None
    #         url: url of the AYS RESTful API, e.g. `http://172.25.0.238:5000`; defaults to https://localhost:5000
    #         validity: validity of the JWT that will be created based on the given client ID and secret, defaults to 3600 (seconds)
    #     """
    #     return Client(url=url, jwt=None, clientID=client_id, secret=client_secret, validity=validity)

    # def get_with_jwt(self, instance='main'):
    #     """
    #     Get an AYS client to interact with a local or remote AYS server.
    #     Args:
    #         url: url of the AYS RESTful API, e.g. `http://172.25.0.238:5000`; defaults to https://localhost:5000
    #         jwt: JSON Web Token for the ItsYou.online organization protecting the AYS RESTful API; defaults to None
    #     """
    #     return Client(instance=instance)
| 45.228571 | 140 | 0.672142 | from js9 import j
from .client import Client
JSConfigBase = j.tools.configmanager.base_class_configs
JSBASE = j.application.jsbase_get_class()
class ClientFactory(JSConfigBase):
def __init__(self):
self.__jslocation__ = 'j.clients.ays'
JSConfigBase.__init__(self, Client)
# Get an AYS client to interact with a local or remote AYS server.
# Args:
# client_id: client ID of the API access key of the ItsYou.online organization protecting the AYS RESTful API; defaults to None
# client_secret: secret of the API access key of the ItsYou.online organization protecting the AYS RESTful API; defaults to None
# url: url of the AYS RESTful API, e.g. `http://172.25.0.238:5000`; defaults to https://localhost:5000
# validity: validity of the JWT that will be created based on the given client ID and secret, defaults to 3600 (seconds)
# """
# Get an AYS client to interact with a local or remote AYS server.
# Args:
# url: url of the AYS RESTful API, e.g. `http://172.25.0.238:5000`; defaults to https://localhost:5000
# jwt: JSON Web Token for the ItsYou.online organization protecting the AYS RESTful API; defaults to None
# """
| true | true |
f727b91907659f8e5e86ad39633e0f340dedda64 | 3,175 | py | Python | models/object_detection/model_templates/horizontal-text-detection/tools/draw_recall_graph.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 256 | 2020-09-09T03:27:57.000Z | 2022-03-30T10:06:06.000Z | models/object_detection/model_templates/horizontal-text-detection/tools/draw_recall_graph.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 604 | 2020-09-08T12:29:49.000Z | 2022-03-31T21:51:08.000Z | models/object_detection/model_templates/horizontal-text-detection/tools/draw_recall_graph.py | dqawami/openvino_training_extensions | dddda1dfd651eaae2d59cecda84275b1b03bd0ad | [
"Apache-2.0"
] | 160 | 2020-09-09T14:06:07.000Z | 2022-03-30T14:50:48.000Z | # Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
""" This script draws a graph of the detected words number (recall)
depending on their width. It helps to see the detection quality of the
small, normal or large inscriptions. Also for your convenience you may
visualize the detections straight away."""
import argparse
from os.path import exists
import subprocess
import mmcv
from mmdet.datasets import build_dataset # pylint: disable=import-error
from mmdet.core.evaluation.text_evaluation import text_eval # pylint: disable=import-error
def parse_args():
    """Parse and return this script's command-line arguments."""
    description = ('This script draws a histogram of the detected words '
                   'number (recall) depending on their width. It helps to '
                   'see the detection quality of the small, normal or large '
                   'inscriptions. Also for your convenience you may '
                   'visualize the detections straight away.')
    arg_parser = argparse.ArgumentParser(description=description)
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument('snapshot', help='path to snapshot to be tested')
    arg_parser.add_argument('--draw_graph', action='store_true', help='draw histogram of recall')
    arg_parser.add_argument('--visualize', action='store_true', help='show detection result on images')
    return arg_parser.parse_args()
def main():
    """Run detection (if needed), evaluate text recall/precision/hmean,
    and optionally draw the recall histogram / visualize detections."""
    args = parse_args()

    detection_file = 'horizontal_text_detection'
    # Produce detections in COCO json format via the mmdetection test tool;
    # reuse an existing dump from a previous run if present.
    if not exists(f'{detection_file}.bbox.json'):
        subprocess.run(
            f'python ../../../../../external/mmdetection/tools/test.py'
            f' {args.config} {args.snapshot}'
            f' --options jsonfile_prefix={detection_file}'
            f' --format-only',
            check=True, shell=True
        )

    cfg = mmcv.Config.fromfile(args.config)
    dataset = build_dataset(cfg.data.test)

    # Load predictions and pair them with ground truth, both indexed by image id.
    results = mmcv.load(f'{detection_file}.bbox.json')
    coco = dataset.coco
    coco_dets = coco.loadRes(results)
    predictions = coco_dets.imgToAnns
    gt_annotations = coco.imgToAnns
    if args.visualize:
        # text_eval shows detections on the images when paths are supplied.
        img_paths = [dataset.img_prefix + image['file_name']
                     for image in coco_dets.dataset['images']]
    else:
        img_paths = None
    recall, precision, hmean, _ = text_eval(
        predictions, gt_annotations,
        cfg.model.test_cfg.score_thr,
        images=img_paths,
        show_recall_graph=args.draw_graph)
    print('Text detection recall={:.4f} precision={:.4f} hmean={:.4f}'.
          format(recall, precision, hmean))
if __name__ == '__main__':
main()
| 36.918605 | 99 | 0.68 |
import argparse
from os.path import exists
import subprocess
import mmcv
from mmdet.datasets import build_dataset
from mmdet.core.evaluation.text_evaluation import text_eval
def parse_args():
parser = argparse.ArgumentParser(
description='This script draws a histogram of the detected words '
'number (recall) depending on their width. It helps to '
'see the detection quality of the small, normal or large '
'inscriptions. Also for your convenience you may '
'visualize the detections straight away.')
parser.add_argument('config', help='test config file path')
parser.add_argument('snapshot', help='path to snapshot to be tested')
parser.add_argument('--draw_graph', action='store_true', help='draw histogram of recall')
parser.add_argument('--visualize', action='store_true', help='show detection result on images')
args = parser.parse_args()
return args
def main():
args = parse_args()
detection_file = 'horizontal_text_detection'
if not exists(f'{detection_file}.bbox.json'):
subprocess.run(
f'python ../../../../../external/mmdetection/tools/test.py'
f' {args.config} {args.snapshot}'
f' --options jsonfile_prefix={detection_file}'
f' --format-only',
check=True, shell=True
)
cfg = mmcv.Config.fromfile(args.config)
dataset = build_dataset(cfg.data.test)
results = mmcv.load(f'{detection_file}.bbox.json')
coco = dataset.coco
coco_dets = coco.loadRes(results)
predictions = coco_dets.imgToAnns
gt_annotations = coco.imgToAnns
if args.visualize:
img_paths = [dataset.img_prefix + image['file_name']
for image in coco_dets.dataset['images']]
else:
img_paths = None
recall, precision, hmean, _ = text_eval(
predictions, gt_annotations,
cfg.model.test_cfg.score_thr,
images=img_paths,
show_recall_graph=args.draw_graph)
print('Text detection recall={:.4f} precision={:.4f} hmean={:.4f}'.
format(recall, precision, hmean))
if __name__ == '__main__':
main()
| true | true |
f727b9d4cab59ebd33276d381d0c931725d42173 | 2,391 | py | Python | session8/bloomfilter.py | zhiweih/pb-exercises | c5e64075c47503a40063aa836c06a452af14246d | [
"BSD-2-Clause"
] | 153 | 2017-09-27T01:10:19.000Z | 2022-03-17T12:13:59.000Z | session8/bloomfilter.py | zhiweih/pb-exercises | c5e64075c47503a40063aa836c06a452af14246d | [
"BSD-2-Clause"
] | 3 | 2018-11-10T20:04:13.000Z | 2022-02-15T23:12:53.000Z | session8/bloomfilter.py | zhiweih/pb-exercises | c5e64075c47503a40063aa836c06a452af14246d | [
"BSD-2-Clause"
] | 85 | 2017-10-09T16:18:00.000Z | 2022-02-09T14:21:08.000Z | from unittest import TestCase
from helper import bit_field_to_bytes, encode_varint, int_to_little_endian, murmur3
from network import GenericMessage
BIP37_CONSTANT = 0xfba4c795
class BloomFilter:
    """BIP0037 bloom filter over a bit field of size*8 bits."""

    def __init__(self, size, function_count, tweak):
        # size: filter size in bytes; the bit field holds size*8 bits
        self.size = size
        self.bit_field = [0] * (size * 8)
        # number of hash functions applied per added item
        self.function_count = function_count
        # per-filter random value mixed into every hash seed
        self.tweak = tweak

    def add(self, item):
        '''Add an item to the filter'''
        # iterate self.function_count number of times
        for i in range(self.function_count):
            # BIP0037 spec seed is i*BIP37_CONSTANT + self.tweak
            seed = i * BIP37_CONSTANT + self.tweak
            # get the murmur3 hash given that seed
            h = murmur3(item, seed=seed)
            # set the bit at the hash mod the bitfield size (self.size*8) to 1
            self.bit_field[h % (self.size * 8)] = 1

    def filter_bytes(self):
        '''Serialize the bit field into bytes.'''
        return bit_field_to_bytes(self.bit_field)

    def filterload(self, flag=1):
        '''Return a network message whose command is filterload'''
        # encode_varint self.size
        result = encode_varint(self.size)
        # next is the self.filter_bytes()
        result += self.filter_bytes()
        # function count is 4 bytes little endian
        result += int_to_little_endian(self.function_count, 4)
        # tweak is 4 bytes little endian
        result += int_to_little_endian(self.tweak, 4)
        # flag is 1 byte little endian
        result += int_to_little_endian(flag, 1)
        # return a GenericMessage with b'filterload' as the command
        return GenericMessage(b'filterload', result)
class BloomFilterTest(TestCase):
    """Checks BloomFilter against known-good bit fields and filterload payloads."""

    def test_add(self):
        """Bit field after each add matches the expected hex dump."""
        bf = BloomFilter(10, 5, 99)
        item = b'Hello World'
        bf.add(item)
        expected = '0000000a080000000140'
        self.assertEqual(bf.filter_bytes().hex(), expected)
        item = b'Goodbye!'
        bf.add(item)
        expected = '4000600a080000010940'
        self.assertEqual(bf.filter_bytes().hex(), expected)

    def test_filterload(self):
        """Serialized filterload message matches the expected wire bytes."""
        bf = BloomFilter(10, 5, 99)
        item = b'Hello World'
        bf.add(item)
        item = b'Goodbye!'
        bf.add(item)
        expected = '0a4000600a080000010940050000006300000001'
        self.assertEqual(bf.filterload().serialize().hex(), expected)
| 34.157143 | 83 | 0.634463 | from unittest import TestCase
from helper import bit_field_to_bytes, encode_varint, int_to_little_endian, murmur3
from network import GenericMessage
BIP37_CONSTANT = 0xfba4c795
class BloomFilter:
def __init__(self, size, function_count, tweak):
self.size = size
self.bit_field = [0] * (size * 8)
self.function_count = function_count
self.tweak = tweak
def add(self, item):
for i in range(self.function_count):
seed = i * BIP37_CONSTANT + self.tweak
h = murmur3(item, seed=seed)
self.bit_field[h % (self.size * 8)] = 1
def filter_bytes(self):
return bit_field_to_bytes(self.bit_field)
def filterload(self, flag=1):
result = encode_varint(self.size)
result += self.filter_bytes()
result += int_to_little_endian(self.function_count, 4)
result += int_to_little_endian(self.tweak, 4)
result += int_to_little_endian(flag, 1)
return GenericMessage(b'filterload', result)
class BloomFilterTest(TestCase):
def test_add(self):
bf = BloomFilter(10, 5, 99)
item = b'Hello World'
bf.add(item)
expected = '0000000a080000000140'
self.assertEqual(bf.filter_bytes().hex(), expected)
item = b'Goodbye!'
bf.add(item)
expected = '4000600a080000010940'
self.assertEqual(bf.filter_bytes().hex(), expected)
def test_filterload(self):
bf = BloomFilter(10, 5, 99)
item = b'Hello World'
bf.add(item)
item = b'Goodbye!'
bf.add(item)
expected = '0a4000600a080000010940050000006300000001'
self.assertEqual(bf.filterload().serialize().hex(), expected)
| true | true |
f727ba6681f8dfea391e57c63b138af4829f136b | 301 | py | Python | Communication/temp_driver.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | 2 | 2021-11-13T14:16:06.000Z | 2022-01-12T06:07:32.000Z | Communication/temp_driver.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | null | null | null | Communication/temp_driver.py | Forence1999/SmartWalker | 635410bf44234eead9fd1e2fe226eb8eafa9d27d | [
"MIT"
] | 3 | 2021-08-30T04:40:39.000Z | 2022-01-09T11:34:04.000Z |
import os, sys

# Make the project root (the parent of this file's directory) importable
# so the Communication package resolves regardless of the cwd.
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)

from Communication.Modules.Driver_recv import DriverRecv

if __name__ == "__main__":
    # Start the driver receiver loop.
    drv_recv = DriverRecv()
    drv_recv.start()
import os, sys
pwd = os.path.abspath(os.path.abspath(__file__))
father_path = os.path.abspath(os.path.dirname(pwd) + os.path.sep + "..")
sys.path.append(father_path)
from Communication.Modules.Driver_recv import DriverRecv
if __name__ == "__main__":
drv_recv = DriverRecv()
drv_recv.start() | true | true |
f727bbf4f042610f878f6f2a4448221ae54ed568 | 147 | py | Python | yc166/525.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc166/525.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | yc166/525.py | c-yan/yukicoder | cdbbd65402177225dd989df7fe01f67908484a69 | [
"MIT"
] | null | null | null | T = input()
hh, mm = map(int, T.split(':'))
mm += 5
if mm > 59:
hh += 1
mm %= 60
if hh > 23:
hh %= 24
print('%02d:%02d' % (hh, mm))
| 11.307692 | 31 | 0.428571 | T = input()
hh, mm = map(int, T.split(':'))
mm += 5
if mm > 59:
hh += 1
mm %= 60
if hh > 23:
hh %= 24
print('%02d:%02d' % (hh, mm))
| true | true |
f727bc6b391386643fcb9047d7e3f34d29e75ee1 | 3,712 | py | Python | intersight/models/hyperflex_ucsm_config_policy_ref.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/hyperflex_ucsm_config_policy_ref.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | intersight/models/hyperflex_ucsm_config_policy_ref.py | ategaw-cisco/intersight-python | 9d6476620507281b1dc358e29ac452d56081bbb0 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-262
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HyperflexUcsmConfigPolicyRef(object):
    """Swagger-generated reference (Moid + ObjectType) to a HyperFlex
    UCSM config policy managed object.

    ``swagger_types`` maps attribute names to their declared types and
    ``attribute_map`` maps them to the JSON keys used on the wire.
    """

    swagger_types = {
        'moid': 'str',
        'object_type': 'str'
    }

    attribute_map = {
        'moid': 'Moid',
        'object_type': 'ObjectType'
    }

    def __init__(self, moid=None, object_type=None):
        """Create the reference, optionally seeding moid/object_type."""
        self._moid = None
        self._object_type = None
        if moid is not None:
            self.moid = moid
        if object_type is not None:
            self.object_type = object_type

    @property
    def moid(self):
        """str: Moid of the referenced managed object."""
        return self._moid

    @moid.setter
    def moid(self, moid):
        """Set the moid of this HyperflexUcsmConfigPolicyRef."""
        self._moid = moid

    @property
    def object_type(self):
        """str: ObjectType of the referenced managed object."""
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        """Set the object_type of this HyperflexUcsmConfigPolicyRef."""
        self._object_type = object_type

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        swagger models, lists and dicts."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    entry.to_dict() if hasattr(entry, 'to_dict') else entry
                    for entry in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of to_dict()."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with equal attributes."""
        return (isinstance(other, HyperflexUcsmConfigPolicyRef)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 24.746667 | 81 | 0.554149 |
from pprint import pformat
from six import iteritems
import re
class HyperflexUcsmConfigPolicyRef(object):
swagger_types = {
'moid': 'str',
'object_type': 'str'
}
attribute_map = {
'moid': 'Moid',
'object_type': 'ObjectType'
}
def __init__(self, moid=None, object_type=None):
self._moid = None
self._object_type = None
if moid is not None:
self.moid = moid
if object_type is not None:
self.object_type = object_type
@property
def moid(self):
return self._moid
@moid.setter
def moid(self, moid):
self._moid = moid
@property
def object_type(self):
return self._object_type
@object_type.setter
def object_type(self, object_type):
self._object_type = object_type
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, HyperflexUcsmConfigPolicyRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f727bc6fd1864b3db8c69008ff273cd86317a3fd | 545 | py | Python | conda_verify/package/test_files.py | soapy1/conda-verify | 4941b8ea37bf4afa5ca16366ae855f756037f230 | [
"BSD-3-Clause"
] | null | null | null | conda_verify/package/test_files.py | soapy1/conda-verify | 4941b8ea37bf4afa5ca16366ae855f756037f230 | [
"BSD-3-Clause"
] | null | null | null | conda_verify/package/test_files.py | soapy1/conda-verify | 4941b8ea37bf4afa5ca16366ae855f756037f230 | [
"BSD-3-Clause"
] | 1 | 2020-02-03T12:43:17.000Z | 2020-02-03T12:43:17.000Z | from conda_verify.conda_package_check import CondaPackageCheck
def verify(path_to_package=None, verbose=True, **kwargs):
package_check = CondaPackageCheck(path_to_package, verbose)
package_check.info_files()
package_check.no_hardlinks()
package_check.not_allowed_files()
package_check.index_json()
package_check.no_bat_and_exe()
package_check.list_packages()
pedantic = kwargs.get("pedantic") if "pedantic" in kwargs.keys() else True
package_check.has_prefix(pedantic=pedantic)
package_check.t.close()
| 34.0625 | 78 | 0.774312 | from conda_verify.conda_package_check import CondaPackageCheck
def verify(path_to_package=None, verbose=True, **kwargs):
package_check = CondaPackageCheck(path_to_package, verbose)
package_check.info_files()
package_check.no_hardlinks()
package_check.not_allowed_files()
package_check.index_json()
package_check.no_bat_and_exe()
package_check.list_packages()
pedantic = kwargs.get("pedantic") if "pedantic" in kwargs.keys() else True
package_check.has_prefix(pedantic=pedantic)
package_check.t.close()
| true | true |
f727be7b1536a7590641b1a6b3f2745fd7b98775 | 433 | py | Python | src/batch_iterator.py | thealah/imdb-import | fd51d7fbefc419c134277bc0c85b59dcbb457350 | [
"MIT"
] | null | null | null | src/batch_iterator.py | thealah/imdb-import | fd51d7fbefc419c134277bc0c85b59dcbb457350 | [
"MIT"
] | null | null | null | src/batch_iterator.py | thealah/imdb-import | fd51d7fbefc419c134277bc0c85b59dcbb457350 | [
"MIT"
] | null | null | null | def batch_iterator(iterable, size=100, filter_expression=None):
current_batch = []
for x in iterable:
if filter_expression:
if filter_expression(x):
current_batch.append(x)
else:
current_batch.append(x)
if len(current_batch) == size:
yield current_batch
current_batch = []
if current_batch:
yield current_batch | 27.0625 | 63 | 0.575058 | def batch_iterator(iterable, size=100, filter_expression=None):
current_batch = []
for x in iterable:
if filter_expression:
if filter_expression(x):
current_batch.append(x)
else:
current_batch.append(x)
if len(current_batch) == size:
yield current_batch
current_batch = []
if current_batch:
yield current_batch | true | true |
f727be9a5c7b0dee1552109b912eec3664b93ab0 | 3,227 | py | Python | exavault/models/share_relationships_notification.py | ExaVault/evapi-python | 769bfa9fbb683f2b4653ca2564029ffb72445c8c | [
"MIT"
] | null | null | null | exavault/models/share_relationships_notification.py | ExaVault/evapi-python | 769bfa9fbb683f2b4653ca2564029ffb72445c8c | [
"MIT"
] | 3 | 2017-07-13T20:58:05.000Z | 2019-08-02T19:08:37.000Z | exavault/models/share_relationships_notification.py | ExaVault/evapi-python | 769bfa9fbb683f2b4653ca2564029ffb72445c8c | [
"MIT"
] | 4 | 2016-11-16T00:14:23.000Z | 2020-09-24T14:50:46.000Z | # coding: utf-8
"""
ExaVault API
See our API reference documentation at https://www.exavault.com/developer/api-docs/ # noqa: E501
OpenAPI spec version: 2.0
Contact: support@exavault.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ShareRelationshipsNotification(object):
    """Swagger-generated wrapper holding the ``data`` relationship payload.

    ``swagger_types`` maps attribute names to their declared types and
    ``attribute_map`` maps them to the JSON keys used on the wire.
    """

    swagger_types = {
        'data': 'ShareRelationshipsData'
    }

    attribute_map = {
        'data': 'data'
    }

    def __init__(self, data=None):  # noqa: E501
        """Create the notification, optionally seeding ``data``."""
        self._data = None
        self.discriminator = None
        if data is not None:
            self.data = data

    @property
    def data(self):
        """ShareRelationshipsData: the related notification data."""
        return self._data

    @data.setter
    def data(self, data):
        """Set the data of this ShareRelationshipsNotification."""
        self._data = data

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested
        swagger models, lists and dicts."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    entry.to_dict() if hasattr(entry, 'to_dict') else entry
                    for entry in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        # Generated safeguard for models that subclass dict (not this one).
        if issubclass(ShareRelationshipsNotification, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equal iff *other* is the same model type with equal attributes."""
        return (isinstance(other, ShareRelationshipsNotification)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 29.072072 | 101 | 0.575147 |
import pprint
import re
import six
class ShareRelationshipsNotification(object):
    """Swagger-generated model carrying a ShareRelationshipsData payload.

    NOTE: auto-generated client model; the attribute maps below drive the
    generic (de)serialization machinery.
    """

    # attribute name -> swagger type, used by to_dict() for recursion.
    swagger_types = {
        'data': 'ShareRelationshipsData'
    }

    # attribute name -> JSON key.
    attribute_map = {
        'data': 'data'
    }

    def __init__(self, data=None):
        """Create the notification; *data* is optional."""
        self._data = None
        self.discriminator = None
        if data is not None:
            self.data = data

    @property
    def data(self):
        """The ShareRelationshipsData payload of this notification."""
        return self._data

    @data.setter
    def data(self, data):
        """Set the ShareRelationshipsData payload of this notification."""
        self._data = data

    def to_dict(self):
        """Return the model's declared properties as a plain dict."""
        def _convert(value):
            # Recurse into nested models, lists and dicts of models.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr))
                  for attr in self.swagger_types}
        # Generated guard for dict-derived models; never true for this class.
        if issubclass(ShareRelationshipsNotification, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two notifications are equal iff all attributes are equal."""
        return (isinstance(other, ShareRelationshipsNotification)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
f727bf0787af05ae30dd3c315f89a1ffc68b1a5f | 126 | py | Python | models.py | banr1jnts/kaggle-youtube2nd | 21248d563afcf707cc7665703a987d71c94f4c5a | [
"MIT"
] | null | null | null | models.py | banr1jnts/kaggle-youtube2nd | 21248d563afcf707cc7665703a987d71c94f4c5a | [
"MIT"
] | null | null | null | models.py | banr1jnts/kaggle-youtube2nd | 21248d563afcf707cc7665703a987d71c94f4c5a | [
"MIT"
] | null | null | null | class BaseModel(object):
    def create_model(self, unused_model_input, **unused_params):
        """Build and return the model; abstract — subclasses must override.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError()
| 31.5 | 64 | 0.746032 | class BaseModel(object):
    def create_model(self, unused_model_input, **unused_params):
        """Build and return the model; abstract — subclasses must override.

        Raises:
            NotImplementedError: always, on this base class.
        """
        raise NotImplementedError()
| true | true |
f727c080bee1883b62b81efe7828d2d40ce4de8a | 375 | py | Python | CodeSignal/Arcade/The_Core/Level_10_Lab_Of_Transformation/085_Higher_Version.py | Zubieta/CPP | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 8 | 2017-03-02T07:56:45.000Z | 2021-08-07T20:20:19.000Z | CodeSignal/Arcade/The_Core/Level_10_Lab_Of_Transformation/085_Higher_Version.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | null | null | null | CodeSignal/Arcade/The_Core/Level_10_Lab_Of_Transformation/085_Higher_Version.py | zubie7a/Algorithms | fb4a3cbf2e4edcc590df15663cd28fb9ecab679c | [
"MIT"
] | 1 | 2021-08-07T20:20:20.000Z | 2021-08-07T20:20:20.000Z | # https://app.codesignal.com/arcade/code-arcade/lab-of-transformations/vsKRjYKv4SCjzJc8r/
def higherVersion(ver1, ver2):
    """Return True iff dotted version string *ver1* is strictly higher than *ver2*.

    Fix: the original compared the raw component lists, so "1.2" vs "1.2.0"
    (semantically equal) gave asymmetric results.  Pad the shorter list with
    zeros before the lexicographic comparison.
    """
    # Split by dots, convert individual elements to ints.
    a = [int(x) for x in ver1.split(".")]
    b = [int(x) for x in ver2.split(".")]
    # Equalize lengths so trailing ".0" components do not affect the result.
    n = max(len(a), len(b))
    a += [0] * (n - len(a))
    b += [0] * (n - len(b))
    # Lexicographic, element-wise comparison of the integer components.
    return a > b
| 41.666667 | 89 | 0.696 |
def higherVersion(ver1, ver2):
    """Return True iff dotted version string ver1 is strictly higher than ver2."""
    # Parse each dotted version string into a list of integer components.
    ver1 = [int(x) for x in ver1.split(".")]
    ver2 = [int(x) for x in ver2.split(".")]
    # Lexicographic list comparison decides which version is higher.
    # NOTE(review): assumes both versions have the same number of components
    # ("1.2" vs "1.2.0" would compare as lower) — confirm against callers.
    return ver1 > ver2
| true | true |
f727c1a63c15749305645ffe5f2bcd064e0496d2 | 23,239 | py | Python | code_generate.py | Taekyoon/Python-Mini-C-Compiler | deabdba9f92f772e7b5276288d05b94cb96e4d72 | [
"MIT"
] | 4 | 2018-10-11T12:30:46.000Z | 2019-11-12T12:17:17.000Z | code_generate.py | Taekyoon/Python-Mini-C-Compiler | deabdba9f92f772e7b5276288d05b94cb96e4d72 | [
"MIT"
] | null | null | null | code_generate.py | Taekyoon/Python-Mini-C-Compiler | deabdba9f92f772e7b5276288d05b94cb96e4d72 | [
"MIT"
] | null | null | null | from lex import tsymbol_dict
from expression import *
from code_generate_table import *
from code_generate_utils import *
class CodeGenerator(object):
    """Walk the parser's AST and emit ucode assembly text into self.ucode_str.

    Two passes over the top level: first global declarations and function
    headers (populating the symbol table), then function bodies.  Symbols are
    kept as dicts with name/type/base/offset/width/level fields.
    """

    def __init__(self, tree):
        self.tree = tree                           # AST root to translate
        # base = lexical level, offset = next free slot, width = default size
        self.base, self.offset, self.width = 1, 1, 1
        self.TERMINAL, self.NONTERMINAL = 0, 1     # node representation tags
        # lvalue is set to 1 while generating an address (assignment target)
        self.lvalue, self.rvalue = None, None
        self.labelNum = 0                          # counter for fresh labels
        self.opcodeName = opcodeName               # opcode -> mnemonic table
        self.symbolTable = list()
        self.symLevel = 0                          # scope level (nesting unused)
        self.ucode_str = ""                        # accumulated ucode output

    def generate(self):
        """Translate the whole tree: globals and headers, bodies, startup code."""
        ptr = self.tree
        self.initSymbolTable()
        # Pass 1: global declarations and function headers fill the symbol table.
        p = ptr.son
        while p:
            if p.token["type"] == nodeNumber.DCL:
                self.processDeclaration(p.son)
            elif p.token["type"] == nodeNumber.FUNC_DEF:
                self.processFuncHeader(p.son)
            else:
                icg_error(3)
            p = p.brother
        globalSize = self.offset - 1
        # Pass 2: emit code for each function body.
        p = ptr.son
        while p:
            if p.token["type"] == nodeNumber.FUNC_DEF:
                self.processFunction(p)
            p = p.brother
        # Startup: reserve global storage, then call main and terminate.
        self.emit1(opcode.bgn, globalSize)
        self.emit0(opcode.ldp)
        self.emitJump(opcode.call, "main")
        self.emit0(opcode.endop)

    def initSymbolTable(self):
        """Reset the symbol table before a fresh generation run."""
        self.symbolTable.clear()

    ### DECLARATION
    def insert(self, name, typeSpecifier, typeQualifier, base, offset, width, initialValue):
        """Append a symbol record and return its index in the table.

        Fix: originally returned None although every caller assigned the
        result to stIndex; now returns the new entry's index.
        """
        item = {"name": name,
                "typeSpecifier": typeSpecifier,
                "typeQualifier": typeQualifier,
                "base": base,
                "offset": offset,
                "width": width,
                "initialValue": initialValue,
                "level": self.symLevel}
        self.symbolTable.append(item)
        return len(self.symbolTable) - 1

    def processDeclaration(self, ptr):
        """Handle a DCL node: read its DCL_SPEC, then each declared item."""
        if not ptr.token["type"] == nodeNumber.DCL_SPEC:
            raise AttributeError("icg_error(4)")
        typeSpecifier = typeEnum.INT_TYPE
        typeQualifier = typeEnum.VAR_TYPE
        p = ptr.son
        while p:
            if p.token["type"] == nodeNumber.INT_NODE:
                typeSpecifier = typeEnum.INT_TYPE
            elif p.token["type"] == nodeNumber.CONST_NODE:
                typeQualifier = typeEnum.CONST_TYPE
            else:
                print("processDeclaration: not yet implemented")
            p = p.brother
        p = ptr.brother
        if not p.token["type"] == nodeNumber.DCL_ITEM:
            raise AttributeError("icg_error")
        # Dispatch each declarator to the matching handler.
        switch_case = {nodeNumber.SIMPLE_VAR: self.processSimpleVariable,
                       nodeNumber.ARRAY_VAR: self.processArrayVariable}
        while p:
            q = p.son
            token_number = q.token["type"]
            if token_number in switch_case:
                switch_case[token_number](q, typeSpecifier, typeQualifier)
            else:
                print("error in SIMPLE_VAR or ARRAY_VAR")
            p = p.brother

    def processSimpleVariable(self, ptr, typeSpecifier, typeQualifier):
        """Enter a scalar variable (or named constant) into the symbol table."""
        p = ptr.son
        q = ptr.brother
        sign = 1
        if not ptr.token["type"] == nodeNumber.SIMPLE_VAR:
            print("error in SIMPLE_VAR")
        if typeQualifier == typeEnum.CONST_TYPE:
            # Constants keep their value in the table; no storage is reserved.
            if q is None:
                print(ptr.son.token["value"], " must have a constant value")
                return
            if q.token["type"] == nodeNumber.UNARY_MINUS:
                sign = -1
                q = q.son
            initialValue = sign * q.token["value"]
            stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
                                  0, 0, 0, initialValue)
        else:
            # Plain variable: reserve one slot at the current base/offset.
            size = typeSize(typeSpecifier)
            stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
                                  self.base, self.offset, self.width, 0)
            self.offset += size

    def processArrayVariable(self, ptr, typeSpecifier, typeQualifier):
        """Enter an array variable; width is element count * element size."""
        p = ptr.son
        if not ptr.token["type"] == nodeNumber.ARRAY_VAR:
            print("error in ARRAY_VAR")
            return
        if p.brother is None:
            print("array size must be specified")
        else:
            size = int(p.brother.token["value"])
            size *= typeSize(typeSpecifier)
            stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
                                  self.base, self.offset, size, 0)
            self.offset += size

    ### EXPRESSION
    def lookup(self, name):
        """Return the symbol-table index of *name* at the current level, or -1."""
        # Fix: removed a leftover debug print that dumped every entry scanned.
        for i, item in enumerate(self.symbolTable):
            if item["name"] == name and item["level"] == self.symLevel:
                return i
        return -1

    def emit0(self, opcode):
        """Emit a zero-operand instruction."""
        self.ucode_str += "           {}\n".format(self.opcodeName[opcode])

    def emit1(self, opcode, operand):
        """Emit a one-operand instruction."""
        self.ucode_str += "           {} {}\n".format(self.opcodeName[opcode], operand)

    def emit2(self, opcode, operand1, operand2):
        """Emit a two-operand instruction."""
        self.ucode_str += "           {} {} {}\n".format(self.opcodeName[opcode], operand1, operand2)

    def emit3(self, opcode, operand1, operand2, operand3):
        """Emit a three-operand instruction."""
        self.ucode_str += "           {} {} {} {}\n".format(self.opcodeName[opcode], operand1, operand2, operand3)

    def emitJump(self, opcode, label):
        """Emit a jump/call instruction targeting *label*."""
        self.ucode_str += "           {} {}\n".format(self.opcodeName[opcode], label)

    def rv_emit(self, ptr):
        """Push a terminal's r-value: a literal, a constant, or a variable load."""
        if ptr.token["type"] == tsymbol_dict["tnumber"]:
            self.emit1(opcode.ldc, ptr.token["value"])
        else:
            stIndex = self.lookup(ptr.token["value"])
            if stIndex == -1:
                return
            if self.symbolTable[stIndex]["typeQualifier"] == typeEnum.CONST_TYPE:
                self.emit1(opcode.ldc, self.symbolTable[stIndex]["initialValue"])
            elif self.symbolTable[stIndex]["width"] > 1:
                # Arrays decay to their base address.
                self.emit2(opcode.lda, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
            else:
                self.emit2(opcode.lod, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])

    def read_emit(self, ptr):
        """Push an argument for read(): the address of a variable (lda), not its value."""
        if ptr.token["type"] == tsymbol_dict["tnumber"]:
            self.emit1(opcode.ldc, ptr.token["value"])
        else:
            stIndex = self.lookup(ptr.token["value"])
            if stIndex == -1:
                return
            if self.symbolTable[stIndex]["typeQualifier"] == typeEnum.CONST_TYPE:
                self.emit1(opcode.ldc, self.symbolTable[stIndex]["initialValue"])
            else:
                self.emit2(opcode.lda, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])

    def processOperator(self, ptr):
        """Emit code for one expression node, dispatching on its node type."""
        # Compound-assignment node -> the arithmetic opcode it applies.
        compoundOps = {nodeNumber.ADD_ASSIGN: opcode.add,
                       nodeNumber.SUB_ASSIGN: opcode.sub,
                       nodeNumber.MUL_ASSIGN: opcode.mult,
                       nodeNumber.DIV_ASSIGN: opcode.divop,
                       nodeNumber.MOD_ASSIGN: opcode.modop}
        # Binary operator node -> opcode.
        binaryOps = {nodeNumber.ADD: opcode.add, nodeNumber.SUB: opcode.sub,
                     nodeNumber.MUL: opcode.mult, nodeNumber.DIV: opcode.divop,
                     nodeNumber.MOD: opcode.modop, nodeNumber.EQ: opcode.eq,
                     nodeNumber.NE: opcode.ne, nodeNumber.GT: opcode.gt,
                     nodeNumber.LT: opcode.lt, nodeNumber.GE: opcode.ge,
                     nodeNumber.LE: opcode.le,
                     nodeNumber.LOGICAL_AND: opcode.andop,
                     nodeNumber.LOGICAL_OR: opcode.orop}
        token_number = ptr.token["type"]
        if token_number == nodeNumber.ASSIGN_OP:
            lhs, rhs = ptr.son, ptr.son.brother
            # 1) push the target address when the lhs is an l-value expression
            if lhs.noderep == self.NONTERMINAL:
                self.lvalue = 1
                self.processOperator(lhs)
                self.lvalue = 0
            # 2) push the value of the rhs
            if rhs.noderep == self.NONTERMINAL:
                self.processOperator(rhs)
            else:
                self.rv_emit(rhs)
            # 3) store: directly into a named slot, or through the address (sti)
            if lhs.noderep == self.TERMINAL:
                stIndex = self.lookup(lhs.token["value"])
                if stIndex == -1:
                    print("undefined variable : ", lhs.token["value"])
                    return
                self.emit2(opcode._str, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
            else:
                self.emit0(opcode.sti)
        elif token_number in compoundOps:
            lhs, rhs = ptr.son, ptr.son.brother
            # Temporarily relabel the node so the lhs address pass behaves
            # exactly as in a plain assignment.
            current_nodeNumber = ptr.token["type"]
            ptr.token["type"] = nodeNumber.ASSIGN_OP
            if lhs.noderep == self.NONTERMINAL:
                self.lvalue = 1
                self.processOperator(lhs)
                self.lvalue = 0
            ptr.token["type"] = current_nodeNumber
            # Push lhs value, then rhs value.
            if lhs.noderep == self.NONTERMINAL:
                self.processOperator(lhs)
            else:
                self.rv_emit(lhs)
            if rhs.noderep == self.NONTERMINAL:
                self.processOperator(rhs)
            else:
                self.rv_emit(rhs)
            # Apply the arithmetic, then store like a plain assignment.
            self.emit0(compoundOps[token_number])
            if lhs.noderep == self.TERMINAL:
                stIndex = self.lookup(lhs.token["value"])
                if stIndex == -1:
                    print("undefined variable : ", lhs.son.token["value"])
                    return
                self.emit2(opcode._str, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
            else:
                self.emit0(opcode.sti)
        elif token_number in binaryOps:
            # Evaluate both operands, then apply the operator.
            lhs, rhs = ptr.son, ptr.son.brother
            if lhs.noderep == self.NONTERMINAL:
                self.processOperator(lhs)
            else:
                self.rv_emit(lhs)
            if rhs.noderep == self.NONTERMINAL:
                self.processOperator(rhs)
            else:
                self.rv_emit(rhs)
            self.emit0(binaryOps[token_number])
        elif token_number == nodeNumber.UNARY_MINUS or\
                token_number == nodeNumber.LOGICAL_NOT:
            p = ptr.son
            if p.noderep == self.NONTERMINAL:
                self.processOperator(p)
            else:
                self.rv_emit(p)
            if token_number == nodeNumber.UNARY_MINUS:
                self.emit0(opcode.neg)
            else:
                self.emit0(opcode.notop)
        elif token_number in (nodeNumber.PRE_INC, nodeNumber.PRE_DEC,
                              nodeNumber.POST_INC, nodeNumber.POST_DEC):
            p = ptr.son
            if p.noderep == self.NONTERMINAL:
                self.processOperator(p)
            else:
                self.rv_emit(p)
            # Walk down to the identifier leaf being modified.
            # Fix: guard q before reading q.noderep so a None son does not
            # raise AttributeError before the explicit q-is-None check below.
            q = p
            while q is not None and not q.noderep == self.TERMINAL:
                q = q.son
            if q is None or not q.token["type"] == tsymbol_dict["tident"]:
                print("increment/decrement operators can not be applied in expression")
                return
            stIndex = self.lookup(q.token["value"])
            if stIndex == -1:
                return
            if token_number in (nodeNumber.PRE_INC, nodeNumber.POST_INC):
                self.emit0(opcode.incop)
            else:
                self.emit0(opcode.decop)
            # NOTE(review): post-fix forms are emitted exactly like pre-fix,
            # so the value left on the stack is already updated — a known
            # limitation of this generator, preserved as-is.
            if p.noderep == self.TERMINAL:
                stIndex = self.lookup(p.token["value"])
                if stIndex == -1:
                    return
                self.emit2(opcode._str, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
            elif p.token["type"] == nodeNumber.INDEX:
                # Recompute the element address, then store through it.
                self.lvalue = 1
                self.processOperator(p)
                self.lvalue = 0
                self.emit0(opcode.swp)
                self.emit0(opcode.sti)
            else:
                print("error in increment/decrement operators")
        elif token_number == nodeNumber.INDEX:
            # a[i]: push i, add the array base address; load unless an
            # enclosing assignment asked for the address (self.lvalue == 1).
            indexExp = ptr.son.brother
            if indexExp.noderep == self.NONTERMINAL:
                self.processOperator(indexExp)
            else:
                self.rv_emit(indexExp)
            stIndex = self.lookup(ptr.son.token["value"])
            if stIndex == -1:
                print("undefined variable: ", ptr.son.token["value"])
                return
            self.emit2(opcode.lda, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
            self.emit0(opcode.add)
            if self.lvalue == 0:
                self.emit0(opcode.ldi)
        elif token_number == nodeNumber.CALL:
            # Fix: removed leftover debug prints of the callee name/index.
            p = ptr.son
            if self.checkPredefined(p):
                return
            functionName = p.token["value"]
            stIndex = self.lookup(functionName)
            if stIndex == -1:
                return
            # For functions, "width" holds the declared parameter count.
            noArguments = self.symbolTable[stIndex]["width"]
            self.emit0(opcode.ldp)
            p = p.brother
            while p:
                if p.noderep == self.NONTERMINAL:
                    self.processOperator(p)
                else:
                    self.rv_emit(p)
                noArguments -= 1
                p = p.brother
            if noArguments > 0:
                print(functionName, " : too few actual arguments")
            if noArguments < 0:
                print(functionName, " : too many actual arguments")
            self.emitJump(opcode.call, ptr.son.token["value"])
        else:
            print("processOperator: not yet implemented")

    def checkPredefined(self, ptr):
        """Emit code for the built-ins read/write/lf; return True if handled."""
        if ptr.token["value"] == "read":
            self.emit0(opcode.ldp)
            p = ptr.brother
            while p:
                if p.noderep == self.NONTERMINAL:
                    self.processOperator(p)
                else:
                    # read() takes addresses, not values.
                    self.read_emit(p)
                p = p.brother
            self.emitJump(opcode.call, "read")
            return True
        elif ptr.token["value"] == "write":
            self.emit0(opcode.ldp)
            p = ptr.brother
            while p:
                if p.noderep == self.NONTERMINAL:
                    self.processOperator(p)
                else:
                    self.rv_emit(p)
                p = p.brother
            self.emitJump(opcode.call, "write")
            return True
        elif ptr.token["value"] == "lf":
            self.emitJump(opcode.call, "lf")
            return True
        return False

    ### STATEMENT
    def genLabel(self):
        """Return a fresh, unique label name of the form $$N."""
        ret_str = "$${}".format(self.labelNum)
        self.labelNum += 1
        return ret_str

    def emitLabel(self, label):
        """Emit a label line (attached to a nop)."""
        self.ucode_str += "{:11}{}\n".format(label, "nop")

    def processCondition(self, ptr):
        """Emit code that leaves a condition's truth value on the stack."""
        if ptr.noderep == self.NONTERMINAL:
            self.processOperator(ptr)
        else:
            self.rv_emit(ptr)

    def processStatement(self, ptr):
        """Emit code for one statement node (compound/expr/return/if/while)."""
        token_number = ptr.token["type"]
        if token_number == nodeNumber.COMPOUND_ST:
            # First son is the declaration list; the brother holds statements.
            p = ptr.son.brother
            p = p.son
            while p:
                self.processStatement(p)
                p = p.brother
        elif token_number == nodeNumber.EXP_ST:
            if ptr.son is not None:
                self.processOperator(ptr.son)
        elif token_number == nodeNumber.RETURN_ST:
            if ptr.son is not None:
                # return <expr>: evaluate it, then retv.
                p = ptr.son
                if p.noderep == self.NONTERMINAL:
                    self.processOperator(p)
                else:
                    self.rv_emit(p)
                self.emit0(opcode.retv)
            else:
                self.emit0(opcode.ret)
        elif token_number == nodeNumber.IF_ST:
            # cond; fjp L; body; L:
            label = self.genLabel()
            self.processCondition(ptr.son)
            self.emitJump(opcode.fjp, label)
            self.processStatement(ptr.son.brother)
            self.emitLabel(label)
        elif token_number == nodeNumber.IF_ELSE_ST:
            # cond; fjp L1; then; ujp L2; L1: else; L2:
            label1, label2 = self.genLabel(), self.genLabel()
            self.processCondition(ptr.son)
            self.emitJump(opcode.fjp, label1)
            self.processStatement(ptr.son.brother)
            self.emitJump(opcode.ujp, label2)
            self.emitLabel(label1)
            self.processStatement(ptr.son.brother.brother)
            self.emitLabel(label2)
        elif token_number == nodeNumber.WHILE_ST:
            # L1: cond; fjp L2; body; ujp L1; L2:
            label1, label2 = self.genLabel(), self.genLabel()
            self.emitLabel(label1)
            self.processCondition(ptr.son)
            self.emitJump(opcode.fjp, label2)
            self.processStatement(ptr.son.brother)
            self.emitJump(opcode.ujp, label1)
            self.emitLabel(label2)
        else:
            print("processStatement: not yet implemented.")
            print_part_tree(ptr)
            raise AttributeError("Bang!")

    ### FUNCTION
    def processSimpleParamVariable(self, ptr, typeSpecifier, typeQualifier):
        """Enter a scalar parameter into the symbol table."""
        p = ptr.son
        if not ptr.token["type"] == nodeNumber.SIMPLE_VAR:
            print("error in SIMPLE_VAR")
        size = typeSize(typeSpecifier)
        stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
                              self.base, self.offset, 0, 0)
        self.offset += size

    def processArrayParamVariable(self, ptr, typeSpecifier, typeQualifier):
        """Enter an array parameter into the symbol table.

        Fix: the original referenced undefined locals base/offset/width and
        updated a local offset, raising NameError at runtime; it now uses the
        generator's own base/offset like the other declaration handlers.
        """
        p = ptr.son
        if not ptr.token["type"] == nodeNumber.ARRAY_VAR:
            print("error in ARRAY_VAR")
            return
        # An array parameter occupies one slot (it is passed by address).
        size = typeSize(typeSpecifier)
        stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
                              self.base, self.offset, size, 0)
        self.offset += size

    def processParamDeclaration(self, ptr):
        """Handle one PARAM_DCL: read its DCL_SPEC, then the declarator."""
        if not ptr.token["type"] == nodeNumber.DCL_SPEC:
            icg_error(4)
        typeSpecifier = typeEnum.INT_TYPE
        typeQualifier = typeEnum.VAR_TYPE
        p = ptr.son
        while p:
            if p.token["type"] == nodeNumber.INT_NODE:
                typeSpecifier = typeEnum.INT_TYPE
            elif p.token["type"] == nodeNumber.CONST_NODE:
                typeQualifier = typeEnum.CONST_TYPE
            else:
                print("processParamDeclaration: not yet implemented")
            p = p.brother
        p = ptr.brother
        token_number = p.token["type"]
        if token_number == nodeNumber.SIMPLE_VAR:
            self.processSimpleParamVariable(p, typeSpecifier, typeQualifier)
        elif token_number == nodeNumber.ARRAY_VAR:
            self.processArrayParamVariable(p, typeSpecifier, typeQualifier)
        else:
            print(token_number, nodeNumber.SIMPLE_VAR, nodeNumber.ARRAY_VAR)
            print("error in SIMPLE_VAR or ARRAY_VAR")

    def emitFunc(self, FuncName, operand1, operand2, operand3):
        """Emit a function prologue line: <name> fun <size> <base> <level>."""
        self.ucode_str += "{:11}{} {} {} {}\n".format(FuncName, "fun", operand1, operand2, operand3)

    def processFuncHeader(self, ptr):
        """Enter a function (name, return type, arity) into the symbol table."""
        if not ptr.token["type"] == nodeNumber.FUNC_HEAD:
            print("error in processFuncHeader")
        # Fix: default the return type so a malformed spec cannot leave
        # returnType unbound (NameError) after printing the error message.
        returnType = typeEnum.VOID_TYPE
        p = ptr.son.son
        while p:
            if p.token["type"] == nodeNumber.INT_NODE:
                returnType = typeEnum.INT_TYPE
            elif p.token["type"] == nodeNumber.VOID_NODE:
                returnType = typeEnum.VOID_TYPE
            else:
                print("invalid function return type")
            p = p.brother
        # Count the formal parameters; stored in the symbol's width field.
        p = ptr.son.brother.brother
        p = p.son
        noArguments = 0
        while p:
            noArguments += 1
            p = p.brother
        stIndex = self.insert(ptr.son.brother.token["value"], returnType,
                              typeEnum.FUNC_TYPE, 1, 0, noArguments, 0)

    def processFunction(self, ptr):
        """Emit prologue, symbol directives, body and epilogue for a function."""
        sizeOfVar, numOfVar = 0, 0
        self.base += 1
        self.offset = 1
        if not ptr.token["type"] == nodeNumber.FUNC_DEF:
            icg_error(4)
        # Formal parameters.
        p = ptr.son.son.brother.brother
        p = p.son
        while p:
            if p.token["type"] == nodeNumber.PARAM_DCL:
                self.processParamDeclaration(p.son)
                sizeOfVar += 1
                numOfVar += 1
            p = p.brother
        # Local declarations inside the compound statement.
        p = ptr.son.brother.son.son
        while p:
            if p.token["type"] == nodeNumber.DCL:
                self.processDeclaration(p.son)
                q = p.son.brother
                while q:
                    if q.token["type"] == nodeNumber.DCL_ITEM:
                        if q.son.token["type"] == nodeNumber.ARRAY_VAR:
                            sizeOfVar += int(q.son.son.brother.token["value"])
                        else:
                            sizeOfVar += 1
                        numOfVar += 1
                    q = q.brother
            p = p.brother
        # Prologue and one sym directive per parameter/local just inserted.
        p = ptr.son.son.brother
        self.emitFunc(p.token["value"], sizeOfVar, self.base, 2)
        for stIndex in range(len(self.symbolTable) - numOfVar, len(self.symbolTable)):
            self.emit3(opcode.sym, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"],
                       self.symbolTable[stIndex]["width"])
        # Function body.
        p = ptr.son.brother
        self.processStatement(p)
        # Implicit ret for void functions (plain or const-qualified).
        p = ptr.son.son
        if p.token["type"] == nodeNumber.DCL_SPEC:
            p = p.son
            if p.token["type"] == nodeNumber.VOID_NODE:
                self.emit0(opcode.ret)
            elif p.token["type"] == nodeNumber.CONST_NODE:
                if p.brother.token["type"] == nodeNumber.VOID_NODE:
                    self.emit0(opcode.ret)
        self.emit0(opcode.endop)
        self.base -= 1
        # NOTE(review): the original carried a commented-out "symLevel += 1"
        # here; nested scope levels are not implemented.

    def write_code_to_file(self, file_name):
        """Write the accumulated ucode to <file_name>.uco."""
        file_name = file_name + ".uco"
        with open(file_name, 'w') as f:
            f.write(self.ucode_str)
| 36.887302 | 132 | 0.547399 | from lex import tsymbol_dict
from expression import *
from code_generate_table import *
from code_generate_utils import *
class CodeGenerator(object):
def __init__(self, tree):
self.tree = tree
self.base, self.offset, self.width = 1, 1, 1
self.TERMINAL, self.NONTERMINAL = 0, 1
self.lvalue, self.rvalue = None, None
self.labelNum = 0
self.opcodeName = opcodeName
self.symbolTable = list()
self.symLevel = 0
self.ucode_str = ""
def generate(self):
ptr = self.tree
self.initSymbolTable()
p = ptr.son
while p:
if p.token["type"] == nodeNumber.DCL:
self.processDeclaration(p.son)
elif p.token["type"] == nodeNumber.FUNC_DEF:
self.processFuncHeader(p.son)
else:
icg_error(3)
p = p.brother
globalSize = self.offset - 1
p = ptr.son
while p:
if p.token["type"] == nodeNumber.FUNC_DEF:
self.processFunction(p)
p = p.brother
self.emit1(opcode.bgn, globalSize)
self.emit0(opcode.ldp)
self.emitJump(opcode.call, "main")
self.emit0(opcode.endop)
def initSymbolTable(self):
self.symbolTable.clear()
typeSpecifier, typeQualifier, base, offset, width, initialValue):
item = {"name": name,
"typeSpecifier": typeSpecifier,
"typeQualifier": typeQualifier,
"base": base,
"offset": offset,
"width": width,
"initialValue": initialValue,
"level": self.symLevel}
self.symbolTable.append(item)
def processDeclaration(self, ptr):
if not ptr.token["type"] == nodeNumber.DCL_SPEC:
raise AttributeError("icg_error(4)")
typeSpecifier = typeEnum.INT_TYPE
typeQualifier = typeEnum.VAR_TYPE
p = ptr.son
while p:
if p.token["type"] == nodeNumber.INT_NODE:
typeSpecifier = typeEnum.INT_TYPE
elif p.token["type"] == nodeNumber.CONST_NODE:
typeQualifier = typeEnum.CONST_TYPE
else:
print("processDeclaration: not yet implemented")
p = p.brother
p = ptr.brother
if not p.token["type"] == nodeNumber.DCL_ITEM:
raise AttributeError("icg_error")
switch_case = {nodeNumber.SIMPLE_VAR: self.processSimpleVariable,
nodeNumber.ARRAY_VAR: self.processArrayVariable}
while p:
q = p.son
token_number = q.token["type"]
if token_number in switch_case:
switch_case[token_number](q, typeSpecifier, typeQualifier)
else:
print("error in SIMPLE_VAR or ARRAY_VAR")
p = p.brother
def processSimpleVariable(self, ptr, typeSpecifier, typeQualifier):
p = ptr.son
q = ptr.brother
sign = 1
if not ptr.token["type"] == nodeNumber.SIMPLE_VAR:
print("error in SIMPLE_VAR")
if typeQualifier == typeEnum.CONST_TYPE:
if q is None:
print(ptr.son.token["value"], " must have a constant value")
return
if q.token["type"] == nodeNumber.UNARY_MINUS:
sign = -1
q = q.son
initialValue = sign * q.token["value"]
stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
0, 0, 0, initialValue)
else:
size = typeSize(typeSpecifier)
stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
self.base, self.offset, self.width, 0)
self.offset += size
def processArrayVariable(self, ptr, typeSpecifier, typeQualifier):
p = ptr.son
if not ptr.token["type"] == nodeNumber.ARRAY_VAR:
print("error in ARRAY_VAR")
return
if p.brother is None:
print("array size must be specified")
else:
size = int(p.brother.token["value"])
size *= typeSize(typeSpecifier)
stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
self.base, self.offset, size, 0)
self.offset += size
me):
for i, item in enumerate(self.symbolTable):
print(item["name"], self.symLevel, item["level"])
if item["name"] == name and item["level"] == self.symLevel:
return i
return -1
def emit0(self, opcode):
self.ucode_str += " {}\n".format(self.opcodeName[opcode])
def emit1(self, opcode, operand):
self.ucode_str += " {} {}\n".format(self.opcodeName[opcode], operand)
def emit2(self, opcode, operand1, operand2):
self.ucode_str += " {} {} {}\n".format(self.opcodeName[opcode], operand1, operand2)
def emit3(self, opcode, operand1, operand2, operand3):
self.ucode_str += " {} {} {} {}\n".format(self.opcodeName[opcode], operand1, operand2, operand3)
def emitJump(self, opcode, label):
self.ucode_str += " {} {}\n".format(self.opcodeName[opcode], label)
def rv_emit(self, ptr):
if ptr.token["type"] == tsymbol_dict["tnumber"]:
self.emit1(opcode.ldc, ptr.token["value"])
else:
stIndex = self.lookup(ptr.token["value"])
if stIndex == -1:
return
if self.symbolTable[stIndex]["typeQualifier"] == typeEnum.CONST_TYPE:
self.emit1(opcode.ldc, self.symbolTable[stIndex]["initialValue"])
elif self.symbolTable[stIndex]["width"] > 1:
self.emit2(opcode.lda, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
else:
self.emit2(opcode.lod, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
def read_emit(self, ptr):
if ptr.token["type"] == tsymbol_dict["tnumber"]:
self.emit1(opcode.ldc, ptr.token["value"])
else:
stIndex = self.lookup(ptr.token["value"])
if stIndex == -1:
return
if self.symbolTable[stIndex]["typeQualifier"] == typeEnum.CONST_TYPE:
self.emit1(opcode.ldc, self.symbolTable[stIndex]["initialValue"])
else:
self.emit2(opcode.lda, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
def processOperator(self, ptr):
token_number = ptr.token["type"]
if token_number == nodeNumber.ASSIGN_OP:
lhs, rhs = ptr.son, ptr.son.brother
if lhs.noderep == self.NONTERMINAL:
self.lvalue = 1
self.processOperator(lhs)
self.lvalue = 0
if rhs.noderep == self.NONTERMINAL:
self.processOperator(rhs)
else:
self.rv_emit(rhs)
if lhs.noderep == self.TERMINAL:
stIndex = self.lookup(lhs.token["value"])
if stIndex == -1:
print("undefined variable : ", lhs.token["value"])
return
self.emit2(opcode._str, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"]) lf.emit0(opcode.sti) mber.ADD_ASSIGN or\
token_number == nodeNumber.SUB_ASSIGN or\
token_number == nodeNumber.MUL_ASSIGN or\
token_number == nodeNumber.DIV_ASSIGN or\
token_number == nodeNumber.MOD_ASSIGN:
lhs, rhs = ptr.son, ptr.son.brother
current_nodeNumber = ptr.token["type"]
ptr.token["type"] = nodeNumber.ASSIGN_OP
if lhs.noderep == self.NONTERMINAL:
self.lvalue = 1
self.processOperator(lhs)
self.lvalue = 0
ptr.token["type"] = current_nodeNumber
if lhs.noderep == self.NONTERMINAL:
self.processOperator(lhs)
else:
self.rv_emit(lhs)
if rhs.noderep == self.NONTERMINAL:
self.processOperator(rhs)
else:
self.rv_emit(rhs)
if token_number == nodeNumber.ADD_ASSIGN: self.emit0(opcode.add)
elif token_number == nodeNumber.SUB_ASSIGN: self.emit0(opcode.sub)
elif token_number == nodeNumber.MUL_ASSIGN: self.emit0(opcode.mult)
elif token_number == nodeNumber.DIV_ASSIGN: self.emit0(opcode.divop)
elif token_number == nodeNumber.MOD_ASSIGN: self.emit0(opcode.modop)
if lhs.noderep == self.TERMINAL:
stIndex = self.lookup(lhs.token["value"])
if stIndex == -1:
print("undefined variable : ", lhs.son.token["value"])
return
self.emit2(opcode._str, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
else:
self.emit0(opcode.sti)
elif token_number == nodeNumber.ADD or token_number == nodeNumber.SUB or\
token_number == nodeNumber.MUL or token_number == nodeNumber.DIV or\
token_number == nodeNumber.MOD or token_number == nodeNumber.EQ or\
token_number == nodeNumber.NE or token_number == nodeNumber.GT or\
token_number == nodeNumber.GE or token_number == nodeNumber.LT or\
token_number == nodeNumber.LE or\
token_number == nodeNumber.LOGICAL_AND or\
token_number == nodeNumber.LOGICAL_OR:
lhs, rhs = ptr.son, ptr.son.brother
if lhs.noderep == self.NONTERMINAL:
self.processOperator(lhs)
else:
self.rv_emit(lhs)
if rhs.noderep == self.NONTERMINAL:
self.processOperator(rhs)
else:
self.rv_emit(rhs)
if token_number == nodeNumber.ADD: self.emit0(opcode.add)
elif token_number == nodeNumber.SUB: self.emit0(opcode.sub)
elif token_number == nodeNumber.MUL: self.emit0(opcode.mult)
elif token_number == nodeNumber.DIV: self.emit0(opcode.divop)
elif token_number == nodeNumber.MOD: self.emit0(opcode.modop)
elif token_number == nodeNumber.EQ: self.emit0(opcode.eq)
elif token_number == nodeNumber.NE: self.emit0(opcode.ne)
elif token_number == nodeNumber.GT: self.emit0(opcode.gt)
elif token_number == nodeNumber.LT: self.emit0(opcode.lt)
elif token_number == nodeNumber.GE: self.emit0(opcode.ge)
elif token_number == nodeNumber.LE: self.emit0(opcode.le)
elif token_number == nodeNumber.LOGICAL_AND: self.emit0(opcode.andop)
elif token_number == nodeNumber.LOGICAL_OR: self.emit0(opcode.orop)
elif token_number == nodeNumber.UNARY_MINUS or\
token_number == nodeNumber.LOGICAL_NOT:
p = ptr.son
if p.noderep == self.NONTERMINAL:
self.processOperator(p)
else:
self.rv_emit(p)
if token_number == nodeNumber.UNARY_MINUS: self.emit0(opcode.neg)
elif token_number == nodeNumber.LOGICAL_NOT: self.emit0(opcode.notop)
n_number == nodeNumber.PRE_INC or token_number == nodeNumber.PRE_DEC or\
token_number == nodeNumber.POST_INC or token_number == nodeNumber.POST_DEC:
p = ptr.son
if p.noderep == self.NONTERMINAL:
self.processOperator(p)
else:
self.rv_emit(p)
q = p
while not q.noderep == self.TERMINAL:
q = q.son
if q is None or not q.token["type"] == tsymbol_dict["tident"]:
print("increment/decrement operators can not be applied in expression")
return
stIndex = self.lookup(q.token["value"])
if stIndex == -1:
return
if token_number == nodeNumber.PRE_INC or token_number == nodeNumber.POST_INC:
self.emit0(opcode.incop)
elif token_number == nodeNumber.PRE_DEC or token_number == nodeNumber.POST_DEC:
self.emit0(opcode.decop)
if p.noderep == self.TERMINAL:
stIndex = self.lookup(p.token["value"])
if stIndex == -1:
return
self.emit2(opcode._str, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
elif p.token["type"] == nodeNumber.INDEX:
self.lvalue = 1
self.processOperator(p)
self.lvalue = 0
self.emit0(opcode.swp)
self.emit0(opcode.sti)
else:
print("error in increment/decrement operators")
elif token_number == nodeNumber.INDEX:
indexExp = ptr.son.brother
if indexExp.noderep == self.NONTERMINAL:
self.processOperator(indexExp)
else:
self.rv_emit(indexExp)
stIndex = self.lookup(ptr.son.token["value"])
if stIndex == -1:
print("undefined variable: ", ptr.son.token["value"])
return
self.emit2(opcode.lda, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"])
self.emit0(opcode.add)
if self.lvalue == 0:
self.emit0(opcode.ldi)
elif token_number == nodeNumber.CALL:
p = ptr.son
if self.checkPredefined(p):
return
functionName = p.token["value"]
print(functionName)
stIndex = self.lookup(functionName)
print(stIndex)
if stIndex == -1:
return
noArguments = self.symbolTable[stIndex]["width"]
self.emit0(opcode.ldp)
p = p.brother
while p:
if p.noderep == self.NONTERMINAL:
self.processOperator(p)
else:
self.rv_emit(p)
noArguments -= 1
p = p.brother
if noArguments > 0:
print(functionName, " : too few actual arguments")
if noArguments < 0:
print(functionName, " : too many actual arguments")
self.emitJump(opcode.call, ptr.son.token["value"])
else:
print("processOperator: not yet implemented")
def checkPredefined(self, ptr):
p = None
if ptr.token["value"] == "read":
self.emit0(opcode.ldp)
p = ptr.brother
while p:
if p.noderep == self.NONTERMINAL:
self.processOperator(p)
else:
self.read_emit(p)
p = p.brother
self.emitJump(opcode.call, "read")
return True
elif ptr.token["value"] == "write":
self.emit0(opcode.ldp)
p = ptr.brother
while p:
if p.noderep == self.NONTERMINAL:
self.processOperator(p)
else:
self.rv_emit(p)
p = p.brother
self.emitJump(opcode.call, "write")
return True
elif ptr.token["value"] == "lf":
self.emitJump(opcode.call, "lf")
return True
return False
:
ret_str = "$${}".format(self.labelNum)
self.labelNum += 1
return ret_str
def emitLabel(self, label):
self.ucode_str += "{:11}{}\n".format(label, "nop")
def processCondition(self, ptr):
if ptr.noderep == self.NONTERMINAL:
self.processOperator(ptr)
else:
self.rv_emit(ptr)
def processStatement(self, ptr):
token_number = ptr.token["type"]
if token_number == nodeNumber.COMPOUND_ST:
p = ptr.son.brother
p = p.son
while p:
self.processStatement(p)
p = p.brother
elif token_number == nodeNumber.EXP_ST:
if ptr.son is not None:
self.processOperator(ptr.son)
elif token_number == nodeNumber.RETURN_ST:
if ptr.son is not None:
returnWithValue = 1
p = ptr.son
if p.noderep == self.NONTERMINAL:
self.processOperator(p)
else:
self.rv_emit(p)
self.emit0(opcode.retv)
else:
self.emit0(opcode.ret)
elif token_number == nodeNumber.IF_ST:
label = self.genLabel()
self.processCondition(ptr.son)
self.emitJump(opcode.fjp, label)
self.processStatement(ptr.son.brother)
self.emitLabel(label)
elif token_number == nodeNumber.IF_ELSE_ST:
label1, label2 = self.genLabel(), self.genLabel()
self.processCondition(ptr.son)
self.emitJump(opcode.fjp, label1)
self.processStatement(ptr.son.brother)
self.emitJump(opcode.ujp, label2)
self.emitLabel(label1)
self.processStatement(ptr.son.brother.brother)
self.emitLabel(label2)
elif token_number == nodeNumber.WHILE_ST:
label1, label2 = self.genLabel(), self.genLabel()
self.emitLabel(label1)
self.processCondition(ptr.son)
self.emitJump(opcode.fjp, label2)
self.processStatement(ptr.son.brother)
self.emitJump(opcode.ujp, label1)
self.emitLabel(label2)
else:
print("processStatement: not yet implemented.")
print_part_tree(ptr)
raise AttributeError("Bang!")
eParamVariable(self, ptr, typeSpecifier, typeQualifier):
p = ptr.son
if not ptr.token["type"] == nodeNumber.SIMPLE_VAR:
print("error in SIMPLE_VAR")
size = typeSize(typeSpecifier)
stindex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
self.base, self.offset, 0, 0)
self.offset += size
def processArrayParamVariable(self, ptr, typeSpecifier, typeQualifier):
p = ptr.son
if not ptr.token["type"] == nodeNumber.ARRAY_VAR:
print("error in ARRAY_VAR")
return
size = typeSize(typeSpecifier)
stIndex = self.insert(p.token["value"], typeSpecifier, typeQualifier,
base, offset, width, 0)
offset += size
def processParamDeclaration(self, ptr):
if not ptr.token["type"] == nodeNumber.DCL_SPEC:
icg_error(4)
typeSpecifier = typeEnum.INT_TYPE
typeQualifier = typeEnum.VAR_TYPE
p = ptr.son
while p:
if p.token["type"] == nodeNumber.INT_NODE:
typeSpecifier = typeEnum.INT_TYPE
elif p.token["type"] == nodeNumber.CONST_NODE:
typeQualifier = typeEnum.CONST_TYPE
else:
print("processParamDeclaration: not yet implemented")
p = p.brother
p = ptr.brother
token_number = p.token["type"]
if token_number == nodeNumber.SIMPLE_VAR:
self.processSimpleParamVariable(p, typeSpecifier, typeQualifier)
elif token_number == nodeNumber.ARRAY_VAR:
self.processArrayParamVariable(p, typeSpecifier, typeQualifier)
else:
print(token_number, nodeNumber.SIMPLE_VAR, nodeNumber.ARRAY_VAR)
print("error in SIMPLE_VAR or ARRAY_VAR")
def emitFunc(self, FuncName, operand1, operand2, operand3):
self.ucode_str += "{:11}{} {} {} {}\n".format(FuncName, "fun", operand1, operand2, operand3)
def processFuncHeader(self, ptr):
if not ptr.token["type"] == nodeNumber.FUNC_HEAD:
print("error in processFuncHeader")
p = ptr.son.son
while p:
if p.token["type"] == nodeNumber.INT_NODE:
returnType = typeEnum.INT_TYPE
elif p.token["type"] == nodeNumber.VOID_NODE:
returnType = typeEnum.VOID_TYPE
else:
print("invalid function return type")
p = p.brother
p = ptr.son.brother.brother
p = p.son
noArguments = 0
while p:
noArguments += 1
p = p.brother
stIndex = self.insert(ptr.son.brother.token["value"], returnType,
typeEnum.FUNC_TYPE, 1, 0, noArguments, 0)
    def processFunction(self, ptr):
        """Emit the complete ucode for one function definition (FUNC_DEF).

        Walks parameters and local declarations to size the activation
        record, emits the ``fun``/``sym`` prologue, generates the body,
        adds an implicit ``ret`` for void functions and closes with
        ``endop``. Temporarily deepens ``self.base`` for the function's
        scope and resets ``self.offset``.
        """
        sizeOfVar, numOfVar = 0, 0
        # enter the function's lexical level; offset 0 is reserved
        self.base += 1
        self.offset = 1
        if not ptr.token["type"] == nodeNumber.FUNC_DEF:
            icg_error(4)
        # 1) formal parameters: FUNC_DEF.son(.son) -> header, param list
        p = ptr.son.son.brother.brother
        p = p.son
        while p:
            if p.token["type"] == nodeNumber.PARAM_DCL:
                self.processParamDeclaration(p.son)
                sizeOfVar += 1
                numOfVar += 1
            p = p.brother
        # 2) local declarations inside the compound body
        p = ptr.son.brother.son.son
        while p:
            if p.token["type"] == nodeNumber.DCL:
                self.processDeclaration(p.son)
                q = p.son.brother
                while q:
                    if q.token["type"] == nodeNumber.DCL_ITEM:
                        if q.son.token["type"] == nodeNumber.ARRAY_VAR:
                            # arrays occupy their full declared length
                            sizeOfVar += int(q.son.son.brother.token["value"])
                        else:
                            sizeOfVar += 1
                        numOfVar += 1
                    q = q.brother
            p = p.brother
        # 3) prologue: fun directive + one sym per symbol entered above
        p = ptr.son.son.brother
        self.emitFunc(p.token["value"], sizeOfVar, self.base, 2)
        for stIndex in range(len(self.symbolTable)-numOfVar, len(self.symbolTable)):
            self.emit3(opcode.sym, self.symbolTable[stIndex]["base"], self.symbolTable[stIndex]["offset"],
                       self.symbolTable[stIndex]["width"])
        # 4) body
        p = ptr.son.brother
        self.processStatement(p)
        # 5) implicit return for (possibly const-qualified) void functions
        p = ptr.son.son
        if p.token["type"] == nodeNumber.DCL_SPEC:
            p = p.son
            if p.token["type"] == nodeNumber.VOID_NODE:
                self.emit0(opcode.ret)
            elif p.token["type"] == nodeNumber.CONST_NODE:
                if p.brother.token["type"] == nodeNumber.VOID_NODE:
                    self.emit0(opcode.ret)
        self.emit0(opcode.endop)
        # leave the function's lexical level
        self.base -= 1
def write_code_to_file(self, file_name):
file_name = file_name + ".uco"
with open(file_name, 'w') as f:
f.write(self.ucode_str)
| true | true |
f727c25e97bf1486886310c30e2304cba568c8b8 | 3,816 | py | Python | core/metrics/recall_k.py | overlordmax/PaddleRec | ddf6ec25b228ffde83e6592163d88c3ace9069f0 | [
"Apache-2.0"
] | 1 | 2020-09-29T01:14:22.000Z | 2020-09-29T01:14:22.000Z | core/metrics/recall_k.py | Qdriving/PaddleRec | b4d6ac77450d98a935c6a5d0eba6abbb21b9d06a | [
"Apache-2.0"
] | null | null | null | core/metrics/recall_k.py | Qdriving/PaddleRec | b4d6ac77450d98a935c6a5d0eba6abbb21b9d06a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import paddle.fluid as fluid
from paddlerec.core.metric import Metric
from paddle.fluid.layers import accuracy
from paddle.fluid.initializer import Constant
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import Variable
class RecallK(Metric):
    """Recall@k metric for PaddlePaddle fluid models.

    Builds two persistable counters in the program (total instances seen
    and instances whose label appears in the top-k predictions) and
    exposes their running ratio as ``Acc(Recall@k)``.
    """
    def __init__(self, input, label, k=20):
        """Wire the metric ops into the current fluid program.

        :param input: prediction Variable (scores per class)
        :param label: ground-truth label Variable
        :param k: top-k cutoff for a prediction to count as a hit
        """
        # LayerHelper wants the constructor kwargs by name; capture them
        # before defining any locals, then drop `self`.
        kwargs = locals()
        del kwargs['self']
        self.k = k
        if not isinstance(input, Variable):
            raise ValueError("input must be Variable, but received %s" %
                             type(input))
        if not isinstance(label, Variable):
            raise ValueError("label must be Variable, but received %s" %
                             type(label))
        helper = LayerHelper("PaddleRec_RecallK", **kwargs)
        # per-batch top-k accuracy (fraction of hits in this batch)
        batch_accuracy = accuracy(input, label, self.k)
        # global accumulators, persisted across batches
        global_ins_cnt, _ = helper.create_or_get_global_variable(
            name="ins_cnt", persistable=True, dtype='float32', shape=[1])
        global_pos_cnt, _ = helper.create_or_get_global_variable(
            name="pos_cnt", persistable=True, dtype='float32', shape=[1])
        for var in [global_ins_cnt, global_pos_cnt]:
            helper.set_variable_initializer(
                var, Constant(
                    value=0.0, force_cpu=True))
        # batch instance count = sum over a ones tensor shaped like label
        tmp_ones = fluid.layers.fill_constant(
            shape=fluid.layers.shape(label), dtype="float32", value=1.0)
        batch_ins = fluid.layers.reduce_sum(tmp_ones)
        # hits in this batch = instances * batch accuracy
        batch_pos = batch_ins * batch_accuracy
        # accumulate both counters in place
        helper.append_op(
            type="elementwise_add",
            inputs={"X": [global_ins_cnt],
                    "Y": [batch_ins]},
            outputs={"Out": [global_ins_cnt]})
        helper.append_op(
            type="elementwise_add",
            inputs={"X": [global_pos_cnt],
                    "Y": [batch_pos]},
            outputs={"Out": [global_pos_cnt]})
        self.acc = global_pos_cnt / global_ins_cnt
        # names + dtypes of the state vars, for distributed aggregation
        self._global_metric_state_vars = dict()
        self._global_metric_state_vars['ins_cnt'] = (global_ins_cnt.name,
                                                     "float32")
        self._global_metric_state_vars['pos_cnt'] = (global_pos_cnt.name,
                                                     "float32")
        metric_name = "Acc(Recall@%d)" % self.k
        self.metrics = dict()
        self.metrics["InsCnt"] = global_ins_cnt
        self.metrics["RecallCnt"] = global_pos_cnt
        self.metrics[metric_name] = self.acc
    def _calculate(self, global_metrics):
        """Format the aggregated counters into a readable summary string."""
        for key in self._global_metric_state_vars:
            if key not in global_metrics:
                raise ValueError("%s not existed" % key)
        ins_cnt = global_metrics['ins_cnt'][0]
        pos_cnt = global_metrics['pos_cnt'][0]
        # guard the division: no instances seen yet means accuracy 0
        if ins_cnt == 0:
            acc = 0
        else:
            acc = float(pos_cnt) / ins_cnt
        return "InsCnt=%s RecallCnt=%s Acc(Recall@%d)=%s" % (
            str(ins_cnt), str(pos_cnt), self.k, str(acc))
    def get_result(self):
        """Return the dict of metric Variables to fetch during run."""
        return self.metrics
| 36.692308 | 74 | 0.612945 |
import math
import numpy as np
import paddle.fluid as fluid
from paddlerec.core.metric import Metric
from paddle.fluid.layers import accuracy
from paddle.fluid.initializer import Constant
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import Variable
class RecallK(Metric):
    """Recall@k metric: running ratio of top-k hits to instances seen."""
    def __init__(self, input, label, k=20):
        """Wire the Recall@k ops and global counters into the fluid program."""
        # capture ctor kwargs for LayerHelper, minus `self`
        kwargs = locals()
        del kwargs['self']
        self.k = k
        if not isinstance(input, Variable):
            raise ValueError("input must be Variable, but received %s" %
                             type(input))
        if not isinstance(label, Variable):
            raise ValueError("label must be Variable, but received %s" %
                             type(label))
        helper = LayerHelper("PaddleRec_RecallK", **kwargs)
        # per-batch top-k accuracy
        batch_accuracy = accuracy(input, label, self.k)
        # persistable accumulators: instances seen, top-k hits
        global_ins_cnt, _ = helper.create_or_get_global_variable(
            name="ins_cnt", persistable=True, dtype='float32', shape=[1])
        global_pos_cnt, _ = helper.create_or_get_global_variable(
            name="pos_cnt", persistable=True, dtype='float32', shape=[1])
        for var in [global_ins_cnt, global_pos_cnt]:
            helper.set_variable_initializer(
                var, Constant(
                    value=0.0, force_cpu=True))
        # batch size via reduce_sum over a ones tensor shaped like label
        tmp_ones = fluid.layers.fill_constant(
            shape=fluid.layers.shape(label), dtype="float32", value=1.0)
        batch_ins = fluid.layers.reduce_sum(tmp_ones)
        batch_pos = batch_ins * batch_accuracy
        # in-place accumulation of both counters
        helper.append_op(
            type="elementwise_add",
            inputs={"X": [global_ins_cnt],
                    "Y": [batch_ins]},
            outputs={"Out": [global_ins_cnt]})
        helper.append_op(
            type="elementwise_add",
            inputs={"X": [global_pos_cnt],
                    "Y": [batch_pos]},
            outputs={"Out": [global_pos_cnt]})
        self.acc = global_pos_cnt / global_ins_cnt
        self._global_metric_state_vars = dict()
        self._global_metric_state_vars['ins_cnt'] = (global_ins_cnt.name,
                                                     "float32")
        self._global_metric_state_vars['pos_cnt'] = (global_pos_cnt.name,
                                                     "float32")
        metric_name = "Acc(Recall@%d)" % self.k
        self.metrics = dict()
        self.metrics["InsCnt"] = global_ins_cnt
        self.metrics["RecallCnt"] = global_pos_cnt
        self.metrics[metric_name] = self.acc
    def _calculate(self, global_metrics):
        """Render aggregated counters as a human-readable summary."""
        for key in self._global_metric_state_vars:
            if key not in global_metrics:
                raise ValueError("%s not existed" % key)
        ins_cnt = global_metrics['ins_cnt'][0]
        pos_cnt = global_metrics['pos_cnt'][0]
        # avoid division by zero before any instance is seen
        if ins_cnt == 0:
            acc = 0
        else:
            acc = float(pos_cnt) / ins_cnt
        return "InsCnt=%s RecallCnt=%s Acc(Recall@%d)=%s" % (
            str(ins_cnt), str(pos_cnt), self.k, str(acc))
    def get_result(self):
        """Return the dict of metric Variables to fetch during run."""
        return self.metrics
| true | true |
f727c2bb12990251b7186d56d9373b097441a307 | 4,591 | py | Python | examples/pagination.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | examples/pagination.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | examples/pagination.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
# This script demonstrates different methods of pagination in the SDK via the oci.pagination module. This module supports:
#
# - Eagerly loading all possible results from a list call
# - Eagerly loading all results from a list call up to a given limit
# - Generators that can be used to lazily iterate over results from a list call. These generators can yield either values/models
# or the raw responses of the call
# - Pagination using raw responses instead of the oci.pagination module
import oci

# Load the default profile from ~/.oci/config. The tenancy OCID doubles as
# the root compartment for the list calls below.
config = oci.config.from_file()
compartment_id = config["tenancy"]
identity = oci.identity.IdentityClient(config)

# Eager loading of all possible results. Returns an oci.response.Response
# whose data attribute contains a list of all results; the other attributes
# come from the last response received from the service.
print('--------------------------------------------')
print('Eager load all results')
print('--------------------------------------------')
response = oci.pagination.list_call_get_all_results(identity.list_users, compartment_id)
for user in response.data:
    print('User: {}'.format(user.name))

# Eager loading of all results up to a given limit. Note that we specify a
# record limit (20) and a page size (5). Returns an oci.response.Response
# whose data attribute contains a list of all results.
print('--------------------------------------------')
print('Eager load up to limit')
print('--------------------------------------------')
response = oci.pagination.list_call_get_up_to_limit(identity.list_users, 20, 5, compartment_id)
total_results = 0
for user in response.data:
    total_results += 1
    print('User: {}'.format(user.name))
print('Total results: {}'.format(total_results))

# Lazily load all results via a generator yielding values/models
# (yield_mode 'record').
# (Consistency fix: these generator calls now reuse compartment_id rather
# than re-reading config["tenancy"]; the value is identical.)
print('--------------------------------------------')
print('Lazy load all results - yield values')
print('--------------------------------------------')
for user in oci.pagination.list_call_get_all_results_generator(identity.list_users, 'record', compartment_id):
    print('User: {}'.format(user.name))

# Lazily load all results, yielding the raw responses instead.
print('--------------------------------------------')
print('Lazy load all results - yield raw responses')
print('--------------------------------------------')
response_num = 0
for response in oci.pagination.list_call_get_all_results_generator(identity.list_users, 'response', compartment_id):
    response_num += 1
    for user in response.data:
        print('Response: {}, User: {}'.format(response_num, user.name))

# Lazily load results up to a limit (20 records, page size 10), yielding values.
print('--------------------------------------------')
print('Lazy load up to limit - yield values')
print('--------------------------------------------')
total_results = 0
for user in oci.pagination.list_call_get_up_to_limit_generator(identity.list_users, 20, 10, 'record', compartment_id):
    total_results += 1
    print('User: {}'.format(user.name))
print('Total results: {}'.format(total_results))

# Same, but yielding the raw responses.
print('--------------------------------------------')
print('Lazy load up to limit - yield raw responses')
print('--------------------------------------------')
response_num = 0
total_results = 0
for response in oci.pagination.list_call_get_up_to_limit_generator(identity.list_users, 20, 10, 'response', compartment_id):
    response_num += 1
    for user in response.data:
        total_results += 1
        print('Response: {}, User: {}'.format(response_num, user.name))
print('Total results: {}'.format(total_results))

# Manual pagination using raw responses and the next_page token.
print('--------------------------------------------')
print('Pagination using raw responses')
print('--------------------------------------------')
response = identity.list_users(compartment_id)
users = response.data
while response.has_next_page:
    response = identity.list_users(compartment_id, page=response.next_page)
    users.extend(response.data)
for u in users:
    print('User: {}'.format(u.name))
| 48.840426 | 245 | 0.648007 |
import oci
# Default profile from ~/.oci/config; the tenancy OCID doubles as the
# root compartment for the list calls below.
config = oci.config.from_file()
compartment_id = config["tenancy"]
identity = oci.identity.IdentityClient(config)
# 1) Eagerly load ALL results into one Response.data list.
print('--------------------------------------------')
print('Eager load all results')
print('--------------------------------------------')
response = oci.pagination.list_call_get_all_results(identity.list_users, compartment_id)
for user in response.data:
    print('User: {}'.format(user.name))
# 2) Eagerly load up to a record limit (20) with page size 5.
print('--------------------------------------------')
print('Eager load up to limit')
print('--------------------------------------------')
response = oci.pagination.list_call_get_up_to_limit(identity.list_users, 20, 5, compartment_id)
total_results = 0
for user in response.data:
    total_results += 1
    print('User: {}'.format(user.name))
print('Total results: {}'.format(total_results))
# 3) Lazy generator over all results, yielding models ('record' mode).
print('--------------------------------------------')
print('Lazy load all results - yield values')
print('--------------------------------------------')
for user in oci.pagination.list_call_get_all_results_generator(identity.list_users, 'record', config["tenancy"]):
    print('User: {}'.format(user.name))
# 4) Lazy generator over all results, yielding raw responses.
print('--------------------------------------------')
print('Lazy load all results - yield raw responses')
print('--------------------------------------------')
response_num = 0
for response in oci.pagination.list_call_get_all_results_generator(identity.list_users, 'response', config["tenancy"]):
    response_num += 1
    for user in response.data:
        print('Response: {}, User: {}'.format(response_num, user.name))
# 5) Lazy generator up to 20 records (page size 10), yielding models.
print('--------------------------------------------')
print('Lazy load up to limit - yield values')
print('--------------------------------------------')
total_results = 0
for user in oci.pagination.list_call_get_up_to_limit_generator(identity.list_users, 20, 10, 'record', config["tenancy"]):
    total_results += 1
    print('User: {}'.format(user.name))
print('Total results: {}'.format(total_results))
# 6) Same limit, yielding raw responses.
print('--------------------------------------------')
print('Lazy load up to limit - yield raw responses')
print('--------------------------------------------')
response_num = 0
total_results = 0
for response in oci.pagination.list_call_get_up_to_limit_generator(identity.list_users, 20, 10, 'response', config["tenancy"]):
    response_num += 1
    for user in response.data:
        total_results += 1
        print('Response: {}, User: {}'.format(response_num, user.name))
print('Total results: {}'.format(total_results))
# 7) Manual pagination with raw responses and the next_page token.
print('--------------------------------------------')
print('Pagination using raw responses')
print('--------------------------------------------')
response = identity.list_users(compartment_id)
users = response.data
while response.has_next_page:
    response = identity.list_users(compartment_id, page=response.next_page)
    users.extend(response.data)
for u in users:
    print('User: {}'.format(u.name))
| true | true |
f727c3080a1e1cf10bc6a51628ccb3002fa6f009 | 6,213 | py | Python | slack/webhook/client.py | timgates42/python-slack-sdk | 6339fbe81031c9aec3f95927ac03706fd31f3544 | [
"MIT"
] | null | null | null | slack/webhook/client.py | timgates42/python-slack-sdk | 6339fbe81031c9aec3f95927ac03706fd31f3544 | [
"MIT"
] | null | null | null | slack/webhook/client.py | timgates42/python-slack-sdk | 6339fbe81031c9aec3f95927ac03706fd31f3544 | [
"MIT"
] | null | null | null | import json
import logging
import urllib
from http.client import HTTPResponse
from ssl import SSLContext
from typing import Dict, Union, List, Optional
from urllib.error import HTTPError
from urllib.request import Request, urlopen, OpenerDirector, ProxyHandler, HTTPSHandler
from slack.errors import SlackRequestError
from .internal_utils import _build_body, _build_request_headers, _debug_log_response
from .webhook_response import WebhookResponse
from ..web.classes.attachments import Attachment
from ..web.classes.blocks import Block
class WebhookClient:
    """Client for Slack Incoming Webhook URLs and ``response_url``s,
    implemented on top of ``urllib`` (no third-party HTTP client).
    """
    logger = logging.getLogger(__name__)
    def __init__(
        self,
        url: str,
        timeout: int = 30,
        ssl: Optional[SSLContext] = None,
        proxy: Optional[str] = None,
        default_headers: Optional[Dict[str, str]] = None,
    ):
        """API client for Incoming Webhooks and response_url.

        :param url: a complete URL to send data (e.g., https://hooks.slack.com/XXX)
        :param timeout: request timeout (in seconds)
        :param ssl: ssl.SSLContext to use for requests
        :param proxy: proxy URL (e.g., localhost:9000, http://localhost:9000)
        :param default_headers: request headers to add to all requests
        """
        self.url = url
        self.timeout = timeout
        self.ssl = ssl
        self.proxy = proxy
        # normalize None to {} so later header merging is unconditional
        self.default_headers = default_headers if default_headers else {}
    def send(
        self,
        *,
        # NOTE(review): `any` in the annotations below is the builtin
        # function, not typing.Any; harmless at runtime but should be
        # typing.Any (needs an import change).
        text: Optional[str] = None,
        attachments: Optional[List[Union[Dict[str, any], Attachment]]] = None,
        blocks: Optional[List[Union[Dict[str, any], Block]]] = None,
        response_type: Optional[str] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> WebhookResponse:
        """Performs a Slack API request and returns the result.

        :param text: the text message (even when having blocks, setting this
            as well is recommended as it works as fallback)
        :param attachments: a collection of attachments
        :param blocks: a collection of Block Kit UI components
        :param response_type: the type of message (either 'in_channel' or 'ephemeral')
        :param headers: request headers to append only for this request
        :return: API response
        """
        return self.send_dict(
            body={
                "text": text,
                "attachments": attachments,
                "blocks": blocks,
                "response_type": response_type,
            },
            headers=headers,
        )
    def send_dict(
        self, body: Dict[str, any], headers: Optional[Dict[str, str]] = None
    ) -> WebhookResponse:
        """Performs a Slack API request with an already-built message body.

        :param body: message payload as a dict (serialized to JSON later)
        :param headers: request headers to append only for this request
        :return: API response
        """
        return self._perform_http_request(
            body=_build_body(body),
            headers=_build_request_headers(self.default_headers, headers),
        )
    def _perform_http_request(
        self, *, body: Dict[str, any], headers: Dict[str, str]
    ) -> WebhookResponse:
        """Performs an HTTP POST to ``self.url`` and parses the response.

        (Docstring fix: the previous version documented a ``url``
        parameter that this method does not take.)

        :param body: request body data (dict; JSON-serialized here)
        :param headers: complete set of request headers
        :return: API response
        """
        body = json.dumps(body)
        headers["Content-Type"] = "application/json;charset=utf-8"
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(
                f"Sending a request - url: {self.url}, body: {body}, headers: {headers}"
            )
        try:
            url = self.url
            opener: Optional[OpenerDirector] = None
            # for security (BAN-B310): only http/https schemes are allowed
            if url.lower().startswith("http"):
                req = Request(
                    method="POST", url=url, data=body.encode("utf-8"), headers=headers
                )
                if self.proxy is not None:
                    if isinstance(self.proxy, str):
                        # route both schemes through the proxy, keeping our
                        # SSLContext for the TLS leg
                        opener = urllib.request.build_opener(
                            ProxyHandler({"http": self.proxy, "https": self.proxy}),
                            HTTPSHandler(context=self.ssl),
                        )
                    else:
                        raise SlackRequestError(
                            f"Invalid proxy detected: {self.proxy} must be a str value"
                        )
            else:
                raise SlackRequestError(f"Invalid URL detected: {url}")
            # NOTE: BAN-B310 is already checked above
            resp: Optional[HTTPResponse] = None
            if opener:
                resp = opener.open(req, timeout=self.timeout)  # skipcq: BAN-B310
            else:
                resp = urlopen(  # skipcq: BAN-B310
                    req, context=self.ssl, timeout=self.timeout
                )
            charset: str = resp.headers.get_content_charset() or "utf-8"
            response_body: str = resp.read().decode(charset)
            resp = WebhookResponse(
                url=url,
                status_code=resp.status,
                body=response_body,
                headers=resp.headers,
            )
            _debug_log_response(self.logger, resp)
            return resp
        except HTTPError as e:
            # non-2xx responses still carry a usable Slack payload
            charset = e.headers.get_content_charset() or "utf-8"
            # re-binds the `body` parameter to the response text here
            body: str = e.read().decode(charset)  # read the response body here
            resp = WebhookResponse(
                url=url, status_code=e.code, body=body, headers=e.headers,
            )
            if e.code == 429:
                # for backward-compatibility with WebClient (v.2.5.0 or older);
                # HTTPMessage lookups are case-insensitive so "retry-after"
                # matches whatever casing the server sent
                resp.headers["Retry-After"] = resp.headers["retry-after"]
            _debug_log_response(self.logger, resp)
            return resp
        except Exception as err:
            self.logger.error(f"Failed to send a request to Slack API server: {err}")
            raise err
| 40.607843 | 124 | 0.584259 | import json
import logging
import urllib
from http.client import HTTPResponse
from ssl import SSLContext
from typing import Dict, Union, List, Optional
from urllib.error import HTTPError
from urllib.request import Request, urlopen, OpenerDirector, ProxyHandler, HTTPSHandler
from slack.errors import SlackRequestError
from .internal_utils import _build_body, _build_request_headers, _debug_log_response
from .webhook_response import WebhookResponse
from ..web.classes.attachments import Attachment
from ..web.classes.blocks import Block
class WebhookClient:
    """Slack Incoming Webhook / response_url client built on ``urllib``."""
    logger = logging.getLogger(__name__)
    def __init__(
        self,
        url: str,
        timeout: int = 30,
        ssl: Optional[SSLContext] = None,
        proxy: Optional[str] = None,
        default_headers: Optional[Dict[str, str]] = None,
    ):
        """Store the webhook URL plus per-client transport settings."""
        self.url = url
        self.timeout = timeout
        self.ssl = ssl
        self.proxy = proxy
        # normalize None to {} so header merging is unconditional
        self.default_headers = default_headers if default_headers else {}
    def send(
        self,
        *,
        text: Optional[str] = None,
        attachments: Optional[List[Union[Dict[str, any], Attachment]]] = None,
        blocks: Optional[List[Union[Dict[str, any], Block]]] = None,
        response_type: Optional[str] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> WebhookResponse:
        """Send a message (text / attachments / blocks) to the webhook."""
        return self.send_dict(
            body={
                "text": text,
                "attachments": attachments,
                "blocks": blocks,
                "response_type": response_type,
            },
            headers=headers,
        )
    def send_dict(
        self, body: Dict[str, any], headers: Optional[Dict[str, str]] = None
    ) -> WebhookResponse:
        """Send an already-assembled message payload dict."""
        return self._perform_http_request(
            body=_build_body(body),
            headers=_build_request_headers(self.default_headers, headers),
        )
    def _perform_http_request(
        self, *, body: Dict[str, any], headers: Dict[str, str]
    ) -> WebhookResponse:
        """POST *body* as JSON to ``self.url`` and wrap the reply."""
        body = json.dumps(body)
        headers["Content-Type"] = "application/json;charset=utf-8"
        if self.logger.level <= logging.DEBUG:
            self.logger.debug(
                f"Sending a request - url: {self.url}, body: {body}, headers: {headers}"
            )
        try:
            url = self.url
            opener: Optional[OpenerDirector] = None
            # only http/https schemes are allowed (security guard)
            if url.lower().startswith("http"):
                req = Request(
                    method="POST", url=url, data=body.encode("utf-8"), headers=headers
                )
                if self.proxy is not None:
                    if isinstance(self.proxy, str):
                        # route through the proxy, keeping our SSLContext
                        opener = urllib.request.build_opener(
                            ProxyHandler({"http": self.proxy, "https": self.proxy}),
                            HTTPSHandler(context=self.ssl),
                        )
                    else:
                        raise SlackRequestError(
                            f"Invalid proxy detected: {self.proxy} must be a str value"
                        )
            else:
                raise SlackRequestError(f"Invalid URL detected: {url}")
            resp: Optional[HTTPResponse] = None
            if opener:
                resp = opener.open(req, timeout=self.timeout)
            else:
                resp = urlopen(
                    req, context=self.ssl, timeout=self.timeout
                )
            charset: str = resp.headers.get_content_charset() or "utf-8"
            response_body: str = resp.read().decode(charset)
            resp = WebhookResponse(
                url=url,
                status_code=resp.status,
                body=response_body,
                headers=resp.headers,
            )
            _debug_log_response(self.logger, resp)
            return resp
        except HTTPError as e:
            # non-2xx replies still carry a usable payload; wrap them too
            charset = e.headers.get_content_charset() or "utf-8"
            # re-binds the `body` parameter to the response text
            body: str = e.read().decode(charset)
            resp = WebhookResponse(
                url=url, status_code=e.code, body=body, headers=e.headers,
            )
            if e.code == 429:
                # mirror header casing expected by older WebClient versions
                resp.headers["Retry-After"] = resp.headers["retry-after"]
            _debug_log_response(self.logger, resp)
            return resp
        except Exception as err:
            self.logger.error(f"Failed to send a request to Slack API server: {err}")
            raise err
| true | true |
f727c35e7fd050c837120bd4cb292fd43c9f546f | 73 | py | Python | Codewars/8kyu/reversed-words/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codewars/8kyu/reversed-words/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codewars/8kyu/reversed-words/Python/solution1.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python - 3.6.0
def reverseWords(s):
    """Return *s* with the order of its whitespace-separated words reversed.

    Fixes PEP 8 E731 (lambda bound to a name) and renames the parameter,
    which shadowed the builtin ``str``. Split/join semantics unchanged:
    runs of whitespace collapse to single spaces.
    """
    return ' '.join(s.split()[::-1])
| 18.25 | 54 | 0.589041 |
# Reverse the order of whitespace-separated words (duplicate copy,
# restyled with reversed() instead of slicing).
reverseWords = lambda str: ' '.join(reversed(str.split()))
| true | true |
f727c565e144343ffc38a97bdeaadb5f3c26c99d | 3,587 | py | Python | restui/lib/alignments.py | brjones/gifts_rest | 8217e45fd1a692b00c9e9ae9f022ac2d2fab211e | [
"Apache-2.0"
] | null | null | null | restui/lib/alignments.py | brjones/gifts_rest | 8217e45fd1a692b00c9e9ae9f022ac2d2fab211e | [
"Apache-2.0"
] | 7 | 2018-09-05T10:53:44.000Z | 2022-03-08T09:36:14.000Z | restui/lib/alignments.py | brjones/gifts_rest | 8217e45fd1a692b00c9e9ae9f022ac2d2fab211e | [
"Apache-2.0"
] | 8 | 2018-09-03T14:29:28.000Z | 2020-07-30T12:54:04.000Z | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from sam_alignment_reconstructor.pairwise import pairwise_alignment
from sam_alignment_reconstructor.pairwise import cigar_split
from restui.lib.external import ensembl_sequence
from restui.lib.external import ensembl_protein
def fetch_pairwise(mapping):
    """Fetch the pairwise sequence alignments for a mapping between an
    Ensembl transcript and a UniProt entry.

    The first 'identity'-scored alignment stops the scan; every
    'perfect_match' alignment with score1 == 1 is collected.

    Parameters
    ----------
    mapping

    Returns
    -------
    dict
        mapping_id : int
        alignments : list of alignment dicts (see _fetch_alignment)
    """
    transcript_id = mapping.transcript.enst_id
    accession = mapping.uniprot.uniprot_acc
    collected = []
    for aln in mapping.alignments.all():
        score_type = aln.alignment_run.score1_type
        if score_type == 'identity':
            collected.append(_fetch_alignment(aln, transcript_id, accession))
            # identity alignments are definitive; stop scanning
            break
        if score_type == 'perfect_match' and aln.score1 == 1:
            collected.append(_fetch_alignment(aln, transcript_id, accession))
    return {
        'mapping_id': mapping.mapping_id,
        'alignments': collected
    }
def _fetch_alignment(alignment, enst, uniprot_id):
    """Build one alignment dict for *alignment*.

    Resolves the Ensembl protein for *enst*, fetches its sequence, and —
    for 'identity'-scored alignments — reconstructs the pairwise
    alignment from the stored CIGAR and MD:Z strings. Perfect matches
    reuse the plain sequence on both sides with an all-'|' match string.

    Parameters
    ----------
    alignment
    enst : str
    uniprot_id : str

    Returns
    -------
    pw_alignment : dict
        Alignment object
    """
    ens_release = alignment.alignment_run.ensembl_release
    ensp = ensembl_protein(enst, ens_release)
    seq = ensembl_sequence(ensp, ens_release)
    # perfect-match defaults: both sides identical, full match string
    ensembl_seq = seq
    uniprot_seq = seq
    match_str = '|' * len(seq)
    alignment_type = 'perfect_match'
    if alignment.alignment_run.score1_type == 'identity':
        cigarplus = alignment.pairwise.cigarplus
        mdz = alignment.pairwise.mdz
        # strip the SAM "MD:Z:" tag prefix if present
        if mdz.startswith('MD:Z:'):
            mdz = mdz[len('MD:Z:'):]
        uniprot_seq, match_str, ensembl_seq = pairwise_alignment(seq, cigarplus, mdz)
        alignment_type = 'identity'
    # NOTE(review): 'uniprot_alignment' is filled from ensembl_seq and
    # 'ensembl_alignment' from uniprot_seq -- presumably deliberate given
    # pairwise_alignment()'s return order, but it reads swapped; confirm
    # against sam_alignment_reconstructor's documentation.
    pw_alignment = {
        'uniprot_alignment': ensembl_seq,
        'ensembl_alignment': uniprot_seq,
        'match_str': match_str,
        'alignment_id': alignment.alignment_id,
        'ensembl_release': ens_release,
        'ensembl_id': ensp,
        'uniprot_id': uniprot_id,
        'alignment_type': alignment_type
    }
    return pw_alignment
def calculate_difference(cigar):
    """Count the bases by which two aligned sequences differ.

    Sums the lengths of insertion (``I``), deletion (``D``) and mismatch
    (``X``) operations in *cigar*; match and all other operations
    contribute nothing.

    Parameters
    ----------
    cigar : str
        A CIGAR string such as ``"10M2I3D"``.

    Returns
    -------
    diff_count : int
    """
    # Inline <length><op> parsing (stdlib-only replacement for the
    # cigar_split() helper, whose only use in this module was here).
    # Assumes a well-formed CIGAR; malformed runs are skipped rather
    # than raising -- TODO confirm parity with cigar_split on bad input.
    diff_count = 0
    length_digits = ''
    for ch in cigar:
        if ch.isdigit():
            length_digits += ch
        else:
            if ch in ('I', 'D', 'X') and length_digits:
                diff_count += int(length_digits)
            length_digits = ''
    return diff_count
| 26.182482 | 85 | 0.647338 |
from sam_alignment_reconstructor.pairwise import pairwise_alignment
from sam_alignment_reconstructor.pairwise import cigar_split
from restui.lib.external import ensembl_sequence
from restui.lib.external import ensembl_protein
def fetch_pairwise(mapping):
    """Collect the pairwise alignments for a transcript/UniProt mapping.

    The first 'identity' alignment found is returned on its own;
    otherwise every 'perfect_match' alignment with score1 == 1 is
    gathered.

    Parameters
    ----------
    mapping
        Mapping model instance linking a transcript to a UniProt entry.

    Returns
    -------
    dict
        {'mapping_id': ..., 'alignments': [alignment dicts]}
    """
    pairwise_alignments = []
    enst = mapping.transcript.enst_id
    uniprot_id = mapping.uniprot.uniprot_acc
    for alignment in mapping.alignments.all():
        if alignment.alignment_run.score1_type == 'identity':
            pairwise_alignments.append(
                _fetch_alignment(alignment, enst, uniprot_id)
            )
            # An identity alignment supersedes the rest; stop scanning.
            break
        elif (
            alignment.alignment_run.score1_type == 'perfect_match' and
            alignment.score1 == 1
        ):
            pairwise_alignments.append(
                _fetch_alignment(alignment, enst, uniprot_id)
            )
    return {
        'mapping_id': mapping.mapping_id,
        'alignments': pairwise_alignments
    }
def _fetch_alignment(alignment, enst, uniprot_id):
ens_release = alignment.alignment_run.ensembl_release
ensp = ensembl_protein(enst, ens_release)
seq = ensembl_sequence(ensp, ens_release)
ensembl_seq = seq
uniprot_seq = seq
match_str = '|' * len(seq)
alignment_type = 'perfect_match'
if alignment.alignment_run.score1_type == 'identity':
cigarplus = alignment.pairwise.cigarplus
mdz = alignment.pairwise.mdz
if mdz.startswith('MD:Z:'):
mdz = mdz[len('MD:Z:'):]
uniprot_seq, match_str, ensembl_seq = pairwise_alignment(seq, cigarplus, mdz)
alignment_type = 'identity'
pw_alignment = {
'uniprot_alignment': ensembl_seq,
'ensembl_alignment': uniprot_seq,
'match_str': match_str,
'alignment_id': alignment.alignment_id,
'ensembl_release': ens_release,
'ensembl_id': ensp,
'uniprot_id': uniprot_id,
'alignment_type': alignment_type
}
return pw_alignment
def calculate_difference(cigar):
diff_count = 0
for c, op in cigar_split(cigar):
if op in ('I', 'D', 'X'):
diff_count += c
return diff_count
| true | true |
f727c5f55aa71d5cb21714681206c4fac4c7f6bc | 762 | py | Python | state_management/python/sdk/order-processor/app.py | amulyavarote/quickstarts | c21a8f58d515b28eaa8a3680388fa06995c2331b | [
"Apache-2.0"
] | null | null | null | state_management/python/sdk/order-processor/app.py | amulyavarote/quickstarts | c21a8f58d515b28eaa8a3680388fa06995c2331b | [
"Apache-2.0"
] | null | null | null | state_management/python/sdk/order-processor/app.py | amulyavarote/quickstarts | c21a8f58d515b28eaa8a3680388fa06995c2331b | [
"Apache-2.0"
] | null | null | null | from time import sleep
import logging
from dapr.clients import DaprClient
logging.basicConfig(level=logging.INFO)
DAPR_STORE_NAME = "statestore"
for i in range(1, 10):
orderId = str(i)
order = {'orderId': orderId}
with DaprClient() as client:
# Save state into the state store
client.save_state(DAPR_STORE_NAME, orderId, str(order))
logging.info('Saving Order: %s', order)
# Get state from the state store
result = client.get_state(DAPR_STORE_NAME, orderId)
logging.info('Result after get: ' + str(result.data))
# Delete state from the state store
client.delete_state(store_name=DAPR_STORE_NAME, key=orderId)
logging.info('Deleting Order: %s', order)
sleep(1)
| 29.307692 | 68 | 0.670604 | from time import sleep
import logging
from dapr.clients import DaprClient
logging.basicConfig(level=logging.INFO)
DAPR_STORE_NAME = "statestore"
for i in range(1, 10):
orderId = str(i)
order = {'orderId': orderId}
with DaprClient() as client:
client.save_state(DAPR_STORE_NAME, orderId, str(order))
logging.info('Saving Order: %s', order)
result = client.get_state(DAPR_STORE_NAME, orderId)
logging.info('Result after get: ' + str(result.data))
client.delete_state(store_name=DAPR_STORE_NAME, key=orderId)
logging.info('Deleting Order: %s', order)
sleep(1)
| true | true |
f727c7c44cfc150a303ad06b4cd37d80fe4a6d31 | 3,598 | py | Python | thomasdevri_es/thomasdevri_es/settings.py | jdevries3133/chaotic_christmas_present | c7ef3f9a8fdef43211c59398b6bb9b45383cdc0d | [
"Apache-2.0"
] | null | null | null | thomasdevri_es/thomasdevri_es/settings.py | jdevries3133/chaotic_christmas_present | c7ef3f9a8fdef43211c59398b6bb9b45383cdc0d | [
"Apache-2.0"
] | null | null | null | thomasdevri_es/thomasdevri_es/settings.py | jdevries3133/chaotic_christmas_present | c7ef3f9a8fdef43211c59398b6bb9b45383cdc0d | [
"Apache-2.0"
] | null | null | null | """
Django settings for thomasdevri_es project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('DJANGO_SECRET')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = [
'*'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'staff',
'sql_vulnerable',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'thomasdevri_es.urls'
LOGIN_URL = '/staff/login/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'thomasdevri_es.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': 'django.log',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
}
| 24.47619 | 91 | 0.663424 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = os.getenv('DJANGO_SECRET')
DEBUG = False
ALLOWED_HOSTS = [
'*'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'staff',
'sql_vulnerable',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'thomasdevri_es.urls'
LOGIN_URL = '/staff/login/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'thomasdevri_es.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_root')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': 'django.log',
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'INFO',
'propagate': True,
},
},
}
| true | true |
f727c8673af3cda14875414f836d4092d8d30ac1 | 6,282 | py | Python | compose/container.py | alunduil/fig | fc63454c99674b3758721bcc5df0cea841ed6ba5 | [
"Apache-2.0"
] | null | null | null | compose/container.py | alunduil/fig | fc63454c99674b3758721bcc5df0cea841ed6ba5 | [
"Apache-2.0"
] | null | null | null | compose/container.py | alunduil/fig | fc63454c99674b3758721bcc5df0cea841ed6ba5 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
from functools import reduce
import six
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_SERVICE
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        # `client` is the docker API client, `dictionary` the raw container
        # JSON; `has_been_inspected` records whether `dictionary` already
        # holds full inspect output (vs. the abbreviated ps form).
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected
    @classmethod
    def from_ps(cls, client, dictionary, **kwargs):
        """
        Construct a container object from the output of GET /containers/json.
        Returns None when the entry carries no usable name.
        """
        name = get_container_name(dictionary)
        if name is None:
            return None
        new_dictionary = {
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
            'Name': '/' + name,
        }
        return cls(client, new_dictionary, **kwargs)
    @classmethod
    def from_id(cls, client, id):
        """Construct a container from its id via the inspect endpoint."""
        return cls(client, client.inspect_container(id))
    @classmethod
    def create(cls, client, **options):
        """Create a new container through the API and return it."""
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])
    @property
    def id(self):
        # Full container id.
        return self.dictionary['Id']
    @property
    def image(self):
        # Image reference the container was created from.
        return self.dictionary['Image']
    @property
    def image_config(self):
        # Full inspect data of the container's image (makes an API call).
        return self.client.inspect_image(self.image)
    @property
    def short_id(self):
        # Truncated (10-character) container id.
        return self.id[:10]
    @property
    def name(self):
        # Container name without the leading '/'.
        return self.dictionary['Name'][1:]
    @property
    def name_without_project(self):
        # '<service>_<number>' name, i.e. without the project prefix.
        return '{0}_{1}'.format(self.labels.get(LABEL_SERVICE), self.number)
    @property
    def number(self):
        """Instance number taken from the container-number label.

        Raises ValueError when the label is missing.
        """
        number = self.labels.get(LABEL_CONTAINER_NUMBER)
        if not number:
            raise ValueError("Container {0} does not have a {1} label".format(
                self.short_id, LABEL_CONTAINER_NUMBER))
        return int(number)
    @property
    def ports(self):
        # Port mapping dict from NetworkSettings; inspects lazily.
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}
    @property
    def human_readable_ports(self):
        """Ports rendered as e.g. '0.0.0.0:8080->80/tcp, 443/tcp'."""
        def format_port(private, public):
            # Unpublished ports are shown by their private side only.
            if not public:
                return private
            return '{HostIp}:{HostPort}->{private}'.format(
                private=private, **public[0])
        return ', '.join(format_port(*item)
                         for item in sorted(six.iteritems(self.ports)))
    @property
    def labels(self):
        # Container labels dict (empty dict when unset).
        return self.get('Config.Labels') or {}
    @property
    def log_config(self):
        # Logging configuration from the host config, or None.
        return self.get('HostConfig.LogConfig') or None
    @property
    def human_readable_state(self):
        """One-word state: 'Paused', 'Up', 'Ghost' or 'Exit <code>'."""
        if self.is_paused:
            return 'Paused'
        if self.is_running:
            return 'Ghost' if self.get('State.Ghost') else 'Up'
        else:
            return 'Exit %s' % self.get('State.ExitCode')
    @property
    def human_readable_command(self):
        # Entrypoint and command joined into a single display string.
        entrypoint = self.get('Config.Entrypoint') or []
        cmd = self.get('Config.Cmd') or []
        return ' '.join(entrypoint + cmd)
    @property
    def environment(self):
        # Environment as a dict parsed from the 'VAR=value' entries.
        return dict(var.split("=", 1) for var in self.get('Config.Env') or [])
    @property
    def is_running(self):
        return self.get('State.Running')
    @property
    def is_paused(self):
        return self.get('State.Paused')
    def get(self, key):
        """Return a value from the container or None if the value is not set.
        :param key: a string using dotted notation for nested dictionary
            lookups, e.g. 'NetworkSettings.Ports'
        """
        self.inspect_if_not_inspected()
        def get_value(dictionary, key):
            # Missing intermediate levels collapse to {} so the reduce
            # never raises; it just yields None.
            return (dictionary or {}).get(key)
        return reduce(get_value, key.split('.'), self.dictionary)
    def get_local_port(self, port, protocol='tcp'):
        """Return 'host_ip:host_port' for a published port, else None."""
        port = self.ports.get("%s/%s" % (port, protocol))
        return "{HostIp}:{HostPort}".format(**port[0]) if port else None
    def start(self, **options):
        return self.client.start(self.id, **options)
    def stop(self, **options):
        return self.client.stop(self.id, **options)
    def pause(self, **options):
        return self.client.pause(self.id, **options)
    def unpause(self, **options):
        return self.client.unpause(self.id, **options)
    def kill(self, **options):
        return self.client.kill(self.id, **options)
    def restart(self, **options):
        return self.client.restart(self.id, **options)
    def remove(self, **options):
        return self.client.remove_container(self.id, **options)
    def inspect_if_not_inspected(self):
        # Fetch full inspect data once, on first need.
        if not self.has_been_inspected:
            self.inspect()
    def wait(self):
        # Block until the container stops.
        return self.client.wait(self.id)
    def logs(self, *args, **kwargs):
        return self.client.logs(self.id, *args, **kwargs)
    def inspect(self):
        """Refresh self.dictionary with full inspect output."""
        self.dictionary = self.client.inspect_container(self.id)
        self.has_been_inspected = True
        return self.dictionary
    # TODO: only used by tests, move to test module
    def links(self):
        links = []
        for container in self.client.containers():
            for name in container['Names']:
                bits = name.split('/')
                if len(bits) > 2 and bits[1] == self.name:
                    links.append(bits[2])
        return links
    def attach(self, *args, **kwargs):
        return self.client.attach(self.id, *args, **kwargs)
    def attach_socket(self, **kwargs):
        return self.client.attach_socket(self.id, **kwargs)
    def __repr__(self):
        return '<Container: %s (%s)>' % (self.name, self.id[:6])
    def __eq__(self, other):
        # Strict type check: subclasses never compare equal to Container.
        if type(self) != type(other):
            return False
        return self.id == other.id
    def __hash__(self):
        return self.id.__hash__()
def get_container_name(container):
    """Return the container's name from inspect- or ps-style API output.

    ``None`` is returned when neither key holds a value.  Inspect output
    carries a single ``Name`` entry, returned verbatim; ps output carries
    a ``Names`` list, from which the least-nested alias wins and only its
    final path segment is kept.
    """
    if not (container.get('Name') or container.get('Names')):
        return None
    # inspect endpoint: single 'Name' entry.
    if 'Name' in container:
        return container['Name']
    # ps endpoint: pick the alias with the fewest path segments.
    def depth(alias):
        return len(alias.split('/'))
    best = min(container['Names'], key=depth)
    return best.split('/')[-1]
| 28.554545 | 78 | 0.60554 | from __future__ import absolute_import
from __future__ import unicode_literals
from functools import reduce
import six
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_SERVICE
class Container(object):
def __init__(self, client, dictionary, has_been_inspected=False):
self.client = client
self.dictionary = dictionary
self.has_been_inspected = has_been_inspected
@classmethod
def from_ps(cls, client, dictionary, **kwargs):
name = get_container_name(dictionary)
if name is None:
return None
new_dictionary = {
'Id': dictionary['Id'],
'Image': dictionary['Image'],
'Name': '/' + name,
}
return cls(client, new_dictionary, **kwargs)
@classmethod
def from_id(cls, client, id):
return cls(client, client.inspect_container(id))
@classmethod
def create(cls, client, **options):
response = client.create_container(**options)
return cls.from_id(client, response['Id'])
@property
def id(self):
return self.dictionary['Id']
@property
def image(self):
return self.dictionary['Image']
@property
def image_config(self):
return self.client.inspect_image(self.image)
@property
def short_id(self):
return self.id[:10]
@property
def name(self):
return self.dictionary['Name'][1:]
@property
def name_without_project(self):
return '{0}_{1}'.format(self.labels.get(LABEL_SERVICE), self.number)
@property
def number(self):
number = self.labels.get(LABEL_CONTAINER_NUMBER)
if not number:
raise ValueError("Container {0} does not have a {1} label".format(
self.short_id, LABEL_CONTAINER_NUMBER))
return int(number)
@property
def ports(self):
self.inspect_if_not_inspected()
return self.get('NetworkSettings.Ports') or {}
@property
def human_readable_ports(self):
def format_port(private, public):
if not public:
return private
return '{HostIp}:{HostPort}->{private}'.format(
private=private, **public[0])
return ', '.join(format_port(*item)
for item in sorted(six.iteritems(self.ports)))
@property
def labels(self):
return self.get('Config.Labels') or {}
@property
def log_config(self):
return self.get('HostConfig.LogConfig') or None
@property
def human_readable_state(self):
if self.is_paused:
return 'Paused'
if self.is_running:
return 'Ghost' if self.get('State.Ghost') else 'Up'
else:
return 'Exit %s' % self.get('State.ExitCode')
@property
def human_readable_command(self):
entrypoint = self.get('Config.Entrypoint') or []
cmd = self.get('Config.Cmd') or []
return ' '.join(entrypoint + cmd)
@property
def environment(self):
return dict(var.split("=", 1) for var in self.get('Config.Env') or [])
@property
def is_running(self):
return self.get('State.Running')
@property
def is_paused(self):
return self.get('State.Paused')
def get(self, key):
self.inspect_if_not_inspected()
def get_value(dictionary, key):
return (dictionary or {}).get(key)
return reduce(get_value, key.split('.'), self.dictionary)
def get_local_port(self, port, protocol='tcp'):
port = self.ports.get("%s/%s" % (port, protocol))
return "{HostIp}:{HostPort}".format(**port[0]) if port else None
def start(self, **options):
return self.client.start(self.id, **options)
def stop(self, **options):
return self.client.stop(self.id, **options)
def pause(self, **options):
return self.client.pause(self.id, **options)
def unpause(self, **options):
return self.client.unpause(self.id, **options)
def kill(self, **options):
return self.client.kill(self.id, **options)
def restart(self, **options):
return self.client.restart(self.id, **options)
def remove(self, **options):
return self.client.remove_container(self.id, **options)
def inspect_if_not_inspected(self):
if not self.has_been_inspected:
self.inspect()
def wait(self):
return self.client.wait(self.id)
def logs(self, *args, **kwargs):
return self.client.logs(self.id, *args, **kwargs)
def inspect(self):
self.dictionary = self.client.inspect_container(self.id)
self.has_been_inspected = True
return self.dictionary
def links(self):
links = []
for container in self.client.containers():
for name in container['Names']:
bits = name.split('/')
if len(bits) > 2 and bits[1] == self.name:
links.append(bits[2])
return links
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
def attach_socket(self, **kwargs):
return self.client.attach_socket(self.id, **kwargs)
def __repr__(self):
return '<Container: %s (%s)>' % (self.name, self.id[:6])
def __eq__(self, other):
if type(self) != type(other):
return False
return self.id == other.id
def __hash__(self):
return self.id.__hash__()
def get_container_name(container):
if not container.get('Name') and not container.get('Names'):
return None
if 'Name' in container:
return container['Name']
shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
return shortest_name.split('/')[-1]
| true | true |
f727c89ca6002e8cedcf4b9d1b2de5100c7c9fad | 1,149 | py | Python | lib/systems/beta-l-lyxopyranose.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/beta-l-lyxopyranose.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/beta-l-lyxopyranose.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | import pulsar as psr
def load_ref_system():
""" Returns beta-l-lyxopyranose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C 1.4299 0.2461 0.7896
O 0.4230 1.0739 1.3901
C -0.9416 0.6206 1.4010
C -1.3780 0.0314 0.0512
C -0.4016 -1.0958 -0.3576
O -0.7829 -1.6098 -1.6359
C 1.0038 -0.4852 -0.5172
O 0.9231 0.4645 -1.5809
O -2.6663 -0.5389 0.3219
O 2.4097 1.2044 0.4043
H -1.0615 -0.1113 2.2208
H -1.4941 1.5476 1.6502
H 1.8808 -0.4028 1.5639
H 1.7537 -1.2585 -0.8006
H -0.3995 -1.9182 0.3913
H -1.4467 0.7936 -0.7574
H 2.0470 2.1205 0.5463
H 1.7591 1.0051 -1.5910
H -1.3277 -2.4163 -1.5204
H -3.1571 -0.6576 -0.5199
""")
| 41.035714 | 75 | 0.409051 | import pulsar as psr
def load_ref_system():
return psr.make_system("""
C 1.4299 0.2461 0.7896
O 0.4230 1.0739 1.3901
C -0.9416 0.6206 1.4010
C -1.3780 0.0314 0.0512
C -0.4016 -1.0958 -0.3576
O -0.7829 -1.6098 -1.6359
C 1.0038 -0.4852 -0.5172
O 0.9231 0.4645 -1.5809
O -2.6663 -0.5389 0.3219
O 2.4097 1.2044 0.4043
H -1.0615 -0.1113 2.2208
H -1.4941 1.5476 1.6502
H 1.8808 -0.4028 1.5639
H 1.7537 -1.2585 -0.8006
H -0.3995 -1.9182 0.3913
H -1.4467 0.7936 -0.7574
H 2.0470 2.1205 0.5463
H 1.7591 1.0051 -1.5910
H -1.3277 -2.4163 -1.5204
H -3.1571 -0.6576 -0.5199
""")
| true | true |
f727cb3319b78a06a0480e8cba138dfc007d6cb9 | 515 | py | Python | FreeTAKServer/controllers/services/federation/ConnectHandler.py | logikal/FreeTakServer | c0916ce65781b5c60079d6440e52db8fc6ee0467 | [
"MIT"
] | null | null | null | FreeTAKServer/controllers/services/federation/ConnectHandler.py | logikal/FreeTakServer | c0916ce65781b5c60079d6440e52db8fc6ee0467 | [
"MIT"
] | null | null | null | FreeTAKServer/controllers/services/federation/ConnectHandler.py | logikal/FreeTakServer | c0916ce65781b5c60079d6440e52db8fc6ee0467 | [
"MIT"
] | null | null | null | #######################################################
#
# ConnectHandler.py
# Python implementation of the Class ConnectHandler
# Generated by Enterprise Architect
# Created on: 29-Dec-2020 8:10:45 AM
# Original author: natha
#
#######################################################
from Catalog.Data.Federation.Handler import Handler
class ConnectHandler(Handler):
    """Handler for federation connect commands (generated stub).

    NOTE(review): both methods are Enterprise Architect generated stubs
    and are missing the conventional ``self`` first parameter -- confirm
    the intended call style before relying on them as instance methods.
    """
    # default constructor def __init__(self):
    def Handle(object, command):
        # Stub: intended to process an incoming connect command.
        pass
    def setNextHandler(handler):
        # Stub: intended to register the next handler in the chain.
        pass
f727cb4f493f60788d46a5d0eb9211f2230cd556 | 3,947 | py | Python | detr_tf/data/tfcsv.py | falcon2212/detr-tensorflow | 119da1390a02b6013e7147d822e72c38fc3a2dd9 | [
"MIT"
] | null | null | null | detr_tf/data/tfcsv.py | falcon2212/detr-tensorflow | 119da1390a02b6013e7147d822e72c38fc3a2dd9 | [
"MIT"
] | null | null | null | detr_tf/data/tfcsv.py | falcon2212/detr-tensorflow | 119da1390a02b6013e7147d822e72c38fc3a2dd9 | [
"MIT"
] | null | null | null | import tensorflow as tf
from random import shuffle
import pandas as pd
import numpy as np
import imageio
import os
from detr_tf.data import processing
from detr_tf.data.transformation import detr_transform
from detr_tf import bbox
def morethan1(img, tbbox, tclass):
    """Return True when *tbbox* holds at least one bounding box.

    The signature mirrors a (image, target bboxes, target classes)
    dataset element; only *tbbox* is examined.  Inputs without a usable
    ``shape`` (e.g. plain lists, None) yield False instead of raising.
    """
    try:
        # Access .shape only inside the guard: the original printed
        # tbbox.shape before the try, which crashed for shapeless inputs
        # and defeated the purpose of the exception handler.
        return tbbox.shape[0] > 0
    except (AttributeError, IndexError, TypeError):
        return False
def load_data_from_index(index, class_names, filenames, anns, config, augmentation, img_dir):
    """Load one image and its targets from the CSV annotation table.

    Returns (image float32, bboxes float32 in xc/yc/w/h normalized form,
    classes int64 with a trailing singleton axis).
    """
    # Open the image
    image = imageio.imread(config.datadir+img_dir+"/"+filenames[index])
    # Select all the annotations (bbox and class) on this image
    image_anns = anns[anns["filename"] == filenames[index]]
    # Convert each string class to its numeric index (the target class)
    t_class = image_anns["class"].map(
        lambda x: class_names.index(x)).to_numpy()
    # Select the width & height of each row (all rows describe the same
    # image, so only the first value is actually used below)
    width = image_anns["width"].to_numpy()
    height = image_anns["height"].to_numpy()
    # Select xmin, ymin, xmax, ymax of each bbox and normalize to [0, 1],
    # then convert from xmin,ymin,xmax,ymax to x_center,y_center,width,height
    bbox_list = image_anns[["xmin", "ymin", "xmax", "ymax"]].to_numpy()
    bbox_list = bbox_list / [width[0], height[0], width[0], height[0]]
    t_bbox = bbox.xy_min_xy_max_to_xcycwh(bbox_list)
    # Transform and augment image with bbox and class if needed
    image, t_bbox, t_class = detr_transform(
        image, t_bbox, t_class, config, augmentation=augmentation)
    # Normalize the image for the model
    image = processing.normalized_images(image, config)
    return image.astype(np.float32), t_bbox.astype(np.float32), np.expand_dims(t_class, axis=-1).astype(np.int64)
def load_tfcsv_dataset(config, batch_size, augmentation=False, exclude=[], ann_dir=None, ann_file=None, img_dir=None):
    """ Load a CSV-annotated detection dataset as a tf.data pipeline.

    Returns (dataset, class_names) where each dataset element is a batch
    of (images, padded bboxes, padded classes).
    NOTE(review): `exclude` uses a mutable default (safe here, read-only)
    and `ann_dir` is computed but never used afterwards.
    """
    ann_dir = config.data.ann_dir if ann_dir is None else ann_dir
    ann_file = config.data.ann_file if ann_file is None else ann_file
    img_dir = config.data.img_dir if img_dir is None else img_dir
    anns = pd.read_csv(config.datadir+ann_file)
    # Drop every annotation whose class was explicitly excluded.
    for name in exclude:
        anns = anns[anns["class"] != name]
    unique_class = anns["class"].unique()
    unique_class.sort()
    # Set the background class to 0
    config.background_class = 0
    class_names = ["background"] + unique_class.tolist()
    filenames = anns["filename"].unique().tolist()
    indexes = list(range(0, len(filenames)))
    shuffle(indexes)
    dataset = tf.data.Dataset.from_tensor_slices(indexes)
    # Each index is resolved to (image, bboxes, classes) via a numpy_function.
    dataset = dataset.map(lambda idx: processing.numpy_fc(
        idx, load_data_from_index,
        class_names=class_names, filenames=filenames, anns=anns, config=config, augmentation=augmentation, img_dir=img_dir), num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Filter labels to be sure to keep only samples with at least one bbox
    dataset = dataset.filter(
        lambda imgs, tbbox, tclass: tf.shape(tbbox)[0] > 0)
    # Pad bboxes and labels to a fixed size so batching is possible
    dataset = dataset.map(processing.pad_labels,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Batch images (incomplete final batch is dropped)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset, class_names
# print(config.data_dir)
# train_iterator, class_names = load_tfcsv_dataset(
# config=config, batch_size=config.batch_size, augmentation=True, img_dir="train", ann_file="train/_annotations.csv")
# test_iterator, class_names = load_tfcsv_dataset(
# config=config, batch_size=config.batch_size, augmentation=True, img_dir="test", ann_file="test/_annotations.csv")
# print(test_iterator.cardinality())
# print(train_iterator.cardinality())
# # tmp = list(train_iterator)
# # for i, _ in enumerate(train_iterator):
# # print(i)
# # print(int(None))
| 39.079208 | 174 | 0.7132 | import tensorflow as tf
from random import shuffle
import pandas as pd
import numpy as np
import imageio
import os
from detr_tf.data import processing
from detr_tf.data.transformation import detr_transform
from detr_tf import bbox
def morethan1(img, tbbox, tclass):
ret = False
print("morethan1 ", tbbox.shape)
try:
ret = tbbox.shape[0] > 0
except:
ret = False
return ret
def load_data_from_index(index, class_names, filenames, anns, config, augmentation, img_dir):
image = imageio.imread(config.datadir+img_dir+"/"+filenames[index])
image_anns = anns[anns["filename"] == filenames[index]]
t_class = image_anns["class"].map(
lambda x: class_names.index(x)).to_numpy()
width = image_anns["width"].to_numpy()
height = image_anns["height"].to_numpy()
bbox_list = image_anns[["xmin", "ymin", "xmax", "ymax"]].to_numpy()
bbox_list = bbox_list / [width[0], height[0], width[0], height[0]]
t_bbox = bbox.xy_min_xy_max_to_xcycwh(bbox_list)
image, t_bbox, t_class = detr_transform(
image, t_bbox, t_class, config, augmentation=augmentation)
image = processing.normalized_images(image, config)
return image.astype(np.float32), t_bbox.astype(np.float32), np.expand_dims(t_class, axis=-1).astype(np.int64)
def load_tfcsv_dataset(config, batch_size, augmentation=False, exclude=[], ann_dir=None, ann_file=None, img_dir=None):
ann_dir = config.data.ann_dir if ann_dir is None else ann_dir
ann_file = config.data.ann_file if ann_file is None else ann_file
img_dir = config.data.img_dir if img_dir is None else img_dir
anns = pd.read_csv(config.datadir+ann_file)
for name in exclude:
anns = anns[anns["class"] != name]
unique_class = anns["class"].unique()
unique_class.sort()
config.background_class = 0
class_names = ["background"] + unique_class.tolist()
filenames = anns["filename"].unique().tolist()
indexes = list(range(0, len(filenames)))
shuffle(indexes)
dataset = tf.data.Dataset.from_tensor_slices(indexes)
dataset = dataset.map(lambda idx: processing.numpy_fc(
idx, load_data_from_index,
class_names=class_names, filenames=filenames, anns=anns, config=config, augmentation=augmentation, img_dir=img_dir), num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.filter(
lambda imgs, tbbox, tclass: tf.shape(tbbox)[0] > 0)
dataset = dataset.map(processing.pad_labels,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset, class_names
| true | true |
f727cbca391bd911c82cd15a769a1235f13eebfd | 13,500 | py | Python | Simulation_Python/scenarios.py | tomAntoine/multi-UAV-simulator | 2fbd8b802ea1a5f388722714bac5563d0718b28f | [
"MIT"
] | 22 | 2021-04-07T21:10:53.000Z | 2022-03-26T08:21:06.000Z | Simulation_Python/scenarios.py | alexMarFar/multi-UAV-simulator-1 | 2fbd8b802ea1a5f388722714bac5563d0718b28f | [
"MIT"
] | 2 | 2021-04-12T06:23:50.000Z | 2021-05-20T04:33:35.000Z | Simulation_Python/scenarios.py | alexMarFar/multi-UAV-simulator-1 | 2fbd8b802ea1a5f388722714bac5563d0718b28f | [
"MIT"
] | 4 | 2021-05-21T06:11:34.000Z | 2022-03-09T18:41:10.000Z | # -*- coding: utf-8 -*-
"""
author: John Bass
email: john.bobzwik@gmail.com
license: MIT
Please feel free to use and modify this, but keep the above information. Thanks!
adaptation
author: Tom Antoine and Alex Martinez
"""
import numpy as np
import matplotlib.pyplot as plt
import time
import cProfile
from trajectory import Trajectory
from ctrl import Control
from quadFiles.quad import Quadcopter
from utils.windModel import Wind
import utils
import config
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.legend import Legend
import random
"""
The variable “quad_id” is provided as an integer from 0 to the number of drones
in the simulation.
The “mode” is provided as a string and it can be split into three categories,
depending whether or not they are associated with the agent, the target or both.
The latter are simple actions such as “takeoff”, “home”, “land”, “fall” or
“charging”. Then, there are specific modes for agents like “guided” or “track”;
and targets like “enemy”. The change of mode can be pre-defined or provided
by the Mission planning and Task control subsystem. In the case of targets, the
transition is automated internally. They will be initialized in “enemy” mode and
changed into “neutralized” if the conditions are met to finally change into “fall”.
In the case of agents, the change of modes is performed externally after system
integration. However, due to the very intuitive transitions, some of them were
predefined in sequences for the subsystem validation and verification. For this
reason, “takeoff” and “land” mode were integrated at the beginning and end of
each mission. Similarly, after an agent in “track” mode neutralized its target, or
a “guided” one has reached its goal position, the mode was switched to “home”.
The “id_targ” is a specific integer input associated to the mode “track”. It
corresponds to the target identification number and is assigned as -1 by default
if any other mode is employed.
The “pos_goal” is a set of coordinates x, y and z in the global reference frame
that represent the goal position. It should be noted that although x and y are
not bounded, the z coordinate is restricted so that the drones cannot go through
the ground and by consistency with the guidance algorithms, it is defined as
negative. It should be noted that although this is an input from Mission planning
and Task control subsystem it will be updated for specific modes such as
“track”.
The “pos_obs” is a list of sets of coordinates x, y and z in the global reference
frame corresponding to the static obstacles and therefore should be kept
constant for all the drones in the simulation environment. This information is
predefined but will need to be provided by the Situation Awareness subsystem.
The “pos_ini” is a set of coordinates x, y and z in the global reference frame
that represent the initial position. It should be noted that as for the rest of
coordinates, the z coordinate is defined as negative.
The “color” is employed for the easy identification of the drones. It allows to
easily verify the correct functioning of the algorithms.
The “ctrlType” is xyz_pos by default.
The “trajSelect” is minimum velocity, no yaw control, average speed by default.
The “Ti” input is given as a number and indicates the initial time for the
simulation. It is common for all drones and by default set at 0s.
For most modes, the “Tf” input is given as a number and corresponds to the
final time of the simulation “Tf”. It is therefore employed for creating the
trajectories to reach goal position. However, in modes that require regular
updates as “track” or “guided”, it is substituted by the update time. In these
cases, it should be slightly modified within drones. It is usually around 0.5s.
The numerical time step “numTimeStep” is employed for the trajectories.
"""
def full_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Three-drone demo: one enemy, one guided agent and one tracker.

    Returns the static obstacle array and the list of Quadcopter objects.
    """
    # Static obstacle field shared by every drone in this scenario.
    obstacles = [[1, 5, -2], [8, 2, -8], [5, 8, -9], [0, 0, -2], [3, 3, -1],
                 [3, 9, -17], [5, 7, -18], [0, 0, -10], [5, 10, -16],
                 [10, 10, -12], [13, 13, -13]]
    pos_obs = np.array(obstacles)
    enemy = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                       quad_id=0, mode='ennemy', id_targ=-1, color='blue',
                       pos_ini=[0, 0, 0], pos_goal=[15, 15, -15],
                       pos_obs=pos_obs)
    guided = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=1, mode='guided', id_targ=-1, color='green',
                        pos_ini=[0, 3, 0], pos_goal=[15, 10, -15],
                        pos_obs=pos_obs)
    tracker = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts,
                         quad_id=2, mode='track', id_targ=0, color='pink',
                         pos_ini=[3, 0, 0], pos_goal=[15, 20, -15],
                         pos_obs=pos_obs)
    return pos_obs, [enemy, guided, tracker]
def multi_waypoint_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Three independent enemy drones flying to distinct waypoints.

    Returns the obstacle array and the list of Quadcopter objects.
    """
    # Single far-away obstacle; the drones never come near it.
    pos_obs = np.array([[50, 0, 0]])
    # (id, color, start position, goal position) for each enemy drone.
    setups = [(0, 'blue', [0, 0, 0], [0, -17, -10]),
              (1, 'green', [20, 0, 0], [-20, -15, -10]),
              (2, 'red', [-20, -10, 0], [-10, 0, -20])]
    quads = [Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=qid, mode='ennemy', id_targ=-1, color=col,
                        pos_ini=start, pos_goal=goal, pos_obs=pos_obs)
             for qid, col, start, goal in setups]
    return pos_obs, quads
def static_OA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Single enemy drone crossing a field of 30 random static obstacles.

    Each obstacle is a random point with coordinates in [-10, -1]; within
    one point the coordinates are distinct because random.sample draws
    without replacement.  Returns the obstacle array and the drone list.
    """
    # Build the random obstacle field in one pass (the leftover debug
    # print of the array has been removed).
    pos_obs = np.array([random.sample(range(-10, 0), 3) for _ in range(30)])
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                       quad_id=0, mode='ennemy', id_targ=-1, color='blue',
                       pos_ini=[0, 0, 0], pos_goal=[-10, -10, -10],
                       pos_obs=pos_obs)
    return pos_obs, [quad0]
def dynamic_CA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Collision avoidance: one guided drone crossed by three enemies.

    Intended to run with Tf around 8 s.  Returns the obstacle array and
    the list of Quadcopter objects.
    """
    pos_obs = np.array([[50, 0, 0]])
    # The guided agent flies along y = 10 at constant altitude.
    quads = [Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=0, mode='guided', id_targ=-1, color='blue',
                        pos_ini=[0, 10, -5], pos_goal=[30, 10, -5],
                        pos_obs=pos_obs)]
    # Enemies start on y = 0 at x = 3, 8 and 15 and cross its path.
    for qid, x in ((1, 3), (2, 8), (3, 15)):
        quads.append(Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                                quad_id=qid, mode='ennemy', id_targ=-1,
                                color='green', pos_ini=[x, 0, -5],
                                pos_goal=[x, 20, -5], pos_obs=pos_obs))
    return pos_obs, quads
def dynamic_CA_scenario_random_pos(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Collision avoidance with three randomly placed crossing enemies.

    Like dynamic_CA_scenario, but each enemy gets a random lateral
    position x in [3, 17] and altitude z in [-8, -1].  Returns the
    obstacle array and the list of Quadcopter objects.
    """
    pos_obs = np.array([[50, 0, 0]])
    quads = [Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=0, mode='guided', id_targ=-1, color='blue',
                        pos_ini=[0, 10, -5], pos_goal=[30, 10, -5],
                        pos_obs=pos_obs)]
    # Deduplicated from three copy-pasted stanzas; the RNG call order is
    # unchanged (x then z, per drone, in id order).
    for quad_id in (1, 2, 3):
        x = random.randint(3, 17)
        z = -random.randint(1, 8)
        quads.append(Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                                quad_id=quad_id, mode='ennemy', id_targ=-1,
                                color='green', pos_ini=[x, 0, z],
                                pos_goal=[x, 20, z], pos_obs=pos_obs))
    return pos_obs, quads
def simple_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """One tracker (quad 1) chasing one enemy drone (quad 0)."""
    pos_obs = np.array([[-10, -10, 0]])
    target = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=0, mode='ennemy', id_targ=-1, color='blue',
                        pos_ini=[0, 0, 0], pos_goal=[15, 15, -15],
                        pos_obs=pos_obs)
    chaser = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=1, mode='track', id_targ=0, color='green',
                        pos_ini=[5, 5, 0], pos_goal=[2, 2, -10],
                        pos_obs=pos_obs)
    return pos_obs, [target, chaser]
def multi_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Three trackers converging on a single enemy drone."""
    pos_obs = np.array([[-10, -10, 0]])
    quads = [Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=0, mode='ennemy', id_targ=-1, color='blue',
                        pos_ini=[0, 0, 0], pos_goal=[15, 15, -15],
                        pos_obs=pos_obs)]
    # The trackers differ only in id and start position; all hunt quad 0.
    for qid, start in ((1, [4, 0, 0]), (2, [4, 4, 0]), (3, [4, -4, 0])):
        quads.append(Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep,
                                Ts, quad_id=qid, mode='track', id_targ=0,
                                color='green', pos_ini=start,
                                pos_goal=[4, 4, -10], pos_obs=pos_obs))
    return pos_obs, quads
def tracking_loop_scenario(x,Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Four trackers chasing each other around a square of side x.

    Drone i tracks drone (i + 1) % 4; one obstacle sits at the square's
    center.  Returns the obstacle array and the drone list.
    """
    pos_obs = np.array([[x/2, x/2, -10]])
    # (update time, target id, color, start, goal) per drone, in id order.
    setups = [(Ts*99, 1, 'blue', [0, 0, -10], [0, x, -10]),
              (Ts*100, 2, 'green', [x, 0, -10], [0, 0, -10]),
              (Ts*101, 3, 'orange', [x, x, -10], [x, 0, -10]),
              (Ts*102, 0, 'pink', [0, x, -10], [x, x, -10])]
    quads = [Quadcopter(Ti, tf_i, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=qid, mode='track', id_targ=targ, color=col,
                        pos_ini=start, pos_goal=goal, pos_obs=pos_obs)
             for qid, (tf_i, targ, col, start, goal) in enumerate(setups)]
    return pos_obs, quads
def tracking_and_kill_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """One tracker chasing an enemy drone until it is neutralized."""
    pos_obs = np.array([[-10, -10, 0]])
    enemy = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                       quad_id=0, mode='ennemy', id_targ=-1, color='blue',
                       pos_ini=[0, 0, -5], pos_goal=[20, 15, -20],
                       pos_obs=pos_obs)
    hunter = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts,
                        quad_id=1, mode='track', id_targ=0, color='green',
                        pos_ini=[5, 0, 0], pos_goal=[4, 4, -10],
                        pos_obs=pos_obs)
    return pos_obs, [enemy, hunter]
def simple_guided_for_PF(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Single guided drone path-finding through 20 random obstacles."""
    # Random obstacle points with coordinates in [-10, -1]; random.sample
    # keeps the three coordinates of each point distinct.
    pos_obs = np.array([random.sample(range(-10, 0), 3) for _ in range(20)])
    quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts,
                       quad_id=0, mode='guided', id_targ=-1, color='blue',
                       pos_ini=[0, 0, -5], pos_goal=[-10, -10, -10],
                       pos_obs=pos_obs)
    return pos_obs, [quad0]
def ROS_simu(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Scenario mirroring the ROS/Gazebo world.

    The obstacles approximate a fire station, a fire truck and two trees
    as random point clouds; a single guided drone flies past them toward
    y = -100.  Returns the obstacle array and the drone list.
    """
    def sample_box(count, xr, yr, zr):
        # `count` random obstacle points, one coordinate drawn per axis
        # from each range (replaces four copy-pasted loops).
        return [[random.sample(xr, 1)[0],
                 random.sample(yr, 1)[0],
                 random.sample(zr, 1)[0]] for _ in range(count)]

    fire_station = sample_box(20, range(-10, 10), range(-55, -45), range(-12, 0))
    fire_truck = sample_box(5, range(-19, 21), range(-55, -45), range(-3, 0))
    tree_1 = sample_box(5, range(-12, -8), range(-42, -38), range(-5, 0))
    tree_2 = sample_box(5, range(8, 12), range(-42, -38), range(-5, 0))
    # (The original also pre-assigned pos_obs = [], which was dead code.)
    pos_obs = np.array(fire_station + fire_truck + tree_1 + tree_2)
    quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts,
                       quad_id=0, mode='guided', id_targ=-1, color='blue',
                       pos_ini=[0, 0, 0], pos_goal=[0, -100, -10],
                       pos_obs=pos_obs)
    return pos_obs, [quad0]
def real_map(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
    """Hand-built obstacle map: a tall central tower plus trees with canopies.

    Returns the obstacle array and a single enemy drone.
    """
    # Central 3x3 tower, 11 cells tall (z = 0 .. -10).
    tower = [[x, y, z] for x in (-1, 0, 1) for y in (-1, 0, 1)
             for z in range(0, -11, -1)]
    xs = [-20, 5, 10]
    ys = [5, -10, 10]
    # NOTE(review): trunks are generated for all 9 (x, y) combinations while
    # the canopies below only cover the 3 paired spots -- possibly an
    # intended zip(xs, ys); behavior kept as-is pending confirmation.
    trees = [[x, y, z] for x in xs for y in ys for z in (0, -1, -2, -3)]
    # Plus-shaped canopy at z = -4 and z = -5 over each paired (x, y) spot.
    # (Leftover debug print of this list has been removed.)
    tops = []
    for x, y in zip(xs, ys):
        for z in (-4, -5):
            tops += [[x - 1, y, z], [x + 1, y, z], [x, y, z],
                     [x, y - 1, z], [x, y + 1, z]]
    pos_obs = np.array(tower + trees + tops)
    quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts,
                       quad_id=0, mode='ennemy', id_targ=-1, color='blue',
                       pos_ini=[0, 0, -5], pos_goal=[-10, -10, -10],
                       pos_obs=pos_obs)
    return pos_obs, [quad0]
| 56.25 | 199 | 0.669333 |
import numpy as np
import matplotlib.pyplot as plt
import time
import cProfile
from trajectory import Trajectory
from ctrl import Control
from quadFiles.quad import Quadcopter
from utils.windModel import Wind
import utils
import config
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.legend import Legend
import random
def full_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[1, 5, -2], [8, 2, -8], [5, 8, -9], [0, 0, -2], [3, 3, -1],[3, 9, -17],[5, 7, -18],[0, 0, -10],[5, 10, -16],[10,10,-12],[13,13,-13]])
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [15,15,-15], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='guided', id_targ = -1, color = 'green', pos_ini = [0,3,0], pos_goal = [15,10,-15], pos_obs = pos_obs)
quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'pink', pos_ini = [3,0,0], pos_goal = [15,20,-15], pos_obs = pos_obs)
quads = [quad0, quad1, quad2]
return pos_obs,quads
def multi_waypoint_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[50,0,0]])
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [0,-17,-10], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [20,0,0], pos_goal = [-20,-15,-10], pos_obs = pos_obs)
quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'red', pos_ini = [-20,-10,0], pos_goal = [-10,0,-20], pos_obs = pos_obs)
quads = [quad0, quad1, quad2]
return pos_obs,quads
def static_OA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = []
for i in range(30):
pos_obs.append(random.sample(range(-10, 0), 3))
pos_obs = np.array(pos_obs)
print(pos_obs)
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal= [-10,-10,-10], pos_obs = pos_obs)
quads = [quad0]
return pos_obs,quads
def dynamic_CA_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[50,0,0]])
quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [3,0,-5], pos_goal = [3,20,-5], pos_obs = pos_obs)
quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [8,0,-5], pos_goal = [8,20,-5], pos_obs = pos_obs)
quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [15,0,-5], pos_goal = [15,20,-5], pos_obs = pos_obs)
quads = [quad0, quad1,quad2,quad3]
return pos_obs,quads
def dynamic_CA_scenario_random_pos(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[50,0,0]])
quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,10,-5],pos_goal = [30,10,-5], pos_obs = pos_obs)
x, z = random.randint(3,17),-1*random.randint(1,8)
quad1 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)
x, z = random.randint(3,17),-1*random.randint(1,8)
quad2 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)
x, z = random.randint(3,17),-1*random.randint(1,8)
quad3 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='ennemy', id_targ = -1, color = 'green', pos_ini = [x,0,z], pos_goal = [x,20,z], pos_obs = pos_obs)
quads = [quad0, quad1,quad2,quad3]
return pos_obs,quads
def simple_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[-10,-10,0]])
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [15,15,-15], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Ts*90, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,5,0], pos_goal = [2,2,-10], pos_obs = pos_obs)
quads = [quad0, quad1]
return pos_obs,quads
def multi_tracking_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[-10,-10,0]])
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [15,15,-15], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [4,0,0], pos_goal = [4,4,-10], pos_obs = pos_obs)
quad2 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 0, color = 'green', pos_ini = [4,4,0], pos_goal = [4,4,-10], pos_obs = pos_obs)
quad3 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'green', pos_ini = [4,-4,0], pos_goal = [4,4,-10], pos_obs = pos_obs)
quads = [quad0, quad1, quad2, quad3]
return pos_obs,quads
def tracking_loop_scenario(x,Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[x/2,x/2,-10]])
quad0 = Quadcopter(Ti, Ts*99, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='track', id_targ = 1, color = 'blue', pos_ini = [0,0,-10], pos_goal = [0,x,-10], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 2, color = 'green', pos_ini = [x,0,-10], pos_goal = [0,0,-10], pos_obs = pos_obs)
quad2 = Quadcopter(Ti, Ts*101, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 2, mode='track', id_targ = 3, color = 'orange', pos_ini = [x,x,-10],pos_goal = [x,0,-10], pos_obs = pos_obs)
quad3 = Quadcopter(Ti, Ts*102, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 3, mode='track', id_targ = 0, color = 'pink', pos_ini = [0,x,-10], pos_goal = [x,x,-10],pos_obs = pos_obs)
quads = [quad0, quad1,quad2,quad3]
return pos_obs,quads
def tracking_and_kill_scenario(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = np.array([[-10,-10,0]])
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [20,15,-20], pos_obs = pos_obs)
quad1 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 1, mode='track', id_targ = 0, color = 'green', pos_ini = [5,0,0], pos_goal = [4,4,-10], pos_obs = pos_obs)
quads = [quad0, quad1]
return pos_obs,quads
def simple_guided_for_PF(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
pos_obs = []
for i in range(20):
pos_obs.append(random.sample(range(-10, 0), 3))
pos_obs = np.array(pos_obs)
quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [-10,-10,-10], pos_obs = pos_obs)
quads = [quad0]
return pos_obs,quads
def ROS_simu(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
fire_station=[]
fire_truck=[]
tree_1=[]
tree_2=[]
pos_obs=[]
for i in range(20):
x = random.sample(range(-10, 10), 1)[0]
y = random.sample(range(-55, -45), 1)[0]
z = random.sample(range(-12, 0), 1)[0]
fire_station.append([x,y,z])
for i in range(5):
x = random.sample(range(-19, 21), 1)[0]
y = random.sample(range(-55, -45), 1)[0]
z = random.sample(range(-3, 0), 1)[0]
fire_truck.append([x,y,z])
for i in range(5):
x = random.sample(range(-12, -8), 1)[0]
y = random.sample(range(-42,-38), 1)[0]
z = random.sample(range(-5, 0), 1)[0]
tree_1.append([x,y,z])
for i in range(5):
x = random.sample(range(8, 12), 1)[0]
y = random.sample(range(-42,-38), 1)[0]
z = random.sample(range(-5, 0), 1)[0]
tree_2.append([x,y,z])
pos_obs = fire_station + fire_truck + tree_1 + tree_2
pos_obs = np.array(pos_obs)
quad0 = Quadcopter(Ti, Ts*100, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='guided', id_targ = -1, color = 'blue', pos_ini = [0,0,0], pos_goal = [0,-100,-10], pos_obs = pos_obs)
quads = [quad0]
return(pos_obs,quads)
def real_map(Ti,Ts,Tf,ctrlType,trajSelect,numTimeStep):
xs = [-1,0,1]
ys = [-1,0,1]
zs = [0,-1,-2,-3,-4,-5,-6,-7,-8,-9,-10]
tower = [[x,y,z] for x in xs for y in ys for z in zs]
xs = [-20,5,10]
ys = [5,-10,10]
zs = [0,-1,-2,-3]
trees = [[x,y,z] for x in xs for y in ys for z in zs]
xs = [-20,5,10]
ys = [5,-10,10]
zs = [-4,-5]
tops = []
for i in range(3):
x, y = xs[i], ys[i]
for z in zs:
tops = tops + [[x-1,y,z],[x+1,y,z],[x,y,z],[x,y-1,z],[x,y+1,z]]
print(tops)
pos_obs = np.array(tower + trees + tops)
quad0 = Quadcopter(Ti, Tf, ctrlType, trajSelect, numTimeStep, Ts, quad_id = 0, mode='ennemy', id_targ = -1, color = 'blue', pos_ini = [0,0,-5], pos_goal = [-10,-10,-10], pos_obs = pos_obs)
quads = [quad0]
return pos_obs,quads
| true | true |
f727cc1948a85ac6d72771c8c995e728612019c7 | 4,358 | py | Python | src/tequila/quantumchemistry/__init__.py | naomicurnow/tequila | 739a76222005558d348a428cf2ce7cb5dfe290de | [
"MIT"
] | 1 | 2021-01-11T18:40:47.000Z | 2021-01-11T18:40:47.000Z | src/tequila/quantumchemistry/__init__.py | kiminh/tequila | 464085265e125222c63e65446861e9c0a2428bab | [
"MIT"
] | null | null | null | src/tequila/quantumchemistry/__init__.py | kiminh/tequila | 464085265e125222c63e65446861e9c0a2428bab | [
"MIT"
] | null | null | null | import typing
from .qc_base import ParametersQC, QuantumChemistryBase
SUPPORTED_QCHEMISTRY_BACKENDS = ["base", "psi4"]
INSTALLED_QCHEMISTRY_BACKENDS = {"base": QuantumChemistryBase}
try:
from .psi4_interface import QuantumChemistryPsi4
INSTALLED_QCHEMISTRY_BACKENDS["psi4"] = QuantumChemistryPsi4
except ImportError:
pass
def show_available_modules():
    """Print the name of every quantum-chemistry backend installed here."""
    print("Available QuantumChemistry Modules:")
    for name in INSTALLED_QCHEMISTRY_BACKENDS:
        print(name)
def show_supported_modules():
    """Print the list of backend names tequila supports (installed or not)."""
    print(SUPPORTED_QCHEMISTRY_BACKENDS)
def Molecule(geometry: str,
             basis_set: str = None,
             transformation: typing.Union[str, typing.Callable] = None,
             backend: str = None,
             guess_wfn=None,
             *args,
             **kwargs) -> QuantumChemistryBase:
    """Create a tequila molecule object for the chosen (or auto-detected) backend.

    Parameters
    ----------
    geometry
        molecular geometry as string or as filename (needs to be in xyz format with .xyz ending)
    basis_set
        quantum chemistry basis set (sto-3g, cc-pvdz, etc)
    transformation
        The Fermion to Qubit Transformation (jordan-wigner, bravyi-kitaev, bravyi-kitaev-tree and whatever OpenFermion supports)
    backend
        quantum chemistry backend (psi4, pyscf); auto-detected when None
    guess_wfn
        pass down a psi4 guess wavefunction to start the scf cycle from
        can also be a filename leading to a stored wavefunction

    Returns
    -------
    QuantumChemistryBase
        The molecule object of the selected backend.
    """
    # Forward only the keyword arguments that ParametersQC understands.
    keyvals = {}
    for k, v in kwargs.items():
        if k in ParametersQC.__dict__.keys():
            keyvals[k] = v
    parameters = ParametersQC(geometry=geometry, basis_set=basis_set, multiplicity=1, **keyvals)

    if backend is None:
        # Prefer a full quantum-chemistry package when one is installed.
        if "psi4" in INSTALLED_QCHEMISTRY_BACKENDS:
            backend = "psi4"
        elif "pyscf" in INSTALLED_QCHEMISTRY_BACKENDS:
            backend = "pyscf"
        else:
            # Fall back to the base implementation, which requires the
            # integrals to be passed in explicitly.
            requirements = [key in kwargs for key in ["one_body_integrals", "two_body_integrals", "nuclear_repulsion", "n_orbitals"]]
            if not all(requirements):
                raise Exception("No quantum chemistry backends installed on your system\n"
                                "To use the base functionality you need to pass the following tensors via keyword\n"
                                "one_body_integrals, two_body_integrals, nuclear_repulsion, n_orbitals\n")
            else:
                backend = "base"

    # BUG FIX: previously only the final dictionary lookup lower-cased the
    # name, so e.g. backend="Psi4" failed the membership checks below even
    # though it was resolvable.  Normalize once so every check agrees.
    backend = backend.lower()

    if backend not in SUPPORTED_QCHEMISTRY_BACKENDS:
        raise Exception(str(backend) + " is not (yet) supported by tequila")
    if backend not in INSTALLED_QCHEMISTRY_BACKENDS:
        raise Exception(str(backend) + " was not found on your system")

    # guess_wfn is a psi4-specific feature.
    if guess_wfn is not None and backend != 'psi4':
        raise Exception("guess_wfn only works for psi4")

    if basis_set is None and backend != "base":
        raise Exception("no basis_set provided for backend={}".format(backend))
    elif basis_set is None:
        basis_set = "custom"

    parameters.basis_set = basis_set

    return INSTALLED_QCHEMISTRY_BACKENDS[backend](parameters=parameters, transformation=transformation, guess_wfn=guess_wfn, *args, **kwargs)
def MoleculeFromOpenFermion(molecule,
                            transformation: typing.Union[str, typing.Callable] = None,
                            backend: str = None,
                            *args,
                            **kwargs) -> QuantumChemistryBase:
    """
    Initialize a tequila Molecule directly from an openfermion molecule object

    Parameters
    ----------
    molecule
        The openfermion molecule
    transformation
        The Fermion to Qubit Transformation (jordan-wigner, bravyi-kitaev, bravyi-kitaev-tree and whatever OpenFermion supports)
    backend
        The quantum chemistry backend, can be None in this case

    Returns
    -------
    The tequila molecule
    """
    if backend is None:
        return QuantumChemistryBase.from_openfermion(molecule=molecule, transformation=transformation, *args, **kwargs)
    # BUG FIX: this branch previously constructed the backend molecule but
    # never returned it, so the function always yielded None here.
    return INSTALLED_QCHEMISTRY_BACKENDS[backend].from_openfermion(molecule=molecule, transformation=transformation, *args,
                                                                   **kwargs)
| 37.568966 | 149 | 0.652593 | import typing
from .qc_base import ParametersQC, QuantumChemistryBase
SUPPORTED_QCHEMISTRY_BACKENDS = ["base", "psi4"]
INSTALLED_QCHEMISTRY_BACKENDS = {"base": QuantumChemistryBase}
try:
from .psi4_interface import QuantumChemistryPsi4
INSTALLED_QCHEMISTRY_BACKENDS["psi4"] = QuantumChemistryPsi4
except ImportError:
pass
def show_available_modules():
print("Available QuantumChemistry Modules:")
for k in INSTALLED_QCHEMISTRY_BACKENDS.keys():
print(k)
def show_supported_modules():
print(SUPPORTED_QCHEMISTRY_BACKENDS)
def Molecule(geometry: str,
basis_set: str = None,
transformation: typing.Union[str, typing.Callable] = None,
backend: str = None,
guess_wfn=None,
*args,
**kwargs) -> QuantumChemistryBase:
keyvals = {}
for k, v in kwargs.items():
if k in ParametersQC.__dict__.keys():
keyvals[k] = v
parameters = ParametersQC(geometry=geometry, basis_set=basis_set, multiplicity=1, **keyvals)
if backend is None:
if "psi4" in INSTALLED_QCHEMISTRY_BACKENDS:
backend = "psi4"
elif "pyscf" in INSTALLED_QCHEMISTRY_BACKENDS:
backend = "pyscf"
else:
requirements = [key in kwargs for key in ["one_body_integrals", "two_body_integrals", "nuclear_repulsion", "n_orbitals"]]
if not all(requirements):
raise Exception("No quantum chemistry backends installed on your system\n"
"To use the base functionality you need to pass the following tensors via keyword\n"
"one_body_integrals, two_body_integrals, nuclear_repulsion, n_orbitals\n")
else:
backend = "base"
if backend not in SUPPORTED_QCHEMISTRY_BACKENDS:
raise Exception(str(backend) + " is not (yet) supported by tequila")
if backend not in INSTALLED_QCHEMISTRY_BACKENDS:
raise Exception(str(backend) + " was not found on your system")
if guess_wfn is not None and backend != 'psi4':
raise Exception("guess_wfn only works for psi4")
if basis_set is None and backend != "base":
raise Exception("no basis_set provided for backend={}".format(backend))
elif basis_set is None:
basis_set = "custom"
parameters.basis_set=basis_set
return INSTALLED_QCHEMISTRY_BACKENDS[backend.lower()](parameters=parameters, transformation=transformation, guess_wfn=guess_wfn, *args, **kwargs)
def MoleculeFromOpenFermion(molecule,
transformation: typing.Union[str, typing.Callable] = None,
backend: str = None,
*args,
**kwargs) -> QuantumChemistryBase:
if backend is None:
return QuantumChemistryBase.from_openfermion(molecule=molecule, transformation=transformation, *args, **kwargs)
else:
INSTALLED_QCHEMISTRY_BACKENDS[backend].from_openfermion(molecule=molecule, transformation=transformation, *args,
**kwargs)
| true | true |
f727cd0adc9bd8230963be0b0db67ba35b37d18b | 2,254 | py | Python | tests/apitests/python/test_user_group.py | shaobo322/harbor | eca3de3489a009dfced253779e6aabdaa14dad70 | [
"Apache-2.0"
] | 5 | 2021-06-08T07:10:55.000Z | 2021-09-29T03:17:09.000Z | tests/apitests/python/test_user_group.py | shaobo322/harbor | eca3de3489a009dfced253779e6aabdaa14dad70 | [
"Apache-2.0"
] | 10 | 2020-04-02T01:58:11.000Z | 2021-04-19T02:13:29.000Z | tests/apitests/python/test_user_group.py | shaobo322/harbor | eca3de3489a009dfced253779e6aabdaa14dad70 | [
"Apache-2.0"
] | 4 | 2021-02-19T08:41:13.000Z | 2021-09-29T03:17:12.000Z | # coding: utf-8
"""
Harbor API
These APIs provide services for manipulating Harbor project.
OpenAPI spec version: 1.4.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
sys.path.append(os.environ["SWAGGER_CLIENT_PATH"])
import unittest
import testutils
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.user_group import UserGroup
from swagger_client.models.configurations import Configurations
from pprint import pprint
#Testcase
#12-01-LDAP-usergroup-add
#12-02-LDAP-usergroup-update
#12-03-LDAP-usergroup-delete
class TestUserGroup(unittest.TestCase):
    """Integration tests for Harbor LDAP user groups (testcases 12-01..12-03).

    Runs against a live Harbor instance; covers add, update and (via
    tearDown) delete of an LDAP-backed user group.
    """
    # Shared admin client against the Harbor instance under test.
    product_api = testutils.GetProductApi("admin", "Harbor12345")
    # Id of the group created by the test; 0 means "nothing to clean up".
    groupId = 0
    def setUp(self):
        # Point Harbor at the LDAP test tree so group lookups resolve.
        result = self.product_api.configurations_put(configurations=Configurations(ldap_group_attribute_name="cn", ldap_group_base_dn="ou=groups,dc=example,dc=com", ldap_group_search_filter="objectclass=groupOfNames", ldap_group_search_scope=2))
        pprint(result)
        pass
    def tearDown(self):
        # Delete the group created during the test, if one was recorded.
        if self.groupId > 0 :
            self.product_api.usergroups_group_id_delete(group_id=self.groupId)
        pass
    def testAddUpdateUserGroup(self):
        """Create an LDAP user group, verify it is listed, then rename it."""
        user_group = UserGroup(group_name="harbor_group123", group_type=1, ldap_group_dn="cn=harbor_group,ou=groups,dc=example,dc=com")
        result = self.product_api.usergroups_post(usergroup=user_group)
        pprint(result)
        # The POST response carries no id, so find the group by listing.
        user_groups = self.product_api.usergroups_get()
        found = False
        for ug in user_groups :
            if ug.group_name == "harbor_group123" :
                found = True
                print("Found usergroup")
                pprint(ug)
                # Remember the id so tearDown can clean up.
                self.groupId = ug.id
        self.assertTrue(found)
        # Rename the group and verify the change took effect.
        result = self.product_api.usergroups_group_id_put(self.groupId, usergroup = UserGroup(group_name = "newharbor_group"))
        new_user_group = self.product_api.usergroups_group_id_get(group_id=self.groupId)
        self.assertEqual("newharbor_group", new_user_group.group_name)
        pass
# Allow running this integration-test module directly.
if __name__ == '__main__':
    unittest.main()
| 30.053333 | 245 | 0.712067 |
from __future__ import absolute_import
import os
import sys
sys.path.append(os.environ["SWAGGER_CLIENT_PATH"])
import unittest
import testutils
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.user_group import UserGroup
from swagger_client.models.configurations import Configurations
from pprint import pprint
class TestUserGroup(unittest.TestCase):
product_api = testutils.GetProductApi("admin", "Harbor12345")
groupId = 0
def setUp(self):
result = self.product_api.configurations_put(configurations=Configurations(ldap_group_attribute_name="cn", ldap_group_base_dn="ou=groups,dc=example,dc=com", ldap_group_search_filter="objectclass=groupOfNames", ldap_group_search_scope=2))
pprint(result)
pass
def tearDown(self):
if self.groupId > 0 :
self.product_api.usergroups_group_id_delete(group_id=self.groupId)
pass
def testAddUpdateUserGroup(self):
user_group = UserGroup(group_name="harbor_group123", group_type=1, ldap_group_dn="cn=harbor_group,ou=groups,dc=example,dc=com")
result = self.product_api.usergroups_post(usergroup=user_group)
pprint(result)
user_groups = self.product_api.usergroups_get()
found = False
for ug in user_groups :
if ug.group_name == "harbor_group123" :
found = True
print("Found usergroup")
pprint(ug)
self.groupId = ug.id
self.assertTrue(found)
result = self.product_api.usergroups_group_id_put(self.groupId, usergroup = UserGroup(group_name = "newharbor_group"))
new_user_group = self.product_api.usergroups_group_id_get(group_id=self.groupId)
self.assertEqual("newharbor_group", new_user_group.group_name)
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f727ce30bcfff6735069c4eb270c2a0dffe87867 | 2,140 | py | Python | Sketch.py | sunkr1995/genetic-drawing | 6e5cc755a55c1994770c3f18fb14f1cc651bb700 | [
"MIT"
] | null | null | null | Sketch.py | sunkr1995/genetic-drawing | 6e5cc755a55c1994770c3f18fb14f1cc651bb700 | [
"MIT"
] | null | null | null | Sketch.py | sunkr1995/genetic-drawing | 6e5cc755a55c1994770c3f18fb14f1cc651bb700 | [
"MIT"
] | null | null | null | '''
Author: your name
Date: 2021-07-02 17:20:23
LastEditTime: 2021-07-08 16:28:05
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /genetic-drawing/2.py
'''
#coding:utf-8
import cv2
import math
import numpy as np
def dodgeNaive(image, mask):
    """Color-dodge blend implemented pixel-by-pixel in pure Python.

    Reference implementation of the dodge operation (see dodgeV2 for the
    fast OpenCV equivalent): result = image * 256 / (255 - mask), clamped
    to the uint8 range.

    :param image: 2-D uint8 grayscale image.
    :param mask: 2-D uint8 mask of the same shape (typically the blurred
        inverted image).
    :return: 2-D uint8 blended image of the same shape.
    """
    # determine the shape of the input image
    # (note: shape[:2] is actually (rows, cols); the names are kept from the
    # original but the indexing below is consistent with them)
    width, height = image.shape[:2]

    # prepare output argument with same size as image
    blend = np.zeros((width, height), np.uint8)

    for col in range(width):
        for row in range(height):
            # do for every pixel
            if mask[col, row] == 255:
                # avoid division by zero
                blend[col, row] = 255
            else:
                # shift image pixel value by 8 bits and divide by the
                # inverse of the *pixel* mask value.  (The original code
                # divided by the whole mask array and then tested
                # ``tmp.any() > 255``, which never clamped anything.)
                # Promote to Python int first so the uint8 shift cannot
                # overflow.
                tmp = (int(image[col, row]) << 8) / (255 - int(mask[col, row]))

                # make sure resulting value stays within uint8 bounds
                if tmp > 255:
                    tmp = 255
                blend[col, row] = int(tmp)

    return blend
def dodgeV2(image, mask):
    """Color-dodge blend via OpenCV: image * 256 / (255 - mask), saturated."""
    inverted_mask = 255 - mask
    return cv2.divide(image, inverted_mask, scale=256)
def burnV2(image, mask):
    """Color-burn blend: the complement of a dodge applied to inverted inputs."""
    dodged = cv2.divide(255 - image, 255 - mask, scale=256)
    return 255 - dodged
def rgb_to_sketch(src_image_name, dst_image_name):
    """Render *src_image_name* as a pencil sketch and save it to *dst_image_name*.

    Pipeline: grayscale -> invert -> Gaussian blur -> color-dodge blend.
    Also displays every intermediate stage in OpenCV windows and blocks
    until a key is pressed.
    """
    img_rgb = cv2.imread(src_image_name)
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    # Alternatively, convert to grayscale directly while reading the image:
    # img_gray = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)

    img_gray_inv = 255 - img_gray
    # sigmaX/sigmaY of 0 lets OpenCV derive sigma from the 21x21 kernel size.
    img_blur = cv2.GaussianBlur(img_gray_inv, ksize=(21, 21),
                                sigmaX=0, sigmaY=0)
    # Dodge the gray image by its blurred inverse to produce the sketch effect.
    img_blend = dodgeV2(img_gray, img_blur)

    cv2.imshow('original', img_rgb)
    cv2.imshow('gray', img_gray)
    cv2.imshow('gray_inv', img_gray_inv)
    cv2.imshow('gray_blur', img_blur)
    cv2.imshow("pencil sketch", img_blend)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite(dst_image_name, img_blend)
if __name__ == '__main__':
    # Input and output paths for a direct script run.
    src_image_name = '02.jpg'
    dst_image_name = 'sketch_02.jpg'
    rgb_to_sketch(src_image_name, dst_image_name)
| 28.918919 | 64 | 0.614019 |
import cv2
import math
import numpy as np
def dodgeNaive(image, mask):
width, height = image.shape[:2]
blend = np.zeros((width, height), np.uint8)
for col in range(width):
for row in range(height):
if mask[col, row] == 255:
blend[col, row] = 255
else:
tmp = (image[col, row] << 8) / (255 - mask)
if tmp.any() > 255:
tmp = 255
blend[col, row] = tmp
return blend
def dodgeV2(image, mask):
return cv2.divide(image, 255 - mask, scale=256)
def burnV2(image, mask):
return 255 - cv2.divide(255 - image, 255 - mask, scale=256)
def rgb_to_sketch(src_image_name, dst_image_name):
img_rgb = cv2.imread(src_image_name)
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
img_gray_inv = 255 - img_gray
img_blur = cv2.GaussianBlur(img_gray_inv, ksize=(21, 21),
sigmaX=0, sigmaY=0)
img_blend = dodgeV2(img_gray, img_blur)
cv2.imshow('original', img_rgb)
cv2.imshow('gray', img_gray)
cv2.imshow('gray_inv', img_gray_inv)
cv2.imshow('gray_blur', img_blur)
cv2.imshow("pencil sketch", img_blend)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite(dst_image_name, img_blend)
if __name__ == '__main__':
src_image_name = '02.jpg'
dst_image_name = 'sketch_02.jpg'
rgb_to_sketch(src_image_name, dst_image_name)
| true | true |
f727cfe26e46b219012e5f007bb0788969651bf9 | 1,045 | py | Python | app/core/migrations/0004_recipe.py | samacyc/loanBook | 75f50635bfe15e5fd022e9c3fbf2ed165c51a494 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | samacyc/loanBook | 75f50635bfe15e5fd022e9c3fbf2ed165c51a494 | [
"MIT"
] | null | null | null | app/core/migrations/0004_recipe.py | samacyc/loanBook | 75f50635bfe15e5fd022e9c3fbf2ed165c51a494 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-04-25 00:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: introduces the Recipe model with
    # many-to-many links to Ingredients/Tag and an owning user foreign key
    # (a recipe is removed together with its user via CASCADE).

    dependencies = [
        ('core', '0003_ingredients'),
    ]

    operations = [
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('time_minutes', models.IntegerField()),
                ('price', models.DecimalField(decimal_places=2, max_digits=5)),
                ('link', models.CharField(blank=True, max_length=255)),
                ('ingredients', models.ManyToManyField(to='core.Ingredients')),
                ('tags', models.ManyToManyField(to='core.Tag')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 36.034483 | 118 | 0.604785 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0003_ingredients'),
]
operations = [
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('ingredients', models.ManyToManyField(to='core.Ingredients')),
('tags', models.ManyToManyField(to='core.Tag')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f727d135c106864dba69965703e7f5f5144d703d | 1,168 | py | Python | tests/util/network.py | zcomputerwiz/gojiv2-blockchain | 3be896d4dcb48a734f8d2a901ab5648201fbd4d7 | [
"Apache-2.0"
] | 2 | 2022-02-09T04:30:19.000Z | 2022-03-19T14:01:43.000Z | tests/util/network.py | zcomputerwiz/goji-blockchain | 3be896d4dcb48a734f8d2a901ab5648201fbd4d7 | [
"Apache-2.0"
] | 1 | 2021-12-30T09:17:47.000Z | 2021-12-30T09:17:47.000Z | tests/util/network.py | zcomputerwiz/gojiv2-blockchain | 3be896d4dcb48a734f8d2a901ab5648201fbd4d7 | [
"Apache-2.0"
] | 1 | 2022-03-15T08:42:52.000Z | 2022-03-15T08:42:52.000Z | import pytest
from goji.util.network import get_host_addr
class TestNetwork:
    """Resolution tests for get_host_addr with forced IPv4/IPv6 preference.

    NOTE(review): the example.net assertions pin that domain's public
    A/AAAA records; they will break if those records ever change, and they
    require working DNS — confirm this is acceptable for CI.
    """

    @pytest.mark.asyncio
    async def test_get_host_addr4(self):
        # Run these tests forcing IPv4 resolution
        prefer_ipv6 = False
        # Numeric addresses must pass through unchanged.
        assert get_host_addr("127.0.0.1", prefer_ipv6) == "127.0.0.1"
        assert get_host_addr("10.11.12.13", prefer_ipv6) == "10.11.12.13"
        assert get_host_addr("localhost", prefer_ipv6) == "127.0.0.1"
        assert get_host_addr("example.net", prefer_ipv6) == "93.184.216.34"

    @pytest.mark.asyncio
    async def test_get_host_addr6(self):
        # Run these tests forcing IPv6 resolution
        prefer_ipv6 = True
        # Numeric addresses must pass through unchanged.
        assert get_host_addr("::1", prefer_ipv6) == "::1"
        assert get_host_addr("2000:1000::1234:abcd", prefer_ipv6) == "2000:1000::1234:abcd"
        # ip6-localhost is not always available, and localhost is IPv4 only
        # on some systems. Just test neither here.
        # assert get_host_addr("ip6-localhost", prefer_ipv6) == "::1"
        # assert get_host_addr("localhost", prefer_ipv6) == "::1"
        assert get_host_addr("example.net", prefer_ipv6) == "2606:2800:220:1:248:1893:25c8:1946"
| 44.923077 | 96 | 0.662671 | import pytest
from goji.util.network import get_host_addr
class TestNetwork:
@pytest.mark.asyncio
async def test_get_host_addr4(self):
prefer_ipv6 = False
assert get_host_addr("127.0.0.1", prefer_ipv6) == "127.0.0.1"
assert get_host_addr("10.11.12.13", prefer_ipv6) == "10.11.12.13"
assert get_host_addr("localhost", prefer_ipv6) == "127.0.0.1"
assert get_host_addr("example.net", prefer_ipv6) == "93.184.216.34"
@pytest.mark.asyncio
async def test_get_host_addr6(self):
prefer_ipv6 = True
assert get_host_addr("::1", prefer_ipv6) == "::1"
assert get_host_addr("2000:1000::1234:abcd", prefer_ipv6) == "2000:1000::1234:abcd"
assert get_host_addr("example.net", prefer_ipv6) == "2606:2800:220:1:248:1893:25c8:1946"
| true | true |
f727d1701f52f74bac125ca10ff531847da7e541 | 3,111 | py | Python | test/test_session.py | SergeyBurma/B2_Command_Line_Tool | 65d8adf080e1502cd51c78f9bc9ce3b0bc787147 | [
"MIT"
] | 1 | 2020-09-06T09:32:44.000Z | 2020-09-06T09:32:44.000Z | test/test_session.py | SergeyBurma/B2_Command_Line_Tool | 65d8adf080e1502cd51c78f9bc9ce3b0bc787147 | [
"MIT"
] | null | null | null | test/test_session.py | SergeyBurma/B2_Command_Line_Tool | 65d8adf080e1502cd51c78f9bc9ce3b0bc787147 | [
"MIT"
] | null | null | null | ######################################################################
#
# File: test_session.py
#
# Copyright 2018 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from b2.exception import InvalidAuthToken, Unauthorized
from b2.raw_api import ALL_CAPABILITIES
from b2.session import B2Session
from .test_base import TestBase
try:
import unittest.mock as mock
except ImportError:
import mock
class TestB2Session(TestBase):
    """Unit tests for B2Session's auth-token retry and error-reporting logic.

    The raw API is fully mocked; ``side_effect`` lists script the sequence
    of results/exceptions that successive ``do_it`` calls produce.

    NOTE(review): ``assertRaisesRegexp`` is the deprecated Python 2
    compatible alias of ``assertRaisesRegex``; it is kept because this file
    still supports Python 2 (see the ``mock`` import fallback above).
    """

    def setUp(self):
        # Account info always hands out a (fake) auth token.
        self.account_info = mock.MagicMock()
        self.account_info.get_account_auth_token.return_value = 'auth_token'

        self.api = mock.MagicMock()
        self.api.account_info = self.account_info

        # __name__ must be set because B2Session wraps raw-API callables.
        self.raw_api = mock.MagicMock()
        self.raw_api.do_it.__name__ = 'do_it'
        self.raw_api.do_it.side_effect = ['ok']

        self.session = B2Session(self.api, self.raw_api)

    def test_works_first_time(self):
        """A call that succeeds immediately returns the raw result."""
        self.assertEqual('ok', self.session.do_it())

    def test_works_second_time(self):
        """One InvalidAuthToken triggers a re-auth and a successful retry."""
        self.raw_api.do_it.side_effect = [
            InvalidAuthToken('message', 'code'),
            'ok',
        ]
        self.assertEqual('ok', self.session.do_it())

    def test_fails_second_time(self):
        """A second InvalidAuthToken is not retried again and propagates."""
        self.raw_api.do_it.side_effect = [
            InvalidAuthToken('message', 'code'),
            InvalidAuthToken('message', 'code'),
        ]
        with self.assertRaises(InvalidAuthToken):
            self.session.do_it()

    def test_app_key_info_no_info(self):
        """Unauthorized with an unrestricted key mentions 'no restrictions'."""
        self.account_info.get_allowed.return_value = dict(
            bucketId=None,
            bucketName=None,
            capabilities=ALL_CAPABILITIES,
            namePrefix=None,
        )
        self.raw_api.do_it.side_effect = Unauthorized('no_go', 'code')
        with self.assertRaisesRegexp(
            Unauthorized, r'no_go for application key with no restrictions \(code\)'
        ):
            self.session.do_it()

    def test_app_key_info_no_info_no_message(self):
        """An empty server message falls back to the word 'unauthorized'."""
        self.account_info.get_allowed.return_value = dict(
            bucketId=None,
            bucketName=None,
            capabilities=ALL_CAPABILITIES,
            namePrefix=None,
        )
        self.raw_api.do_it.side_effect = Unauthorized('', 'code')
        with self.assertRaisesRegexp(
            Unauthorized, r'unauthorized for application key with no restrictions \(code\)'
        ):
            self.session.do_it()

    def test_app_key_info_all_info(self):
        """A fully restricted key reports capabilities, bucket and prefix."""
        self.account_info.get_allowed.return_value = dict(
            bucketId='123456',
            bucketName='my-bucket',
            capabilities=['readFiles'],
            namePrefix='prefix/',
        )
        self.raw_api.do_it.side_effect = Unauthorized('no_go', 'code')
        with self.assertRaisesRegexp(
            Unauthorized,
            r"no_go for application key with capabilities 'readFiles', restricted to bucket 'my-bucket', restricted to files that start with 'prefix/' \(code\)"
        ):
            self.session.do_it()
| 33.451613 | 160 | 0.608807 | true | true | |
f727d21a828f288a73ec9d141abf5a7e7130abba | 16,717 | py | Python | salt/utils/verify.py | jkur/salt | 3e62675550f9869d550d7787800270e632955d2f | [
"Apache-2.0"
] | null | null | null | salt/utils/verify.py | jkur/salt | 3e62675550f9869d550d7787800270e632955d2f | [
"Apache-2.0"
] | null | null | null | salt/utils/verify.py | jkur/salt | 3e62675550f9869d550d7787800270e632955d2f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
A few checks to make sure the environment is sane
'''
from __future__ import absolute_import
# Original Author: Jeff Schroeder <jeffschroeder@computer.org>
# Import python libs
import os
import re
import sys
import stat
import errno
import socket
import logging
# Import third party libs
if sys.platform.startswith('win'):
import win32file
else:
import resource
# Import salt libs
from salt.log import is_console_configured
from salt.exceptions import SaltClientError
import salt.defaults.exitcodes
import salt.utils
log = logging.getLogger(__name__)
def zmq_version():
    '''
    ZeroMQ python bindings >= 2.1.9 are required

    Returns True when the installed pyzmq version is acceptable (or when
    zmq is not importable at all, i.e. local mode), False otherwise.
    Warnings go to the log when it is configured, else to stderr.
    '''
    try:
        import zmq
    except Exception:
        # Return True for local mode
        return True
    ver = zmq.__version__
    # The last matched group can be None if the version
    # is something like 3.1 and that will work properly
    match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', ver)

    # Fallthrough and hope for the best
    if not match:
        msg = "Using untested zmq python bindings version: '{0}'".format(ver)
        if is_console_configured():
            log.warn(msg)
        else:
            sys.stderr.write("WARNING {0}\n".format(msg))
        return True

    major, minor, point = match.groups()

    # Groups are strings; convert the numeric ones so comparisons work.
    if major.isdigit():
        major = int(major)
    if minor.isdigit():
        minor = int(minor)
    # point very well could be None
    if point and point.isdigit():
        point = int(point)

    if major == 2 and minor == 1:
        # zmq 2.1dev could be built against a newer libzmq
        if "dev" in ver and not point:
            msg = 'Using dev zmq module, please report unexpected results'
            if is_console_configured():
                log.warn(msg)
            else:
                sys.stderr.write("WARNING: {0}\n".format(msg))
            return True
        elif point and point >= 9:
            # 2.1.9 or newer within the 2.1 series is acceptable.
            return True
    elif major > 2 or (major == 2 and minor > 1):
        return True

    # If all else fails, gracefully croak and warn the user
    log.critical('ZeroMQ python bindings >= 2.1.9 are required')
    if 'salt-master' in sys.argv[0]:
        msg = ('The Salt Master is unstable using a ZeroMQ version '
               'lower than 2.1.11 and requires this fix: http://lists.zeromq.'
               'org/pipermail/zeromq-dev/2011-June/012094.html')
        if is_console_configured():
            log.critical(msg)
        else:
            sys.stderr.write('CRITICAL {0}\n'.format(msg))
    return False
def lookup_family(hostname):
    '''
    Resolve a hostname and report the address family of the first result.
    On an IPv6-enabled system this will be AF_INET6, otherwise AF_INET;
    any resolution failure (or an empty result) falls back to AF_INET.
    '''
    try:
        results = socket.getaddrinfo(
            hostname or None, None, socket.AF_UNSPEC, socket.SOCK_STREAM
        )
    except socket.gaierror:
        # Lookup failed: fall back to AF_INET sockets (and v4 addresses).
        return socket.AF_INET
    if not results:
        return socket.AF_INET
    # Each getaddrinfo entry is (family, type, proto, canonname, sockaddr).
    return results[0][0]
def verify_socket(interface, pub_port, ret_port):
    '''
    Attempt to bind to the sockets to verify that they are available

    :param interface: IP address or hostname to bind on
    :param pub_port: publisher port number
    :param ret_port: returner port number
    :return: True if both ports could be bound, False otherwise
    '''
    addr_family = lookup_family(interface)
    # Probe each port with its own short-lived socket.  (The original
    # duplicated this code per socket and also closed each socket twice:
    # once in the try body and again in the finally block.)
    for port in (pub_port, ret_port):
        sock = socket.socket(addr_family, socket.SOCK_STREAM)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind((interface, int(port)))
        except Exception as exc:
            if exc.args:
                msg = ('Unable to bind socket, error: {0}'.format(str(exc)))
            else:
                msg = ('Unable to bind socket, this might not be a problem.'
                       ' Is there another salt-master running?')
            if is_console_configured():
                log.warn(msg)
            else:
                sys.stderr.write('WARNING: {0}\n'.format(msg))
            return False
        finally:
            # Always release the probe socket.
            sock.close()
    return True
def verify_files(files, user):
    '''
    Verify that the named files exist and are owned by the named user

    Missing parent directories and files are created; ownership is fixed
    up on a best-effort basis.  Exits the process when the user does not
    exist or a path cannot be created.

    :param files: iterable of file paths to check/create
    :param user: name of the user that must own the files
    :return: True on success (also True immediately on Windows)
    '''
    if salt.utils.is_windows():
        return True
    import pwd  # after confirming not running Windows
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
    except KeyError:
        err = ('Failed to prepare the Salt environment for user '
               '{0}. The user is not available.\n').format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)
    for fn_ in files:
        dirname = os.path.dirname(fn_)
        try:
            try:
                os.makedirs(dirname)
            except OSError as err:
                # The directory may legitimately exist already.
                if err.errno != errno.EEXIST:
                    raise
            if not os.path.isfile(fn_):
                # Touch the file so it exists with empty content.
                with salt.utils.fopen(fn_, 'w+') as fp_:
                    fp_.write('')

        except OSError as err:
            msg = 'Failed to create path "{0}" - {1}\n'
            sys.stderr.write(msg.format(fn_, err))
            sys.exit(err.errno)

        stats = os.stat(fn_)
        if uid != stats.st_uid:
            try:
                os.chown(fn_, uid, -1)
            except OSError:
                # Best effort: without sufficient privileges chown fails.
                pass
    return True
def verify_env(dirs, user, permissive=False, pki_dir=''):
    '''
    Verify that the named directories are in place and that the environment
    can shake the salt

    Creates missing directories, fixes ownership (when running as root)
    and tightens the pki directory permissions.  Exits the process when
    the user does not exist or a directory cannot be created.

    :param dirs: iterable of directory paths to verify/create
    :param user: name of the user that must own the directories
    :param permissive: allow group ownership by any group root belongs to
    :param pki_dir: the pki directory, which gets extra permission checks
    '''
    if salt.utils.is_windows():
        return True
    import pwd  # after confirming not running Windows
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        groups = salt.utils.get_gid_list(user, include_default=False)
    except KeyError:
        err = ('Failed to prepare the Salt environment for user '
               '{0}. The user is not available.\n').format(user)
        sys.stderr.write(err)
        sys.exit(salt.defaults.exitcodes.EX_NOUSER)
    for dir_ in dirs:
        if not dir_:
            continue
        if not os.path.isdir(dir_):
            try:
                cumask = os.umask(18)  # 18 == 0o022 (NOTE(review): the original comment claimed 077)
                os.makedirs(dir_)
                # If starting the process as root, chown the new dirs
                if os.getuid() == 0:
                    os.chown(dir_, uid, gid)
                os.umask(cumask)
            except OSError as err:
                msg = 'Failed to create directory path "{0}" - {1}\n'
                sys.stderr.write(msg.format(dir_, err))
                sys.exit(err.errno)

        mode = os.stat(dir_)
        # If starting the process as root, chown the new dirs
        if os.getuid() == 0:
            fmode = os.stat(dir_)
            if fmode.st_uid != uid or fmode.st_gid != gid:
                if permissive and fmode.st_gid in groups:
                    # Allow the directory to be owned by any group root
                    # belongs to if we say it's ok to be permissive
                    pass
                else:
                    # chown the file for the new user
                    os.chown(dir_, uid, gid)
            # Recurse into everything except the (potentially huge) jobs dirs.
            for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
                fsubdir = os.path.join(dir_, subdir)
                if '{0}jobs'.format(os.path.sep) in fsubdir:
                    continue
                # NOTE(review): 'dirs' below shadows the outer parameter; the
                # outer for-loop is unaffected because it holds its own
                # iterator, but the naming is misleading.
                for root, dirs, files in os.walk(fsubdir):
                    for name in files:
                        if name.startswith('.'):
                            continue
                        path = os.path.join(root, name)
                        try:
                            fmode = os.stat(path)
                        except (IOError, OSError):
                            # NOTE(review): on stat failure fmode keeps its
                            # previous value and the checks below still run.
                            pass
                        if fmode.st_uid != uid or fmode.st_gid != gid:
                            if permissive and fmode.st_gid in groups:
                                pass
                            else:
                                # chown the file for the new user
                                os.chown(path, uid, gid)
                    for name in dirs:
                        path = os.path.join(root, name)
                        fmode = os.stat(path)
                        if fmode.st_uid != uid or fmode.st_gid != gid:
                            if permissive and fmode.st_gid in groups:
                                pass
                            else:
                                # chown the file for the new user
                                os.chown(path, uid, gid)
        # Allow the pki dir to be 700 or 750, but nothing else.
        # This prevents other users from writing out keys, while
        # allowing the use-case of 3rd-party software (like django)
        # to read in what it needs to integrate.
        #
        # If the permissions aren't correct, default to the more secure 700.
        # If acls are enabled, the pki_dir needs to remain readable, this
        # is still secure because the private keys are still only readbale
        # by the user running the master
        if dir_ == pki_dir:
            smode = stat.S_IMODE(mode.st_mode)
            if smode != 448 and smode != 488:  # 448 == 0o700, 488 == 0o750
                if os.access(dir_, os.W_OK):
                    os.chmod(dir_, 448)  # fall back to 0o700
                else:
                    msg = 'Unable to securely set the permissions of "{0}".'
                    msg = msg.format(dir_)
                    if is_console_configured():
                        log.critical(msg)
                    else:
                        sys.stderr.write("CRITICAL: {0}\n".format(msg))
    # Run the extra verification checks
    zmq_version()
def check_user(user):
    '''
    Check user and assign process uid/gid.

    :param user: name of the user the process should run as
    :return: True when already running as ``user`` (or on Windows) or when
        the switch succeeded; False when the user is unknown or the
        process lacks the privilege to switch
    '''
    if salt.utils.is_windows():
        return True
    if user == salt.utils.get_user():
        return True
    import pwd  # after confirming not running Windows
    try:
        pwuser = pwd.getpwnam(user)
        try:
            # Order matters: set supplementary groups and gid first,
            # because after setuid() the process may no longer have the
            # privilege to change them.
            if hasattr(os, 'initgroups'):
                os.initgroups(user, pwuser.pw_gid)
            else:
                os.setgroups(salt.utils.get_gid_list(user, include_default=False))
            os.setgid(pwuser.pw_gid)
            os.setuid(pwuser.pw_uid)

        except OSError:
            msg = 'Salt configured to run as user "{0}" but unable to switch.'
            msg = msg.format(user)
            if is_console_configured():
                log.critical(msg)
            else:
                sys.stderr.write("CRITICAL: {0}\n".format(msg))
            return False
    except KeyError:
        msg = 'User not found: "{0}"'.format(user)
        if is_console_configured():
            log.critical(msg)
        else:
            sys.stderr.write("CRITICAL: {0}\n".format(msg))
        return False
    return True
def list_path_traversal(path):
    '''
    Return the full chain of directories leading up to, and including, a
    path.  list_path_traversal('/path/to/salt') gives:
        ['/', '/path', '/path/to', '/path/to/salt']
    in that order.

    This routine has been tested on Windows systems as well:
    list_path_traversal('c:\\path\\to\\salt') would return
        ['c:\\', 'c:\\path', 'c:\\path\\to', 'c:\\path\\to\\salt']
    '''
    (head, tail) = os.path.split(path)
    if tail == '':
        # A trailing separator splits to an empty tail: start the chain
        # from the parent instead of the raw input.
        traversal = [head]
        (head, tail) = os.path.split(head)
    else:
        traversal = [path]
    # Walk upward until os.path.split stops making progress (the root).
    while head != traversal[0]:
        traversal.insert(0, head)
        (head, tail) = os.path.split(head)
    return traversal
def check_path_traversal(path, user='root', skip_perm_errors=False):
    '''
    Walk from the root down to ``path`` and verify that the current user
    can read every directory along the way.  This is used for making sure
    a user can read all parent directories of the minion's key before
    trying to go and generate a new key and raising an IOError.

    :raises SaltClientError: on the first unreadable path component,
        unless ``skip_perm_errors`` is set
    '''
    for component in list_path_traversal(path):
        if os.access(component, os.R_OK):
            continue
        msg = 'Could not access {0}.'.format(component)
        if not os.path.exists(component):
            msg += ' Path does not exist.'
        elif user != salt.utils.get_user():
            # Make the error message more intelligent based on how
            # the user invokes salt-call or whatever other script.
            msg += ' Try running as user {0}.'.format(user)
        else:
            msg += ' Please give {0} read permissions.'.format(user)
        # We don't need to bail on config file permission errors
        # if the CLI process is run with the -a flag
        if skip_perm_errors:
            return
        # Propagate this exception up so there isn't a sys.exit()
        # in the middle of code that could be imported elsewhere.
        raise SaltClientError(msg)
def check_max_open_files(opts):
    '''
    Check the number of max allowed open files and adjust if needed

    Logs (at increasing severity) when the number of accepted minion keys
    approaches the soft limit on open file descriptors.

    :param opts: master opts dict; uses 'max_open_files' and 'pki_dir'
    '''
    mof_c = opts.get('max_open_files', 100000)
    if sys.platform.startswith('win'):
        # Check the Windows API for more detail on this
        # http://msdn.microsoft.com/en-us/library/xt874334(v=vs.71).aspx
        # and the python binding http://timgolden.me.uk/pywin32-docs/win32file.html
        mof_s = mof_h = win32file._getmaxstdio()
    else:
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)

    accepted_keys_dir = os.path.join(opts.get('pki_dir'), 'minions')
    accepted_count = len(os.listdir(accepted_keys_dir))

    log.debug(
        'This salt-master instance has accepted {0} minion keys.'.format(
            accepted_count
        )
    )

    level = logging.INFO

    if (accepted_count * 4) <= mof_s:
        # We check for the soft value of max open files here because that's the
        # value the user chose to raise to.
        #
        # The number of accepted keys multiplied by four(4) is lower than the
        # soft value, everything should be OK
        return

    msg = (
        'The number of accepted minion keys({0}) should be lower than 1/4 '
        'of the max open files soft setting({1}). '.format(
            accepted_count, mof_s
        )
    )

    if accepted_count >= mof_s:
        # This should never occur, it might have already crashed
        msg += 'salt-master will crash pretty soon! '
        level = logging.CRITICAL
    elif (accepted_count * 2) >= mof_s:
        # This is way too low, CRITICAL
        level = logging.CRITICAL
    elif (accepted_count * 3) >= mof_s:
        # The accepted count is more than a third of the limit: WARN
        level = logging.WARNING
    elif (accepted_count * 4) >= mof_s:
        level = logging.INFO

    if mof_c < mof_h:
        msg += ('According to the system\'s hard limit, there\'s still a '
                'margin of {0} to raise the salt\'s max_open_files '
                'setting. ').format(mof_h - mof_c)

    msg += 'Please consider raising this value.'
    log.log(level=level, msg=msg)
def clean_path(root, path, subdir=False):
    '''
    Accepts the root the path needs to be under and verifies that the path
    is under said root. Pass in subdir=True if the path can result in a
    subdirectory of the root instead of having to reside directly in the
    root.

    :param root: absolute directory the path must live under
    :param path: absolute or root-relative path to check
    :param subdir: allow any depth under ``root``, not just direct children
    :return: the normalized path when it is confined to ``root``, else ''
    '''
    if not os.path.isabs(root):
        return ''
    if not os.path.isabs(path):
        path = os.path.join(root, path)
    path = os.path.normpath(path)
    root = os.path.normpath(root)
    if subdir:
        # Require a path-separator boundary after the root: a plain
        # startswith() check would wrongly accept sibling directories
        # such as '/rootdir-evil' for a root of '/rootdir'.
        # os.path.join(root, '') appends exactly one separator and
        # leaves the filesystem root ('/') unchanged.
        if path == root or path.startswith(os.path.join(root, '')):
            return path
    else:
        if os.path.dirname(path) == root:
            return path
    return ''
def valid_id(opts, id_):
    '''
    Returns if the passed id is valid

    :param opts: minion/master opts dict; must contain 'pki_dir'
    :param id_: the minion id to validate
    :return: True when the id resolves to a path inside the pki dir
    '''
    try:
        return bool(clean_path(opts['pki_dir'], id_))
    except (AttributeError, KeyError, TypeError):
        # opts without 'pki_dir', or an id_ of the wrong type (e.g. None,
        # which makes os.path.join raise TypeError), is simply invalid.
        # (The original bound the exception to an unused variable.)
        return False
def safe_py_code(code):
    '''
    Check a string to see if it has any potentially unsafe routines which
    could be executed via python, this routine is used to improve the
    safety of modules such as virtualenv

    :param code: the string to scan
    :return: False when any blacklisted substring appears, True otherwise
    '''
    bads = (
        'import',
        ';',
        'subprocess',
        'eval',
        'open',
        'file',
        'exec',
        'input')
    # Substring membership is clearer (and cheaper) than str.count().
    return not any(bad in code for bad in bads)
| 34.046843 | 83 | 0.562481 |
from __future__ import absolute_import
import os
import re
import sys
import stat
import errno
import socket
import logging
if sys.platform.startswith('win'):
import win32file
else:
import resource
from salt.log import is_console_configured
from salt.exceptions import SaltClientError
import salt.defaults.exitcodes
import salt.utils
log = logging.getLogger(__name__)
def zmq_version():
try:
import zmq
except Exception:
return True
ver = zmq.__version__
match = re.match(r'^(\d+)\.(\d+)(?:\.(\d+))?', ver)
if not match:
msg = "Using untested zmq python bindings version: '{0}'".format(ver)
if is_console_configured():
log.warn(msg)
else:
sys.stderr.write("WARNING {0}\n".format(msg))
return True
major, minor, point = match.groups()
if major.isdigit():
major = int(major)
if minor.isdigit():
minor = int(minor)
if point and point.isdigit():
point = int(point)
if major == 2 and minor == 1:
if "dev" in ver and not point:
msg = 'Using dev zmq module, please report unexpected results'
if is_console_configured():
log.warn(msg)
else:
sys.stderr.write("WARNING: {0}\n".format(msg))
return True
elif point and point >= 9:
return True
elif major > 2 or (major == 2 and minor > 1):
return True
log.critical('ZeroMQ python bindings >= 2.1.9 are required')
if 'salt-master' in sys.argv[0]:
msg = ('The Salt Master is unstable using a ZeroMQ version '
'lower than 2.1.11 and requires this fix: http://lists.zeromq.'
'org/pipermail/zeromq-dev/2011-June/012094.html')
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write('CRITICAL {0}\n'.format(msg))
return False
def lookup_family(hostname):
fallback = socket.AF_INET
try:
hostnames = socket.getaddrinfo(
hostname or None, None, socket.AF_UNSPEC, socket.SOCK_STREAM
)
if not hostnames:
return fallback
h = hostnames[0]
return h[0]
except socket.gaierror:
return fallback
def verify_socket(interface, pub_port, ret_port):
addr_family = lookup_family(interface)
pubsock = socket.socket(addr_family, socket.SOCK_STREAM)
retsock = socket.socket(addr_family, socket.SOCK_STREAM)
try:
pubsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
pubsock.bind((interface, int(pub_port)))
pubsock.close()
retsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
retsock.bind((interface, int(ret_port)))
retsock.close()
result = True
except Exception as exc:
if exc.args:
msg = ('Unable to bind socket, error: {0}'.format(str(exc)))
else:
msg = ('Unable to bind socket, this might not be a problem.'
' Is there another salt-master running?')
if is_console_configured():
log.warn(msg)
else:
sys.stderr.write('WARNING: {0}\n'.format(msg))
result = False
finally:
pubsock.close()
retsock.close()
return result
def verify_files(files, user):
if salt.utils.is_windows():
return True
import pwd
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
except KeyError:
err = ('Failed to prepare the Salt environment for user '
'{0}. The user is not available.\n').format(user)
sys.stderr.write(err)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
for fn_ in files:
dirname = os.path.dirname(fn_)
try:
try:
os.makedirs(dirname)
except OSError as err:
if err.errno != errno.EEXIST:
raise
if not os.path.isfile(fn_):
with salt.utils.fopen(fn_, 'w+') as fp_:
fp_.write('')
except OSError as err:
msg = 'Failed to create path "{0}" - {1}\n'
sys.stderr.write(msg.format(fn_, err))
sys.exit(err.errno)
stats = os.stat(fn_)
if uid != stats.st_uid:
try:
os.chown(fn_, uid, -1)
except OSError:
pass
return True
def verify_env(dirs, user, permissive=False, pki_dir=''):
if salt.utils.is_windows():
return True
import pwd
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.get_gid_list(user, include_default=False)
except KeyError:
err = ('Failed to prepare the Salt environment for user '
'{0}. The user is not available.\n').format(user)
sys.stderr.write(err)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
for dir_ in dirs:
if not dir_:
continue
if not os.path.isdir(dir_):
try:
cumask = os.umask(18)
os.makedirs(dir_)
if os.getuid() == 0:
os.chown(dir_, uid, gid)
os.umask(cumask)
except OSError as err:
msg = 'Failed to create directory path "{0}" - {1}\n'
sys.stderr.write(msg.format(dir_, err))
sys.exit(err.errno)
mode = os.stat(dir_)
if os.getuid() == 0:
fmode = os.stat(dir_)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(dir_, uid, gid)
for subdir in [a for a in os.listdir(dir_) if 'jobs' not in a]:
fsubdir = os.path.join(dir_, subdir)
if '{0}jobs'.format(os.path.sep) in fsubdir:
continue
for root, dirs, files in os.walk(fsubdir):
for name in files:
if name.startswith('.'):
continue
path = os.path.join(root, name)
try:
fmode = os.stat(path)
except (IOError, OSError):
pass
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
for name in dirs:
path = os.path.join(root, name)
fmode = os.stat(path)
if fmode.st_uid != uid or fmode.st_gid != gid:
if permissive and fmode.st_gid in groups:
pass
else:
# chown the file for the new user
os.chown(path, uid, gid)
# Allow the pki dir to be 700 or 750, but nothing else.
# This prevents other users from writing out keys, while
# allowing the use-case of 3rd-party software (like django)
# to read in what it needs to integrate.
#
# If the permissions aren't correct, default to the more secure 700.
if dir_ == pki_dir:
smode = stat.S_IMODE(mode.st_mode)
if smode != 448 and smode != 488:
if os.access(dir_, os.W_OK):
os.chmod(dir_, 448)
else:
msg = 'Unable to securely set the permissions of "{0}".'
msg = msg.format(dir_)
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write("CRITICAL: {0}\n".format(msg))
zmq_version()
def check_user(user):
if salt.utils.is_windows():
return True
if user == salt.utils.get_user():
return True
import pwd
try:
pwuser = pwd.getpwnam(user)
try:
if hasattr(os, 'initgroups'):
os.initgroups(user, pwuser.pw_gid)
else:
os.setgroups(salt.utils.get_gid_list(user, include_default=False))
os.setgid(pwuser.pw_gid)
os.setuid(pwuser.pw_uid)
except OSError:
msg = 'Salt configured to run as user "{0}" but unable to switch.'
msg = msg.format(user)
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write("CRITICAL: {0}\n".format(msg))
return False
except KeyError:
msg = 'User not found: "{0}"'.format(user)
if is_console_configured():
log.critical(msg)
else:
sys.stderr.write("CRITICAL: {0}\n".format(msg))
return False
return True
def list_path_traversal(path):
out = [path]
(head, tail) = os.path.split(path)
if tail == '':
out = [head]
(head, tail) = os.path.split(head)
while head != out[0]:
out.insert(0, head)
(head, tail) = os.path.split(head)
return out
def check_path_traversal(path, user='root', skip_perm_errors=False):
    """Verify that every ancestor of *path* is readable.

    Raises SaltClientError describing the first unreadable component,
    unless *skip_perm_errors* is set, in which case the check returns
    silently on the first permission problem.
    """
    for component in list_path_traversal(path):
        if os.access(component, os.R_OK):
            continue
        msg = 'Could not access {0}.'.format(component)
        if not os.path.exists(component):
            msg += ' Path does not exist.'
        elif user != salt.utils.get_user():
            msg += ' Try running as user {0}.'.format(user)
        else:
            msg += ' Please give {0} read permissions.'.format(user)
        # Honour the CLI -a flag: treat permission problems as non-fatal.
        if skip_perm_errors:
            return
        # Raise instead of sys.exit() so callers can handle the failure.
        raise SaltClientError(msg)
def check_max_open_files(opts):
    """Warn when the accepted-minion count approaches the open-file limit.

    Each accepted minion key consumes file handles on the master, so this
    compares the number of accepted keys against the soft RLIMIT_NOFILE
    (or the stdio limit on Windows) and logs at increasing severity as
    the headroom shrinks. Returns nothing; output is via the logger.
    """
    # Configured value the user chose (may differ from the OS limit).
    mof_c = opts.get('max_open_files', 100000)
    if sys.platform.startswith('win'):
        # Windows has no rlimits; use the C runtime stdio limit for both.
        mof_s = mof_h = win32file._getmaxstdio()
    else:
        # Soft and hard limits on open file descriptors.
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
    accepted_keys_dir = os.path.join(opts.get('pki_dir'), 'minions')
    accepted_count = len(os.listdir(accepted_keys_dir))
    log.debug(
        'This salt-master instance has accepted {0} minion keys.'.format(
            accepted_count
        )
    )
    level = logging.INFO
    if (accepted_count * 4) <= mof_s:
        # Four times the accepted key count still fits within the soft
        # limit: plenty of headroom, nothing to report.
        return
    msg = (
        'The number of accepted minion keys({0}) should be lower than 1/4 '
        'of the max open files soft setting({1}). '.format(
            accepted_count, mof_s
        )
    )
    if accepted_count >= mof_s:
        # Already at/over the limit; the master may be about to fail.
        msg += 'salt-master will crash pretty soon! '
        level = logging.CRITICAL
    elif (accepted_count * 2) >= mof_s:
        # Less than 2x headroom: critical.
        level = logging.CRITICAL
    elif (accepted_count * 3) >= mof_s:
        # Less than 3x headroom: warning.
        level = logging.WARNING
    elif (accepted_count * 4) >= mof_s:
        # Between 3x and 4x headroom: informational only.
        level = logging.INFO
    if mof_c < mof_h:
        msg += ('According to the system\'s hard limit, there\'s still a '
                'margin of {0} to raise the salt\'s max_open_files '
                'setting. ').format(mof_h - mof_c)
    msg += 'Please consider raising this value.'
    log.log(level=level, msg=msg)
def clean_path(root, path, subdir=False):
    """Accept *path* only if it resolves to a location inside *root*.

    Relative paths are joined onto *root* and the result is normalized
    (collapsing '..' components). With subdir=False the path must be a
    direct child of *root*; with subdir=True any descendant (or *root*
    itself) is accepted. Returns the normalized path on success, '' on
    rejection (including when *root* is not absolute).
    """
    if not os.path.isabs(root):
        return ''
    if not os.path.isabs(path):
        path = os.path.join(root, path)
    path = os.path.normpath(path)
    root = os.path.normpath(root)
    if subdir:
        # Compare on a separator boundary: a bare prefix test would let
        # root='/a/b' wrongly accept '/a/bc/...'. os.path.join(root, '')
        # appends the separator only when one is needed (root='/' stays '/').
        if path == root or path.startswith(os.path.join(root, '')):
            return path
    elif os.path.dirname(path) == root:
        return path
    return ''
def valid_id(opts, id_):
    """Return True when *id_* names an entry directly inside the
    configured pki_dir, i.e. it contains no path-traversal components.

    Returns False when opts lacks 'pki_dir' or is not subscriptable.
    """
    try:
        return bool(clean_path(opts['pki_dir'], id_))
    except (AttributeError, KeyError):
        # opts is missing 'pki_dir', or is not a mapping at all.
        return False
def safe_py_code(code):
    """Heuristically decide whether *code* is safe to execute.

    Rejects any snippet containing one of a fixed set of dangerous
    substrings. This is a plain substring match, so e.g. 'important'
    is also rejected because it contains 'import'.
    """
    forbidden = (
        'import',
        ';',
        'subprocess',
        'eval',
        'open',
        'file',
        'exec',
        'input')
    return not any(token in code for token in forbidden)
| true | true |
f727d2d7e7376be3e613532b9e1ea017af35747c | 1,794 | py | Python | tests/ml/pipeline_test.py | cyrusradfar/vaex | 6a37bd4509c9a0823b4f01075049f3331fabea77 | [
"MIT"
] | 2 | 2020-12-01T09:41:54.000Z | 2020-12-13T14:10:19.000Z | tests/ml/pipeline_test.py | cyrusradfar/vaex | 6a37bd4509c9a0823b4f01075049f3331fabea77 | [
"MIT"
] | null | null | null | tests/ml/pipeline_test.py | cyrusradfar/vaex | 6a37bd4509c9a0823b4f01075049f3331fabea77 | [
"MIT"
] | null | null | null | import vaex
import vaex.ml
import tempfile
import vaex.ml.datasets
# Iris measurement columns used as model features throughout these tests.
features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width']
def test_pca():
    """Round-trip a fitted PCA through Pipeline save/load, both as the
    transformer itself and via the dataset's state transfer, and check
    the PCA virtual column is reproduced identically."""
    ds = vaex.ml.datasets.load_iris()
    pca = vaex.ml.PCA(features=features, n_components=2)
    pca.fit(ds)
    ds1 = pca.transform(ds)
    # NOTE(review): tempfile.mktemp is deprecated/racy; acceptable in a
    # test, but NamedTemporaryFile would be safer.
    path = tempfile.mktemp('.yaml')
    pipeline = vaex.ml.Pipeline([pca])
    pipeline.save(path)
    pipeline = vaex.ml.Pipeline()
    pipeline.load(path)
    ds2 = pipeline.transform(ds)
    assert ds1.virtual_columns['PCA_1'] == ds2.virtual_columns['PCA_1']
    path = tempfile.mktemp('.yaml')
    # Same round trip, but through the transformed dataset's captured
    # state instead of the transformer object.
    pipeline = vaex.ml.Pipeline([ds1.ml.state_transfer()])
    pipeline.save(path)
    pipeline = vaex.ml.Pipeline()
    pipeline.load(path)
    ds3 = pipeline.transform(ds)
    assert ds1.virtual_columns['PCA_1'] == ds3.virtual_columns['PCA_1']
def test_selections():
    """A selection survives a state-transfer round trip through a saved
    pipeline: the transformed dataset reports the same selected count."""
    ds = vaex.ml.datasets.load_iris()
    ds.select('class_ == 1')
    count1 = ds.count(selection=True)
    # NOTE(review): tempfile.mktemp is deprecated/racy; fine for a test.
    path = tempfile.mktemp('.yaml')
    pipeline = vaex.ml.Pipeline([ds.ml.state_transfer()])
    pipeline.save(path)
    print(path)
    pipeline = vaex.ml.Pipeline()
    pipeline.load(path)
    ds2 = pipeline.transform(ds)
    assert ds2.count(selection=True) == count1
def test_state_transfer():
    """A virtual column defined on one dataset is reproduced on a fresh
    dataset via state transfer; a transfer from a split also saves."""
    ds = vaex.ml.datasets.load_iris()
    ds['test'] = ds.petal_width * ds.petal_length
    test_values = ds.test.evaluate()
    state_transfer = ds.ml.state_transfer()
    # clean dataset: reload so the virtual column only comes back via
    # the state transfer below.
    ds = vaex.ml.datasets.load_iris()
    ds = state_transfer.transform(ds)
    assert test_values.tolist() == ds.test.evaluate().tolist()
    ds1, ds2 = ds.split(0.5)
    state_transfer = ds1.ml.state_transfer()
    path = tempfile.mktemp('.yaml')
    pipeline = vaex.ml.Pipeline([state_transfer])
    pipeline.save(path)
| 27.181818 | 73 | 0.673356 | import vaex
import vaex.ml
import tempfile
import vaex.ml.datasets
features = ['petal_length', 'petal_width', 'sepal_length', 'sepal_width']
def test_pca():
ds = vaex.ml.datasets.load_iris()
pca = vaex.ml.PCA(features=features, n_components=2)
pca.fit(ds)
ds1 = pca.transform(ds)
path = tempfile.mktemp('.yaml')
pipeline = vaex.ml.Pipeline([pca])
pipeline.save(path)
pipeline = vaex.ml.Pipeline()
pipeline.load(path)
ds2 = pipeline.transform(ds)
assert ds1.virtual_columns['PCA_1'] == ds2.virtual_columns['PCA_1']
path = tempfile.mktemp('.yaml')
pipeline = vaex.ml.Pipeline([ds1.ml.state_transfer()])
pipeline.save(path)
pipeline = vaex.ml.Pipeline()
pipeline.load(path)
ds3 = pipeline.transform(ds)
assert ds1.virtual_columns['PCA_1'] == ds3.virtual_columns['PCA_1']
def test_selections():
ds = vaex.ml.datasets.load_iris()
ds.select('class_ == 1')
count1 = ds.count(selection=True)
path = tempfile.mktemp('.yaml')
pipeline = vaex.ml.Pipeline([ds.ml.state_transfer()])
pipeline.save(path)
print(path)
pipeline = vaex.ml.Pipeline()
pipeline.load(path)
ds2 = pipeline.transform(ds)
assert ds2.count(selection=True) == count1
def test_state_transfer():
ds = vaex.ml.datasets.load_iris()
ds['test'] = ds.petal_width * ds.petal_length
test_values = ds.test.evaluate()
state_transfer = ds.ml.state_transfer()
ds = vaex.ml.datasets.load_iris()
ds = state_transfer.transform(ds)
assert test_values.tolist() == ds.test.evaluate().tolist()
ds1, ds2 = ds.split(0.5)
state_transfer = ds1.ml.state_transfer()
path = tempfile.mktemp('.yaml')
pipeline = vaex.ml.Pipeline([state_transfer])
pipeline.save(path)
| true | true |
f727d359d96619c0985b1970043b681970bc7181 | 937 | py | Python | Project_Tuples_Alpha/moduleForFindingTuplesTime.py | zacandcheese/Keyboard-Biometric-Project | 0cdc0fef65b34624e80a5e96e2457c9cf958fb6d | [
"MIT"
] | 1 | 2017-10-03T14:40:09.000Z | 2017-10-03T14:40:09.000Z | Project_Tuples_Alpha/moduleForFindingTuplesTime.py | zacandcheese/Keyboard-Biometric-Project | 0cdc0fef65b34624e80a5e96e2457c9cf958fb6d | [
"MIT"
] | null | null | null | Project_Tuples_Alpha/moduleForFindingTuplesTime.py | zacandcheese/Keyboard-Biometric-Project | 0cdc0fef65b34624e80a5e96e2457c9cf958fb6d | [
"MIT"
] | 2 | 2019-02-20T02:28:13.000Z | 2021-12-01T19:50:19.000Z | __version__ = '1.0'
__author__ = 'Zachary Nowak'
"""STANDARD LIBRARY IMPORTS"""
from statistics import *
def create_dict(tupleList, pressCharTimeLine, pressTimeLine, dataDict):
    """Measure how long each key-tuple takes to type and record medians.

    For every substring in *tupleList*, every non-overlapping occurrence
    in the recorded key history is timed from its first key press to its
    last, and the median duration is stored in *dataDict* keyed by the
    tuple. Tuples that never occur are skipped (previously this raised
    StatisticsError on the empty timing list).

    tupleList: substrings to search for (matched after upper-casing).
    pressCharTimeLine: pressed characters, in order (assumed to be upper
        case — TODO confirm against the key recorder).
    pressTimeLine: press timestamp for each entry of pressCharTimeLine.
    dataDict: dict to populate and return.
    """
    # Build one searchable string out of all key-press events.
    key_history = ''.join(pressCharTimeLine)
    for word in tupleList:
        target = word.upper()
        durations = []
        start = 0
        while True:
            position = key_history.find(target, start)
            if position == -1:
                break
            # Bug fix: the original advanced by len(tupleList[0]) — the
            # FIRST tuple's length — which produced wrong timings (or an
            # IndexError) for any tuple of a different length.
            end = position + len(target)
            durations.append(pressTimeLine[end - 1] - pressTimeLine[position])
            start = end
        if not durations:
            continue
        print("The median is: ", median(durations))
        print("The mean is: ", harmonic_mean(durations))
        dataDict[word] = median(durations)
    return dataDict
| 30.225806 | 75 | 0.701174 | __version__ = '1.0'
__author__ = 'Zachary Nowak'
from statistics import *
def create_dict(tupleList,pressCharTimeLine,pressTimeLine,dataDict):
keyHistory = ""
timingList = [[] for i in range(len(tupleList))]
for char in pressCharTimeLine:
keyHistory += char
for string in tupleList:
i = 0
index = tupleList.index(string)
while(keyHistory.find(string.upper(), i))!= -1:
position = keyHistory.find(string.upper(),i)
i = position + len(tupleList[0])
timingList[index].append(pressTimeLine[i - 1] - pressTimeLine[position])
i = 0
for tuple in timingList:
print("The median is: ", median(tuple))
print("The mean is: ", harmonic_mean(tuple))
dataDict[tupleList[i]] = median(tuple)
i += 1
return dataDict
| true | true |
f727d3a4b3cb35b73b91afefa5df649fd90c9c96 | 1,670 | py | Python | test/python/foursquare_test/source_code_analysis/scala/test_scala_unused_import_remover.py | foursquare/source_code_analysis | 4323efdb1b41c3726c8ddf0d7276698400640bfd | [
"Apache-2.0"
] | 5 | 2015-01-28T14:32:27.000Z | 2020-03-23T03:01:34.000Z | test/python/foursquare_test/source_code_analysis/scala/test_scala_unused_import_remover.py | caiocasado/source_code_analysis | 4323efdb1b41c3726c8ddf0d7276698400640bfd | [
"Apache-2.0"
] | 1 | 2015-01-19T20:16:18.000Z | 2015-02-03T02:35:56.000Z | test/python/foursquare_test/source_code_analysis/scala/test_scala_unused_import_remover.py | caiocasado/source_code_analysis | 4323efdb1b41c3726c8ddf0d7276698400640bfd | [
"Apache-2.0"
] | 6 | 2015-01-09T21:05:16.000Z | 2020-10-29T09:53:14.000Z | # coding=utf-8
# Copyright 2011 Foursquare Labs Inc. All Rights Reserved
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import unittest
from foursquare.source_code_analysis.scala.scala_unused_import_remover import ScalaUnusedImportRemover
class ScalaUnusedImportRemoverTest(unittest.TestCase):
  """Tests for ScalaUnusedImportRemover.

  Imports never referenced in the code body are removed; referenced
  imports and wildcard imports are kept verbatim.
  """
  def _do_test_remover(self, input_text, expected_text):
    """Run the remover (non-writing mode) on *input_text* and assert the
    rewritten text equals *expected_text*."""
    remover = ScalaUnusedImportRemover(False)
    removed_text = remover.apply_to_text('test.scala', input_text).new_text
    self.assertEqual(expected_text, removed_text)
  def test_basic_removal(self):
    """An import that is never referenced (Bar) is removed."""
    self._do_test_remover(
      """
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
if(Foo) {
  Baz()
}
""",
      """
import scala.foo.Foo
import com.baz.Baz
if(Foo) {
  Baz()
}
""")
  def test_no_removal(self):
    """When every import is referenced, the text is left unchanged."""
    input_text = """
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
if(Foo) {
  Baz()
} else {
  Bar
}
"""
    self._do_test_remover(input_text, input_text)
  def test_all_removal(self):
    """When no import is referenced, every import is removed."""
    self._do_test_remover(
      """
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
if(x) {
  y()
}
""",
      """
if(x) {
  y()
}
""")
  def test_keep_only_wildcards(self):
    """Wildcard imports are kept even when nothing else is referenced."""
    self._do_test_remover(
      """
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
import boo.biz._
if(x) {
  y()
}
""",
      """
import boo.biz._
if(x) {
  y()
}
""")
  def test_keep_wildcards(self):
    """Wildcard imports are kept alongside referenced named imports."""
    self._do_test_remover(
      """
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
import boo.biz._
if(Foo) {
  y()
}
""",
      """
import scala.foo.Foo
import boo.biz._
if(Foo) {
  y()
}
""")
| 14.910714 | 102 | 0.697006 |
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import unittest
from foursquare.source_code_analysis.scala.scala_unused_import_remover import ScalaUnusedImportRemover
class ScalaUnusedImportRemoverTest(unittest.TestCase):
def _do_test_remover(self, input_text, expected_text):
remover = ScalaUnusedImportRemover(False)
removed_text = remover.apply_to_text('test.scala', input_text).new_text
self.assertEqual(expected_text, removed_text)
def test_basic_removal(self):
self._do_test_remover(
"""
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
if(Foo) {
Baz()
}
""",
"""
import scala.foo.Foo
import com.baz.Baz
if(Foo) {
Baz()
}
""")
def test_no_removal(self):
input_text = """
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
if(Foo) {
Baz()
} else {
Bar
}
"""
self._do_test_remover(input_text, input_text)
def test_all_removal(self):
self._do_test_remover(
"""
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
if(x) {
y()
}
""",
"""
if(x) {
y()
}
""")
def test_keep_only_wildcards(self):
self._do_test_remover(
"""
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
import boo.biz._
if(x) {
y()
}
""",
"""
import boo.biz._
if(x) {
y()
}
""")
def test_keep_wildcards(self):
self._do_test_remover(
"""
import scala.foo.Foo
import com.baz.Baz
import java.bar.Bar
import boo.biz._
if(Foo) {
y()
}
""",
"""
import scala.foo.Foo
import boo.biz._
if(Foo) {
y()
}
""")
| true | true |
f727d3d70ff583c6743d4843417bbc2ecf9584ca | 4,770 | py | Python | stackoverflow/venv/lib/python3.6/site-packages/twisted/positioning/test/test_sentence.py | wusirs/learn_python3_spider | a3301f8112e4ded25c3578162db8c6a263a0693b | [
"MIT"
] | 9,953 | 2019-04-03T23:41:04.000Z | 2022-03-31T11:54:44.000Z | stackoverflow/venv/lib/python3.6/site-packages/twisted/positioning/test/test_sentence.py | W4LKURE/learn_python3_spider | 98dd354a41598b31302641f9a0ea49d1ecfa0fb1 | [
"MIT"
] | 44 | 2019-05-27T10:59:29.000Z | 2022-03-31T14:14:29.000Z | stackoverflow/venv/lib/python3.6/site-packages/twisted/positioning/test/test_sentence.py | W4LKURE/learn_python3_spider | 98dd354a41598b31302641f9a0ea49d1ecfa0fb1 | [
"MIT"
] | 2,803 | 2019-04-06T13:15:33.000Z | 2022-03-31T07:42:01.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for positioning sentences.
"""
from __future__ import absolute_import, division
import itertools
from twisted.positioning import _sentence
from twisted.trial.unittest import TestCase
# Arbitrary attribute names used throughout these tests to exercise
# sentence attribute handling.
sentinelValueOne = "someStringValue"
sentinelValueTwo = "someOtherStringValue"
class DummyProtocol(object):
    """
    A minimal stand-in protocol that only reports its sentence attributes.
    """
    @staticmethod
    def getSentenceAttributes():
        attributes = ["type"]
        attributes.extend([sentinelValueOne, sentinelValueTwo])
        return attributes
class DummySentence(_sentence._BaseSentence):
    """
    A sentence for L{DummyProtocol}.
    """
    # Restrict attribute access to exactly what the dummy protocol declares.
    ALLOWED_ATTRIBUTES = DummyProtocol.getSentenceAttributes()
class MixinProtocol(_sentence._PositioningSentenceProducerMixin):
    """
    A simple, fake protocol that declaratively tells you the sentences
    it produces using L{base.PositioningSentenceProducerMixin}.
    """
    # The None key means "any sentence type"; the None entry in the list
    # is a deliberate placeholder for an unused field.
    _SENTENCE_CONTENTS = {
        None: [
            sentinelValueOne,
            sentinelValueTwo,
            None # See MixinTests.test_noNoneInSentenceAttributes
        ],
    }
class MixinSentence(_sentence._BaseSentence):
    """
    A sentence for L{MixinProtocol}.
    """
    # Attributes come from the mixin's declarative _SENTENCE_CONTENTS.
    ALLOWED_ATTRIBUTES = MixinProtocol.getSentenceAttributes()
class SentenceTestsMixin(object):
    """
    Tests for positioning protocols and their respective sentences.

    Concrete TestCases mixing this in must provide self.sentenceClass
    (see MixinTests.setUp).
    """
    def test_attributeAccess(self):
        """
        A sentence attribute gets the correct value, and accessing an
        unset attribute (which is specified as being a valid sentence
        attribute) gets L{None}.
        """
        thisSentinel = object()
        sentence = self.sentenceClass({sentinelValueOne: thisSentinel})
        self.assertEqual(getattr(sentence, sentinelValueOne), thisSentinel)
        self.assertIsNone(getattr(sentence, sentinelValueTwo))
    def test_raiseOnMissingAttributeAccess(self):
        """
        Accessing a nonexistent attribute raises C{AttributeError}.
        """
        sentence = self.sentenceClass({})
        self.assertRaises(AttributeError, getattr, sentence, "BOGUS")
    def test_raiseOnBadAttributeAccess(self):
        """
        Accessing bogus attributes raises C{AttributeError}, *even*
        when that attribute actually is in the sentence data.
        """
        sentence = self.sentenceClass({"BOGUS": None})
        self.assertRaises(AttributeError, getattr, sentence, "BOGUS")
    # Class-level fixtures for the repr tests below.
    sentenceType = "tummies"
    reprTemplate = "<%s (%s) {%s}>"
    def _expectedRepr(self, sentenceType="unknown type", dataRepr=""):
        """
        Builds the expected repr for a sentence.
        @param sentenceType: The name of the sentence type (e.g "GPGGA").
        @type sentenceType: C{str}
        @param dataRepr: The repr of the data in the sentence.
        @type dataRepr: C{str}
        @return: The expected repr of the sentence.
        @rtype: C{str}
        """
        clsName = self.sentenceClass.__name__
        return self.reprTemplate % (clsName, sentenceType, dataRepr)
    def test_unknownTypeRepr(self):
        """
        Test the repr of an empty sentence of unknown type.
        """
        sentence = self.sentenceClass({})
        expectedRepr = self._expectedRepr()
        self.assertEqual(repr(sentence), expectedRepr)
    def test_knownTypeRepr(self):
        """
        Test the repr of an empty sentence of known type.
        """
        sentence = self.sentenceClass({"type": self.sentenceType})
        expectedRepr = self._expectedRepr(self.sentenceType)
        self.assertEqual(repr(sentence), expectedRepr)
class MixinTests(TestCase, SentenceTestsMixin):
    """
    Tests for protocols deriving from L{base.PositioningSentenceProducerMixin}
    and their sentences.
    """
    def setUp(self):
        # Provide the fixtures SentenceTestsMixin expects.
        self.protocol = MixinProtocol()
        self.sentenceClass = MixinSentence
    def test_noNoneInSentenceAttributes(self):
        """
        L{None} does not appear in the sentence attributes of the
        protocol, even though it's in the specification.
        This is because L{None} is a placeholder for parts of the sentence you
        don't really need or want, but there are some bits later on in the
        sentence that you do want. The alternative would be to have to specify
        things like "_UNUSED0", "_UNUSED1"... which would end up cluttering
        the sentence data and eventually adapter state.
        """
        sentenceAttributes = self.protocol.getSentenceAttributes()
        self.assertNotIn(None, sentenceAttributes)
        # The raw spec, by contrast, does contain the None placeholder.
        sentenceContents = self.protocol._SENTENCE_CONTENTS
        sentenceSpecAttributes = itertools.chain(*sentenceContents.values())
        self.assertIn(None, sentenceSpecAttributes)
| 30 | 78 | 0.674843 |
from __future__ import absolute_import, division
import itertools
from twisted.positioning import _sentence
from twisted.trial.unittest import TestCase
sentinelValueOne = "someStringValue"
sentinelValueTwo = "someOtherStringValue"
class DummyProtocol(object):
@staticmethod
def getSentenceAttributes():
return ["type", sentinelValueOne, sentinelValueTwo]
class DummySentence(_sentence._BaseSentence):
ALLOWED_ATTRIBUTES = DummyProtocol.getSentenceAttributes()
class MixinProtocol(_sentence._PositioningSentenceProducerMixin):
_SENTENCE_CONTENTS = {
None: [
sentinelValueOne,
sentinelValueTwo,
None
],
}
class MixinSentence(_sentence._BaseSentence):
ALLOWED_ATTRIBUTES = MixinProtocol.getSentenceAttributes()
class SentenceTestsMixin(object):
def test_attributeAccess(self):
thisSentinel = object()
sentence = self.sentenceClass({sentinelValueOne: thisSentinel})
self.assertEqual(getattr(sentence, sentinelValueOne), thisSentinel)
self.assertIsNone(getattr(sentence, sentinelValueTwo))
def test_raiseOnMissingAttributeAccess(self):
sentence = self.sentenceClass({})
self.assertRaises(AttributeError, getattr, sentence, "BOGUS")
def test_raiseOnBadAttributeAccess(self):
sentence = self.sentenceClass({"BOGUS": None})
self.assertRaises(AttributeError, getattr, sentence, "BOGUS")
sentenceType = "tummies"
reprTemplate = "<%s (%s) {%s}>"
def _expectedRepr(self, sentenceType="unknown type", dataRepr=""):
clsName = self.sentenceClass.__name__
return self.reprTemplate % (clsName, sentenceType, dataRepr)
def test_unknownTypeRepr(self):
sentence = self.sentenceClass({})
expectedRepr = self._expectedRepr()
self.assertEqual(repr(sentence), expectedRepr)
def test_knownTypeRepr(self):
sentence = self.sentenceClass({"type": self.sentenceType})
expectedRepr = self._expectedRepr(self.sentenceType)
self.assertEqual(repr(sentence), expectedRepr)
class MixinTests(TestCase, SentenceTestsMixin):
def setUp(self):
self.protocol = MixinProtocol()
self.sentenceClass = MixinSentence
def test_noNoneInSentenceAttributes(self):
sentenceAttributes = self.protocol.getSentenceAttributes()
self.assertNotIn(None, sentenceAttributes)
sentenceContents = self.protocol._SENTENCE_CONTENTS
sentenceSpecAttributes = itertools.chain(*sentenceContents.values())
self.assertIn(None, sentenceSpecAttributes)
| true | true |
f727d41d6e60219428f3f297b23ca0f713529146 | 6,573 | py | Python | FatherSon/HelloWorld2_source_code/listing_22-8.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | 1 | 2019-01-04T05:47:50.000Z | 2019-01-04T05:47:50.000Z | FatherSon/HelloWorld2_source_code/listing_22-8.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | null | null | null | FatherSon/HelloWorld2_source_code/listing_22-8.py | axetang/AxePython | 3b517fa3123ce2e939680ad1ae14f7e602d446a6 | [
"Apache-2.0"
] | null | null | null | # Listing_22-8.py
# Copyright Warren & Csrter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Hangman game using PyQt
import sys
from PyQt4 import QtCore, QtGui, uic
import random
form_class = uic.loadUiType("hangman.ui")[0]
# Find the locations(s) of guessed letters in the secret word
def find_letters(letter, a_string):
    """Return the indices of every occurrence of *letter* in *a_string*.

    Used to reveal all matching positions when the player guesses a
    letter correctly. Returns an empty list when there is no match.
    """
    locations = []
    start = 0
    while True:
        # Single find() per iteration (the original searched twice).
        location = a_string.find(letter, start)
        if location == -1:
            break
        locations.append(location)
        start = location + 1
    return locations
# Replace dashes with letters when the player guesses a letter correctly
def replace_letters(string, locations, letter):
    """Return a copy of *string* with the character at every index in
    *locations* replaced by *letter* (fills in a correct guess)."""
    filled = [letter if index in locations else char
              for index, char in enumerate(string)]
    return ''.join(filled)
# Replace letters with dashes at the start of the program
def dashes(word):
    """Mask every lowercase ASCII letter in *word* with '-', leaving any
    other character (spaces, punctuation, uppercase) visible."""
    lowercase = "abcdefghijklmnopqrstuvwxyz"
    return ''.join('-' if char in lowercase else char for char in word)
class MyWidget(QtGui.QMainWindow, form_class):
    """Main hangman window.

    Wires UI signals to handlers, holds the secret word and guess state,
    and reveals one piece of the man per wrong guess until the player
    wins or loses.
    """
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.btn_guess.clicked.connect(self.btn_guess_clicked) # Connect event handlers
        self.actionExit.triggered.connect(self.menuExit_selected) #
        self.pieces = [self.head, self.body, self.leftarm, self.leftleg, # Parts of the man
            self.rightarm, self.rightleg] # (revealed in this order)
        self.gallows = [self.line1, self.line2, self.line3, self.line4] # Parts of the gallows
        self.pieces_shown = 0
        self.currentword = ""
        # Get the word list (one candidate secret word per line).
        f=open("words.txt", 'r')
        self.lines = f.readlines()
        f.close()
        self.new_game()
    def new_game(self):
        """Reset the board: clear guesses, pick a new word, hide the man."""
        self.guesses.setText("")
        self.currentword = random.choice(self.lines) # Randomly pick a word from the list
        self.currentword = self.currentword.strip()
        for i in self.pieces: # Hide the man
            i.setFrameShadow(QtGui.QFrame.Plain) #
            i.setHidden(True) #
        for i in self.gallows:
            i.setFrameShadow(QtGui.QFrame.Plain)
        self.word.setText(dashes(self.currentword)) # Show the word as dashes
        self.pieces_shown = 0
    # Let the player guess a letter (single char) or a whole word.
    def btn_guess_clicked(self):
        guess = str(self.guessBox.text())
        if str(self.guesses.text()) != "":
            self.guesses.setText(str(self.guesses.text())+", "+guess)
        else:
            self.guesses.setText(guess)
        if len(guess) == 1: # Guess a letter
            if guess in self.currentword: #
                locations = find_letters(guess, self.currentword) #
                self.word.setText(replace_letters(str(self.word.text()), #
                    locations,guess)) #
                if str(self.word.text()) == self.currentword: #
                    self.win() #
            else:
                self.wrong()
        else: # Guess a word
            if guess == self.currentword: #
                self.win() #
            else: #
                self.wrong() #
        self.guessBox.setText("")
    def win(self): # Display a dialog if player wins
        QtGui.QMessageBox.information(self,"Hangman","You win!") #
        self.new_game() #
    # Handle a wrong guess: reveal the next body part; losing ends the game.
    def wrong(self):
        self.pieces_shown += 1
        for i in range(self.pieces_shown): # Reveal another piece of the man
            self.pieces[i].setHidden(False) #
        if self.pieces_shown == len(self.pieces):
            message = "You lose. The word was " + self.currentword # Player lost
            QtGui.QMessageBox.warning(self,"Hangman", message) #
            self.new_game()
    def menuExit_selected(self):
        self.close()
# Standard PyQt bootstrapping: create the application, show the main
# window, and enter the Qt event loop.
app = QtGui.QApplication(sys.argv)
myapp = MyWidget(None)
myapp.show()
app.exec_()
| 54.775 | 138 | 0.386125 |
import sys
from PyQt4 import QtCore, QtGui, uic
import random
form_class = uic.loadUiType("hangman.ui")[0]
def find_letters(letter, a_string):
locations = []
start = 0
while a_string.find(letter, start, len(a_string)) != -1:
location = a_string.find(letter, start, len(a_string))
locations.append(location)
start = location + 1
return locations
def replace_letters(string, locations, letter):
new_string = ''
for i in range (0, len(string)):
if i in locations:
new_string = new_string + letter
else:
new_string = new_string + string[i]
return new_string
def dashes(word):
letters = "abcdefghijklmnopqrstuvwxyz"
new_string = ''
for i in word:
if i in letters:
new_string += "-"
else:
new_string += i
return new_string
class MyWidget(QtGui.QMainWindow, form_class):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.setupUi(self)
self.btn_guess.clicked.connect(self.btn_guess_clicked)
self.actionExit.triggered.connect(self.menuExit_selected)
self.pieces = [self.head, self.body, self.leftarm, self.leftleg,
self.rightarm, self.rightleg]
self.gallows = [self.line1, self.line2, self.line3, self.line4]
self.pieces_shown = 0
self.currentword = ""
f=open("words.txt", 'r')
self.lines = f.readlines()
f.close()
self.new_game()
def new_game(self):
self.guesses.setText("")
self.currentword = random.choice(self.lines)
self.currentword = self.currentword.strip()
for i in self.pieces:
i.setFrameShadow(QtGui.QFrame.Plain)
i.setHidden(True)
for i in self.gallows:
i.setFrameShadow(QtGui.QFrame.Plain)
self.word.setText(dashes(self.currentword))
self.pieces_shown = 0
def btn_guess_clicked(self):
guess = str(self.guessBox.text())
if str(self.guesses.text()) != "":
self.guesses.setText(str(self.guesses.text())+", "+guess)
else:
self.guesses.setText(guess)
if len(guess) == 1:
if guess in self.currentword:
locations = find_letters(guess, self.currentword)
self.word.setText(replace_letters(str(self.word.text()),
locations,guess))
if str(self.word.text()) == self.currentword:
self.win()
else:
self.wrong()
else:
if guess == self.currentword:
self.win()
else:
self.wrong()
self.guessBox.setText("")
def win(self):
QtGui.QMessageBox.information(self,"Hangman","You win!")
self.new_game()
def wrong(self):
self.pieces_shown += 1
for i in range(self.pieces_shown):
self.pieces[i].setHidden(False)
if self.pieces_shown == len(self.pieces):
message = "You lose. The word was " + self.currentword
QtGui.QMessageBox.warning(self,"Hangman", message)
self.new_game()
def menuExit_selected(self):
self.close()
app = QtGui.QApplication(sys.argv)
myapp = MyWidget(None)
myapp.show()
app.exec_()
| true | true |
f727d4361a24d0843a7fbc97eb2728198e8f07f6 | 1,923 | py | Python | models/Gan.py | debashishc/texygan-analysis | f44d559b15da988080bc1a1d84399db04e69d755 | [
"MIT"
] | 881 | 2018-02-06T18:20:34.000Z | 2022-03-29T13:18:12.000Z | models/Gan.py | debashishc/texygan-analysis | f44d559b15da988080bc1a1d84399db04e69d755 | [
"MIT"
] | 48 | 2018-02-13T21:31:24.000Z | 2021-07-03T13:35:21.000Z | models/Gan.py | debashishc/texygan-analysis | f44d559b15da988080bc1a1d84399db04e69d755 | [
"MIT"
] | 224 | 2018-02-07T04:48:31.000Z | 2022-03-18T12:26:25.000Z | from abc import abstractmethod
from utils.utils import init_sess
class Gan:
    """Base container for a GAN experiment.

    Holds the oracle, generator and discriminator models, their data
    loaders, a TF session, and a list of evaluation metrics. Subclasses
    implement the train_* methods for their specific training modes.
    """
    def __init__(self):
        self.oracle = None
        self.generator = None
        self.discriminator = None
        self.gen_data_loader = None
        self.dis_data_loader = None
        self.oracle_data_loader = None
        self.sess = init_sess()
        self.metrics = list()
        self.epoch = 0
        self.pre_epoch_num = 80
        self.adversarial_epoch_num = 100
        self.log = None
        self.reward = None

    def set_oracle(self, oracle):
        self.oracle = oracle

    def set_generator(self, generator):
        self.generator = generator

    def set_discriminator(self, discriminator):
        self.discriminator = discriminator

    def set_data_loader(self, gen_loader, dis_loader, oracle_loader):
        self.gen_data_loader = gen_loader
        self.dis_data_loader = dis_loader
        self.oracle_data_loader = oracle_loader

    def set_sess(self, sess):
        self.sess = sess

    def add_metric(self, metric):
        """Register a metric object (must expose get_name/get_score)."""
        self.metrics.append(metric)

    def add_epoch(self):
        self.epoch += 1

    def reset_epoch(self):
        # Intentionally a no-op for now: the epoch counter persists across
        # training phases. (Removed the unreachable `self.epoch = 0` that
        # used to follow this return.)
        return

    def evaluate(self):
        """Run every registered metric once.

        Returns [epoch, score_1, score_2, ...] and prints a tab-separated
        summary line plus per-metric timing.
        """
        from time import time
        log = "epoch:" + str(self.epoch) + '\t'
        scores = list()
        scores.append(self.epoch)
        for metric in self.metrics:
            tic = time()
            score = metric.get_score()
            log += metric.get_name() + ":" + str(score) + '\t'
            toc = time()
            print('time elapsed of ' + metric.get_name() + ': ' + str(toc - tic))
            scores.append(score)
        print(log)
        return scores

    def check_valid(self):
        # TODO: validate that all components are wired before training.
        pass

    @abstractmethod
    def train_oracle(self):
        pass

    def train_cfg(self):
        pass

    def train_real(self):
        pass
| 24.653846 | 81 | 0.583983 | from abc import abstractmethod
from utils.utils import init_sess
class Gan:
def __init__(self):
self.oracle = None
self.generator = None
self.discriminator = None
self.gen_data_loader = None
self.dis_data_loader = None
self.oracle_data_loader = None
self.sess = init_sess()
self.metrics = list()
self.epoch = 0
self.pre_epoch_num = 80
self.adversarial_epoch_num = 100
self.log = None
self.reward = None
def set_oracle(self, oracle):
self.oracle = oracle
def set_generator(self, generator):
self.generator = generator
def set_discriminator(self, discriminator):
self.discriminator = discriminator
def set_data_loader(self, gen_loader, dis_loader, oracle_loader):
self.gen_data_loader = gen_loader
self.dis_data_loader = dis_loader
self.oracle_data_loader = oracle_loader
def set_sess(self, sess):
self.sess = sess
def add_metric(self, metric):
self.metrics.append(metric)
def add_epoch(self):
self.epoch += 1
def reset_epoch(self):
return
self.epoch = 0
def evaluate(self):
from time import time
log = "epoch:" + str(self.epoch) + '\t'
scores = list()
scores.append(self.epoch)
for metric in self.metrics:
tic = time()
score = metric.get_score()
log += metric.get_name() + ":" + str(score) + '\t'
toc = time()
print('time elapsed of ' + metric.get_name() + ': ' + str(toc - tic))
scores.append(score)
print(log)
return scores
def check_valid(self):
pass
@abstractmethod
def train_oracle(self):
pass
def train_cfg(self):
pass
def train_real(self):
pass
| true | true |
f727d50e8dc0e67cf07b6f0c64f482507f96fdd0 | 1,306 | py | Python | helloWorld/helloWorldApp/consumers.py | jcheon/reddit_clone | e4efc5dac7b131e564296cb2421b296c860b47bf | [
"MIT"
] | 4 | 2019-10-29T22:49:54.000Z | 2020-02-17T06:14:07.000Z | helloWorld/helloWorldApp/consumers.py | jcheon/reddit_clone | e4efc5dac7b131e564296cb2421b296c860b47bf | [
"MIT"
] | 16 | 2019-11-25T02:39:18.000Z | 2022-02-10T13:28:22.000Z | helloWorld/helloWorldApp/consumers.py | jcheon/reddit_clone | e4efc5dac7b131e564296cb2421b296c860b47bf | [
"MIT"
] | 1 | 2020-02-17T06:14:08.000Z | 2020-02-17T06:14:08.000Z | # chat/consumers.py
from channels.generic.websocket import AsyncWebsocketConsumer
import json
class ChatConsumer(AsyncWebsocketConsumer):
    """Per-connection chat consumer.

    Each websocket joins the channel-layer group for its room and relays
    messages between the client and that group.
    """
    async def connect(self):
        """Join the room's group; the room name comes from the URL route."""
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        await self.channel_layer.group_add(self.room_group_name, self.channel_name)
        await self.accept()

    async def disconnect(self, close_code):
        """Leave the room's group when the socket closes."""
        await self.channel_layer.group_discard(self.room_group_name, self.channel_name)

    async def receive(self, text_data):
        """Broadcast an incoming client message to the whole room group."""
        payload = json.loads(text_data)
        event = {
            'type': 'chat_message',
            'message': payload['message']
        }
        await self.channel_layer.group_send(self.room_group_name, event)

    async def chat_message(self, event):
        """Handle a 'chat_message' group event: forward it to the client."""
        await self.send(text_data=json.dumps({
            'message': event['message']
        }))
from channels.generic.websocket import AsyncWebsocketConsumer
import json
class ChatConsumer(AsyncWebsocketConsumer):
    """Websocket consumer that relays chat messages within a room group.

    Defect fixed: dataset-concatenation residue (`| true | true`) was fused
    onto the final `}))` line, corrupting the last statement.
    """
    async def connect(self):
        """Join the channel-layer group for the requested room and accept."""
        # Room name is taken from the URL route kwargs.
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = 'chat_%s' % self.room_name
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        await self.accept()
    async def disconnect(self, close_code):
        """Leave the room group when the socket closes."""
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )
    async def receive(self, text_data):
        """Receive a JSON message from the websocket and broadcast it."""
        text_data_json = json.loads(text_data)
        message = text_data_json['message']
        # 'type' selects the chat_message handler on each group member.
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': message
            }
        )
    async def chat_message(self, event):
        """Group event handler: forward the message to this websocket."""
        message = event['message']
        await self.send(text_data=json.dumps({
            'message': message
        }))
f727d5d867163c130092b8569593782b91841822 | 716 | py | Python | robustness/CLEVER/cal_clever.py | asplos2020/DRTest | c3de497142d9b226e518a1a0f95f7350d2f7acd6 | [
"MIT"
] | 1 | 2021-04-01T07:31:17.000Z | 2021-04-01T07:31:17.000Z | robustness/CLEVER/cal_clever.py | Justobe/DRTest | 85c3c9b2a46cafa7184130f2596c5f9eb3b20bff | [
"MIT"
] | null | null | null | robustness/CLEVER/cal_clever.py | Justobe/DRTest | 85c3c9b2a46cafa7184130f2596c5f9eb3b20bff | [
"MIT"
] | 1 | 2020-12-24T12:12:54.000Z | 2020-12-24T12:12:54.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
clever.py
Compute CLEVER score using collected Lipschitz constants
Copyright (C) 2017-2018, IBM Corp.
Copyright (C) 2017, Lily Weng <twweng@mit.edu>
and Huan Zhang <ecezhang@ucdavis.edu>
This program is licenced under the Apache 2.0 licence,
contained in the LICENCE file in this directory.
"""
from clever import clever_score
# Experiment configuration: selects which collected-Lipschitz-constant
# folder under lipschitz_mat/ gets scored.
dataset='mnist'
models=['vgg13']
attacks=['oritest']
# 'target' picks the targeted-attack result directory.
istarget='target'
#clever_score(data_folder='lipschitz_mat/target/mnist_lenet1')
# Score every (model, attack) combination; untargeted=False means the
# CLEVER score is computed in targeted mode.
for model in models:
    for attack in attacks:
        clever_score(data_folder='lipschitz_mat/'+istarget+'/'+dataset+'/'+model+'/'+attack+'/'+dataset+'_'+model, untargeted=False)
| 26.518519 | 132 | 0.717877 |
from clever import clever_score
# Which collected Lipschitz data to score (targeted mode).
dataset = 'mnist'
models = ['vgg13']
attacks = ['oritest']
istarget = 'target'
for model in models:
    for attack in attacks:
        # Build the result-folder path for this (model, attack) pair.
        folder = f'lipschitz_mat/{istarget}/{dataset}/{model}/{attack}/{dataset}_{model}'
        clever_score(data_folder=folder, untargeted=False)
| true | true |
f727d699f5a615818259e65a2e917920aa02c900 | 6,699 | py | Python | libs/segment_tree.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | libs/segment_tree.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | libs/segment_tree.py | yskang/AlgorithmPracticeWithPython | f7129bd1924a7961489198f0ee052d2cd1e9cf40 | [
"MIT"
] | null | null | null | from types import SimpleNamespace
class Segment_Tree:
    '''
    A Class used to get partial sum of an array and update data
    ...
    Attributes
    ----------
    array : list
        a list which to make a segment tree
    Methods
    -------
    init(tree, start, end, node)
        make segment tree from the array. don't call this method directly.
    sum(left, right, node=1, start=0, end=-1)
        return the partial sum of the array.
    update(index, diff, node=1, start=0, end=-1)
        update the value of the index of array as +diff.
    lazy_sum(left, right, node=1, start=0, end=-1)
        return the partial sum, applying pending lazy range updates.
    update_range(diff, left, right, node=1, start=0, end=-1)
        add +diff to every element in [left, right] via lazy propagation.
    '''
    def __init__(self, array):
        '''
        Parameters
        ----------
        array : list
            the array that you want to make tree
        '''
        self.array = array
        # Each node keeps a running range sum plus a pending (lazy) increment.
        self.tree = [SimpleNamespace(value=0, lazy=0) for _ in range(len(self.array) * 4)]
        self.init(self.tree, 0, len(self.array)-1, 1)
        self.last_index = len(array)-1
    def init(self, tree, start, end, node):
        '''
        Don't Call This Method Directly
        '''
        if start == end:
            tree[node].value = self.array[start]
            return tree[node].value
        mid = (start + end) // 2
        # Internal node value is the sum of its two children.
        tree[node].value = self.init(tree, start, mid, node * 2) + self.init(tree, mid + 1, end, node * 2 + 1)
        return tree[node].value
    def sum(self, left, right, node=1, start=0, end=-1):
        '''
        NOTE: this method does not consult pending lazy values; after
        calling update_range(), query with lazy_sum() instead.
        Parameters
        ----------
        left : int
            start index of the part [left, left+1, left+2 .. right-2, right-1, right]
        right : int
            end index of the part [left, left+1, left+2 .. right-2, right-1, right]
        Returns
        -------
        int
            a sum of the part of the array. sum([left, left+1, left+2 .. right-2, right-1, right])
        '''
        if end == -1:
            end = self.last_index
        if left > end or right < start:
            return 0
        if left <= start and end <= right:
            return self.tree[node].value
        return self.sum(left, right, node*2, start, (start+end)//2) + self.sum(left, right, node*2+1, (start+end)//2+1, end)
    def lazy_sum(self, left, right, node=1, start=0, end=-1):
        '''
        Partial sum of array[left..right] that pushes down any pending
        lazy increments created by update_range() along the way.
        '''
        if end == -1:
            end = self.last_index
        # Apply this node's pending increment to its whole range, then
        # defer the increment to the children before descending.
        if self.tree[node].lazy != 0:
            self.tree[node].value += (end-start+1)*self.tree[node].lazy
            if start != end:
                self.tree[node*2].lazy += self.tree[node].lazy
                self.tree[node*2+1].lazy += self.tree[node].lazy
            self.tree[node].lazy = 0
        if right < start or end < left:
            return 0
        if left <= start and end <= right:
            return self.tree[node].value
        return self.lazy_sum(left, right, node*2, start, (start+end)//2) + self.lazy_sum(left, right, node*2+1, (start+end)//2+1, end)
    def update(self, index, diff, node=1, start=0, end=-1):
        '''
        Parameters
        ----------
        index: int
            the index of array. which you want to update value
        diff: int
            the amount of value which you want to add. if you want to make 4 to 10, put diff to 6
        '''
        if end == -1:
            end = self.last_index
        if not(start <= index <= end):
            return
        self.tree[node].value += diff
        if start != end:
            self.update(index, diff, node*2, start, (start+end)//2)
            self.update(index, diff, node*2+1, (start+end)//2+1, end)
    def update_range(self, diff, left, right, node=1, start=0, end=-1):
        '''
        Add +diff to every array element in [left, right], deferring
        child updates via lazy values. Query with lazy_sum() afterwards.
        '''
        if end == -1:
            end = self.last_index
        # First settle any previously pending increment on this node.
        if self.tree[node].lazy != 0:
            self.tree[node].value += (end-start+1)*self.tree[node].lazy
            if start != end:
                self.tree[node*2].lazy += self.tree[node].lazy
                self.tree[node*2+1].lazy += self.tree[node].lazy
            self.tree[node].lazy = 0
        if right < start or end < left:
            return
        if left <= start and end <= right:
            # Node fully covered: update its sum now, defer the children.
            self.tree[node].value += (end-start+1)*diff
            if start != end:
                self.tree[node*2].lazy += diff
                self.tree[node*2+1].lazy += diff
            return
        self.update_range(diff, left, right, node*2, start, (start+end)//2)
        self.update_range(diff, left, right, node*2+1, (start+end)//2+1, end)
        self.tree[node].value = self.tree[node*2].value + self.tree[node*2+1].value
def init_segment_min(array, tree, node, start, end):
    """Build a min-index segment tree over array[start..end].

    Each tree node stores the index of the minimum element of its range;
    returns the index stored at `node` (i.e. the range minimum's index).
    """
    if start == end:
        tree[node] = start
        return start
    mid = (start + end) // 2
    left_idx = init_segment_min(array, tree, 2 * node, start, mid)
    right_idx = init_segment_min(array, tree, 2 * node + 1, mid + 1, end)
    tree[node] = left_idx if array[left_idx] < array[right_idx] else right_idx
    return tree[node]
def find_min(array, tree, node, start, end, left, right):
    """Return the index of the minimum of array[left..right] using the
    min-index segment tree `tree`; returns -1 for a disjoint range."""
    # Query range does not intersect this node's range.
    if left > end or right < start:
        return -1
    # Node range fully inside the query range: its stored index answers.
    if left <= start and end <= right:
        return tree[node]
    mid = (start + end) // 2
    li = find_min(array, tree, node * 2, start, mid, left, right)
    ri = find_min(array, tree, node * 2 + 1, mid + 1, end, left, right)
    if li == -1:
        return ri
    if ri == -1:
        return li
    return li if array[li] < array[ri] else ri
if __name__ == '__main__':
    # Defects fixed: dataset-concatenation residue fused onto the final
    # print line, plus dead commented-out code removed.
    # Demo 1: point update + range-sum query.
    a = [3, 5, 6, 7, 2, 9, 4, 5, 2, 8, 1, 5]
    segment = Segment_Tree(a)
    print(segment.sum(3, 9))
    segment.update(3, 1)
    print(segment.sum(3, 9))
    # Demo 2: lazy range update + range-sum query.
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    segment = Segment_Tree(a)
    print(segment.lazy_sum(0, 9))
    segment.update_range(10, 0, 4)
    print(segment.lazy_sum(0, 9))
class Segment_Tree:
    '''Segment tree over a list supporting range sums, point updates and
    lazily-propagated range updates (see update_range/lazy_sum).'''
    def __init__(self, array):
        # Each node keeps a running range sum plus a pending (lazy) increment.
        self.array = array
        self.tree = [SimpleNamespace(value=0, lazy=0) for _ in range(len(self.array) * 4)]
        self.init(self.tree, 0, len(self.array)-1, 1)
        self.last_index = len(array)-1
    def init(self, tree, start, end, node):
        '''Recursively build the tree; internal use only.'''
        if start == end:
            tree[node].value = self.array[start]
            return tree[node].value
        mid = (start + end) // 2
        # Internal node value is the sum of its two children.
        tree[node].value = self.init(tree, start, mid, node * 2) + self.init(tree, mid + 1, end, node * 2 + 1)
        return tree[node].value
    def sum(self, left, right, node=1, start=0, end=-1):
        '''Sum of array[left..right].
        NOTE: ignores pending lazy values; after update_range() use
        lazy_sum() instead.'''
        if end == -1:
            end = self.last_index
        if left > end or right < start:
            return 0
        if left <= start and end <= right:
            return self.tree[node].value
        return self.sum(left, right, node*2, start, (start+end)//2) + self.sum(left, right, node*2+1, (start+end)//2+1, end)
    def lazy_sum(self, left, right, node=1, start=0, end=-1):
        '''Sum of array[left..right], pushing down pending lazy increments.'''
        if end == -1:
            end = self.last_index
        # Apply this node's pending increment, then defer it to children.
        if self.tree[node].lazy != 0:
            self.tree[node].value += (end-start+1)*self.tree[node].lazy
            if start != end:
                self.tree[node*2].lazy += self.tree[node].lazy
                self.tree[node*2+1].lazy += self.tree[node].lazy
            self.tree[node].lazy = 0
        if right < start or end < left:
            return 0
        if left <= start and end <= right:
            return self.tree[node].value
        return self.lazy_sum(left, right, node*2, start, (start+end)//2) + self.lazy_sum(left, right, node*2+1, (start+end)//2+1, end)
    def update(self, index, diff, node=1, start=0, end=-1):
        '''Add +diff to array[index] on every node covering that index.'''
        if end == -1:
            end = self.last_index
        if not(start <= index <= end):
            return
        self.tree[node].value += diff
        if start != end:
            self.update(index, diff, node*2, start, (start+end)//2)
            self.update(index, diff, node*2+1, (start+end)//2+1, end)
    def update_range(self, diff, left, right, node=1, start=0, end=-1):
        '''Add +diff to every element in [left, right] with lazy propagation.
        Query with lazy_sum() afterwards.'''
        if end == -1:
            end = self.last_index
        # First settle any previously pending increment on this node.
        if self.tree[node].lazy != 0:
            self.tree[node].value += (end-start+1)*self.tree[node].lazy
            if start != end:
                self.tree[node*2].lazy += self.tree[node].lazy
                self.tree[node*2+1].lazy += self.tree[node].lazy
            self.tree[node].lazy = 0
        if right < start or end < left:
            return
        if left <= start and end <= right:
            # Node fully covered: update its sum now, defer the children.
            self.tree[node].value += (end-start+1)*diff
            if start != end:
                self.tree[node*2].lazy += diff
                self.tree[node*2+1].lazy += diff
            return
        self.update_range(diff, left, right, node*2, start, (start+end)//2)
        self.update_range(diff, left, right, node*2+1, (start+end)//2+1, end)
        self.tree[node].value = self.tree[node*2].value + self.tree[node*2+1].value
def init_segment_min(array, tree, node, start, end):
    # Build a min-index segment tree over array[start..end]: each tree node
    # stores the index of the minimum element of its range. Returns the
    # index stored at `node`.
    if start == end:
        tree[node] = start
        return tree[node]
    mid = (start + end) // 2
    left = init_segment_min(array, tree, node * 2, start, mid)
    right = init_segment_min(array, tree, node * 2 + 1, mid + 1, end)
    # Keep the index of the smaller child value.
    if array[left] < array[right]:
        tree[node] = left
    else:
        tree[node] = right
    return tree[node]
def find_min(array, tree, node, start, end, left, right):
    # Return the index of the minimum of array[left..right] using the
    # min-index segment tree built by init_segment_min; -1 if the query
    # range does not intersect this node's range.
    if left > end or right < start:
        return -1
    if left <= start and end <= right:
        return tree[node]
    left_index = find_min(array, tree, node*2, start, (start+end)//2, left, right)
    right_index = find_min(array, tree, node*2+1, (start+end)//2+1, end, left, right)
    # Combine child answers, ignoring -1 (out-of-range) results.
    if left_index == -1 and right_index == -1:
        return -1
    elif left_index == -1:
        return right_index
    elif right_index == -1:
        return left_index
    else:
        if array[left_index] < array[right_index]:
            return left_index
        return right_index
if __name__ == '__main__':
    # Defect fixed: dataset-concatenation residue (`| true | true`) was
    # fused onto the final print line.
    # Demo 1: point update + range-sum query.
    a = [3, 5, 6, 7, 2, 9, 4, 5, 2, 8, 1, 5]
    segment = Segment_Tree(a)
    print(segment.sum(3, 9))
    segment.update(3, 1)
    print(segment.sum(3, 9))
    # Demo 2: lazy range update + range-sum query.
    a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    segment = Segment_Tree(a)
    print(segment.lazy_sum(0, 9))
    segment.update_range(10, 0, 4)
    print(segment.lazy_sum(0, 9))
f727d70b1803aaf3658f028f09b317b8765886e1 | 1,110 | py | Python | DataSynthesizer/datatypes/IntegerAttribute.py | crangelsmith/synthetic-data-tutorial | a997e045fa8d8384a812ba615af55ae418d68439 | [
"MIT"
] | 68 | 2019-03-21T13:39:01.000Z | 2022-01-22T13:53:43.000Z | DataSynthesizer/datatypes/IntegerAttribute.py | crangelsmith/synthetic-data-tutorial | a997e045fa8d8384a812ba615af55ae418d68439 | [
"MIT"
] | 3 | 2019-03-18T13:29:45.000Z | 2021-08-04T11:13:23.000Z | DataSynthesizer/datatypes/IntegerAttribute.py | crangelsmith/synthetic-data-tutorial | a997e045fa8d8384a812ba615af55ae418d68439 | [
"MIT"
] | 23 | 2020-01-14T10:05:03.000Z | 2021-12-08T03:43:10.000Z | from typing import Union
from pandas import Series
from datatypes.AbstractAttribute import AbstractAttribute
from datatypes.utils.DataType import DataType
class IntegerAttribute(AbstractAttribute):
    """Attribute whose values are integers; a thin specialization of
    AbstractAttribute that coerces bounds and sampled values to int."""
    def __init__(self, name: str, is_candidate_key, is_categorical, histogram_size: Union[int, str], data: Series):
        super().__init__(name, is_candidate_key, is_categorical, histogram_size, data)
        # Integers are numerical; record the concrete data type tag.
        self.is_numerical = True
        self.data_type = DataType.INTEGER
    def infer_domain(self, categorical_domain=None, numerical_range=None):
        """Infer the domain via the base class, then coerce min/max to int."""
        super().infer_domain(categorical_domain, numerical_range)
        self.min = int(self.min)
        self.max = int(self.max)
    def infer_distribution(self):
        # No integer-specific distribution logic; delegate to the base class.
        super().infer_distribution()
    def generate_values_as_candidate_key(self, n):
        # Delegate candidate-key generation to the base class.
        return super().generate_values_as_candidate_key(n)
    def sample_values_from_binning_indices(self, binning_indices):
        """Sample via the base class, then cast non-null entries to int."""
        column = super().sample_values_from_binning_indices(binning_indices)
        column[~column.isnull()] = column[~column.isnull()].astype(int)
        return column
| 37 | 115 | 0.741441 | from typing import Union
from pandas import Series
from datatypes.AbstractAttribute import AbstractAttribute
from datatypes.utils.DataType import DataType
class IntegerAttribute(AbstractAttribute):
    """Integer-valued attribute; delegates to AbstractAttribute and casts
    inferred bounds and sampled values to int."""
    def __init__(self, name: str, is_candidate_key, is_categorical, histogram_size: Union[int, str], data: Series):
        super().__init__(name, is_candidate_key, is_categorical, histogram_size, data)
        self.is_numerical = True
        self.data_type = DataType.INTEGER
    def infer_domain(self, categorical_domain=None, numerical_range=None):
        super().infer_domain(categorical_domain, numerical_range)
        # Domain bounds must be integral for this attribute type.
        self.min, self.max = int(self.min), int(self.max)
    def infer_distribution(self):
        super().infer_distribution()
    def generate_values_as_candidate_key(self, n):
        return super().generate_values_as_candidate_key(n)
    def sample_values_from_binning_indices(self, binning_indices):
        column = super().sample_values_from_binning_indices(binning_indices)
        # Cast sampled values to int, leaving null entries untouched.
        not_null = ~column.isnull()
        column[not_null] = column[not_null].astype(int)
        return column
| true | true |
f727d812d6c4a543c146cb1e1a5fbdc9a8873c15 | 45,449 | py | Python | t2t_bert/utils/tensor2tensor/trax/rlax/ppo.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 34 | 2018-12-19T01:00:57.000Z | 2021-03-26T09:36:37.000Z | t2t_bert/utils/tensor2tensor/trax/rlax/ppo.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 11 | 2018-12-25T03:37:59.000Z | 2021-08-25T14:43:58.000Z | t2t_bert/utils/tensor2tensor/trax/rlax/ppo.py | yyht/bert | 480c909e0835a455606e829310ff949c9dd23549 | [
"Apache-2.0"
] | 9 | 2018-12-27T08:00:44.000Z | 2020-06-08T03:05:14.000Z | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO in JAX.
Notation:
B, scalar - batch size
T, scalar - number of time-steps in a trajectory, or the value of the padded
time-step dimension.
OBS, tuple - shape of a singular observation from the environment.
Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)
A, scalar - Number of actions, assuming a discrete space.
Policy and Value function signatures:
Policy Function :: [B, T] + OBS -> [B, T, A]
Value Function :: [B, T] + OBS -> [B, T, 1]
Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])
i.e. the policy net should take a batch of *trajectories* and at each time-step
in each batch deliver a probability distribution over actions.
NOTE: It doesn't return logits, rather the expectation is that it returns
log-probabilities instead.
NOTE: The policy and value functions need to take care to not take into account
future time-steps while deciding the actions (or value) for the current
time-step.
Policy and Value Function produces a tuple of the expected output of a policy
function and a value function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import logging
import cloudpickle as pickle
import gin
import gym
from jax import grad
from jax import jit
from jax import lax
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.trax import jaxboard
from tensor2tensor.trax import layers as tl
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensorflow.io import gfile
# Default hyperparameters; GAMMA/LAMBDA/EPSILON mirror the gamma/lambda_/
# epsilon keyword defaults used by the loss functions below.
DEBUG_LOGGING = False
GAMMA = 0.99  # reward discount factor
LAMBDA = 0.95  # GAE lambda
EPSILON = 0.1  # PPO clipping fraction
EPOCHS = 50  # 100
N_OPTIMIZER_STEPS = 100
PRINT_EVERY_OPTIMIZER_STEP = 20
BATCH_TRAJECTORIES = 32
def policy_and_value_net(rng_key,
                         batch_observations_shape,
                         observations_dtype,
                         n_actions,
                         bottom_layers_fn=(),
                         two_towers=True):
  """Constructs and initializes a joint policy-and-value network.
  Args:
    rng_key: jax rng key for parameter initialization.
    batch_observations_shape: shape of a batch of observations.
    observations_dtype: dtype of the observations.
    n_actions: int, number of discrete actions (width of the policy head).
    bottom_layers_fn: callable returning the shared/bottom layer stack.
      NOTE(review): the default () is not callable, so callers must always
      supply this argument — confirm before relying on the default.
    two_towers: bool; if True the policy and value heads get separate
      bottom towers, otherwise they share a single bottom stack.
  Returns:
    A pair (params, net) where net maps observations to
    (log-probabilities over actions, value predictions).
  """
  # Layers.
  # Now, with the current logits, one head computes action probabilities and the
  # other computes the value function.
  # NOTE: The LogSoftmax instead of the Softmax because of numerical stability.
  if two_towers:
    layers = [
        tl.Dup(),
        tl.Parallel(
            [bottom_layers_fn(), tl.Dense(n_actions), tl.LogSoftmax()],
            [bottom_layers_fn(), tl.Dense(1)],
        )
    ]
  else:
    layers = [
        bottom_layers_fn(),
        tl.Dup(),
        tl.Parallel(
            [tl.Dense(n_actions), tl.LogSoftmax()],
            [tl.Dense(1)],
        )
    ]
  net = tl.Model(layers)
  params = net.initialize(batch_observations_shape, observations_dtype, rng_key)
  return params, net
def optimizer_fn(net_params, step_size=1e-3):
  """Creates an Adam optimizer triple for the given parameters.
  Returns (opt_state, opt_update, get_params) where opt_state pairs the
  parameters with the Adam slot variables.
  """
  adam = trax_opt.Adam(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08)
  def opt_init(params):
    return (params, adam.tree_init(params))
  def opt_update(step, grads, state):
    return adam.tree_update(step, grads, state[0], state[1])
  def get_params(state):
    return state[0]
  return opt_init(net_params), opt_update, get_params
# Should this be collect 'n' trajectories, or
# Run the env for 'n' steps and take completed trajectories, or
# Any other option?
def collect_trajectories(env,
                         policy_fn,
                         n_trajectories=1,
                         policy=env_problem_utils.GUMBEL_SAMPLING,
                         max_timestep=None,
                         epsilon=0.1,
                         reset=True,
                         len_history_for_policy=32,
                         rng=None):
  """Plays `env` with `policy_fn` and returns completed trajectories.
  Args:
    env: an env_problem.EnvProblem instance.
    policy_fn: callable mapping observations (B, T+1) to action
      log-probabilities (B, T+1, A).
    n_trajectories: int, how many trajectories to collect.
    policy: string, sampling scheme used to pick actions from the policy's
      log-probabilities ("greedy", "epsilon-greedy", "categorical-sampling").
    max_timestep: int or None; if set, a trajectory is returned at this
      time-step instead of waiting for the env to signal done.
    epsilon: float, exploration rate for the "epsilon-greedy" policy.
    reset: bool, whether to reset the envs before collecting.
    len_history_for_policy: int, maximum history length fed to the policy.
    rng: jax rng, splittable.
  Returns:
    A triple (trajectories, n_done, timing_info) where each trajectory is
    a tuple (observations, actions, processed_rewards, infos).
  """
  assert isinstance(env, env_problem.EnvProblem)
  raw_trajectories, n_done, timing_info = (
      env_problem_utils.play_env_problem_with_policy(
          env,
          policy_fn,
          num_trajectories=n_trajectories,
          max_timestep=max_timestep,
          policy_sampling=policy,
          eps=epsilon,
          reset=reset,
          len_history_for_policy=len_history_for_policy,
          rng=rng))
  # Each raw trajectory is Trajectory.as_numpy output:
  # (observation, action, processed_reward, raw_reward, infos).
  # Drop the unused raw_reward at index 3.
  trajectories = []
  for t in raw_trajectories:
    trajectories.append((t[0], t[1], t[2], t[4]))
  return trajectories, n_done, timing_info
# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
  """Returns the scalar used to pad arrays of the given dtype."""
  if dtype == np.uint8:
    return np.uint8(0)
  if dtype == np.uint16:
    return np.uint16(0)
  if dtype == np.float32 or dtype == np.float64:
    return 0.0
  # Any other dtype pads with a plain integer zero.
  return 0
# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
  """Pad trajectories to a bucket length that is a multiple of boundary.
  Args:
    trajectories: list[(observation, actions, rewards, infos)], where each
      observation is shaped (t+1,) + OBS and actions & rewards are shaped
      (t,), with the length of the list being B (batch size).
    boundary: int, bucket length, the actions and rewards are padded to integer
      multiples of boundary.
  Returns:
    tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
        padded_rewards, stacked_padded_infos) where padded_observations is
        shaped (B, T+1) + OBS and
        padded_actions, padded_rewards & reward_mask are shaped (B, T).
        Where T is max(t) rounded up to an integer multiple of boundary.
        padded_length is how much padding we've added and
        reward_mask is 1s for actual rewards and 0s for the padding.
  """
  # Let's compute max(t) over all trajectories.
  t_max = max(r.shape[0] for (_, _, r, _) in trajectories)
  # t_max is rounded to the next multiple of `boundary`
  boundary = int(boundary)
  bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
  # So all obs will be padded to t_max + 1 and actions and rewards to t_max.
  padded_observations = []
  padded_actions = []
  padded_rewards = []
  padded_infos = collections.defaultdict(list)
  padded_lengths = []
  reward_masks = []
  for (o, a, r, i) in trajectories:
    # Determine the amount to pad, this holds true for obs, actions and rewards.
    num_to_pad = bucket_length + 1 - o.shape[0]
    padded_lengths.append(num_to_pad)
    if num_to_pad == 0:
      # Already bucket-sized: keep arrays as-is, mask is all ones.
      padded_observations.append(o)
      padded_actions.append(a)
      padded_rewards.append(r)
      reward_masks.append(onp.ones_like(r, dtype=np.int32))
      if i:
        for k, v in i.items():
          padded_infos[k].append(v)
      continue
    # First pad observations.
    # lax.pad config is (low, high, interior): pad only at the end of axis 0.
    padding_config = tuple([(0, num_to_pad, 0)] + [(0, 0, 0)] * (o.ndim - 1))
    padding_value = get_padding_value(o.dtype)
    action_padding_value = get_padding_value(a.dtype)
    reward_padding_value = get_padding_value(r.dtype)
    padded_obs = lax.pad(o, padding_value, padding_config)
    padded_observations.append(padded_obs)
    # Now pad actions and rewards.
    assert a.ndim == 1 and r.ndim == 1
    padding_config = ((0, num_to_pad, 0),)
    padded_action = lax.pad(a, action_padding_value, padding_config)
    padded_actions.append(padded_action)
    padded_reward = lax.pad(r, reward_padding_value, padding_config)
    padded_rewards.append(padded_reward)
    # Also create the mask to use later.
    reward_mask = onp.ones_like(r, dtype=np.int32)
    reward_masks.append(lax.pad(reward_mask, 0, padding_config))
    if i:
      for k, v in i.items():
        # Create a padding configuration for this value.
        padding_config = [(0, num_to_pad, 0)] + [(0, 0, 0)] * (v.ndim - 1)
        padded_infos[k].append(lax.pad(v, 0.0, tuple(padding_config)))
  # Now stack these padded_infos if they exist.
  stacked_padded_infos = None
  if padded_infos:
    stacked_padded_infos = {k: np.stack(v) for k, v in padded_infos.items()}
  return padded_lengths, np.stack(reward_masks), np.stack(
      padded_observations), np.stack(padded_actions), np.stack(
          padded_rewards), stacked_padded_infos
def rewards_to_go(rewards, mask, gamma=0.99):
  r"""Computes rewards to go.
  Reward to go is defined as follows, the discounted reward that we have to
  yet collect, going forward from this point, i.e.:
  r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
  Args:
    rewards: np.ndarray of shape (B, T) of rewards.
    mask: np.ndarray of shape (B, T) of mask for the rewards.
    gamma: float, discount factor.
  Returns:
    rewards to go, np.ndarray of shape (B, T).
  """
  B, T = rewards.shape  # pylint: disable=invalid-name,unused-variable
  masked_rewards = rewards * mask  # (B, T)
  # Run the recurrence r2g[t] = r[t] + gamma * r2g[t+1] backwards in time:
  # computing r2g[T-1] first keeps every term bounded, whereas the forward
  # form (r2g[t] - r[t]) / gamma overflows for long sequences since
  # gamma < 1.  (A previously kept, commented-out lax.scan variant was
  # removed as dead code.)
  r2gs = [masked_rewards[:, -1]]
  # Go from T-2 down to 0.
  for t in reversed(range(T - 1)):
    r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))
  # The list should have length T.
  assert T == len(r2gs)
  # First we stack them in the correct way to make it (B, T), but these are
  # still from newest (T-1) to oldest (0), so then we flip it on time axis.
  return np.flip(np.stack(r2gs, axis=1), axis=1)
@jit
def value_loss_given_predictions(value_prediction,
                                 rewards,
                                 reward_mask,
                                 gamma=0.99,
                                 epsilon=0.2,
                                 value_prediction_old=None):
  """Computes the value loss given the prediction of the value function.
  Args:
    value_prediction: np.ndarray of shape (B, T+1, 1)
    rewards: np.ndarray of shape (B, T) of rewards.
    reward_mask: np.ndarray of shape (B, T), the mask over rewards.
    gamma: float, discount factor.
    epsilon: float, clip-fraction, used if value_prediction_old isn't None
    value_prediction_old: np.ndarray of shape (B, T+1, 1) of value predictions
      using the old parameters. If provided, we incorporate this in the loss as
      well. This is from the OpenAI baselines implementation.
  Returns:
    The average L2 value loss, averaged over instances where reward_mask is 1.
  """
  B, T = rewards.shape  # pylint: disable=invalid-name
  assert (B, T) == reward_mask.shape
  assert (B, T + 1, 1) == value_prediction.shape
  value_prediction = np.squeeze(value_prediction, axis=2)  # (B, T+1)
  value_prediction = value_prediction[:, :-1] * reward_mask  # (B, T)
  r2g = rewards_to_go(rewards, reward_mask, gamma=gamma)  # (B, T)
  loss = (value_prediction - r2g)**2
  # From the baselines implementation: clip the new prediction to stay
  # within epsilon of the old one and take the pessimistic (larger) loss.
  if value_prediction_old is not None:
    value_prediction_old = np.squeeze(value_prediction_old, axis=2)  # (B, T+1)
    value_prediction_old = value_prediction_old[:, :-1] * reward_mask  # (B, T)
    v_clipped = value_prediction_old + np.clip(
        value_prediction - value_prediction_old, -epsilon, epsilon)
    v_clipped_loss = (v_clipped - r2g)**2
    loss = np.maximum(v_clipped_loss, loss)
  # Take an average on only the points where mask != 0.
  return np.sum(loss) / np.sum(reward_mask)
def deltas(predicted_values, rewards, mask, gamma=0.99):
  r"""Computes one-step TD-residuals from V(s) and rewards.
  A td-residual is defined as:
  delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.
  Args:
    predicted_values: ndarray of shape (B, T+1) of state values V(s_bt)
      (NOTE: expects axis 2 was already squeezed).
    rewards: ndarray of shape (B, T) of rewards.
    mask: ndarray of shape (B, T) of mask for rewards.
    gamma: float, discount factor.
  Returns:
    ndarray of shape (B, T) of one-step TD-residuals.
  """
  values_t = predicted_values[:, :-1]      # V(s_t), shape (B, T)
  values_tplus1 = predicted_values[:, 1:]  # V(s_{t+1}), shape (B, T)
  td_residuals = rewards + (gamma * values_tplus1) - values_t
  return td_residuals * mask
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
  r"""Computes the GAE advantages given the one step TD-residuals.
  The formula for a GAE advantage estimator is as follows:
  A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l}).
  Internally we just call rewards_to_go, since it is the same computation
  with discount factor gamma * lambda_.
  Args:
    td_deltas: np.ndarray of shape (B, T) of one step TD-residuals.
    mask: np.ndarray of shape (B, T) of mask for the residuals. It maybe the
      case that the `td_deltas` are already masked correctly since they are
      produced by `deltas(...)`
    lambda_: float, lambda parameter for GAE estimators.
    gamma: float, discount parameter for GAE estimators.
  Returns:
    GAE advantage estimates.
  """
  return rewards_to_go(td_deltas, mask, lambda_ * gamma)
def chosen_probabs(probab_observations, actions):
  """Gathers the log-probabilities of the chosen actions.
  Args:
    probab_observations: ndarray of shape `[B, T+1, A]` holding the
      log-probability of every action at every time-step.
    actions: ndarray of shape `[B, T]` of chosen action indices in [0, A).
  Returns:
    `[B, T]` ndarray with the log-probabilities of the chosen actions.
  """
  n_batch, n_timesteps = actions.shape
  assert (n_batch, n_timesteps + 1) == probab_observations.shape[:2]
  # Advanced indexing: pick entry [b, t, actions[b, t]] for each (b, t).
  batch_index = np.arange(n_batch)[:, None]
  time_index = np.arange(n_timesteps)
  return probab_observations[batch_index, time_index, actions]
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
  """Computes the probability ratios for each time-step in a trajectory.
  Args:
    p_new: ndarray of shape [B, T+1, A] of the log-probabilities that the policy
      network assigns to all the actions at each time-step in each batch using
      the new parameters.
    p_old: ndarray of shape [B, T+1, A], same as above, but using old policy
      network parameters.
    actions: ndarray of shape [B, T] where each element is from [0, A).
    reward_mask: ndarray of shape [B, T] masking over probabilities.
  Returns:
    probab_ratios: ndarray of shape [B, T], where
    probab_ratios_{b,t} = p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == p_old.shape[:2]
  assert (B, T + 1) == p_new.shape[:2]
  # Pick out the log-probabilities of the actually-taken actions.
  logp_old = chosen_probabs(p_old, actions)
  logp_new = chosen_probabs(p_new, actions)
  assert (B, T) == logp_old.shape
  assert (B, T) == logp_new.shape
  # Since these are log-probabilities, we just subtract them.
  probab_ratios = np.exp(logp_new - logp_old) * reward_mask
  assert (B, T) == probab_ratios.shape
  return probab_ratios
def clipped_probab_ratios(probab_ratios, epsilon=0.2):
  """Clips probability ratios to the PPO trust region [1-epsilon, 1+epsilon]."""
  return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
  """Computes the per-timestep clipped PPO surrogate objective.
  Takes the element-wise minimum of the unclipped (ratio * advantage) and
  the clipped-ratio surrogate, then masks out padded time-steps.
  """
  return np.minimum(
      probab_ratios * advantages,
      clipped_probab_ratios(probab_ratios, epsilon=epsilon) *
      advantages) * reward_mask
@jit
def ppo_loss_given_predictions(log_probab_actions_new,
                               log_probab_actions_old,
                               value_predictions_old,
                               padded_actions,
                               padded_rewards,
                               reward_mask,
                               gamma=0.99,
                               lambda_=0.95,
                               epsilon=0.2):
  """PPO objective, with an eventual minus sign, given predictions.
  Args:
    log_probab_actions_new: [B, T+1, A] log-probs under the new parameters.
    log_probab_actions_old: [B, T+1, A] log-probs under the old parameters.
    value_predictions_old: [B, T+1, 1] value predictions (old parameters).
    padded_actions: [B, T] chosen actions.
    padded_rewards: [B, T] rewards.
    reward_mask: [B, T] 1s for real time-steps, 0s for padding.
    gamma: float, discount factor.
    lambda_: float, GAE lambda.
    epsilon: float, PPO clip fraction.
  Returns:
    Scalar: the negated average clipped surrogate objective (a loss).
  """
  B, T = padded_rewards.shape  # pylint: disable=invalid-name
  assert (B, T) == padded_actions.shape
  assert (B, T) == reward_mask.shape
  _, _, A = log_probab_actions_old.shape  # pylint: disable=invalid-name
  assert (B, T + 1, 1) == value_predictions_old.shape
  assert (B, T + 1, A) == log_probab_actions_old.shape
  assert (B, T + 1, A) == log_probab_actions_new.shape
  # (B, T)
  td_deltas = deltas(
      np.squeeze(value_predictions_old, axis=2),  # (B, T+1)
      padded_rewards,
      reward_mask,
      gamma=gamma)
  # (B, T)
  advantages = gae_advantages(
      td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)
  # Normalize the advantages.
  advantages = (advantages - np.mean(advantages)) / np.std(advantages)
  # (B, T)
  ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,
                                 padded_actions, reward_mask)
  assert (B, T) == ratios.shape
  # (B, T)
  objective = clipped_objective(
      ratios, advantages, reward_mask, epsilon=epsilon)
  assert (B, T) == objective.shape
  # ()
  average_objective = np.sum(objective) / np.sum(reward_mask)
  # Loss is negative objective.
  return -average_objective
@jit
def combined_loss_given_predictions(log_probab_actions_new,
                                    log_probab_actions_old,
                                    value_prediction_new,
                                    value_prediction_old,
                                    padded_actions,
                                    padded_rewards,
                                    reward_mask,
                                    gamma=0.99,
                                    lambda_=0.95,
                                    epsilon=0.2,
                                    c1=1.0,
                                    c2=0.01):
  """Computes the combined (clipped loss + value loss) given predictions.

  Args:
    log_probab_actions_new: (B, T+1, A) log-probs under the current policy.
    log_probab_actions_old: (B, T+1, A) log-probs under the behavior policy.
    value_prediction_new: (B, T+1, 1) current value predictions.
    value_prediction_old: (B, T+1, 1) behavior-policy value predictions.
    padded_actions: (B, T) actions.
    padded_rewards: (B, T) rewards.
    reward_mask: (B, T) padding mask.
    gamma: reward discount factor.
    lambda_: GAE parameter.
    epsilon: clipping radius (used for both PPO and value clipping).
    c1: weight of the value loss.
    c2: weight of the entropy bonus.

  Returns:
    Tuple (combined_loss, ppo_loss, value_loss, entropy_bonus) where
    combined_loss = ppo_loss + c1 * value_loss - c2 * entropy_bonus.
  """
  loss_value = value_loss_given_predictions(
      value_prediction_new,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      value_prediction_old=value_prediction_old,
      epsilon=epsilon)
  loss_ppo = ppo_loss_given_predictions(
      log_probab_actions_new,
      log_probab_actions_old,
      value_prediction_old,
      padded_actions,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      lambda_=lambda_,
      epsilon=epsilon)
  entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)
  return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,
          loss_value, entropy_bonus)
@functools.partial(jit, static_argnums=(3,))
def combined_loss(new_params,
                  log_probab_actions_old,
                  value_predictions_old,
                  policy_and_value_net_apply,
                  padded_observations,
                  padded_actions,
                  padded_rewards,
                  reward_mask,
                  gamma=0.99,
                  lambda_=0.95,
                  epsilon=0.2,
                  c1=1.0,
                  c2=0.01,
                  rng=None):
  """Computes the combined (clipped loss + value loss) given observations.

  Runs the network on `padded_observations` with `new_params` to get fresh
  predictions, then delegates to `combined_loss_given_predictions`. The apply
  function is a static argument under `jit` (static_argnums=(3,)).

  Returns:
    Tuple (combined_loss, ppo_loss, value_loss, entropy_bonus).
  """
  log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(
      padded_observations, new_params, rng=rng)

  # (combined_loss, ppo_loss, value_loss, entropy_bonus)
  return combined_loss_given_predictions(
      log_probab_actions_new,
      log_probab_actions_old,
      value_predictions_new,
      value_predictions_old,
      padded_actions,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      lambda_=lambda_,
      epsilon=epsilon,
      c1=c1,
      c2=c2)
@functools.partial(jit, static_argnums=(2, 3, 4))
def policy_and_value_opt_step(i,
                              opt_state,
                              opt_update,
                              get_params,
                              policy_and_value_net_apply,
                              log_probab_actions_old,
                              value_predictions_old,
                              padded_observations,
                              padded_actions,
                              padded_rewards,
                              reward_mask,
                              c1=1.0,
                              c2=0.01,
                              gamma=0.99,
                              lambda_=0.95,
                              epsilon=0.1,
                              rng=None):
  """Policy and Value optimizer step.

  Takes the gradient of the combined PPO + value loss w.r.t. the current
  params and applies one optimizer update. `opt_update`, `get_params` and
  `policy_and_value_net_apply` are static arguments under `jit`.

  Returns:
    The updated optimizer state.
  """
  # Combined loss function given the new params.
  def policy_and_value_loss(params):
    """Returns the combined loss given just parameters."""
    (loss, _, _, _) = combined_loss(
        params,
        log_probab_actions_old,
        value_predictions_old,
        policy_and_value_net_apply,
        padded_observations,
        padded_actions,
        padded_rewards,
        reward_mask,
        c1=c1,
        c2=c2,
        gamma=gamma,
        lambda_=lambda_,
        epsilon=epsilon,
        rng=rng)
    return loss

  new_params = get_params(opt_state)
  g = grad(policy_and_value_loss)(new_params)
  # TODO(afrozm): Maybe clip gradients?
  return opt_update(i, g, opt_state)
def get_time(t1, t2=None):
  """Returns the elapsed time from `t1` to `t2` (or now) in milliseconds."""
  end = time.time() if t2 is None else t2
  return round((end - t1) * 1000, 2)
def approximate_kl(log_prob_new, log_prob_old, mask):
  """Computes the approximate KL divergence between the old and new log-probs.

  Args:
    log_prob_new: (B, T+1, A) log probs new
    log_prob_old: (B, T+1, A) log probs old
    mask: (B, T)

  Returns:
    Approximate KL.
  """
  # Drop the extra final time-step and zero out padding before averaging.
  delta = (log_prob_old - log_prob_new)[:, :-1] * mask[:, :, np.newaxis]
  return np.sum(delta) / np.sum(mask)
def masked_entropy(log_probs, mask):
  """Computes the entropy for the given log-probs.

  Args:
    log_probs: (B, T+1, A) log probs
    mask: (B, T) mask.

  Returns:
    Entropy.
  """
  mask3 = mask[:, :, np.newaxis]  # (B, T, 1)
  # Drop the extra final time-step and zero out padding.
  lp = log_probs[:, :-1] * mask3
  p = np.exp(lp) * mask3  # (B, T, A), zero at padding
  # Negative mean, over non-masked steps, of sum_a p * log p.
  return -(np.sum(lp * p) / np.sum(mask))
def evaluate_policy(eval_env,
                    get_predictions,
                    temperatures,
                    max_timestep=20000,
                    n_evals=1,
                    len_history_for_policy=32,
                    rng=None):
  """Plays the eval env at each temperature and summarizes the rewards.

  Returns:
    Dict with keys "processed" and "raw"; each maps temperature to a dict
    with "mean" and "std" of the per-trajectory reward sums.
  """
  # Accumulate per-temperature reward sums over all evaluation runs.
  reward_sums = {"processed": collections.defaultdict(list),
                 "raw": collections.defaultdict(list)}
  for eval_rng in jax_random.split(rng, num=n_evals):
    for temperature in temperatures:
      trajs, _, _ = env_problem_utils.play_env_problem_with_policy(
          eval_env,
          get_predictions,
          num_trajectories=eval_env.batch_size,
          max_timestep=max_timestep,
          reset=True,
          policy_sampling=env_problem_utils.GUMBEL_SAMPLING,
          temperature=temperature,
          rng=eval_rng,
          len_history_for_policy=len_history_for_policy)
      # traj[2] is the processed reward sequence, traj[3] the raw one.
      reward_sums["processed"][temperature].extend(
          sum(traj[2]) for traj in trajs)
      reward_sums["raw"][temperature].extend(sum(traj[3]) for traj in trajs)

  # Mean and standard deviation per temperature, for each reward kind.
  return {
      kind: {
          temperature: {"mean": onp.mean(rs), "std": onp.std(rs)}
          for (temperature, rs) in by_temp.items()
      } for (kind, by_temp) in reward_sums.items()
  }
def maybe_restore_params(output_dir, policy_and_value_net_params):
  """Maybe restore the params from the checkpoint dir.

  Args:
    output_dir: Directory where saved model checkpoints are stored.
    policy_and_value_net_params: Default params, returned if model isn't found.

  Returns:
    triple (restored (bool), params, iteration (int)) where iteration is the
    epoch from which we restored the params, 0 if restored is False.
  """
  model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
  # Newest checkpoint first (file names sort by epoch number).
  for model_file in reversed(sorted(model_files)):
    logging.info("Trying to restore model from %s", model_file)
    try:
      with gfile.GFile(model_file, "rb") as f:
        policy_and_value_net_params = pickle.load(f)
      model_file_basename = os.path.basename(model_file)  # model-??????.pkl
      # `filter` returns an iterator on Python 3, so join the digits back into
      # a string first; int(filter(...)) would raise a TypeError.
      i = int("".join(filter(str.isdigit, model_file_basename)))
      return True, policy_and_value_net_params, i
    except EOFError as e:
      logging.error("Unable to load model from: %s with %s", model_file, e)
      # Try an older version.
      continue
  return False, policy_and_value_net_params, 0
def write_eval_reward_summaries(reward_stats_by_mode, summary_writer, epoch):
  """Writes evaluation reward statistics to summary and logs them.

  Args:
    reward_stats_by_mode: Nested dict of structure:
      {
          "raw": {
              <temperature 1>: {
                  "mean": <reward mean>,
                  "std": <reward std>, },
              <temperature 2>: ... },
          "processed": ...
      }
    summary_writer: jaxboard.SummaryWriter.
    epoch: Current epoch number.
  """
  for (reward_mode, stats_by_temperature) in reward_stats_by_mode.items():
    for (temperature, stats) in stats_by_temperature.items():
      # One scalar summary per (mode, stat, temperature) triple.
      for (stat_name, stat) in stats.items():
        tag = ("eval/{reward_mode}_reward_{stat_name}/"
               "temperature_{temperature}").format(reward_mode=reward_mode,
                                                   stat_name=stat_name,
                                                   temperature=temperature)
        summary_writer.scalar(tag, stat, step=epoch)
      logging.info("Epoch [% 6d] Policy Evaluation (%s reward) "
                   "[temperature %.2f] = %10.2f (+/- %.2f)",
                   epoch, reward_mode, temperature,
                   stats["mean"], stats["std"])
@gin.configurable(blacklist=["output_dir"])
def training_loop(
    env,
    eval_env,
    env_name,
    policy_and_value_net_fn,
    policy_and_value_optimizer_fn,
    output_dir,
    epochs=EPOCHS,
    n_optimizer_steps=N_OPTIMIZER_STEPS,
    print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
    target_kl=0.01,
    boundary=20,
    max_timestep=None,
    max_timestep_eval=20000,
    random_seed=None,
    gamma=GAMMA,
    lambda_=LAMBDA,
    epsilon=EPSILON,
    c1=1.0,
    c2=0.01,
    eval_every_n=1000,
    done_frac_for_policy_save=0.5,
    enable_early_stopping=True,
    n_evals=1,
    len_history_for_policy=4,
    eval_temperatures=(1.0, 0.5),
):
  """Runs the training loop for PPO, with fixed policy and value nets.

  Args:
    env: gym.Env to use for training.
    eval_env: gym.Env to use for evaluation.
    env_name: Name of the environment.
    policy_and_value_net_fn: Function defining the policy and value network.
    policy_and_value_optimizer_fn: Function defining the optimizer.
    output_dir: Output dir.
    epochs: Number of epochs to run for.
    n_optimizer_steps: Number of optimizer steps.
    print_every_optimizer_steps: How often to log during the policy
      optimization process.
    target_kl: Policy iteration early stopping.
    boundary: We pad trajectories at integer multiples of this number.
    max_timestep: If set to an integer, maximum number of time-steps in
      a trajectory. Used in the collect procedure.
    max_timestep_eval: If set to an integer, maximum number of time-steps in an
      evaluation trajectory. Used in the collect procedure.
    random_seed: Random seed.
    gamma: Reward discount factor.
    lambda_: N-step TD-error discount factor in GAE.
    epsilon: Random action probability in epsilon-greedy sampling.
    c1: Value loss coefficient.
    c2: Entropy loss coefficient.
    eval_every_n: How frequently to eval the policy.
    done_frac_for_policy_save: Fraction of the trajectories that should be done
      to checkpoint the policy.
    enable_early_stopping: Whether to enable early stopping.
    n_evals: Number of times to evaluate.
    len_history_for_policy: How much of history to give to the policy.
    eval_temperatures: Sequence of temperatures to try for categorical sampling
      during evaluation.
  """
  gfile.makedirs(output_dir)

  # Create summary writers and history.
  train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
  timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
  eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))

  train_sw.text("env_name", env_name)
  timing_sw.text("env_name", env_name)
  eval_sw.text("env_name", env_name)

  jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)

  # Batch Observations Shape = [1, 1] + OBS, because we will eventually call
  # policy and value networks on shape [B, T] +_OBS
  batch_observations_shape = (1, 1) + env.observation_space.shape
  observations_dtype = env.observation_space.dtype

  assert isinstance(env.action_space, gym.spaces.Discrete)
  n_actions = env.action_space.n

  jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)

  # Initialize the policy and value network.
  policy_and_value_net_params, policy_and_value_net_apply = (
      policy_and_value_net_fn(key1, batch_observations_shape,
                              observations_dtype, n_actions))

  # Maybe restore the policy params. If there is nothing to restore, then
  # iteration = 0 and policy_and_value_net_params are returned as is.
  restore, policy_and_value_net_params, iteration = (
      maybe_restore_params(output_dir, policy_and_value_net_params))

  if restore:
    logging.info("Restored parameters from iteration [%d]", iteration)
    # We should start from the next iteration.
    iteration += 1

  policy_and_value_net_apply = jit(policy_and_value_net_apply)

  # Initialize the optimizers.
  policy_and_value_optimizer = (
      policy_and_value_optimizer_fn(policy_and_value_net_params))
  (policy_and_value_opt_state, policy_and_value_opt_update,
   policy_and_value_get_params) = policy_and_value_optimizer

  # Number of trajectories that ended ("done") since the last checkpoint save.
  n_trajectories_done = 0
  # Epoch at which we last wrote a checkpoint.
  last_saved_at = 0

  logging.info("Starting the PPO training loop.")
  for i in range(iteration, epochs):
    epoch_start_time = time.time()

    # Params we'll use to collect the trajectories.
    policy_and_value_net_params = policy_and_value_get_params(
        policy_and_value_opt_state)

    # A function to get the policy and value predictions.
    def get_predictions(observations, rng=None):
      """Returns log-probs, value predictions and key back."""
      key, key1 = jax_random.split(rng, num=2)

      log_probs, value_preds = policy_and_value_net_apply(
          observations, policy_and_value_net_params, rng=key1)

      return log_probs, value_preds, key

    # Evaluate the policy.
    policy_eval_start_time = time.time()
    if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
      jax_rng_key, key = jax_random.split(jax_rng_key, num=2)

      logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)

      reward_stats = evaluate_policy(
          eval_env,
          get_predictions,
          temperatures=eval_temperatures,
          max_timestep=max_timestep_eval,
          n_evals=n_evals,
          len_history_for_policy=len_history_for_policy,
          rng=key)
      write_eval_reward_summaries(reward_stats, eval_sw, epoch=i)
    policy_eval_time = get_time(policy_eval_start_time)

    trajectory_collection_start_time = time.time()
    logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
    jax_rng_key, key = jax_random.split(jax_rng_key)
    trajs, n_done, timing_info = collect_trajectories(
        env,
        policy_fn=get_predictions,
        n_trajectories=env.batch_size,
        max_timestep=max_timestep,
        rng=key,
        len_history_for_policy=len_history_for_policy,
        reset=(i == 0) or restore,
        epsilon=(10.0 / (i + 10.0)))  # this is a different epsilon.
    trajectory_collection_time = get_time(trajectory_collection_start_time)

    logging.vlog(1, "Collecting trajectories took %0.2f msec.",
                 trajectory_collection_time)

    avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
    max_reward = max(np.sum(traj[2]) for traj in trajs)
    min_reward = min(np.sum(traj[2]) for traj in trajs)

    train_sw.scalar("train/reward_mean_truncated", avg_reward, step=i)

    logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
                 avg_reward, max_reward, min_reward,
                 [float(np.sum(traj[2])) for traj in trajs])

    logging.vlog(1,
                 "Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
                 float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
                 max(len(traj[0]) for traj in trajs),
                 min(len(traj[0]) for traj in trajs))
    logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])

    padding_start_time = time.time()
    (_, reward_mask, padded_observations, padded_actions,
     padded_rewards, padded_infos) = pad_trajectories(
         trajs, boundary=boundary)
    padding_time = get_time(padding_start_time)

    logging.vlog(1, "Padding trajectories took %0.2f msec.",
                 get_time(padding_start_time))
    logging.vlog(1, "Padded Observations' shape [%s]",
                 str(padded_observations.shape))
    logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
    logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))

    # Some assertions.
    B, T = padded_actions.shape  # pylint: disable=invalid-name
    assert (B, T) == padded_rewards.shape
    assert (B, T) == reward_mask.shape
    assert (B, T + 1) == padded_observations.shape[:2]
    assert (B, T + 1) + env.observation_space.shape == padded_observations.shape

    log_prob_recompute_start_time = time.time()
    assert ("log_prob_actions" in padded_infos and
            "value_predictions" in padded_infos)
    # These are the actual log-probabs and value predictions seen while picking
    # the actions.
    actual_log_probabs_traj = padded_infos["log_prob_actions"]
    actual_value_predictions_traj = padded_infos["value_predictions"]

    assert (B, T) == actual_log_probabs_traj.shape[:2]
    A = actual_log_probabs_traj.shape[2]  # pylint: disable=invalid-name
    assert (B, T, 1) == actual_value_predictions_traj.shape

    # TODO(afrozm): log-probabs doesn't need to be (B, T+1, A) it can do with
    # (B, T, A), so make that change throughout.

    # NOTE: We don't have the log-probabs and value-predictions for the last
    # observation, so we re-calculate for everything, but use the original ones
    # for all but the last time-step.
    jax_rng_key, key = jax_random.split(jax_rng_key)
    log_probabs_traj, value_predictions_traj, _ = get_predictions(
        padded_observations, rng=key)

    assert (B, T + 1, A) == log_probabs_traj.shape
    assert (B, T + 1, 1) == value_predictions_traj.shape

    # Concatenate the last time-step's log-probabs and value predictions to the
    # actual log-probabs and value predictions and use those going forward.
    log_probabs_traj = np.concatenate(
        (actual_log_probabs_traj, log_probabs_traj[:, -1:, :]), axis=1)
    value_predictions_traj = np.concatenate(
        (actual_value_predictions_traj, value_predictions_traj[:, -1:, :]),
        axis=1)

    log_prob_recompute_time = get_time(log_prob_recompute_start_time)

    # Linear annealing from 0.1 to 0.0
    # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -
    #                                                           (i /
    #                                                            (epochs - 1)))

    # Constant epsilon.
    epsilon_schedule = epsilon

    # Compute value and ppo losses.
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(2, "Starting to compute P&V loss.")
    loss_compute_start_time = time.time()
    cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
        combined_loss(
            policy_and_value_net_params,
            log_probabs_traj,
            value_predictions_traj,
            policy_and_value_net_apply,
            padded_observations,
            padded_actions,
            padded_rewards,
            reward_mask,
            gamma=gamma,
            lambda_=lambda_,
            epsilon=epsilon_schedule,
            c1=c1,
            c2=c2,
            rng=key1))
    loss_compute_time = get_time(loss_compute_start_time)
    logging.vlog(
        1,
        "Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
        cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
        get_time(loss_compute_start_time))

    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(1, "Policy and Value Optimization")
    optimization_start_time = time.time()
    keys = jax_random.split(key1, num=n_optimizer_steps)
    for j in range(n_optimizer_steps):
      k1, k2, k3 = jax_random.split(keys[j], num=3)
      t = time.time()
      # Update the optimizer state.
      policy_and_value_opt_state = policy_and_value_opt_step(
          j,
          policy_and_value_opt_state,
          policy_and_value_opt_update,
          policy_and_value_get_params,
          policy_and_value_net_apply,
          log_probabs_traj,
          value_predictions_traj,
          padded_observations,
          padded_actions,
          padded_rewards,
          reward_mask,
          c1=c1,
          c2=c2,
          gamma=gamma,
          lambda_=lambda_,
          epsilon=epsilon_schedule,
          rng=k1)

      # Compute the approx KL for early stopping.
      new_policy_and_value_net_params = policy_and_value_get_params(
          policy_and_value_opt_state)

      log_probab_actions_new, _ = policy_and_value_net_apply(
          padded_observations, new_policy_and_value_net_params, rng=k2)

      approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
                                 reward_mask)

      early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
      if early_stopping:
        logging.vlog(
            1, "Early stopping policy and value optimization at iter: %d, "
            "with approx_kl: %0.2f", j, approx_kl)
        # We don't return right-away, we want the below to execute on the last
        # iteration.

      t2 = time.time()
      if (((j + 1) % print_every_optimizer_steps == 0) or
          (j == n_optimizer_steps - 1) or early_stopping):
        # Compute and log the loss.
        (loss_combined, loss_ppo, loss_value, entropy_bonus) = (
            combined_loss(
                new_policy_and_value_net_params,
                log_probabs_traj,
                value_predictions_traj,
                policy_and_value_net_apply,
                padded_observations,
                padded_actions,
                padded_rewards,
                reward_mask,
                gamma=gamma,
                lambda_=lambda_,
                epsilon=epsilon_schedule,
                c1=c1,
                c2=c2,
                rng=k3))
        logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
                     get_time(t, t2))
        logging.vlog(
            1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
            " [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
            loss_value, loss_ppo, entropy_bonus)

      if early_stopping:
        break

    optimization_time = get_time(optimization_start_time)

    logging.vlog(
        1, "Total Combined Loss reduction [%0.2f]%%",
        (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))

    # Save parameters every time we see the end of at least a fraction of batch
    # number of trajectories that are done (not completed -- completed includes
    # truncated and done).
    # Also don't save too frequently, enforce a minimum gap.
    # Or if this is the last iteration.
    policy_save_start_time = time.time()
    n_trajectories_done += n_done
    # TODO(afrozm): Refactor to trax.save_state.
    if (((n_trajectories_done >= done_frac_for_policy_save * env.batch_size) and
         (i - last_saved_at > eval_every_n) and
         (((i + 1) % eval_every_n == 0))) or (i == epochs - 1)):
      logging.vlog(1, "Epoch [% 6d] saving model.", i)
      old_model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
      params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
      with gfile.GFile(params_file, "wb") as f:
        pickle.dump(policy_and_value_net_params, f)
      # Remove the old model files.
      for path in old_model_files:
        gfile.remove(path)
      # Reset this number.
      n_trajectories_done = 0
      last_saved_at = i
    policy_save_time = get_time(policy_save_start_time)

    epoch_time = get_time(epoch_start_time)

    logging.info(
        "Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
        " Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
        max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
        entropy_bonus)

    # Collect the per-phase timings for this epoch into one summary dict.
    timing_dict = {
        "epoch": epoch_time,
        "policy_eval": policy_eval_time,
        "trajectory_collection": trajectory_collection_time,
        "padding": padding_time,
        "log_prob_recompute": log_prob_recompute_time,
        "loss_compute": loss_compute_time,
        "optimization": optimization_time,
        "policy_save": policy_save_time,
    }

    timing_dict.update(timing_info)

    for k, v in timing_dict.items():
      timing_sw.scalar("timing/%s" % k, v, step=i)

    max_key_len = max(len(k) for k in timing_dict)
    timing_info_list = [
        "%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
        for k, v in sorted(timing_dict.items())
    ]
    logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))

    # Reset restore.
    restore = False

    # Flush summary writers once in a while.
    if (i + 1) % 1000 == 0 or i == epochs - 1:
      train_sw.flush()
      timing_sw.flush()
      eval_sw.flush()
| 36.388311 | 81 | 0.650685 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
import time
from absl import logging
import cloudpickle as pickle
import gin
import gym
from jax import grad
from jax import jit
from jax import lax
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.trax import jaxboard
from tensor2tensor.trax import layers as tl
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensorflow.io import gfile
DEBUG_LOGGING = False
GAMMA = 0.99
LAMBDA = 0.95
EPSILON = 0.1
EPOCHS = 50
N_OPTIMIZER_STEPS = 100
PRINT_EVERY_OPTIMIZER_STEP = 20
BATCH_TRAJECTORIES = 32
def policy_and_value_net(rng_key,
                         batch_observations_shape,
                         observations_dtype,
                         n_actions,
                         bottom_layers_fn=(),
                         two_towers=True):
  """Builds and initializes the combined policy-and-value network.

  The policy head emits action log-probs (Dense(n_actions) + LogSoftmax);
  the value head emits a scalar (Dense(1)).

  Args:
    rng_key: PRNG key for parameter initialization.
    batch_observations_shape: shape used to initialize the network.
    observations_dtype: dtype of the observations.
    n_actions: number of discrete actions.
    bottom_layers_fn: callable returning the shared/bottom layer stack.
    two_towers: if True, each head gets its own copy of the bottom layers;
      otherwise one bottom stack is shared by both heads.

  Returns:
    Tuple (params, net) of initialized parameters and the tl.Model.
  """
  if two_towers:
    # Duplicate the input and run two independent towers in parallel.
    layers = [
        tl.Dup(),
        tl.Parallel(
            [bottom_layers_fn(), tl.Dense(n_actions), tl.LogSoftmax()],
            [bottom_layers_fn(), tl.Dense(1)],
        )
    ]
  else:
    # Shared bottom stack, then split into the two heads.
    layers = [
        bottom_layers_fn(),
        tl.Dup(),
        tl.Parallel(
            [tl.Dense(n_actions), tl.LogSoftmax()],
            [tl.Dense(1)],
        )
    ]
  net = tl.Model(layers)
  params = net.initialize(batch_observations_shape, observations_dtype, rng_key)
  return params, net
def optimizer_fn(net_params, step_size=1e-3):
  """Constructs an Adam optimizer over `net_params`.

  Returns:
    Tuple (opt_state, opt_update, get_params): opt_state packs the params
    together with the optimizer slots; opt_update(i, grads, state) applies one
    update; get_params extracts the params from an opt_state.
  """
  opt = trax_opt.Adam(step_size=step_size, b1=0.9, b2=0.999, eps=1e-08)
  opt_init = lambda x: (x, opt.tree_init(x))
  opt_update = lambda i, g, s: opt.tree_update(i, g, s[0], s[1])
  get_params = lambda x: x[0]
  opt_state = opt_init(net_params)
  return opt_state, opt_update, get_params
def collect_trajectories(env,
                         policy_fn,
                         n_trajectories=1,
                         policy=env_problem_utils.GUMBEL_SAMPLING,
                         max_timestep=None,
                         epsilon=0.1,
                         reset=True,
                         len_history_for_policy=32,
                         rng=None):
  """Plays the env with the given policy and returns collected trajectories.

  Args:
    env: an EnvProblem to play in.
    policy_fn: function producing policy predictions for observations.
    n_trajectories: number of trajectories to collect.
    policy: sampling mode passed to play_env_problem_with_policy.
    max_timestep: optional cap on trajectory length.
    epsilon: exploration parameter passed through as `eps`.
    reset: whether to reset the env before playing.
    len_history_for_policy: how much history to feed the policy.
    rng: PRNG key.

  Returns:
    Tuple (trajectories, n_done, timing_info) where each trajectory is
    (observation, action, processed_reward, infos).
  """
  assert isinstance(env, env_problem.EnvProblem)
  trajs, n_done, timing_info = env_problem_utils.play_env_problem_with_policy(
      env,
      policy_fn,
      num_trajectories=n_trajectories,
      max_timestep=max_timestep,
      policy_sampling=policy,
      eps=epsilon,
      reset=reset,
      len_history_for_policy=len_history_for_policy,
      rng=rng)
  # t is the return value of Trajectory.as_numpy, so:
  # (observation, action, processed_reward, raw_reward, infos)
  return [(t[0], t[1], t[2], t[4]) for t in trajs], n_done, timing_info
# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
  """Returns the zero scalar used to pad tensors of the given dtype."""
  if dtype == np.uint8:
    return np.uint8(0)
  if dtype == np.uint16:
    return np.uint16(0)
  if dtype in (np.float32, np.float64):
    return 0.0
  # Default: plain integer zero for any other dtype.
  return 0
# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
  """Pads trajectories in time to a bucketed length (a multiple of boundary).

  Args:
    trajectories: list of (observation, action, reward, info) tuples, where
      the observation sequence is presumably one time-step longer than the
      action/reward sequences (num_to_pad is computed from the observation
      length with a +1 offset) -- TODO confirm against collect_trajectories.
    boundary: int; the padded length is the smallest multiple of `boundary`
      that covers the longest reward sequence.

  Returns:
    Tuple (padded_lengths, reward_masks, padded_observations, padded_actions,
    padded_rewards, padded_infos) with everything stacked along a new batch
    axis; reward_masks is 1 at real time-steps and 0 at padding.
  """
  # Let's compute max(t) over all trajectories.
  t_max = max(r.shape[0] for (_, _, r, _) in trajectories)
  boundary = int(boundary)
  bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
  padded_observations = []
  padded_actions = []
  padded_rewards = []
  padded_infos = collections.defaultdict(list)
  padded_lengths = []
  reward_masks = []
  for (o, a, r, i) in trajectories:
    # Observations carry one extra time-step, hence bucket_length + 1.
    num_to_pad = bucket_length + 1 - o.shape[0]
    padded_lengths.append(num_to_pad)
    if num_to_pad == 0:
      # Already at the bucket length: nothing to pad.
      padded_observations.append(o)
      padded_actions.append(a)
      padded_rewards.append(r)
      reward_masks.append(onp.ones_like(r, dtype=np.int32))
      if i:
        for k, v in i.items():
          padded_infos[k].append(v)
      continue
    # Pad only the leading (time) axis of the observations.
    padding_config = tuple([(0, num_to_pad, 0)] + [(0, 0, 0)] * (o.ndim - 1))
    padding_value = get_padding_value(o.dtype)
    action_padding_value = get_padding_value(a.dtype)
    reward_padding_value = get_padding_value(r.dtype)
    padded_obs = lax.pad(o, padding_value, padding_config)
    padded_observations.append(padded_obs)
    assert a.ndim == 1 and r.ndim == 1
    padding_config = ((0, num_to_pad, 0),)
    padded_action = lax.pad(a, action_padding_value, padding_config)
    padded_actions.append(padded_action)
    padded_reward = lax.pad(r, reward_padding_value, padding_config)
    padded_rewards.append(padded_reward)
    # The mask is 1 on the original time-steps, 0 on the padding.
    reward_mask = onp.ones_like(r, dtype=np.int32)
    reward_masks.append(lax.pad(reward_mask, 0, padding_config))
    if i:
      for k, v in i.items():
        padding_config = [(0, num_to_pad, 0)] + [(0, 0, 0)] * (v.ndim - 1)
        padded_infos[k].append(lax.pad(v, 0.0, tuple(padding_config)))
  stacked_padded_infos = None
  if padded_infos:
    stacked_padded_infos = {k: np.stack(v) for k, v in padded_infos.items()}
  return padded_lengths, np.stack(reward_masks), np.stack(
      padded_observations), np.stack(padded_actions), np.stack(
          padded_rewards), stacked_padded_infos
def rewards_to_go(rewards, mask, gamma=0.99):
  """Computes discounted rewards-to-go: r2g[t] = r[t] + gamma * r2g[t+1].

  Args:
    rewards: (B, T) rewards.
    mask: (B, T) 1 at real time-steps, 0 at padding.
    gamma: discount factor.

  Returns:
    (B, T) array of discounted rewards-to-go.
  """
  B, T = rewards.shape  # pylint: disable=invalid-name
  masked_rewards = rewards * mask  # (B, T)

  # The equivalent forward recurrence, r2g[t+1] = (r2g[t] - r[t]) / gamma,
  # overflows for long sequences: r2g[t] - r[t] > 0
  # and gamma < 1.0, so the division keeps increasing.
  #
  # So we just run the recurrence in reverse, i.e.
  #
  #   r2g[t] = r[t] + (gamma * r2g[t+1])
  #
  # This is much better, but might have lost updates since the (small) rewards
  # at earlier time-steps may get added to a (very?) large sum.

  # Compute r2g_{T-1} at the start and then compute backwards in time.
  r2gs = [masked_rewards[:, -1]]

  # Go from T-2 down to 0.
  for t in reversed(range(T - 1)):
    r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))

  # The list should have length T.
  assert T == len(r2gs)

  # First we stack them in the correct way to make it (B, T), but these are
  # still from newest (T-1) to oldest (0), so then we flip it on time axis.
  return np.flip(np.stack(r2gs, axis=1), axis=1)
@jit
def value_loss_given_predictions(value_prediction,
                                 rewards,
                                 reward_mask,
                                 gamma=0.99,
                                 epsilon=0.2,
                                 value_prediction_old=None):
  """Computes the value loss given the predictions of the value function.

  Mean squared error between predicted values and discounted rewards-to-go,
  optionally using the clipped value objective when `value_prediction_old`
  is provided.

  Args:
    value_prediction: (B, T+1, 1) predicted values.
    rewards: (B, T) rewards.
    reward_mask: (B, T) padding mask.
    gamma: discount used for the rewards-to-go targets.
    epsilon: clip radius for the clipped value objective.
    value_prediction_old: optional (B, T+1, 1) old predictions for clipping.

  Returns:
    Scalar loss averaged over non-masked time-steps.
  """
  B, T = rewards.shape  # pylint: disable=invalid-name
  assert (B, T) == reward_mask.shape
  assert (B, T + 1, 1) == value_prediction.shape

  value_prediction = np.squeeze(value_prediction, axis=2)  # (B, T+1)
  value_prediction = value_prediction[:, :-1] * reward_mask  # (B, T)
  r2g = rewards_to_go(rewards, reward_mask, gamma=gamma)  # (B, T)
  loss = (value_prediction - r2g)**2

  # From the baselines implementation.
  if value_prediction_old is not None:
    value_prediction_old = np.squeeze(value_prediction_old, axis=2)  # (B, T+1)
    value_prediction_old = value_prediction_old[:, :-1] * reward_mask  # (B, T)

    # Pessimistic (max) of the clipped and unclipped squared errors.
    v_clipped = value_prediction_old + np.clip(
        value_prediction - value_prediction_old, -epsilon, epsilon)
    v_clipped_loss = (v_clipped - r2g)**2
    loss = np.maximum(v_clipped_loss, loss)

  # Take an average on only the points where mask != 0.
  return np.sum(loss) / np.sum(reward_mask)
def deltas(predicted_values, rewards, mask, gamma=0.99):
  """One-step TD errors: (r[t] + gamma * V[t+1] - V[t]) * mask.

  Args:
    predicted_values: (B, T+1) value estimates.
    rewards: (B, T) rewards.
    mask: (B, T) padding mask.
    gamma: discount factor.

  Returns:
    (B, T) TD deltas, zeroed at padded time-steps.
  """
  v_t = predicted_values[:, :-1]   # V(s_t), shape (B, T)
  v_tp1 = predicted_values[:, 1:]  # V(s_{t+1}), shape (B, T)
  return (rewards + gamma * v_tp1 - v_t) * mask
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
  """Generalized advantage estimation.

  GAE(t) = sum_l (lambda_ * gamma)^l * delta[t + l]; computed by reusing
  rewards_to_go with discount lambda_ * gamma over the TD deltas.
  """
  return rewards_to_go(td_deltas, mask, lambda_ * gamma)
def chosen_probabs(probab_observations, actions):
  """Picks out the log-probabilities of the actions actually taken.

  Args:
    probab_observations: (B, T+1, A) log-probabilities over actions.
    actions: (B, T) integer actions.

  Returns:
    (B, T) log-probabilities of the chosen actions.
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == probab_observations.shape[:2]
  batch_index = np.arange(B)[:, None]  # (B, 1); broadcasts against (T,)
  time_index = np.arange(T)
  return probab_observations[batch_index, time_index, actions]
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
  """Computes the probability ratios between the new and old policies.

  Args:
    p_new: (B, T+1, A) log-probs under the new policy.
    p_old: (B, T+1, A) log-probs under the old policy.
    actions: (B, T) actions taken.
    reward_mask: (B, T) padding mask.

  Returns:
    (B, T) ratios exp(logp_new - logp_old), zeroed at padded time-steps.
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == p_old.shape[:2]
  assert (B, T + 1) == p_new.shape[:2]

  logp_old = chosen_probabs(p_old, actions)
  logp_new = chosen_probabs(p_new, actions)

  assert (B, T) == logp_old.shape
  assert (B, T) == logp_new.shape

  # Since these are log-probabilities, we just subtract them.
  probab_ratios = np.exp(logp_new - logp_old) * reward_mask
  assert (B, T) == probab_ratios.shape
  return probab_ratios
def clipped_probab_ratios(probab_ratios, epsilon=0.2):
  """Clips the ratios to the PPO trust region [1 - epsilon, 1 + epsilon]."""
  return np.clip(probab_ratios, 1 - epsilon, 1 + epsilon)
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
  """PPO clipped surrogate: min(ratio * A, clipped_ratio * A), masked."""
  return np.minimum(
      probab_ratios * advantages,
      clipped_probab_ratios(probab_ratios, epsilon=epsilon) *
      advantages) * reward_mask
@jit
def ppo_loss_given_predictions(log_probab_actions_new,
                               log_probab_actions_old,
                               value_predictions_old,
                               padded_actions,
                               padded_rewards,
                               reward_mask,
                               gamma=0.99,
                               lambda_=0.95,
                               epsilon=0.2):
  """PPO objective, with an eventual minus sign, given predictions.

  Computes GAE advantages from the old value predictions, normalizes them,
  forms clipped probability-ratio surrogates and returns the negated average
  objective over non-masked time-steps.
  """
  B, T = padded_rewards.shape  # pylint: disable=invalid-name
  assert (B, T) == padded_actions.shape
  assert (B, T) == reward_mask.shape

  _, _, A = log_probab_actions_old.shape  # pylint: disable=invalid-name
  assert (B, T + 1, 1) == value_predictions_old.shape
  assert (B, T + 1, A) == log_probab_actions_old.shape
  assert (B, T + 1, A) == log_probab_actions_new.shape

  # (B, T)
  td_deltas = deltas(
      np.squeeze(value_predictions_old, axis=2),  # (B, T+1)
      padded_rewards,
      reward_mask,
      gamma=gamma)

  # (B, T)
  advantages = gae_advantages(
      td_deltas, reward_mask, lambda_=lambda_, gamma=gamma)

  # Normalize the advantages.
  advantages = (advantages - np.mean(advantages)) / np.std(advantages)

  # (B, T)
  ratios = compute_probab_ratios(log_probab_actions_new, log_probab_actions_old,
                                 padded_actions, reward_mask)
  assert (B, T) == ratios.shape

  # (B, T)
  objective = clipped_objective(
      ratios, advantages, reward_mask, epsilon=epsilon)
  assert (B, T) == objective.shape

  # ()
  average_objective = np.sum(objective) / np.sum(reward_mask)

  # Loss is negative objective.
  return -average_objective
@jit
def combined_loss_given_predictions(log_probab_actions_new,
                                    log_probab_actions_old,
                                    value_prediction_new,
                                    value_prediction_old,
                                    padded_actions,
                                    padded_rewards,
                                    reward_mask,
                                    gamma=0.99,
                                    lambda_=0.95,
                                    epsilon=0.2,
                                    c1=1.0,
                                    c2=0.01):
  """Computes the combined (clipped loss + value loss) given predictions.

  Returns:
    Tuple (combined_loss, ppo_loss, value_loss, entropy_bonus) where
    combined_loss = ppo_loss + c1 * value_loss - c2 * entropy_bonus.
  """
  loss_value = value_loss_given_predictions(
      value_prediction_new,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      value_prediction_old=value_prediction_old,
      epsilon=epsilon)
  loss_ppo = ppo_loss_given_predictions(
      log_probab_actions_new,
      log_probab_actions_old,
      value_prediction_old,
      padded_actions,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      lambda_=lambda_,
      epsilon=epsilon)
  entropy_bonus = masked_entropy(log_probab_actions_new, reward_mask)
  return (loss_ppo + (c1 * loss_value) - (c2 * entropy_bonus), loss_ppo,
          loss_value, entropy_bonus)
@functools.partial(jit, static_argnums=(3,))
def combined_loss(new_params,
                  log_probab_actions_old,
                  value_predictions_old,
                  policy_and_value_net_apply,
                  padded_observations,
                  padded_actions,
                  padded_rewards,
                  reward_mask,
                  gamma=0.99,
                  lambda_=0.95,
                  epsilon=0.2,
                  c1=1.0,
                  c2=0.01,
                  rng=None):
  """Computes the combined (clipped loss + value loss) given observations.

  Runs the network on `padded_observations` with `new_params` and delegates
  to `combined_loss_given_predictions`; the apply function is static under
  `jit` (static_argnums=(3,)).
  """
  log_probab_actions_new, value_predictions_new = policy_and_value_net_apply(
      padded_observations, new_params, rng=rng)

  # (combined_loss, ppo_loss, value_loss, entropy_bonus)
  return combined_loss_given_predictions(
      log_probab_actions_new,
      log_probab_actions_old,
      value_predictions_new,
      value_predictions_old,
      padded_actions,
      padded_rewards,
      reward_mask,
      gamma=gamma,
      lambda_=lambda_,
      epsilon=epsilon,
      c1=c1,
      c2=c2)
@functools.partial(jit, static_argnums=(2, 3, 4))
def policy_and_value_opt_step(i,
                              opt_state,
                              opt_update,
                              get_params,
                              policy_and_value_net_apply,
                              log_probab_actions_old,
                              value_predictions_old,
                              padded_observations,
                              padded_actions,
                              padded_rewards,
                              reward_mask,
                              c1=1.0,
                              c2=0.01,
                              gamma=0.99,
                              lambda_=0.95,
                              epsilon=0.1,
                              rng=None):
  """Policy and Value optimizer step: one gradient update on the combined loss.

  `opt_update`, `get_params` and `policy_and_value_net_apply` are static
  arguments under `jit`. Returns the updated optimizer state.
  """
  # Combined loss function given the new params.
  def policy_and_value_loss(params):
    """Returns the combined loss given just parameters."""
    (loss, _, _, _) = combined_loss(
        params,
        log_probab_actions_old,
        value_predictions_old,
        policy_and_value_net_apply,
        padded_observations,
        padded_actions,
        padded_rewards,
        reward_mask,
        c1=c1,
        c2=c2,
        gamma=gamma,
        lambda_=lambda_,
        epsilon=epsilon,
        rng=rng)
    return loss

  new_params = get_params(opt_state)
  g = grad(policy_and_value_loss)(new_params)
  # TODO(afrozm): Maybe clip gradients?
  return opt_update(i, g, opt_state)
def get_time(t1, t2=None):
  """Returns the elapsed time from `t1` to `t2` (now, if omitted) in msec,
  rounded to two decimal places."""
  end = time.time() if t2 is None else t2
  return round((end - t1) * 1000, 2)
def approximate_kl(log_prob_new, log_prob_old, mask):
  """Approximates the KL divergence of the old policy from the new one.

  Averages `log_prob_old - log_prob_new` over the unmasked time-steps. The
  inputs carry one extra time-step relative to the (B, T) mask, so the last
  step is dropped before masking.
  """
  # Per-step log-prob difference, trimmed to (B, T, ...) to match the mask.
  log_ratio = (log_prob_old - log_prob_new)[:, :-1]
  # Zero out the padded time-steps; mask broadcasts as (B, T, 1).
  log_ratio = log_ratio * mask[:, :, np.newaxis]
  # Mean over the valid (unmasked) part only.
  return np.sum(log_ratio) / np.sum(mask)
def masked_entropy(log_probs, mask):
  """Computes the entropy of `log_probs` averaged over unmasked time-steps.

  `log_probs` has one extra time-step relative to the (B, T) mask, so the
  last step is dropped. Returns -(sum(p * log p) / sum(mask)).
  """
  # Trim the extra final time-step and zero out the padded positions;
  # mask broadcasts as (B, T, 1).
  valid_log_probs = log_probs[:, :-1] * mask[:, :, np.newaxis]
  # Probabilities for the valid positions only.
  probs = np.exp(valid_log_probs) * mask[:, :, np.newaxis]
  # Average over the non-masked part and negate.
  return -(np.sum(valid_log_probs * probs) / np.sum(mask))
def evaluate_policy(eval_env,
                    get_predictions,
                    temperatures,
                    max_timestep=20000,
                    n_evals=1,
                    rng=None,
                    len_history_for_policy=32):
  """Plays `eval_env` with the current policy and aggregates reward stats.

  For each of `n_evals` evaluation RNGs and each sampling temperature, plays
  one batch of episodes and records the per-trajectory reward sums — both the
  processed rewards (traj[2]) and the raw rewards (traj[3]).

  Returns:
    {"processed": stats, "raw": stats}, where stats maps each temperature to
    {"mean": ..., "std": ...} computed over the collected reward sums.
  """
  processed_reward_sums = collections.defaultdict(list)
  raw_reward_sums = collections.defaultdict(list)
  for eval_rng in jax_random.split(rng, num=n_evals):
    for temperature in temperatures:
      trajs, _, _ = env_problem_utils.play_env_problem_with_policy(
          eval_env,
          get_predictions,
          num_trajectories=eval_env.batch_size,
          max_timestep=max_timestep,
          reset=True,
          policy_sampling=env_problem_utils.GUMBEL_SAMPLING,
          temperature=temperature,
          rng=eval_rng,
          len_history_for_policy=len_history_for_policy)
      for traj in trajs:
        processed_reward_sums[temperature].append(sum(traj[2]))
        raw_reward_sums[temperature].append(sum(traj[3]))

  def _mean_and_std(reward_sums_by_temperature):
    # Mean / standard deviation of the reward sums at each temperature.
    return {
        temperature: {"mean": onp.mean(sums), "std": onp.std(sums)}
        for (temperature, sums) in reward_sums_by_temperature.items()
    }

  return {
      "processed": _mean_and_std(processed_reward_sums),
      "raw": _mean_and_std(raw_reward_sums),
  }
def maybe_restore_params(output_dir, policy_and_value_net_params):
  """Tries to restore the latest model checkpoint from `output_dir`.

  Scans for files matching model-??????.pkl and tries them newest first,
  falling back to older checkpoints on a short/truncated pickle.

  Args:
    output_dir: directory in which checkpoints were saved.
    policy_and_value_net_params: params to return unchanged when nothing can
      be restored.

  Returns:
    A 3-tuple (restored, params, iteration): `restored` is True iff a
    checkpoint was loaded, `params` are the (possibly restored) network
    params, and `iteration` is the epoch number parsed from the file name
    (0 when nothing was restored).
  """
  model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
  for model_file in reversed(sorted(model_files)):
    logging.info("Trying to restore model from %s", model_file)
    try:
      with gfile.GFile(model_file, "rb") as f:
        policy_and_value_net_params = pickle.load(f)
      # Parse the epoch from the file name, e.g. model-000123.pkl -> 123.
      # NOTE: `filter` returns an iterator on Python 3, so it must be joined
      # back into a string before `int()` — `int(filter(...))` raises
      # TypeError on Python 3.
      model_file_basename = os.path.basename(model_file)  # model-??????.pkl
      i = int("".join(filter(str.isdigit, model_file_basename)))
      return True, policy_and_value_net_params, i
    except EOFError as e:
      logging.error("Unable to load model from: %s with %s", model_file, e)
      # Try an older checkpoint.
      continue
  return False, policy_and_value_net_params, 0
def write_eval_reward_summaries(reward_stats_by_mode, summary_writer, epoch):
  """Writes evaluation reward statistics to `summary_writer` and the log.

  `reward_stats_by_mode` maps a reward mode (e.g. "processed" / "raw") to a
  mapping from sampling temperature to {"mean": ..., "std": ...}; one scalar
  summary is emitted per (mode, temperature, stat) triple at step `epoch`.
  """
  for reward_mode, stats_by_temperature in reward_stats_by_mode.items():
    for temperature, stats in stats_by_temperature.items():
      for stat_name, stat_value in stats.items():
        tag = ("eval/{reward_mode}_reward_{stat_name}/"
               "temperature_{temperature}").format(reward_mode=reward_mode,
                                                   stat_name=stat_name,
                                                   temperature=temperature)
        summary_writer.scalar(tag, stat_value, step=epoch)
      logging.info("Epoch [% 6d] Policy Evaluation (%s reward) "
                   "[temperature %.2f] = %10.2f (+/- %.2f)",
                   epoch, reward_mode, temperature,
                   stats["mean"], stats["std"])
@gin.configurable(blacklist=["output_dir"])
def training_loop(
    env,
    eval_env,
    env_name,
    policy_and_value_net_fn,
    policy_and_value_optimizer_fn,
    output_dir,
    epochs=EPOCHS,
    n_optimizer_steps=N_OPTIMIZER_STEPS,
    print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
    target_kl=0.01,
    boundary=20,
    max_timestep=None,
    max_timestep_eval=20000,
    random_seed=None,
    gamma=GAMMA,
    lambda_=LAMBDA,
    epsilon=EPSILON,
    c1=1.0,
    c2=0.01,
    eval_every_n=1000,
    done_frac_for_policy_save=0.5,
    enable_early_stopping=True,
    n_evals=1,
    len_history_for_policy=4,
    eval_temperatures=(1.0, 0.5),
):
  """Runs the main PPO training loop.

  Each epoch: (optionally) evaluates the policy, collects a batch of
  trajectories from `env`, pads them to a common length, computes the
  combined PPO/value/entropy loss, runs up to `n_optimizer_steps` optimizer
  steps (with early stopping once the approximate KL between the new and old
  policies exceeds 1.5 * `target_kl`), periodically checkpoints the network
  params to `output_dir`, and writes scalar/timing summaries.

  Args:
    env: training env (batched; must have a Discrete action space).
    eval_env: env used for policy evaluation.
    env_name: name logged into the summary writers.
    policy_and_value_net_fn: (rng, obs_shape, obs_dtype, n_actions) ->
      (params, apply_fn) constructor for the policy-and-value network.
    policy_and_value_optimizer_fn: params -> (opt_state, opt_update,
      get_params) optimizer constructor.
    output_dir: directory for checkpoints and summaries.
    epochs: total number of epochs to run.
    n_optimizer_steps: optimizer steps per epoch (before early stopping).
    print_every_optimizer_steps: how often to recompute/log the loss inside
      the optimizer loop.
    target_kl: early-stopping threshold scale for the approximate KL.
    boundary: padding boundary for `pad_trajectories`.
    max_timestep: trajectory-collection horizon (None for unlimited).
    max_timestep_eval: evaluation horizon.
    random_seed: seed for the global PRNG.
    gamma, lambda_, epsilon, c1, c2: loss hyperparameters, forwarded to
      `combined_loss` / `policy_and_value_opt_step`.
    eval_every_n: evaluate (and possibly save) every `eval_every_n` epochs.
    done_frac_for_policy_save: fraction of the batch that must have finished
      episodes before a checkpoint is considered.
    enable_early_stopping: whether to early-stop the optimizer loop on KL.
    n_evals: number of evaluation repetitions.
    len_history_for_policy: history length fed to the policy.
    eval_temperatures: sampling temperatures used during evaluation.
  """
  gfile.makedirs(output_dir)

  # Create summary writers and history.
  train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
  timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
  eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))

  train_sw.text("env_name", env_name)
  timing_sw.text("env_name", env_name)
  eval_sw.text("env_name", env_name)

  jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)

  # Batch Observations Shape = [1, 1] + OBS, because we will eventually call
  # policy and value networks on shape [B, T] + OBS.
  batch_observations_shape = (1, 1) + env.observation_space.shape
  observations_dtype = env.observation_space.dtype

  assert isinstance(env.action_space, gym.spaces.Discrete)
  n_actions = env.action_space.n

  jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)

  # Initialize the policy and value network.
  policy_and_value_net_params, policy_and_value_net_apply = (
      policy_and_value_net_fn(key1, batch_observations_shape,
                              observations_dtype, n_actions))

  # Maybe restore the policy params. If there is nothing to restore, then
  # iteration = 0 and policy_and_value_net_params are returned as is.
  restore, policy_and_value_net_params, iteration = (
      maybe_restore_params(output_dir, policy_and_value_net_params))

  if restore:
    logging.info("Restored parameters from iteration [%d]", iteration)
    # We should start from the next iteration.
    iteration += 1

  policy_and_value_net_apply = jit(policy_and_value_net_apply)

  # Initialize the optimizers.
  policy_and_value_optimizer = (
      policy_and_value_optimizer_fn(policy_and_value_net_params))
  (policy_and_value_opt_state, policy_and_value_opt_update,
   policy_and_value_get_params) = policy_and_value_optimizer

  # Count of trajectories that ended ("done") since the last checkpoint.
  n_trajectories_done = 0
  last_saved_at = 0

  logging.info("Starting the PPO training loop.")
  for i in range(iteration, epochs):
    epoch_start_time = time.time()

    # Params we'll use to collect the trajectories.
    policy_and_value_net_params = policy_and_value_get_params(
        policy_and_value_opt_state)

    # Closure over the current params; returns (log_probs, value_preds, key).
    def get_predictions(observations, rng=None):
      key, key1 = jax_random.split(rng, num=2)
      log_probs, value_preds = policy_and_value_net_apply(
          observations, policy_and_value_net_params, rng=key1)
      return log_probs, value_preds, key

    # Evaluate the policy every `eval_every_n` epochs and on the last epoch.
    policy_eval_start_time = time.time()
    if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
      jax_rng_key, key = jax_random.split(jax_rng_key, num=2)
      logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)
      reward_stats = evaluate_policy(
          eval_env,
          get_predictions,
          temperatures=eval_temperatures,
          max_timestep=max_timestep_eval,
          n_evals=n_evals,
          len_history_for_policy=len_history_for_policy,
          rng=key)
      write_eval_reward_summaries(reward_stats, eval_sw, epoch=i)
    policy_eval_time = get_time(policy_eval_start_time)

    # Collect one batch of trajectories with the current policy; exploration
    # epsilon decays as 10 / (i + 10).
    trajectory_collection_start_time = time.time()
    logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
    jax_rng_key, key = jax_random.split(jax_rng_key)
    trajs, n_done, timing_info = collect_trajectories(
        env,
        policy_fn=get_predictions,
        n_trajectories=env.batch_size,
        max_timestep=max_timestep,
        rng=key,
        len_history_for_policy=len_history_for_policy,
        reset=(i == 0) or restore,
        epsilon=(10.0 / (i + 10.0)))
    trajectory_collection_time = get_time(trajectory_collection_start_time)

    logging.vlog(1, "Collecting trajectories took %0.2f msec.",
                 trajectory_collection_time)

    # traj[2] holds the (processed) rewards for a trajectory.
    avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
    max_reward = max(np.sum(traj[2]) for traj in trajs)
    min_reward = min(np.sum(traj[2]) for traj in trajs)

    train_sw.scalar("train/reward_mean_truncated", avg_reward, step=i)

    logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
                 avg_reward, max_reward, min_reward,
                 [float(np.sum(traj[2])) for traj in trajs])

    logging.vlog(1,
                 "Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
                 float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
                 max(len(traj[0]) for traj in trajs),
                 min(len(traj[0]) for traj in trajs))
    logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])

    # Pad all trajectories to a common length (multiple of `boundary`).
    padding_start_time = time.time()
    (_, reward_mask, padded_observations, padded_actions,
     padded_rewards, padded_infos) = pad_trajectories(
         trajs, boundary=boundary)
    padding_time = get_time(padding_start_time)

    logging.vlog(1, "Padding trajectories took %0.2f msec.",
                 get_time(padding_start_time))
    logging.vlog(1, "Padded Observations' shape [%s]",
                 str(padded_observations.shape))
    logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
    logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))

    # Some assertions: observations carry one extra time-step (T + 1).
    B, T = padded_actions.shape  # pylint: disable=invalid-name
    assert (B, T) == padded_rewards.shape
    assert (B, T) == reward_mask.shape
    assert (B, T + 1) == padded_observations.shape[:2]
    assert (B, T + 1) + env.observation_space.shape == padded_observations.shape

    log_prob_recompute_start_time = time.time()
    assert ("log_prob_actions" in padded_infos and
            "value_predictions" in padded_infos)
    # These are the actual log-probabs and value predictions seen while picking
    # the actions.
    actual_log_probabs_traj = padded_infos["log_prob_actions"]
    actual_value_predictions_traj = padded_infos["value_predictions"]

    assert (B, T) == actual_log_probabs_traj.shape[:2]
    A = actual_log_probabs_traj.shape[2]  # pylint: disable=invalid-name
    assert (B, T, 1) == actual_value_predictions_traj.shape

    # TODO(afrozm): log-probabs doesn't need to be (B, T+1, A) it can do with
    # observation, so we re-calculate for everything, but use the original ones
    # for all but the last time-step.
    jax_rng_key, key = jax_random.split(jax_rng_key)
    log_probabs_traj, value_predictions_traj, _ = get_predictions(
        padded_observations, rng=key)

    assert (B, T + 1, A) == log_probabs_traj.shape
    assert (B, T + 1, 1) == value_predictions_traj.shape

    # Concatenate the last time-step's log-probabs and value predictions to the
    # actual (collected) ones, so only the extra step is recomputed.
    log_probabs_traj = np.concatenate(
        (actual_log_probabs_traj, log_probabs_traj[:, -1:, :]), axis=1)
    value_predictions_traj = np.concatenate(
        (actual_value_predictions_traj, value_predictions_traj[:, -1:, :]),
        axis=1)

    log_prob_recompute_time = get_time(log_prob_recompute_start_time)

    # The clipping epsilon is currently constant across epochs.
    epsilon_schedule = epsilon

    # Compute and log the loss before optimizing, for comparison below.
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(2, "Starting to compute P&V loss.")
    loss_compute_start_time = time.time()
    cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
        combined_loss(
            policy_and_value_net_params,
            log_probabs_traj,
            value_predictions_traj,
            policy_and_value_net_apply,
            padded_observations,
            padded_actions,
            padded_rewards,
            reward_mask,
            gamma=gamma,
            lambda_=lambda_,
            epsilon=epsilon_schedule,
            c1=c1,
            c2=c2,
            rng=key1))
    loss_compute_time = get_time(loss_compute_start_time)
    logging.vlog(
        1,
        "Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
        cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
        get_time(loss_compute_start_time))

    # Optimizer loop: several gradient steps on the same batch.
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(1, "Policy and Value Optimization")
    optimization_start_time = time.time()
    keys = jax_random.split(key1, num=n_optimizer_steps)
    for j in range(n_optimizer_steps):
      k1, k2, k3 = jax_random.split(keys[j], num=3)
      t = time.time()
      # Update the optimizer state.
      policy_and_value_opt_state = policy_and_value_opt_step(
          j,
          policy_and_value_opt_state,
          policy_and_value_opt_update,
          policy_and_value_get_params,
          policy_and_value_net_apply,
          log_probabs_traj,
          value_predictions_traj,
          padded_observations,
          padded_actions,
          padded_rewards,
          reward_mask,
          c1=c1,
          c2=c2,
          gamma=gamma,
          lambda_=lambda_,
          epsilon=epsilon_schedule,
          rng=k1)

      # Compute the approx KL for early stopping.
      new_policy_and_value_net_params = policy_and_value_get_params(
          policy_and_value_opt_state)

      log_probab_actions_new, _ = policy_and_value_net_apply(
          padded_observations, new_policy_and_value_net_params, rng=k2)

      approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
                                 reward_mask)

      early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
      if early_stopping:
        logging.vlog(
            1, "Early stopping policy and value optimization at iter: %d, "
            "with approx_kl: %0.2f", j, approx_kl)
        # We don't break right away: the loss logging below should still run
        # for this final optimizer iteration.
      t2 = time.time()
      if (((j + 1) % print_every_optimizer_steps == 0) or
          (j == n_optimizer_steps - 1) or early_stopping):
        # Compute and log the loss.
        (loss_combined, loss_ppo, loss_value, entropy_bonus) = (
            combined_loss(
                new_policy_and_value_net_params,
                log_probabs_traj,
                value_predictions_traj,
                policy_and_value_net_apply,
                padded_observations,
                padded_actions,
                padded_rewards,
                reward_mask,
                gamma=gamma,
                lambda_=lambda_,
                epsilon=epsilon_schedule,
                c1=c1,
                c2=c2,
                rng=k3))
        logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
                     get_time(t, t2))
        logging.vlog(
            1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
            " [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
            loss_value, loss_ppo, entropy_bonus)

      if early_stopping:
        break

    optimization_time = get_time(optimization_start_time)

    logging.vlog(
        1, "Total Combined Loss reduction [%0.2f]%%",
        (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))

    # Save parameters every time we see the end of at least a fraction of batch
    # number of trajectories that are done (not completed -- completed includes
    # truncated and done).
    # Also don't save too frequently, enforce a minimum gap.
    policy_save_start_time = time.time()
    n_trajectories_done += n_done
    if (((n_trajectories_done >= done_frac_for_policy_save * env.batch_size) and
         (i - last_saved_at > eval_every_n) and
         (((i + 1) % eval_every_n == 0))) or (i == epochs - 1)):
      logging.vlog(1, "Epoch [% 6d] saving model.", i)
      # Glob the existing checkpoints *before* writing the new one, so only
      # older files get removed below.
      old_model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
      params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
      with gfile.GFile(params_file, "wb") as f:
        pickle.dump(policy_and_value_net_params, f)
      # Keep only the latest checkpoint.
      for path in old_model_files:
        gfile.remove(path)
      # Reset the done-trajectory counter until the next checkpoint.
      n_trajectories_done = 0
      last_saved_at = i
    policy_save_time = get_time(policy_save_start_time)

    epoch_time = get_time(epoch_start_time)

    logging.info(
        "Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
        " Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
        max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
        entropy_bonus)

    # Per-phase wall-clock timings (msec) for this epoch.
    timing_dict = {
        "epoch": epoch_time,
        "policy_eval": policy_eval_time,
        "trajectory_collection": trajectory_collection_time,
        "padding": padding_time,
        "log_prob_recompute": log_prob_recompute_time,
        "loss_compute": loss_compute_time,
        "optimization": optimization_time,
        "policy_save": policy_save_time,
    }

    timing_dict.update(timing_info)

    for k, v in timing_dict.items():
      timing_sw.scalar("timing/%s" % k, v, step=i)

    max_key_len = max(len(k) for k in timing_dict)
    timing_info_list = [
        "%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
        for k, v in sorted(timing_dict.items())
    ]
    logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))

    # After the first epoch, the env no longer needs a restore-driven reset.
    restore = False

    # Flush summary writers once in a while.
    if (i + 1) % 1000 == 0 or i == epochs - 1:
      train_sw.flush()
      timing_sw.flush()
      eval_sw.flush()
| true | true |
f727db28729a6cb29c683b37edffca2af1fdd1c2 | 6,875 | py | Python | dashboard/dashboard_build/preprocess.py | alekzonder/catapult | f1017f0c7bd2b766674888d5e88d42fcc61d632c | [
"BSD-3-Clause"
] | null | null | null | dashboard/dashboard_build/preprocess.py | alekzonder/catapult | f1017f0c7bd2b766674888d5e88d42fcc61d632c | [
"BSD-3-Clause"
] | null | null | null | dashboard/dashboard_build/preprocess.py | alekzonder/catapult | f1017f0c7bd2b766674888d5e88d42fcc61d632c | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import contextlib
import logging
import os
import subprocess
import sys
import time
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
@contextlib.contextmanager
def Chdir(path):
  """Context manager that runs its body with `path` as the working directory.

  The original working directory is restored on exit, even if the body (or
  the chdir itself) raises.
  """
  original_cwd = os.getcwd()
  try:
    os.chdir(path)
    yield
  finally:
    os.chdir(original_cwd)
def PackPinpoint(catapult_path, temp_dir, deployment_paths):
  """Bundles the Pinpoint frontend with polymer-bundler into `temp_dir`.

  Sets up symlinks in `temp_dir` mirroring the serving layout, runs the
  polymer bundler over Pinpoint's index.html, and appends the resulting
  `bundled` directory to `deployment_paths`.

  Args:
    catapult_path: path to the catapult checkout root.
    temp_dir: scratch directory; symlinks and the bundler output go here.
    deployment_paths: list of paths to deploy; mutated in place.

  Raises:
    RuntimeError: if the bundler exits with a non-zero status.
  """
  with Chdir(catapult_path):
    # node_runner provides the node binary and node_modules locations.
    _AddToPathIfNeeded(os.path.join(catapult_path, 'common', 'node_runner'))
    from node_runner import node_util
    node_path = node_util.GetNodePath()
    node_modules = node_util.GetNodeModulesPath()

    def PinpointRelativePath(*components):
      # Path of a Pinpoint source file relative to the catapult root.
      return os.path.join('dashboard', 'pinpoint', *components)

    # When packing Pinpoint, we need some extra symlinks in the temporary
    # directory, so we can find the correct elements at bundle time. This is
    # simulating the paths we would be serving as defined in the pinpoint.yaml
    # file.
    os.symlink(
        os.path.join(catapult_path, 'dashboard', 'dashboard', 'pinpoint',
                     'elements'), os.path.join(temp_dir, 'elements'))
    os.symlink(
        os.path.join(catapult_path, 'third_party', 'polymer', 'components'),
        os.path.join(temp_dir, 'components'))
    os.symlink(
        os.path.join(catapult_path, 'third_party', 'd3'),
        os.path.join(temp_dir, 'd3'))

    # We don't yet use any webpack in Pinpoint, so let's use the polymer bundler
    # for now.
    bundler_cmd = [
        node_path,
        os.path.join(node_modules, 'polymer-bundler', 'lib', 'bin',
                     'polymer-bundler.js'),
        '--inline-scripts',
        '--inline-css',
        # Exclude some paths from the bundling (external Google resources).
        '--exclude',
        '//fonts.googleapis.com/*',
        '--exclude',
        '//apis.google.com/*',
        # Then set up the rest of the options for the bundler.
        '--out-dir',
        os.path.join(temp_dir, 'bundled'),
        '--root',
        temp_dir,
        '--treeshake',
    ]

    # Change to the temporary directory, and run the bundler from there, so
    # the symlinks created above resolve the bundle's imports.
    with Chdir(temp_dir):
      bundler_cmd.extend(
          ['--in-file',
           PinpointRelativePath('index', 'index.html')])

      logging.info('Bundler Command:\n%s', ' '.join(bundler_cmd))

      proc = subprocess.Popen(
          bundler_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      _, bundler_err = proc.communicate()
      if proc.returncode != 0:
        print('ERROR from bundler:')
        print(bundler_err)
        raise RuntimeError('Vulcanize failed with exit code', proc.returncode)

      deployment_paths.append(os.path.join(temp_dir, 'bundled'))
def PackSPA(catapult_path, temp_dir, deployment_paths):
  """Webpacks, vulcanizes and minifies the V2SPA dashboard into `temp_dir`.

  No-ops unless app.yaml serves webpack/service-worker.js. On success,
  appends the webpack output directory and the vulcanized index file to
  `deployment_paths` (mutated in place).

  Args:
    catapult_path: path to the catapult checkout root.
    temp_dir: scratch directory for the webpack and vulcanize outputs.
    deployment_paths: list of paths to deploy; mutated in place.

  Raises:
    RuntimeError: if webpack or vulcanize exits with a non-zero status.
  """
  with Chdir(catapult_path):
    dashboard_path = os.path.join(catapult_path, 'dashboard')
    app_yaml = os.path.join(dashboard_path, 'app.yaml')
    # Only webpack if the service-worker is going to be served.
    # (Use a context manager so the file handle is closed deterministically.)
    with open(app_yaml) as f:
      app_yaml_contents = f.read()
    if 'webpack/service-worker.js' not in app_yaml_contents:
      return

    _AddToPathIfNeeded(os.path.join(catapult_path, 'common', 'node_runner'))
    from node_runner import node_util
    node_modules = node_util.GetNodeModulesPath()

    # TODO(crbug.com/918193): Remove this after migrating to lit-element.
    # Patch hydrolysis so its JS parser accepts newer ECMAScript syntax.
    js_parse_filename = os.path.join(node_modules, 'hydrolysis', 'lib',
                                     'ast-utils', 'js-parse.js')
    subprocess.check_output(
        ['sed', '-i', 's/ecmaVersion: 6/ecmaVersion: 9/g', js_parse_filename])

    spa_path = os.path.join(dashboard_path, 'dashboard', 'spa')
    webpack_dir = os.path.join(temp_dir, 'webpack')
    config_filename = os.path.join(spa_path, 'webpack.config.js')
    webpack_command = os.path.join(node_modules, '.bin', 'webpack-command')
    # The webpack config reads its paths from these environment variables.
    os.environ['WEBPACK_OUTPUT_PATH'] = webpack_dir
    os.environ['WEBPACK_NODE_MODULES'] = node_modules
    os.environ['WEBPACK_THIRD_PARTY'] = os.path.join(catapult_path,
                                                     'third_party')
    proc = subprocess.Popen([webpack_command, '--config', config_filename],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    webpack_out, webpack_err = proc.communicate()
    if proc.returncode != 0:
      print('ERROR from webpack:')
      print(webpack_out)
      print(webpack_err)
      raise RuntimeError('Webpack failed with exit code', proc.returncode)

    vulcanize_cmd = [
        os.path.join(node_modules, 'vulcanize', 'bin', 'vulcanize'),
        '--strip-comments',
        '--inline-scripts',
        '--inline-css',
        '--exclude=/index.js',
    ]
    for path in sorted(deployment_paths):
      isdir = os.path.isdir(path) and not path.endswith('/')
      # Some directory names are prefixes of others. Add an explicit slash to
      # prevent confusing vulcanize.
      vulcanize_cmd.append('--redirect')
      vulcanize_cmd.append('/' + os.path.basename(path) +
                           ('/' if isdir else '') + '|' +
                           path[len(catapult_path) + 1:])
    vulcanize_cmd.append(
        os.path.join('dashboard', 'dashboard', 'spa', 'index.html'))
    proc = subprocess.Popen(
        vulcanize_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    html, vulcanize_err = proc.communicate()
    if proc.returncode != 0:
      print('ERROR from vulcanize:')
      print(vulcanize_err)
      raise RuntimeError('Vulcanize failed with exit code', proc.returncode)

    # Write the html to a temp file, then minify it in place.
    # NOTE(review): under Python 3, communicate() returns bytes by default,
    # which would fail this text-mode write; this script assumes Python 2
    # str output -- confirm before migrating.
    vulcanized_index = os.path.join(temp_dir, 'index.vulcanized.html')
    with open(vulcanized_index, 'w') as f:
      f.write(html)
    minify = os.path.join(node_modules, '..', 'minify')
    subprocess.check_output([minify, vulcanized_index])

    packed_index_js_filename = os.path.join(webpack_dir, 'index.js')
    AddTimestamp(packed_index_js_filename)
    minifyjs = os.path.join(node_modules, '..', 'minifyjs')
    subprocess.check_output([minifyjs, packed_index_js_filename])
    sw_js = os.path.join(webpack_dir, 'service-worker.js')
    subprocess.check_output([minifyjs, sw_js])

    deployment_paths.append(webpack_dir)
    deployment_paths.append(vulcanized_index)
def AddTimestamp(js_name):
  """Prepends a VULCANIZED_TIMESTAMP assignment to the given JS file.

  V2SPA displays its version as this timestamp in this format to make it easy
  to check whether a change is visible.

  Args:
    js_name: path of the JS file to rewrite in place.
  """
  now = time.time()
  # Print the timestamp shifted back 7 hours -- presumably to show US
  # Pacific time; confirm the intended timezone.
  print('vulcanized',
        time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now - (60 * 60 * 7))))
  # Read the existing contents first (closing the handle -- the original
  # left it dangling), then rewrite the file with the timestamp prepended.
  with open(js_name) as f:
    js = f.read()
  with open(js_name, 'w') as fp:
    fp.write('window.VULCANIZED_TIMESTAMP=new Date(%d);\n' % (now * 1000))
    fp.write(js)
| 36.184211 | 80 | 0.652945 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import contextlib
import logging
import os
import subprocess
import sys
import time
def _AddToPathIfNeeded(path):
if path not in sys.path:
sys.path.insert(0, path)
@contextlib.contextmanager
def Chdir(path):
pwd = os.getcwd()
try:
yield os.chdir(path)
finally:
os.chdir(pwd)
def PackPinpoint(catapult_path, temp_dir, deployment_paths):
with Chdir(catapult_path):
_AddToPathIfNeeded(os.path.join(catapult_path, 'common', 'node_runner'))
from node_runner import node_util
node_path = node_util.GetNodePath()
node_modules = node_util.GetNodeModulesPath()
def PinpointRelativePath(*components):
return os.path.join('dashboard', 'pinpoint', *components)
os.symlink(
os.path.join(catapult_path, 'dashboard', 'dashboard', 'pinpoint',
'elements'), os.path.join(temp_dir, 'elements'))
os.symlink(
os.path.join(catapult_path, 'third_party', 'polymer', 'components'),
os.path.join(temp_dir, 'components'))
os.symlink(
os.path.join(catapult_path, 'third_party', 'd3'),
os.path.join(temp_dir, 'd3'))
bundler_cmd = [
node_path,
os.path.join(node_modules, 'polymer-bundler', 'lib', 'bin',
'polymer-bundler.js'),
'--inline-scripts',
'--inline-css',
'--exclude',
'//fonts.googleapis.com/*',
'--exclude',
'//apis.google.com/*',
'--out-dir',
os.path.join(temp_dir, 'bundled'),
'--root',
temp_dir,
'--treeshake',
]
with Chdir(temp_dir):
bundler_cmd.extend(
['--in-file',
PinpointRelativePath('index', 'index.html')])
logging.info('Bundler Command:\n%s', ' '.join(bundler_cmd))
proc = subprocess.Popen(
bundler_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, bundler_err = proc.communicate()
if proc.returncode != 0:
print('ERROR from bundler:')
print(bundler_err)
raise RuntimeError('Vulcanize failed with exit code', proc.returncode)
deployment_paths.append(os.path.join(temp_dir, 'bundled'))
def PackSPA(catapult_path, temp_dir, deployment_paths):
with Chdir(catapult_path):
dashboard_path = os.path.join(catapult_path, 'dashboard')
app_yaml = os.path.join(dashboard_path, 'app.yaml')
if 'webpack/service-worker.js' not in open(app_yaml).read():
return
_AddToPathIfNeeded(os.path.join(catapult_path, 'common', 'node_runner'))
from node_runner import node_util
node_modules = node_util.GetNodeModulesPath()
js_parse_filename = os.path.join(node_modules, 'hydrolysis', 'lib',
'ast-utils', 'js-parse.js')
subprocess.check_output(
['sed', '-i', 's/ecmaVersion: 6/ecmaVersion: 9/g', js_parse_filename])
spa_path = os.path.join(dashboard_path, 'dashboard', 'spa')
webpack_dir = os.path.join(temp_dir, 'webpack')
config_filename = os.path.join(spa_path, 'webpack.config.js')
webpack_command = os.path.join(node_modules, '.bin', 'webpack-command')
os.environ['WEBPACK_OUTPUT_PATH'] = webpack_dir
os.environ['WEBPACK_NODE_MODULES'] = node_modules
os.environ['WEBPACK_THIRD_PARTY'] = os.path.join(catapult_path,
'third_party')
proc = subprocess.Popen([webpack_command, '--config', config_filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
webpack_out, webpack_err = proc.communicate()
if proc.returncode != 0:
print('ERROR from webpack:')
print(webpack_out)
print(webpack_err)
raise RuntimeError('Webpack failed with exit code', proc.returncode)
vulcanize_cmd = [
os.path.join(node_modules, 'vulcanize', 'bin', 'vulcanize'),
'--strip-comments',
'--inline-scripts',
'--inline-css',
'--exclude=/index.js',
]
for path in sorted(deployment_paths):
isdir = os.path.isdir(path) and not path.endswith('/')
vulcanize_cmd.append('--redirect')
vulcanize_cmd.append('/' + os.path.basename(path) +
('/' if isdir else '') + '|' +
path[len(catapult_path) + 1:])
vulcanize_cmd.append(
os.path.join('dashboard', 'dashboard', 'spa', 'index.html'))
proc = subprocess.Popen(
vulcanize_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
html, vulcanize_err = proc.communicate()
if proc.returncode != 0:
print('ERROR from vulcanize:')
print(vulcanize_err)
raise RuntimeError('Vulcanize failed with exit code', proc.returncode)
vulcanized_index = os.path.join(temp_dir, 'index.vulcanized.html')
open(vulcanized_index, 'w').write(html)
minify = os.path.join(node_modules, '..', 'minify')
subprocess.check_output([minify, vulcanized_index])
packed_index_js_filename = os.path.join(webpack_dir, 'index.js')
AddTimestamp(packed_index_js_filename)
minifyjs = os.path.join(node_modules, '..', 'minifyjs')
subprocess.check_output([minifyjs, packed_index_js_filename])
sw_js = os.path.join(webpack_dir, 'service-worker.js')
subprocess.check_output([minifyjs, sw_js])
deployment_paths.append(webpack_dir)
deployment_paths.append(vulcanized_index)
def AddTimestamp(js_name):
now = time.time()
print('vulcanized',
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now - (60 * 60 * 7))))
js = open(js_name).read()
with open(js_name, 'w') as fp:
fp.write('window.VULCANIZED_TIMESTAMP=new Date(%d);\n' % (now * 1000))
fp.write(js)
| true | true |
f727dba422b880a9a9bc67a389966d3ac11e1460 | 463 | py | Python | econml/tree/__init__.py | lwschm/EconML | 6e7b107e1f8a7a5922489eb81143db8656ff01af | [
"BSD-3-Clause"
] | 1 | 2021-02-08T22:58:39.000Z | 2021-02-08T22:58:39.000Z | econml/tree/__init__.py | Jimmy-INL/EconML | 3e66b9507b43f8af291009d26186283fa4bb4ced | [
"BSD-3-Clause"
] | null | null | null | econml/tree/__init__.py | Jimmy-INL/EconML | 3e66b9507b43f8af291009d26186283fa4bb4ced | [
"BSD-3-Clause"
] | 1 | 2021-08-20T09:06:42.000Z | 2021-08-20T09:06:42.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Re-export the tree building blocks at package level: impurity criteria,
# node splitters, and the depth-first tree builder / tree container.
from ._criterion import Criterion, RegressionCriterion, MSE
from ._splitter import Splitter, BestSplitter
from ._tree import DepthFirstTreeBuilder
from ._tree import Tree

# Public API of the tree package.
__all__ = ["Tree",
           "Splitter",
           "BestSplitter",
           "DepthFirstTreeBuilder",
           "Criterion",
           "RegressionCriterion",
           "MSE"]
| 28.9375 | 60 | 0.650108 |
from ._criterion import Criterion, RegressionCriterion, MSE
from ._splitter import Splitter, BestSplitter
from ._tree import DepthFirstTreeBuilder
from ._tree import Tree
__all__ = ["Tree",
"Splitter",
"BestSplitter",
"DepthFirstTreeBuilder",
"Criterion",
"RegressionCriterion",
"MSE"]
| true | true |
f727dd0ea24c6f6c7eef4fa81bae3e2d89fcaac3 | 3,250 | py | Python | Results/ResultUniLex.py | viitormiiguel/AnalysisFinancial | 21d19c4eb200655ffd8605d4c38ab280a4552384 | [
"MIT"
] | null | null | null | Results/ResultUniLex.py | viitormiiguel/AnalysisFinancial | 21d19c4eb200655ffd8605d4c38ab280a4552384 | [
"MIT"
] | null | null | null | Results/ResultUniLex.py | viitormiiguel/AnalysisFinancial | 21d19c4eb200655ffd8605d4c38ab280a4552384 | [
"MIT"
] | 2 | 2020-04-30T18:47:05.000Z | 2021-05-24T15:07:41.000Z | import nltk
import csv
import datetime

import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

# One dated sub-folder is produced per scraping run, e.g. <base>/2020-05-01/.
now = datetime.datetime.now()
today = now.strftime("%Y-%m-%d")

# Base directories of the scraped datasets, one per news source.
dInfoMoney = 'C:/Users/vitor/Documents/GetDataset/Infomoney/'
dInvesting = 'C:/Users/vitor/Documents/GetDataset/Investing.com/'
dTrading = 'C:/Users/vitor/Documents/GetDataset/TradingView/'


def _count_polarities(csv_path):
    """Return (positive, neutral, negative) line counts for a polarity CSV.

    A line may mention more than one label; each label it contains is
    counted independently (matching the original per-label `in` checks).
    The file handle is closed via the context manager (the original left
    three handles dangling).
    """
    pos = neu = neg = 0
    with open(csv_path, 'r', encoding='utf8') as handle:
        for line in handle:
            if 'Positivo' in line:
                pos += 1
            if 'Neutro' in line:
                neu += 1
            if 'Negativo' in line:
                neg += 1
    return pos, neu, neg


# NOTE(review): the files read below are the *Pre* (preprocessed) polarity
# variants, while the chart title says "sem Pré-Processamento" (without
# preprocessing) -- confirm which variant is intended.

# Investing.com results.
posInv, neuInv, negInv = _count_polarities(
    dInvesting + today + '/polarityUniLexPre.csv')
print('Investing Pos ', posInv)
print('Investing Neu ', neuInv)
print('Investing Neg ', negInv)

# InfoMoney results.
posInf, neuInf, negInf = _count_polarities(
    dInfoMoney + today + '/polarityUniLexPre.csv')
print('InfoMoney Pos ', posInf)
print('InfoMoney Neu ', neuInf)
print('InfoMoney Neg ', negInf)

# TradingView results.
posTrd, neuTrd, negTrd = _count_polarities(
    dTrading + today + '/polarityUniLexPre.csv')
print('TradingView Pos ', posTrd)
print('TradingView Neu ', neuTrd)
print('TradingView Neg ', negTrd)

# Assemble the per-source counts into a frame for the grouped bar chart.
raw_data = {'Fonte de Dados': ['Investing.com', 'InfoMoney', 'TradingView'],
            'Pos': [posInv, posInf, posTrd],
            'Neu': [neuInv, neuInf, neuTrd],
            'Neg': [negInv, negInf, negTrd]}
df = pd.DataFrame(raw_data, columns=['Fonte de Dados', 'Pos', 'Neu', 'Neg'])

# Positions and width for the grouped bars (one group per data source).
pos = list(range(len(df['Pos'])))
width = 0.25

fig, ax = plt.subplots(figsize=(10, 5))

# One bar series per polarity, shifted by `width` within each group.
plt.bar(pos, df['Pos'], width, alpha=0.5, color='#EE3224',
        label=df['Fonte de Dados'][0])
plt.bar([p + width for p in pos], df['Neu'], width, alpha=0.5, color='#F78F1E',
        label=df['Fonte de Dados'][1])
plt.bar([p + width * 2 for p in pos], df['Neg'], width, alpha=0.5,
        color='#FFC222', label=df['Fonte de Dados'][2])

ax.set_title("OpLexicon sem Pré-Processamento")
ax.set_ylabel('N° de Textos')
ax.set_xticks([p + 1 * width for p in pos])
ax.set_xticklabels(df['Fonte de Dados'])

plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, max(df['Pos'] + df['Neu'] + df['Neg'])])

plt.legend(['Positivo', 'Neutro', 'Negativo'], loc='upper left')
plt.grid()
plt.show()
plt.show() | 33.505155 | 114 | 0.641538 | import nltk
import csv
import datetime
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
now = datetime.datetime.now()
today = now.strftime("%Y-%m-%d")
dInfoMoney = 'C:/Users/vitor/Documents/GetDataset/Infomoney/'
dInvesting = 'C:/Users/vitor/Documents/GetDataset/Investing.com/'
dTrading = 'C:/Users/vitor/Documents/GetDataset/TradingView/'
r_investing = open(dInvesting + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')
posInv = 0
neuInv = 0
negInv = 0
for t in r_investing.readlines():
if 'Positivo' in t:
posInv += 1
if 'Neutro' in t:
neuInv += 1
if 'Negativo' in t:
negInv += 1
print('Investing Pos ', posInv)
print('Investing Neu ', neuInv)
print('Investing Neg ', negInv)
r_infomoney = open(dInfoMoney + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')
posInf = 0
neuInf = 0
negInf = 0
for t in r_infomoney.readlines():
if 'Positivo' in t:
posInf += 1
if 'Neutro' in t:
neuInf += 1
if 'Negativo' in t:
negInf += 1
print('InfoMoney Pos ', posInf)
print('InfoMoney Neu ', neuInf)
print('InfoMoney Neg ', negInf)
r_tradingview = open(dTrading + today +'/polarityUniLexPre.csv', 'r', encoding='utf8')
posTrd = 0
neuTrd = 0
negTrd = 0
for t in r_tradingview.readlines():
if 'Positivo' in t:
posTrd += 1
if 'Neutro' in t:
neuTrd += 1
if 'Negativo' in t:
negTrd += 1
print('TradingView Pos ', posTrd)
print('TradingView Neu ', neuTrd)
print('TradingView Neg ', negTrd)
raw_data = {'Fonte de Dados': ['Investing.com', 'InfoMoney', 'TradingView'],
'Pos': [posInv, posInf, posTrd],
'Neu': [neuInv, neuInf, neuTrd],
'Neg': [negInv, negInf, negTrd]}
df = pd.DataFrame(raw_data, columns = ['Fonte de Dados', 'Pos', 'Neu', 'Neg'])
df
pos = list(range(len(df['Pos'])))
width = 0.25
fig, ax = plt.subplots(figsize=(10,5))
os'], width, alpha=0.5, color='#EE3224', label=df['Fonte de Dados'][0])
Neu'], width, alpha=0.5, color='#F78F1E', label=df['Fonte de Dados'][1])
['Neg'], width, alpha=0.5, color='#FFC222', label=df['Fonte de Dados'][2])
ax.set_title("OpLexicon sem Pré-Processamento")
ax.set_ylabel('N° de Textos')
ax.set_xticks([p + 1 * width for p in pos])
ax.set_xticklabels(df['Fonte de Dados'])
plt.xlim(min(pos)-width, max(pos)+width*4)
plt.ylim([0, max(df['Pos'] + df['Neu'] + df['Neg'])] )
plt.legend(['Positivo', 'Neutro', 'Negativo'], loc='upper left')
plt.grid()
plt.show() | true | true |
f727dd3b7d4b012d0f7dec1bce4d8082d376ec1c | 295,763 | py | Python | kubernetes/client/apis/apps_v1beta1_api.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/apis/apps_v1beta1_api.py | TomasTomecek/kubernetes-python | c37c074303a13c72662b9201ccc023fb0ca45755 | [
"Apache-2.0"
] | 1 | 2021-04-30T20:41:19.000Z | 2021-04-30T20:41:19.000Z | venv/lib/python2.7/site-packages/kubernetes/client/apis/apps_v1beta1_api.py | 784134748/kubernetes-install | 5df59632c2619632e422948b667fb68eab9ff5be | [
"MIT"
] | 1 | 2020-05-09T07:16:55.000Z | 2020-05-09T07:16:55.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class AppsV1beta1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_controller_revision(self, namespace, body, **kwargs):
"""
create a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_controller_revision(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ControllerRevision body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs)
return data
    def create_namespaced_controller_revision_with_http_info(self, namespace, body, **kwargs):
        """
        create a ControllerRevision

        Synchronous by default; pass ``async_req=True`` for an asynchronous
        request and call ``thread.get()`` on the returned thread.

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1beta1ControllerRevision body: (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
        :return: V1beta1ControllerRevision
                 If the method is called asynchronously, returns the request thread.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is missing.
        """
        # Keyword arguments accepted in addition to the API parameters.
        all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early with a clear error.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_controller_revision" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_controller_revision`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_namespaced_controller_revision`")

        collection_formats = {}

        # Substitutions for the {namespace} path template.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        # Query-string parameters (python snake_case -> API camelCase).
        query_params = []
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1ControllerRevision',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def create_namespaced_deployment(self, namespace, body, **kwargs):
"""
create a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_deployment(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Deployment body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs)
return data
    def create_namespaced_deployment_with_http_info(self, namespace, body, **kwargs):
        """
        create a Deployment

        Synchronous by default; pass ``async_req=True`` for an asynchronous
        request and call ``thread.get()`` on the returned thread.

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param AppsV1beta1Deployment body: (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
        :return: AppsV1beta1Deployment
                 If the method is called asynchronously, returns the request thread.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is missing.
        """
        # Keyword arguments accepted in addition to the API parameters.
        all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early with a clear error.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_deployment" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment`")

        collection_formats = {}

        # Substitutions for the {namespace} path template.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        # Query-string parameters (python snake_case -> API camelCase).
        query_params = []
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Deployment',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def create_namespaced_deployment_rollback(self, name, namespace, body, **kwargs):
"""
create rollback of a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_deployment_rollback(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the DeploymentRollback (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1DeploymentRollback body: (required)
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization.
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, **kwargs)
return data
    def create_namespaced_deployment_rollback_with_http_info(self, name, namespace, body, **kwargs):
        """
        create rollback of a Deployment

        Synchronous by default; pass ``async_req=True`` for an asynchronous
        request and call ``thread.get()`` on the returned thread.

        :param async_req bool
        :param str name: name of the DeploymentRollback (required)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param AppsV1beta1DeploymentRollback body: (required)
        :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
        :param bool include_uninitialized: If IncludeUninitialized is specified, the object may be returned without completing initialization.
        :param str pretty: If 'true', then the output is pretty printed.
        :return: V1Status
                 If the method is called asynchronously, returns the request thread.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is missing.
        """
        # Keyword arguments accepted in addition to the API parameters.
        all_params = ['name', 'namespace', 'body', 'dry_run', 'include_uninitialized', 'pretty']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early with a clear error.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_deployment_rollback" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `create_namespaced_deployment_rollback`")
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment_rollback`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment_rollback`")

        collection_formats = {}

        # Substitutions for the {name} and {namespace} path templates.
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        # Query-string parameters (python snake_case -> API camelCase).
        query_params = []
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/rollback', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def create_namespaced_stateful_set(self, namespace, body, **kwargs):
"""
create a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_namespaced_stateful_set(namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1StatefulSet body: (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
return data
    def create_namespaced_stateful_set_with_http_info(self, namespace, body, **kwargs):
        """
        create a StatefulSet

        Synchronous by default; pass ``async_req=True`` for an asynchronous
        request and call ``thread.get()`` on the returned thread.

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param V1beta1StatefulSet body: (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str dry_run: When present, indicates that modifications should not be persisted. Valid values are: - All: all dry run stages will be processed
        :return: V1beta1StatefulSet
                 If the method is called asynchronously, returns the request thread.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when a required parameter is missing.
        """
        # Keyword arguments accepted in addition to the API parameters.
        all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early with a clear error.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method create_namespaced_stateful_set" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_stateful_set`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `create_namespaced_stateful_set`")

        collection_formats = {}

        # Substitutions for the {namespace} path template.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        # Query-string parameters (python snake_case -> API camelCase).
        query_params = []
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'POST',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1StatefulSet',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_collection_namespaced_controller_revision(self, namespace, **kwargs):
"""
delete collection of ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_controller_revision(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs)
return data
    def delete_collection_namespaced_controller_revision_with_http_info(self, namespace, **kwargs):
        """
        delete collection of ControllerRevision

        Synchronous by default; pass ``async_req=True`` for an asynchronous
        request and call ``thread.get()`` on the returned thread.

        :param async_req bool
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param bool include_uninitialized: If true, partially initialized resources are included in the response.
        :param str pretty: If 'true', then the output is pretty printed.
        :param str _continue: Continue token from a previous paginated list call.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param int limit: Maximum number of responses to return for a list call.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource.
        :param int timeout_seconds: Timeout for the list/watch call.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications.
        :return: V1Status
                 If the method is called asynchronously, returns the request thread.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: when the required `namespace` parameter is missing.
        """
        # Keyword arguments accepted in addition to the API parameters.
        all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early with a clear error.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_collection_namespaced_controller_revision" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_controller_revision`")

        collection_formats = {}

        # Substitutions for the {namespace} path template.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']

        # Query-string parameters (python snake_case -> API camelCase;
        # `_continue` is underscored to avoid the python keyword `continue`).
        query_params = []
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))

        header_params = {}

        form_params = []
        local_var_files = {}

        # Collection DELETE carries no request body.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])

        # Authentication setting
        auth_settings = ['BearerToken']

        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1Status',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def delete_collection_namespaced_deployment(self, namespace, **kwargs):
"""
delete collection of Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_deployment(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_deployment_with_http_info(self, namespace, **kwargs):
"""
delete collection of Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_deployment_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_stateful_set(self, namespace, **kwargs):
"""
delete collection of StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
"""
delete collection of StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_stateful_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_controller_revision(self, name, namespace, body, **kwargs):
"""
delete a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_controller_revision`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_deployment(self, name, namespace, body, **kwargs):
"""
delete a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_deployment(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_stateful_set(self, name, namespace, body, **kwargs):
"""
delete a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_stateful_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_resources_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_controller_revision_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
return data
def list_controller_revision_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_controller_revision_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1ControllerRevisionList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_controller_revision_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/controllerrevisions', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ControllerRevisionList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_deployment_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_deployment_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: AppsV1beta1DeploymentList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_deployment_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_deployment_for_all_namespaces_with_http_info(**kwargs)
return data
def list_deployment_for_all_namespaces_with_http_info(self, **kwargs):
    """
    list or watch objects of kind Deployment

    Assembles and issues the GET request for
    /apis/apps/v1beta1/deployments across all namespaces.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_deployment_for_all_namespaces_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str _continue: continue token returned by a previous chunked list call.
    :param str field_selector: selector restricting the returned objects by their fields.
    :param bool include_uninitialized: if true, partially initialized resources are included.
    :param str label_selector: selector restricting the returned objects by their labels.
    :param int limit: maximum number of responses to return for one list call.
    :param str pretty: if 'true', the output is pretty printed.
    :param str resource_version: only show changes after this resource version.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of returning a list.
    :return: AppsV1beta1DeploymentList
             If the method is called asynchronously, returns the request thread.
    """
    # Python argument name -> wire-format query key, listed in the exact
    # order the query string is assembled.
    query_param_names = [
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('include_uninitialized', 'includeUninitialized'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('pretty', 'pretty'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Every keyword this method accepts: the API parameters above plus the
    # generic transport controls understood by api_client.call_api.
    recognized = [name for name, _ in query_param_names] + [
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    params = locals()
    for key, val in params['kwargs'].items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_deployment_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    path_params = {}
    # Only parameters the caller actually supplied end up on the wire.
    query_params = [(wire, params[name])
                    for name, wire in query_param_names
                    if name in params]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf',
             'application/json;stream=watch',
             'application/vnd.kubernetes.protobuf;stream=watch']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }

    return self.api_client.call_api(
        '/apis/apps/v1beta1/deployments', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='AppsV1beta1DeploymentList',
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_controller_revision(self, namespace, **kwargs):
    """
    list or watch objects of kind ControllerRevision

    Convenience wrapper around
    list_namespaced_controller_revision_with_http_info that returns only
    the response payload, never the (data, status, headers) tuple.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_controller_revision(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: if true, partially initialized resources are included.
    :param str pretty: if 'true', the output is pretty printed.
    :param str _continue: continue token returned by a previous chunked list call.
    :param str field_selector: selector restricting the returned objects by their fields.
    :param str label_selector: selector restricting the returned objects by their labels.
    :param int limit: maximum number of responses to return for one list call.
    :param str resource_version: only show changes after this resource version.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of returning a list.
    :return: V1beta1ControllerRevisionList
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The helper already returns the request thread when async_req is set
    # and the deserialized list otherwise, so a single delegation suffices.
    return self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs)
def list_namespaced_controller_revision_with_http_info(self, namespace, **kwargs):
    """
    list or watch objects of kind ControllerRevision

    Assembles and issues the GET request for
    /apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_controller_revision_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: if true, partially initialized resources are included.
    :param str pretty: if 'true', the output is pretty printed.
    :param str _continue: continue token returned by a previous chunked list call.
    :param str field_selector: selector restricting the returned objects by their fields.
    :param str label_selector: selector restricting the returned objects by their labels.
    :param int limit: maximum number of responses to return for one list call.
    :param str resource_version: only show changes after this resource version.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of returning a list.
    :return: V1beta1ControllerRevisionList
             If the method is called asynchronously, returns the request thread.
    """
    # Python argument name -> wire-format query key, listed in the exact
    # order the query string is assembled.
    query_param_names = [
        ('include_uninitialized', 'includeUninitialized'),
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Every keyword this method accepts: the path parameter, the API query
    # parameters above, and the generic transport controls.
    recognized = ['namespace'] + [name for name, _ in query_param_names] + [
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    params = locals()
    for key, val in params['kwargs'].items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_controller_revision" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_controller_revision`")

    collection_formats = {}
    path_params = {'namespace': params['namespace']}
    # Only parameters the caller actually supplied end up on the wire.
    query_params = [(wire, params[name])
                    for name, wire in query_param_names
                    if name in params]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf',
             'application/json;stream=watch',
             'application/vnd.kubernetes.protobuf;stream=watch']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }

    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1ControllerRevisionList',
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_deployment(self, namespace, **kwargs):
    """
    list or watch objects of kind Deployment

    Convenience wrapper around list_namespaced_deployment_with_http_info
    that returns only the response payload, never the
    (data, status, headers) tuple.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_deployment(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: if true, partially initialized resources are included.
    :param str pretty: if 'true', the output is pretty printed.
    :param str _continue: continue token returned by a previous chunked list call.
    :param str field_selector: selector restricting the returned objects by their fields.
    :param str label_selector: selector restricting the returned objects by their labels.
    :param int limit: maximum number of responses to return for one list call.
    :param str resource_version: only show changes after this resource version.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of returning a list.
    :return: AppsV1beta1DeploymentList
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The helper already returns the request thread when async_req is set
    # and the deserialized list otherwise, so a single delegation suffices.
    return self.list_namespaced_deployment_with_http_info(namespace, **kwargs)
def list_namespaced_deployment_with_http_info(self, namespace, **kwargs):
    """
    list or watch objects of kind Deployment

    Assembles and issues the GET request for
    /apis/apps/v1beta1/namespaces/{namespace}/deployments.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_namespaced_deployment_with_http_info(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param bool include_uninitialized: if true, partially initialized resources are included.
    :param str pretty: if 'true', the output is pretty printed.
    :param str _continue: continue token returned by a previous chunked list call.
    :param str field_selector: selector restricting the returned objects by their fields.
    :param str label_selector: selector restricting the returned objects by their labels.
    :param int limit: maximum number of responses to return for one list call.
    :param str resource_version: only show changes after this resource version.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream change notifications instead of returning a list.
    :return: AppsV1beta1DeploymentList
             If the method is called asynchronously, returns the request thread.
    """
    # Python argument name -> wire-format query key, listed in the exact
    # order the query string is assembled.
    query_param_names = [
        ('include_uninitialized', 'includeUninitialized'),
        ('pretty', 'pretty'),
        ('_continue', 'continue'),
        ('field_selector', 'fieldSelector'),
        ('label_selector', 'labelSelector'),
        ('limit', 'limit'),
        ('resource_version', 'resourceVersion'),
        ('timeout_seconds', 'timeoutSeconds'),
        ('watch', 'watch'),
    ]
    # Every keyword this method accepts: the path parameter, the API query
    # parameters above, and the generic transport controls.
    recognized = ['namespace'] + [name for name, _ in query_param_names] + [
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    ]

    params = locals()
    for key, val in params['kwargs'].items():
        if key not in recognized:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_deployment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'namespace' is set
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_deployment`")

    collection_formats = {}
    path_params = {'namespace': params['namespace']}
    # Only parameters the caller actually supplied end up on the wire.
    query_params = [(wire, params[name])
                    for name, wire in query_param_names
                    if name in params]
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf',
             'application/json;stream=watch',
             'application/vnd.kubernetes.protobuf;stream=watch']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }

    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'GET',
        path_params,
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='AppsV1beta1DeploymentList',
        auth_settings=['BearerToken'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_namespaced_stateful_set(self, namespace, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
return data
def list_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_stateful_set_with_http_info(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_stateful_set_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_stateful_set_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
return data
def list_stateful_set_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_stateful_set_for_all_namespaces_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1beta1StatefulSetList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_stateful_set_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/statefulsets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSetList',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs):
"""
partially update the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_controller_revision`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_deployment(self, name, namespace, body, **kwargs):
"""
partially update the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_deployment_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_deployment_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_deployment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_stateful_set(self, name, namespace, body, **kwargs):
"""
partially update the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_stateful_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
"""
partially update status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_stateful_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_controller_revision(self, name, namespace, **kwargs):
"""
read the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_controller_revision(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs):
"""
read the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_controller_revision_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_deployment(self, name, namespace, **kwargs):
"""
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_deployment_with_http_info(self, name, namespace, **kwargs):
"""
read the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_deployment_scale(self, name, namespace, **kwargs):
"""
read scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_deployment_scale_with_http_info(self, name, namespace, **kwargs):
"""
read scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_scale_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_deployment_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_deployment_status(self, name, namespace, **kwargs):
"""
read status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_deployment_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_deployment_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_deployment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_stateful_set(self, name, namespace, **kwargs):
"""
read the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):
"""
read the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_stateful_set_scale(self, name, namespace, **kwargs):
"""
read scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_scale(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_scale_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_scale_with_http_info(self, name, namespace, **kwargs):
"""
read scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_scale_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_stateful_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_stateful_set_status(self, name, namespace, **kwargs):
"""
read status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_status_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_status_with_http_info(self, name, namespace, **kwargs):
"""
read status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_stateful_set_status_with_http_info(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_stateful_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs):
"""
replace the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_controller_revision(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ControllerRevision body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified ControllerRevision
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_controller_revision_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ControllerRevision (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1ControllerRevision body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1ControllerRevision
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_controller_revision`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_controller_revision`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_deployment(self, name, namespace, body, **kwargs):
"""
replace the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
"""
replace scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
replace scale of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_deployment_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified Deployment
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_deployment_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Deployment (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Deployment body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Deployment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_deployment_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_stateful_set(self, name, namespace, body, **kwargs):
"""
replace the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
"""
replace scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):
"""
replace scale of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param AppsV1beta1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: AppsV1beta1Scale
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_stateful_set_scale" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_scale`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_scale`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_scale`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Scale',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
"""
replace status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_status(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
"""
replace status of the specified StatefulSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.replace_namespaced_stateful_set_status_with_http_info(name, namespace, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the StatefulSet (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1beta1StatefulSet body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1StatefulSet
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_stateful_set_status" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_status`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_status`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_status`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 64.661784 | 1,390 | 0.651883 |
from __future__ import absolute_import
import sys
import os
import re
from six import iteritems
from ..api_client import ApiClient
class AppsV1beta1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_controller_revision(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_controller_revision_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_controller_revision_with_http_info(self, namespace, body, **kwargs):
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_controller_revision`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1ControllerRevision',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_deployment(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_deployment_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_deployment_with_http_info(self, namespace, body, **kwargs):
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_deployment" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AppsV1beta1Deployment',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_deployment_rollback(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.create_namespaced_deployment_rollback_with_http_info(name, namespace, body, **kwargs)
return data
def create_namespaced_deployment_rollback_with_http_info(self, name, namespace, body, **kwargs):
all_params = ['name', 'namespace', 'body', 'dry_run', 'include_uninitialized', 'pretty']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_deployment_rollback" % key
)
params[key] = val
del params['kwargs']
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `create_namespaced_deployment_rollback`")
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_deployment_rollback`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_deployment_rollback`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/rollback', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_namespaced_stateful_set(self, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_stateful_set_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_stateful_set_with_http_info(self, namespace, body, **kwargs):
all_params = ['namespace', 'body', 'include_uninitialized', 'pretty', 'dry_run']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_stateful_set" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_stateful_set`")
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_stateful_set`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'dry_run' in params:
query_params.append(('dryRun', params['dry_run']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1beta1StatefulSet',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_controller_revision(self, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_controller_revision_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_controller_revision_with_http_info(self, namespace, **kwargs):
all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_controller_revision" % key
)
params[key] = val
del params['kwargs']
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_controller_revision`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_deployment(self, namespace, **kwargs):
    """Delete a collection of Deployments in the given namespace.

    Convenience wrapper around
    delete_collection_namespaced_deployment_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True) instead of the full
    (data, status, headers) HTTP response info.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths simply return whatever the
    # *_with_http_info variant produces, so a single call suffices.
    return self.delete_collection_namespaced_deployment_with_http_info(namespace, **kwargs)
def delete_collection_namespaced_deployment_with_http_info(self, namespace, **kwargs):
    """Delete a collection of Deployment objects in `namespace`.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    to the API server as query parameters.

    :param namespace: object name and auth scope for the request (required)
    :return: result of ApiClient.call_api, deserialized as V1Status
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `namespace` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_deployment" % key
            )
        params[key] = val
    del params['kwargs']
    # `namespace` is the only required parameter.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_deployment`")
    collection_formats = {}
    # Substituted into the {namespace} segment of the URL template.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation: response media types the endpoint can produce,
    # and request media types it accepts.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_collection_namespaced_stateful_set(self, namespace, **kwargs):
    """Delete a collection of StatefulSets in the given namespace.

    Convenience wrapper around
    delete_collection_namespaced_stateful_set_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.delete_collection_namespaced_stateful_set_with_http_info(namespace, **kwargs)
def delete_collection_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
    """Delete a collection of StatefulSet objects in `namespace`.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    to the API server as query parameters.

    :param namespace: object name and auth scope for the request (required)
    :return: result of ApiClient.call_api, deserialized as V1Status
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `namespace` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_collection_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # `namespace` is the only required parameter.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_stateful_set`")
    collection_formats = {}
    # Substituted into the {namespace} segment of the URL template.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation: response media types the endpoint can produce,
    # and request media types it accepts.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_namespaced_controller_revision(self, name, namespace, body, **kwargs):
    """Delete the named ControllerRevision.

    Convenience wrapper around
    delete_namespaced_controller_revision_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.delete_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
def delete_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
    """Delete the ControllerRevision `name` in `namespace`.

    Optional kwargs (pretty, dry_run, grace_period_seconds,
    orphan_dependents, propagation_policy) are forwarded as query
    parameters.

    :param name: name of the ControllerRevision to delete (required)
    :param namespace: object name and auth scope for the request (required)
    :param body: request payload forwarded as the HTTP body (required);
        presumably delete options — confirm against callers
    :return: result of ApiClient.call_api, deserialized as V1Status
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `name`, `namespace` or `body` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_controller_revision" % key
            )
        params[key] = val
    del params['kwargs']
    # All three positional parameters are required.
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_controller_revision`")
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_controller_revision`")
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_controller_revision`")
    collection_formats = {}
    # Substituted into the {namespace}/{name} segments of the URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    if 'grace_period_seconds' in params:
        query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
    if 'orphan_dependents' in params:
        query_params.append(('orphanDependents', params['orphan_dependents']))
    if 'propagation_policy' in params:
        query_params.append(('propagationPolicy', params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The delete options document is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # Content negotiation: response media types the endpoint can produce,
    # and request media types it accepts.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_namespaced_deployment(self, name, namespace, body, **kwargs):
    """Delete the named Deployment.

    Convenience wrapper around delete_namespaced_deployment_with_http_info:
    it forces `_return_http_data_only` so the caller receives only the
    deserialized body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.delete_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
def delete_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
    """Delete the Deployment `name` in `namespace`.

    Optional kwargs (pretty, dry_run, grace_period_seconds,
    orphan_dependents, propagation_policy) are forwarded as query
    parameters.

    :param name: name of the Deployment to delete (required)
    :param namespace: object name and auth scope for the request (required)
    :param body: request payload forwarded as the HTTP body (required);
        presumably delete options — confirm against callers
    :return: result of ApiClient.call_api, deserialized as V1Status
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `name`, `namespace` or `body` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_deployment" % key
            )
        params[key] = val
    del params['kwargs']
    # All three positional parameters are required.
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_deployment`")
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_deployment`")
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_deployment`")
    collection_formats = {}
    # Substituted into the {namespace}/{name} segments of the URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    if 'grace_period_seconds' in params:
        query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
    if 'orphan_dependents' in params:
        query_params.append(('orphanDependents', params['orphan_dependents']))
    if 'propagation_policy' in params:
        query_params.append(('propagationPolicy', params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The delete options document is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # Content negotiation: response media types the endpoint can produce,
    # and request media types it accepts.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def delete_namespaced_stateful_set(self, name, namespace, body, **kwargs):
    """Delete the named StatefulSet.

    Convenience wrapper around
    delete_namespaced_stateful_set_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.delete_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
def delete_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
    """Delete the StatefulSet `name` in `namespace`.

    Optional kwargs (pretty, dry_run, grace_period_seconds,
    orphan_dependents, propagation_policy) are forwarded as query
    parameters.

    :param name: name of the StatefulSet to delete (required)
    :param namespace: object name and auth scope for the request (required)
    :param body: request payload forwarded as the HTTP body (required);
        presumably delete options — confirm against callers
    :return: result of ApiClient.call_api, deserialized as V1Status
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `name`, `namespace` or `body` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # All three positional parameters are required.
    if ('name' not in params) or (params['name'] is None):
        raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_stateful_set`")
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_stateful_set`")
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_stateful_set`")
    collection_formats = {}
    # Substituted into the {namespace}/{name} segments of the URL template.
    path_params = {}
    if 'name' in params:
        path_params['name'] = params['name']
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'dry_run' in params:
        query_params.append(('dryRun', params['dry_run']))
    if 'grace_period_seconds' in params:
        query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
    if 'orphan_dependents' in params:
        query_params.append(('orphanDependents', params['orphan_dependents']))
    if 'propagation_policy' in params:
        query_params.append(('propagationPolicy', params['propagation_policy']))
    header_params = {}
    form_params = []
    local_var_files = {}
    # The delete options document is sent as the request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']
    # Content negotiation: response media types the endpoint can produce,
    # and request media types it accepts.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP DELETE to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'DELETE',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1Status',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
    """Get available resources of the apps/v1beta1 API group.

    Convenience wrapper around get_api_resources_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.get_api_resources_with_http_info(**kwargs)
def get_api_resources_with_http_info(self, **kwargs):
    """Get available resources of the apps/v1beta1 API group.

    Takes no endpoint-specific parameters; only the client-side control
    flags are accepted.

    :return: result of ApiClient.call_api, deserialized as
        V1APIResourceList
    :raises TypeError: on an unexpected keyword argument
    """
    # No endpoint-specific keyword arguments; only client control flags.
    all_params = []
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_api_resources" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation: response media types the endpoint can produce,
    # and request media types it accepts.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1APIResourceList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_controller_revision_for_all_namespaces(self, **kwargs):
    """List ControllerRevisions across all namespaces.

    Convenience wrapper around
    list_controller_revision_for_all_namespaces_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.list_controller_revision_for_all_namespaces_with_http_info(**kwargs)
def list_controller_revision_for_all_namespaces_with_http_info(self, **kwargs):
    """List ControllerRevisions across all namespaces.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    as query parameters.

    :return: result of ApiClient.call_api, deserialized as
        V1beta1ControllerRevisionList
    :raises TypeError: on an unexpected keyword argument
    """
    # Keyword arguments this endpoint understands.
    all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_controller_revision_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation; the stream=watch variants support watch mode.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/controllerrevisions', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1beta1ControllerRevisionList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_deployment_for_all_namespaces(self, **kwargs):
    """List Deployments across all namespaces.

    Convenience wrapper around
    list_deployment_for_all_namespaces_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.list_deployment_for_all_namespaces_with_http_info(**kwargs)
def list_deployment_for_all_namespaces_with_http_info(self, **kwargs):
    """List Deployments across all namespaces.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    as query parameters.

    :return: result of ApiClient.call_api, deserialized as
        AppsV1beta1DeploymentList
    :raises TypeError: on an unexpected keyword argument
    """
    # Keyword arguments this endpoint understands.
    all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_deployment_for_all_namespaces" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    path_params = {}
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation; the stream=watch variants support watch mode.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/deployments', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='AppsV1beta1DeploymentList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_namespaced_controller_revision(self, namespace, **kwargs):
    """List ControllerRevisions in the given namespace.

    Convenience wrapper around
    list_namespaced_controller_revision_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.list_namespaced_controller_revision_with_http_info(namespace, **kwargs)
def list_namespaced_controller_revision_with_http_info(self, namespace, **kwargs):
    """List ControllerRevisions in `namespace`.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    as query parameters.

    :param namespace: object name and auth scope for the request (required)
    :return: result of ApiClient.call_api, deserialized as
        V1beta1ControllerRevisionList
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `namespace` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_controller_revision" % key
            )
        params[key] = val
    del params['kwargs']
    # `namespace` is the only required parameter.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_controller_revision`")
    collection_formats = {}
    # Substituted into the {namespace} segment of the URL template.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation; the stream=watch variants support watch mode.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1beta1ControllerRevisionList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_namespaced_deployment(self, namespace, **kwargs):
    """List Deployments in the given namespace.

    Convenience wrapper around list_namespaced_deployment_with_http_info:
    it forces `_return_http_data_only` so the caller receives only the
    deserialized body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.list_namespaced_deployment_with_http_info(namespace, **kwargs)
def list_namespaced_deployment_with_http_info(self, namespace, **kwargs):
    """List Deployments in `namespace`.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    as query parameters.

    :param namespace: object name and auth scope for the request (required)
    :return: result of ApiClient.call_api, deserialized as
        AppsV1beta1DeploymentList
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `namespace` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_deployment" % key
            )
        params[key] = val
    del params['kwargs']
    # `namespace` is the only required parameter.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_deployment`")
    collection_formats = {}
    # Substituted into the {namespace} segment of the URL template.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation; the stream=watch variants support watch mode.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='AppsV1beta1DeploymentList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_namespaced_stateful_set(self, namespace, **kwargs):
    """List StatefulSets in the given namespace.

    Convenience wrapper around list_namespaced_stateful_set_with_http_info:
    it forces `_return_http_data_only` so the caller receives only the
    deserialized body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.list_namespaced_stateful_set_with_http_info(namespace, **kwargs)
def list_namespaced_stateful_set_with_http_info(self, namespace, **kwargs):
    """List StatefulSets in `namespace`.

    Optional kwargs (label_selector, field_selector, limit/_continue
    paging, resource_version, timeout_seconds, watch, ...) are forwarded
    as query parameters.

    :param namespace: object name and auth scope for the request (required)
    :return: result of ApiClient.call_api, deserialized as
        V1beta1StatefulSetList
    :raises TypeError: on an unexpected keyword argument
    :raises ValueError: if `namespace` is missing or None
    """
    # Keyword arguments this endpoint understands.
    all_params = ['namespace', 'include_uninitialized', 'pretty', '_continue', 'field_selector', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
    # Client-side control flags accepted by every generated method.
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Snapshot this call's locals, then fold the validated **kwargs into the
    # snapshot so every parameter can be looked up uniformly via `params`.
    params = locals()
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_namespaced_stateful_set" % key
            )
        params[key] = val
    del params['kwargs']
    # `namespace` is the only required parameter.
    if ('namespace' not in params) or (params['namespace'] is None):
        raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_stateful_set`")
    collection_formats = {}
    # Substituted into the {namespace} segment of the URL template.
    path_params = {}
    if 'namespace' in params:
        path_params['namespace'] = params['namespace']
    # Map snake_case option names onto the API's camelCase query keys.
    query_params = []
    if 'include_uninitialized' in params:
        query_params.append(('includeUninitialized', params['include_uninitialized']))
    if 'pretty' in params:
        query_params.append(('pretty', params['pretty']))
    if '_continue' in params:
        query_params.append(('continue', params['_continue']))
    if 'field_selector' in params:
        query_params.append(('fieldSelector', params['field_selector']))
    if 'label_selector' in params:
        query_params.append(('labelSelector', params['label_selector']))
    if 'limit' in params:
        query_params.append(('limit', params['limit']))
    if 'resource_version' in params:
        query_params.append(('resourceVersion', params['resource_version']))
    if 'timeout_seconds' in params:
        query_params.append(('timeoutSeconds', params['timeout_seconds']))
    if 'watch' in params:
        query_params.append(('watch', params['watch']))
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # Content negotiation; the stream=watch variants support watch mode.
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['*/*'])
    auth_settings = ['BearerToken']
    # Delegate the actual HTTP GET to the shared ApiClient.
    return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='V1beta1StatefulSetList',
                                    auth_settings=auth_settings,
                                    async_req=params.get('async_req'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def list_stateful_set_for_all_namespaces(self, **kwargs):
    """List StatefulSets across all namespaces.

    Convenience wrapper around
    list_stateful_set_for_all_namespaces_with_http_info: it forces
    `_return_http_data_only` so the caller receives only the deserialized
    body (or the async handle when async_req=True).
    """
    kwargs['_return_http_data_only'] = True
    # Sync and async paths both just return the delegate's result.
    return self.list_stateful_set_for_all_namespaces_with_http_info(**kwargs)
    def list_stateful_set_for_all_namespaces_with_http_info(self, **kwargs):
        """GET /apis/apps/v1beta1/statefulsets — list StatefulSets in all namespaces.

        Optional keyword arguments (all forwarded as query parameters):
        ``_continue``, ``field_selector``, ``include_uninitialized``,
        ``label_selector``, ``limit``, ``pretty``, ``resource_version``,
        ``timeout_seconds``, ``watch``, plus the framework options
        ``async_req``, ``_return_http_data_only``, ``_preload_content``
        and ``_request_timeout``.

        :return: response deserialized as ``V1beta1StatefulSetList`` (or
            the request thread when ``async_req`` is set).
        :raises TypeError: if an unexpected keyword argument is passed.
        """
        all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method list_stateful_set_for_all_namespaces" % key
                )
            params[key] = val
        del params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        # Snake_case Python names map onto the camelCase Kubernetes
        # query-parameter keys.
        if '_continue' in params:
            query_params.append(('continue', params['_continue']))
        if 'field_selector' in params:
            query_params.append(('fieldSelector', params['field_selector']))
        if 'include_uninitialized' in params:
            query_params.append(('includeUninitialized', params['include_uninitialized']))
        if 'label_selector' in params:
            query_params.append(('labelSelector', params['label_selector']))
        if 'limit' in params:
            query_params.append(('limit', params['limit']))
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'resource_version' in params:
            query_params.append(('resourceVersion', params['resource_version']))
        if 'timeout_seconds' in params:
            query_params.append(('timeoutSeconds', params['timeout_seconds']))
        if 'watch' in params:
            query_params.append(('watch', params['watch']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # List endpoints also support the watch/streaming media types.
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/statefulsets', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1StatefulSetList',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_controller_revision(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_controller_revision_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}.

        :param name: name of the ControllerRevision (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``V1beta1ControllerRevision``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_controller_revision" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_controller_revision`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_controller_revision`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_controller_revision`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1ControllerRevision',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_deployment(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}.

        :param name: name of the Deployment (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``AppsV1beta1Deployment``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_deployment" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Deployment',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_scale_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale.

        :param name: name of the Scale (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``AppsV1beta1Scale``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_deployment_scale" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_scale`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_scale`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_scale`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Scale',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_deployment_status(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_deployment_status_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status.

        :param name: name of the Deployment (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``AppsV1beta1Deployment``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_deployment_status" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_deployment_status`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_deployment_status`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_deployment_status`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Deployment',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_stateful_set(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}.

        :param name: name of the StatefulSet (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``V1beta1StatefulSet``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_stateful_set" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1StatefulSet',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale.

        :param name: name of the Scale (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``AppsV1beta1Scale``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_stateful_set_scale" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_scale`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_scale`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_scale`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Scale',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def patch_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_stateful_set_status_with_http_info(name, namespace, body, **kwargs)
return data
    def patch_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
        """PATCH /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status.

        :param name: name of the StatefulSet (required).
        :param namespace: object name and auth scope (required).
        :param body: the patch document (required; JSON patch, merge
            patch or strategic merge patch).
        :param pretty: optional; pretty-print the output.
        :param dry_run: optional; server-side dry-run directive.
        :return: response deserialized as ``V1beta1StatefulSet``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'body', 'pretty', 'dry_run']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_namespaced_stateful_set_status" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_stateful_set_status`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_stateful_set_status`")
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_stateful_set_status`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        # snake_case Python names map onto camelCase Kubernetes query keys.
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'dry_run' in params:
            query_params.append(('dryRun', params['dry_run']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        if 'body' in params:
            body_params = params['body']
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        # The body may be any of the three Kubernetes patch formats.
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'PATCH',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1StatefulSet',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def read_namespaced_controller_revision(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_controller_revision_with_http_info(name, namespace, **kwargs)
return data
    def read_namespaced_controller_revision_with_http_info(self, name, namespace, **kwargs):
        """GET /apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}.

        :param name: name of the ControllerRevision (required).
        :param namespace: object name and auth scope (required).
        :param pretty: optional; pretty-print the output.
        :param exact: optional; export exactly, preserving name/namespace.
        :param export: optional; strip fields the user cannot specify.
        :return: response deserialized as ``V1beta1ControllerRevision``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_controller_revision" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `read_namespaced_controller_revision`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_controller_revision`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'exact' in params:
            query_params.append(('exact', params['exact']))
        if 'export' in params:
            query_params.append(('export', params['export']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='V1beta1ControllerRevision',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def read_namespaced_deployment(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_with_http_info(name, namespace, **kwargs)
return data
    def read_namespaced_deployment_with_http_info(self, name, namespace, **kwargs):
        """GET /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}.

        :param name: name of the Deployment (required).
        :param namespace: object name and auth scope (required).
        :param pretty: optional; pretty-print the output.
        :param exact: optional; export exactly, preserving name/namespace.
        :param export: optional; strip fields the user cannot specify.
        :return: response deserialized as ``AppsV1beta1Deployment``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_deployment" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        if 'exact' in params:
            query_params.append(('exact', params['exact']))
        if 'export' in params:
            query_params.append(('export', params['export']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Deployment',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def read_namespaced_deployment_scale(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
return data
    def read_namespaced_deployment_scale_with_http_info(self, name, namespace, **kwargs):
        """GET /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale.

        :param name: name of the Scale (required).
        :param namespace: object name and auth scope (required).
        :param pretty: optional; pretty-print the output.
        :return: response deserialized as ``AppsV1beta1Scale``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'pretty']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_deployment_scale" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_scale`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_scale`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Scale',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def read_namespaced_deployment_status(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_deployment_status_with_http_info(name, namespace, **kwargs)
return data
    def read_namespaced_deployment_status_with_http_info(self, name, namespace, **kwargs):
        """GET /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status.

        :param name: name of the Deployment (required).
        :param namespace: object name and auth scope (required).
        :param pretty: optional; pretty-print the output.
        :return: response deserialized as ``AppsV1beta1Deployment``.
        :raises TypeError: on an unexpected keyword argument.
        :raises ValueError: if a required parameter is missing or None.
        """
        all_params = ['name', 'namespace', 'pretty']
        # Framework-level options accepted by every generated API method.
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # Fail fast on typos rather than silently dropping unknown kwargs.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method read_namespaced_deployment_status" % key
                )
            params[key] = val
        del params['kwargs']
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `read_namespaced_deployment_status`")
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_deployment_status`")
        collection_formats = {}
        path_params = {}
        if 'name' in params:
            path_params['name'] = params['name']
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        query_params = []
        if 'pretty' in params:
            query_params.append(('pretty', params['pretty']))
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        auth_settings = ['BearerToken']
        return self.api_client.call_api('/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AppsV1beta1Deployment',
                                        auth_settings=auth_settings,
                                        async_req=params.get('async_req'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def read_namespaced_stateful_set(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_stateful_set_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_stateful_set_with_http_info(self, name, namespace, **kwargs):
    """read the specified StatefulSet.

    Issues ``GET /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}``
    via ``self.api_client.call_api``; the response is deserialized as
    ``V1beta1StatefulSet`` (or returned raw / as an async handle, per the
    control kwargs understood by the ApiClient).

    :param name: name of the StatefulSet (required)
    :param namespace: object namespace (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param exact: optional query parameter, sent as ``exact``
    :param export: optional query parameter, sent as ``export``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name`` or ``namespace`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'exact', 'export', 'async_req',
             '_return_http_data_only', '_preload_content',
             '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_stateful_set" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set`")
    # Only explicitly supplied optional parameters become query params,
    # in the same order the generated code appended them.
    query_params = []
    for wire_name in ('pretty', 'exact', 'export'):
        if wire_name in kwargs:
            query_params.append((wire_name, kwargs[wire_name]))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'GET',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1StatefulSet',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def read_namespaced_stateful_set_scale(self, name, namespace, **kwargs):
    """read scale of the specified StatefulSet.

    Data-only convenience wrapper around
    read_namespaced_stateful_set_scale_with_http_info; the delegate's
    return value is passed straight back for both the async and the
    synchronous path.
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_stateful_set_scale_with_http_info(
        name, namespace, **kwargs)
def read_namespaced_stateful_set_scale_with_http_info(self, name, namespace, **kwargs):
    """read scale of the specified StatefulSet.

    Issues ``GET /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale``
    via ``self.api_client.call_api``; the response is deserialized as
    ``AppsV1beta1Scale``.

    :param name: name of the Scale object (required)
    :param namespace: object namespace (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name`` or ``namespace`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_stateful_set_scale" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_scale`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_scale`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'GET',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='AppsV1beta1Scale',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def read_namespaced_stateful_set_status(self, name, namespace, **kwargs):
    """read status of the specified StatefulSet.

    Data-only convenience wrapper around
    read_namespaced_stateful_set_status_with_http_info; the delegate's
    return value is passed straight back in both the async and the
    synchronous case.
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.read_namespaced_stateful_set_status_with_http_info(
        name, namespace, **kwargs)
def read_namespaced_stateful_set_status_with_http_info(self, name, namespace, **kwargs):
    """read status of the specified StatefulSet.

    Issues ``GET /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status``
    via ``self.api_client.call_api``; the response is deserialized as
    ``V1beta1StatefulSet``.

    :param name: name of the StatefulSet (required)
    :param namespace: object namespace (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name`` or ``namespace`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method read_namespaced_stateful_set_status" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `read_namespaced_stateful_set_status`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_stateful_set_status`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'GET',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='V1beta1StatefulSet',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_controller_revision(self, name, namespace, body, **kwargs):
    """replace the specified ControllerRevision.

    Data-only convenience wrapper: delegates to
    replace_namespaced_controller_revision_with_http_info and returns its
    result unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_controller_revision_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_controller_revision_with_http_info(self, name, namespace, body, **kwargs):
    """replace the specified ControllerRevision.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``V1beta1ControllerRevision``.

    :param name: name of the ControllerRevision (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_controller_revision" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_controller_revision`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_controller_revision`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_controller_revision`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/controllerrevisions/{name}', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='V1beta1ControllerRevision',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_deployment(self, name, namespace, body, **kwargs):
    """replace the specified Deployment.

    Data-only convenience wrapper: delegates to
    replace_namespaced_deployment_with_http_info and returns its result
    unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_deployment_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_deployment_with_http_info(self, name, namespace, body, **kwargs):
    """replace the specified Deployment.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``AppsV1beta1Deployment``.

    :param name: name of the Deployment (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_deployment" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='AppsV1beta1Deployment',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_deployment_scale(self, name, namespace, body, **kwargs):
    """replace scale of the specified Deployment.

    Data-only convenience wrapper: delegates to
    replace_namespaced_deployment_scale_with_http_info and returns its
    result unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_deployment_scale_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_deployment_scale_with_http_info(self, name, namespace, body, **kwargs):
    """replace scale of the specified Deployment.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``AppsV1beta1Scale``.

    :param name: name of the Scale object (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_deployment_scale" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_scale`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_scale`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_scale`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/scale', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='AppsV1beta1Scale',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_deployment_status(self, name, namespace, body, **kwargs):
    """replace status of the specified Deployment.

    Data-only convenience wrapper: delegates to
    replace_namespaced_deployment_status_with_http_info and returns its
    result unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_deployment_status_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_deployment_status_with_http_info(self, name, namespace, body, **kwargs):
    """replace status of the specified Deployment.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``AppsV1beta1Deployment``.

    :param name: name of the Deployment (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_deployment_status" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_deployment_status`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_deployment_status`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_deployment_status`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/deployments/{name}/status', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='AppsV1beta1Deployment',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_stateful_set(self, name, namespace, body, **kwargs):
    """replace the specified StatefulSet.

    Data-only convenience wrapper: delegates to
    replace_namespaced_stateful_set_with_http_info and returns its result
    unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_stateful_set_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_stateful_set_with_http_info(self, name, namespace, body, **kwargs):
    """replace the specified StatefulSet.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``V1beta1StatefulSet``.

    :param name: name of the StatefulSet (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_stateful_set" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='V1beta1StatefulSet',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
    """replace scale of the specified StatefulSet.

    Data-only convenience wrapper: delegates to
    replace_namespaced_stateful_set_scale_with_http_info and returns its
    result unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_stateful_set_scale_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_stateful_set_scale_with_http_info(self, name, namespace, body, **kwargs):
    """replace scale of the specified StatefulSet.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``AppsV1beta1Scale``.

    :param name: name of the Scale object (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_stateful_set_scale" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_scale`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_scale`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_scale`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/scale', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='AppsV1beta1Scale',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
def replace_namespaced_stateful_set_status(self, name, namespace, body, **kwargs):
    """replace status of the specified StatefulSet.

    Data-only convenience wrapper: delegates to
    replace_namespaced_stateful_set_status_with_http_info and returns its
    result unchanged (async handle or deserialized object alike).
    """
    # The plain method never needs the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    return self.replace_namespaced_stateful_set_status_with_http_info(
        name, namespace, body, **kwargs)
def replace_namespaced_stateful_set_status_with_http_info(self, name, namespace, body, **kwargs):
    """replace status of the specified StatefulSet.

    Issues ``PUT /apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status``
    with ``body`` as the request payload via ``self.api_client.call_api``;
    the response is deserialized as ``V1beta1StatefulSet``.

    :param name: name of the StatefulSet (required)
    :param namespace: object namespace (required)
    :param body: replacement object, serialized by the ApiClient (required)
    :param pretty: optional query parameter, sent as ``pretty``
    :param dry_run: optional query parameter, sent as ``dryRun``
    :raises TypeError: for an unknown keyword argument
    :raises ValueError: if ``name``, ``namespace`` or ``body`` is None
    """
    # Validation rewritten without six.iteritems()/locals() mutation:
    # iterating **kwargs directly is equivalent and drops the py2 shim.
    known = ['pretty', 'dry_run', 'async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout']
    for key in kwargs:
        if key not in known:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method replace_namespaced_stateful_set_status" % key
            )
    if name is None:
        raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_stateful_set_status`")
    if namespace is None:
        raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_stateful_set_status`")
    if body is None:
        raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_stateful_set_status`")
    query_params = []
    if 'pretty' in kwargs:
        query_params.append(('pretty', kwargs['pretty']))
    if 'dry_run' in kwargs:
        # Wire name is camelCase even though the Python kwarg is snake_case.
        query_params.append(('dryRun', kwargs['dry_run']))
    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json', 'application/yaml',
             'application/vnd.kubernetes.protobuf']),
        'Content-Type': self.api_client.select_header_content_type(['*/*']),
    }
    return self.api_client.call_api(
        '/apis/apps/v1beta1/namespaces/{namespace}/statefulsets/{name}/status', 'PUT',
        {'name': name, 'namespace': namespace},
        query_params,
        header_params,
        body=body,
        post_params=[],
        files={},
        response_type='V1beta1StatefulSet',
        auth_settings=['BearerToken'],
        async_req=kwargs.get('async_req'),
        _return_http_data_only=kwargs.get('_return_http_data_only'),
        _preload_content=kwargs.get('_preload_content', True),
        _request_timeout=kwargs.get('_request_timeout'),
        collection_formats={})
| true | true |
f727dd8d7d3f08eb3d6179104991040375e54f90 | 5,354 | py | Python | test/integration/smoke/test_async_job.py | saliven1970/cloudstack | 4617be458387421bbbfc120c1f054c9939ba52eb | [
"Apache-2.0"
] | 2 | 2021-10-31T01:04:26.000Z | 2021-11-08T09:43:30.000Z | test/integration/smoke/test_async_job.py | saliven1970/cloudstack | 4617be458387421bbbfc120c1f054c9939ba52eb | [
"Apache-2.0"
] | 20 | 2020-12-19T22:32:23.000Z | 2022-02-01T01:07:06.000Z | test/integration/smoke/test_async_job.py | saliven1970/cloudstack | 4617be458387421bbbfc120c1f054c9939ba52eb | [
"Apache-2.0"
] | 2 | 2016-11-10T16:29:26.000Z | 2019-05-20T12:23:35.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import ServiceOffering, DiskOffering, Account, VirtualMachine,\
queryAsyncJobResult, PASS
from marvin.lib.common import get_domain, get_zone, get_test_template
from pytz import timezone
class TestAsyncJob(cloudstackTestCase):
    """
    Test queryAsyncJobResult
    """

    @classmethod
    def setUpClass(cls):
        """Provision class-wide fixtures: API client, zone/domain/template and offerings."""
        cls.testClient = super(TestAsyncJob, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.template = get_test_template(
            cls.api_client,
            cls.zone.id,
            cls.hypervisor
        )
        # Create service, disk offerings etc
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.testdata["disk_offering"]
        )
        # Class-scoped resources, removed once in tearDownClass.
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering
        ]

    @classmethod
    def tearDownClass(cls):
        """Release the class-scoped offerings created in setUpClass."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as exception:
            # Re-raise so a failed cleanup is visible in the test report.
            raise Exception("Warning: Exception during cleanup : %s" % exception)

    def setUp(self):
        """Create per-test clients and a fresh account to deploy the VM under."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.testdata["virtual_machine"]["zoneid"] = self.zone.id
        self.testdata["virtual_machine"]["template"] = self.template.id
        self.testdata["iso"]["zoneid"] = self.zone.id
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )
        # Per-test resources, removed in tearDown.
        self.cleanup = [self.account]

    def tearDown(self):
        """Best-effort removal of per-test resources; failures are only logged."""
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as exception:
            self.debug("Warning! Exception in tearDown: %s" % exception)

    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"], required_hardware="false")
    def test_query_async_job_result(self):
        """
        Test queryAsyncJobResult API for expected values
        """
        self.debug("Deploying instance in the account: %s" %
                   self.account.name)
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            diskofferingid=self.disk_offering.id,
            hypervisor=self.hypervisor
        )
        response = virtual_machine.getState(
            self.apiclient,
            VirtualMachine.RUNNING)
        self.assertEqual(response[0], PASS, response[1])
        # Query the async job that backed the deploy, then compare the API
        # view against the raw async_job DB row.
        cmd = queryAsyncJobResult.queryAsyncJobResultCmd()
        cmd.jobid = virtual_machine.jobid
        cmd_response = self.apiclient.queryAsyncJobResult(cmd)
        db_result = self.dbclient.execute("select * from async_job where uuid='%s'" %
                                          virtual_machine.jobid)
        # verify that 'completed' value from api equals 'removed' db column value
        completed = cmd_response.completed
        # NOTE(review): column index 17 is assumed to be async_job.removed and
        # to hold a naive datetime stored in UTC — confirm against the schema.
        removed = timezone('UTC').localize(db_result[0][17])
        removed = removed.strftime("%Y-%m-%dT%H:%M:%S%z")
        self.assertEqual(completed, removed,
                         "Expected 'completed' timestamp value %s to be equal to "
                         "'removed' db column value %s." % (completed, removed))
        # verify that api job_status value equals db job_status value
        # NOTE(review): column index 8 is assumed to be async_job.job_status.
        jobstatus_db = db_result[0][8]
        jobstatus_api = cmd_response.jobstatus
        self.assertEqual(jobstatus_api, jobstatus_db,
                         "Expected 'jobstatus' api value %s to be equal to "
                         "'job_status' db column value %s." % (jobstatus_api, jobstatus_db))
| 39.367647 | 92 | 0.649234 |
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.utils import cleanup_resources
from marvin.lib.base import ServiceOffering, DiskOffering, Account, VirtualMachine,\
queryAsyncJobResult, PASS
from marvin.lib.common import get_domain, get_zone, get_test_template
from pytz import timezone
class TestAsyncJob(cloudstackTestCase):
    """Integration test for the queryAsyncJobResult API.

    Deploys a VM, queries the async job that created it, and cross-checks
    the API response against the ``async_job`` row in the database.
    """
    @classmethod
    def setUpClass(cls):
        """Create the shared zone/template/offering fixtures once per class."""
        cls.testClient = super(TestAsyncJob, cls).getClsTestClient()
        cls.api_client = cls.testClient.getApiClient()
        cls.testdata = cls.testClient.getParsedTestDataConfig()
        cls.domain = get_domain(cls.api_client)
        cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
        cls.hypervisor = cls.testClient.getHypervisorInfo()
        cls.template = get_test_template(
            cls.api_client,
            cls.zone.id,
            cls.hypervisor
        )
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.testdata["service_offering"]
        )
        cls.disk_offering = DiskOffering.create(
            cls.api_client,
            cls.testdata["disk_offering"]
        )
        # Resources torn down in tearDownClass (reverse of creation).
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering
        ]
    @classmethod
    def tearDownClass(cls):
        """Release the class-level fixtures; surface cleanup failures loudly."""
        try:
            cleanup_resources(cls.api_client, cls._cleanup)
        except Exception as exception:
            raise Exception("Warning: Exception during cleanup : %s" % exception)
    def setUp(self):
        """Create a fresh account (and DB connection) for each test."""
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.hypervisor = self.testClient.getHypervisorInfo()
        self.testdata["virtual_machine"]["zoneid"] = self.zone.id
        self.testdata["virtual_machine"]["template"] = self.template.id
        self.testdata["iso"]["zoneid"] = self.zone.id
        self.account = Account.create(
            self.apiclient,
            self.testdata["account"],
            domainid=self.domain.id
        )
        self.cleanup = [self.account]
    def tearDown(self):
        """Best-effort removal of per-test resources; failures are only logged."""
        try:
            self.debug("Cleaning up the resources")
            cleanup_resources(self.apiclient, self.cleanup)
            self.debug("Cleanup complete!")
        except Exception as exception:
            self.debug("Warning! Exception in tearDown: %s" % exception)
    @attr(tags=["advanced", "eip", "advancedns", "basic", "sg"], required_hardware="false")
    def test_query_async_job_result(self):
        """Deploy a VM and verify queryAsyncJobResult matches the DB row."""
        self.debug("Deploying instance in the account: %s" %
                   self.account.name)
        virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            diskofferingid=self.disk_offering.id,
            hypervisor=self.hypervisor
        )
        response = virtual_machine.getState(
            self.apiclient,
            VirtualMachine.RUNNING)
        self.assertEqual(response[0], PASS, response[1])
        cmd = queryAsyncJobResult.queryAsyncJobResultCmd()
        cmd.jobid = virtual_machine.jobid
        cmd_response = self.apiclient.queryAsyncJobResult(cmd)
        db_result = self.dbclient.execute("select * from async_job where uuid='%s'" %
                                          virtual_machine.jobid)
        # Verify the API 'completed' timestamp equals the DB 'removed' column
        # (positional column 17 of async_job); DB value is naive UTC.
        completed = cmd_response.completed
        removed = timezone('UTC').localize(db_result[0][17])
        removed = removed.strftime("%Y-%m-%dT%H:%M:%S%z")
        self.assertEqual(completed, removed,
                         "Expected 'completed' timestamp value %s to be equal to "
                         "'removed' db column value %s." % (completed, removed))
        # Verify the API jobstatus equals the DB 'job_status' column (index 8).
        jobstatus_db = db_result[0][8]
        jobstatus_api = cmd_response.jobstatus
        self.assertEqual(jobstatus_api, jobstatus_db,
                         "Expected 'jobstatus' api value %s to be equal to "
                         "'job_status' db column value %s." % (jobstatus_api, jobstatus_db))
| true | true |
f727ddec1d4bf396d3560c014f5899414eb5618e | 180 | py | Python | src/app/blueprints/main/controllers.py | Dev-Nebe/student-hub | fe6718aced065dab4bb8d92372bfe098c1a75137 | [
"MIT"
] | 3 | 2020-05-25T19:36:11.000Z | 2021-09-15T09:05:57.000Z | src/app/blueprints/main/controllers.py | Dev-Nebe/student-hub | fe6718aced065dab4bb8d92372bfe098c1a75137 | [
"MIT"
] | 1 | 2021-04-30T21:11:44.000Z | 2021-04-30T21:11:44.000Z | src/app/blueprints/main/controllers.py | Dev-Nebe/student-hub | fe6718aced065dab4bb8d92372bfe098c1a75137 | [
"MIT"
] | null | null | null | from flask import Blueprint, jsonify
# Blueprint grouping the application's top-level routes.
main_bp = Blueprint('main', __name__)


@main_bp.route('/')
def api_home():
    """Root endpoint: greet API consumers with a JSON welcome message."""
    payload = {"message": "Welcome to the Student Hub API"}
    return jsonify(payload)
| 20 | 65 | 0.705556 | from flask import Blueprint, jsonify
# Blueprint grouping the application's top-level routes.
main_bp = Blueprint('main', __name__)


@main_bp.route('/')
def api_home():
    """Root endpoint: greet API consumers with a JSON welcome message."""
    payload = {"message": "Welcome to the Student Hub API"}
    return jsonify(payload)
| true | true |
f727e110b2656c38f2c08d157ab83662cd7bb00d | 3,506 | py | Python | tools/validators/instance_validator/tests/instance_parser_test.py | HTKshimo/digitalbuildings | a6dad0282cac7dbe2a31d1b3c9a6dc9adddb5177 | [
"Apache-2.0"
] | null | null | null | tools/validators/instance_validator/tests/instance_parser_test.py | HTKshimo/digitalbuildings | a6dad0282cac7dbe2a31d1b3c9a6dc9adddb5177 | [
"Apache-2.0"
] | null | null | null | tools/validators/instance_validator/tests/instance_parser_test.py | HTKshimo/digitalbuildings | a6dad0282cac7dbe2a31d1b3c9a6dc9adddb5177 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests tools.validators.instance_validator.instance_parser"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import instance_parser
from absl.testing import absltest
# Root directory holding the YAML fixtures exercised by the tests below.
_TESTCASE_PATH = os.path.join('.', 'tests', 'fake_instances')
class ParserTest(absltest.TestCase):
  """Tests for ``instance_parser.parse_yaml`` over good and bad fixtures.

  ``parse_yaml`` is expected to abort via ``sys.exit()`` (i.e. raise
  ``SystemExit``) on malformed input and to return a parsed, non-None
  object on well-formed input.
  """

  def _parse(self, subdirectory, filename):
    """Parse a fixture file under ``tests/fake_instances/<subdirectory>/``."""
    return instance_parser.parse_yaml(
        os.path.join(_TESTCASE_PATH, subdirectory, filename))

  def _assert_parsing_fails(self, filename):
    """Assert that parsing a 'BAD' fixture aborts with SystemExit.

    The original tests also asserted ``parse`` was None *inside* the
    ``assertRaises`` block; that assertion could never execute (the call
    above it raises first) and has been removed as dead code.
    """
    with self.assertRaises(SystemExit):
      self._parse('BAD', filename)

  def _assert_parsing_succeeds(self, filename):
    """Assert that parsing a 'GOOD' fixture returns a non-None result."""
    self.assertIsNotNone(self._parse('GOOD', filename))

  def testInstanceValidatorDetectDuplicateKeys(self):
    self._assert_parsing_fails('bad_duplicate_keys.yaml')

  def testInstanceValidatorDetectMissingColon(self):
    self._assert_parsing_fails('bad_missing_colon.yaml')

  def testInstanceValidatorDetectImproperSpacing(self):
    self._assert_parsing_fails('bad_spacing.yaml')

  def testInstanceValidatorDetectImproperTabbing(self):
    self._assert_parsing_fails('bad_tabbing.yaml')

  def testInstanceValidatorParseProperFormat(self):
    self._assert_parsing_succeeds('good_building_type.yaml')

  def testInstanceValidatorParseProperConnections(self):
    self._assert_parsing_succeeds('good_building_connections.yaml')

  def testInstanceValidatorDetectImproperTranslationCompliance(self):
    self._assert_parsing_fails('bad_translation_compliant.yaml')

  def testInstanceValidatorDetectImproperTranslationKeys(self):
    self._assert_parsing_fails('bad_translation_keys.yaml')

  def testInstanceValidatorDetectImproperUnitsKeys(self):
    self._assert_parsing_fails('bad_translation_units_format.yaml')
# Standard absl entry point for running this test module directly.
if __name__ == '__main__':
  absltest.main()
| 34.372549 | 74 | 0.666572 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import instance_parser
from absl.testing import absltest
# Root directory holding the YAML fixtures exercised by the tests below.
_TESTCASE_PATH = os.path.join('.', 'tests', 'fake_instances')
class ParserTest(absltest.TestCase):
  """Tests for ``instance_parser.parse_yaml`` over good and bad fixtures.

  ``parse_yaml`` is expected to abort via ``sys.exit()`` (i.e. raise
  ``SystemExit``) on malformed input and to return a parsed, non-None
  object on well-formed input.
  """

  def _parse(self, subdirectory, filename):
    """Parse a fixture file under ``tests/fake_instances/<subdirectory>/``."""
    return instance_parser.parse_yaml(
        os.path.join(_TESTCASE_PATH, subdirectory, filename))

  def _assert_parsing_fails(self, filename):
    """Assert that parsing a 'BAD' fixture aborts with SystemExit.

    The original tests also asserted ``parse`` was None *inside* the
    ``assertRaises`` block; that assertion could never execute (the call
    above it raises first) and has been removed as dead code.
    """
    with self.assertRaises(SystemExit):
      self._parse('BAD', filename)

  def _assert_parsing_succeeds(self, filename):
    """Assert that parsing a 'GOOD' fixture returns a non-None result."""
    self.assertIsNotNone(self._parse('GOOD', filename))

  def testInstanceValidatorDetectDuplicateKeys(self):
    self._assert_parsing_fails('bad_duplicate_keys.yaml')

  def testInstanceValidatorDetectMissingColon(self):
    self._assert_parsing_fails('bad_missing_colon.yaml')

  def testInstanceValidatorDetectImproperSpacing(self):
    self._assert_parsing_fails('bad_spacing.yaml')

  def testInstanceValidatorDetectImproperTabbing(self):
    self._assert_parsing_fails('bad_tabbing.yaml')

  def testInstanceValidatorParseProperFormat(self):
    self._assert_parsing_succeeds('good_building_type.yaml')

  def testInstanceValidatorParseProperConnections(self):
    self._assert_parsing_succeeds('good_building_connections.yaml')

  def testInstanceValidatorDetectImproperTranslationCompliance(self):
    self._assert_parsing_fails('bad_translation_compliant.yaml')

  def testInstanceValidatorDetectImproperTranslationKeys(self):
    self._assert_parsing_fails('bad_translation_keys.yaml')

  def testInstanceValidatorDetectImproperUnitsKeys(self):
    self._assert_parsing_fails('bad_translation_units_format.yaml')
# Standard absl entry point for running this test module directly.
if __name__ == '__main__':
  absltest.main()
| true | true |
f727e15a3cb3d79ce95f8fecbf0b1a12b57e08b8 | 1,268 | py | Python | prova/relazioni/views.py | mary023010/prova_django | a69a37a4f26f21018cef48e5d637dd630ca68877 | [
"MIT"
] | null | null | null | prova/relazioni/views.py | mary023010/prova_django | a69a37a4f26f21018cef48e5d637dd630ca68877 | [
"MIT"
] | null | null | null | prova/relazioni/views.py | mary023010/prova_django | a69a37a4f26f21018cef48e5d637dd630ca68877 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.views.generic.detail import DetailView
from .models import Fly,Airport
from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url='/login/')
def selection_airport(request):
    """Show the flight-booking form listing every airport (login required)."""
    return render(
        request,
        'relazioni/form_prenotazione.html',
        {'airports': Airport.objects.all()},
    )
def visualizza_voli(request):
    """List flights matching a POSTed search (departure, arrival, date).

    On any non-POST request an error message is flashed and the template
    is rendered with an empty flight list.
    """
    # BUG FIX: ``context`` was previously assigned only inside the POST
    # branch, so a plain GET crashed with UnboundLocalError on the final
    # render() call.  Default to an empty flight list.
    context = {'voli': []}
    if request.method == 'POST':
        aeroporto_partenza = request.POST['a_partenza']
        aeroporto_arrivo = request.POST['a_arrivo']
        data = request.POST['data']
        ls_voli = Fly.objects.filter(
            Q(aeroporto_partenza=aeroporto_partenza)
            & Q(aeroporto_arrivo=aeroporto_arrivo)
            & Q(data_partenza=data))
        messages.success(request, 'Ecco tutti i voli disponibili')
        voli = []
        # NOTE(review): each match is re-fetched by ``code_volo``; this looks
        # redundant (``ls_voli`` already holds Fly rows) but may depend on
        # Fly.__str__ semantics -- confirm before simplifying.
        for index in ls_voli:
            volo = Fly.objects.get(code_volo=index)
            voli.append(volo)
        context = {
            'voli': voli,
        }
    else:
        messages.error(request, 'Non ci sono voli disponibili!')
    return render(request, 'relazioni/voli.html', context)
| 31.7 | 141 | 0.689274 | from django.shortcuts import render, redirect
from django.views.generic.detail import DetailView
from .models import Fly,Airport
from django.db.models import Q
from django.contrib import messages
from django.contrib.auth.decorators import login_required
@login_required(login_url='/login/')
def selection_airport(request):
    """Show the flight-booking form listing every airport (login required)."""
    return render(
        request,
        'relazioni/form_prenotazione.html',
        {'airports': Airport.objects.all()},
    )
def visualizza_voli(request):
    """List flights matching a POSTed search (departure, arrival, date).

    On any non-POST request an error message is flashed and the template
    is rendered with an empty flight list.
    """
    # BUG FIX: ``context`` was previously assigned only inside the POST
    # branch, so a plain GET crashed with UnboundLocalError on the final
    # render() call.  Default to an empty flight list.
    context = {'voli': []}
    if request.method == 'POST':
        aeroporto_partenza = request.POST['a_partenza']
        aeroporto_arrivo = request.POST['a_arrivo']
        data = request.POST['data']
        ls_voli = Fly.objects.filter(
            Q(aeroporto_partenza=aeroporto_partenza)
            & Q(aeroporto_arrivo=aeroporto_arrivo)
            & Q(data_partenza=data))
        messages.success(request, 'Ecco tutti i voli disponibili')
        voli = []
        # NOTE(review): each match is re-fetched by ``code_volo``; this looks
        # redundant (``ls_voli`` already holds Fly rows) but may depend on
        # Fly.__str__ semantics -- confirm before simplifying.
        for index in ls_voli:
            volo = Fly.objects.get(code_volo=index)
            voli.append(volo)
        context = {
            'voli': voli,
        }
    else:
        messages.error(request, 'Non ci sono voli disponibili!')
    return render(request, 'relazioni/voli.html', context)
| true | true |
f727e2c8e1a28f98877f1994152992973984a11f | 879 | py | Python | recipes/models.py | asis2016/momo-ristorante-v1 | d46c36d1b92212ade34d781c4e2adc91cb52cac7 | [
"MIT"
] | null | null | null | recipes/models.py | asis2016/momo-ristorante-v1 | d46c36d1b92212ade34d781c4e2adc91cb52cac7 | [
"MIT"
] | null | null | null | recipes/models.py | asis2016/momo-ristorante-v1 | d46c36d1b92212ade34d781c4e2adc91cb52cac7 | [
"MIT"
] | null | null | null | import uuid
from django.db import models
from django.urls import reverse
from core.models import (Authorable,
Titleable,
TimeStampedModel)
class Recipe(Authorable, Titleable, TimeStampedModel):
    """
    Recipe model as of v.1.0.

    Mixes in authorship (``Authorable``), a title (``Titleable``) and
    created/updated timestamps (``TimeStampedModel``).
    """
    # Cuisine (code, label) choice tuples.
    # NOTE(review): no field below declares ``choices=CATEGORY`` -- confirm
    # whether a category field was intended.
    CATEGORY = (
        ('WS', 'Western'),
        ('TB', 'Tibetan'),
        ('NP', 'Nepalese')
    )
    # UUID primary key generated client-side; hidden from edit forms.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Short optional free-text excerpt (max 200 chars).
    excerpt = models.TextField(max_length=200, blank=True)
    # Full recipe body (optional).
    content = models.TextField(blank=True)
    # Uploaded image; falls back to the bundled 'default.png'.
    image = models.ImageField(upload_to='', default='default.png', blank=True)
    # Presumably an externally-hosted image URL -- verify against templates.
    image_url = models.CharField(max_length=200, blank=True)
    def __str__(self):
        """Human-readable representation: the recipe title."""
        return str(self.title)
    def get_absolute_url(self):
        """Canonical detail-page URL for this recipe."""
        return reverse('dashboard:recipe_detail', args=[str(self.id)])
| 28.354839 | 79 | 0.633675 | import uuid
from django.db import models
from django.urls import reverse
from core.models import (Authorable,
Titleable,
TimeStampedModel)
class Recipe(Authorable, Titleable, TimeStampedModel):
    """Recipe model: author, title and timestamps come from the mixins."""
    # Cuisine (code, label) choice tuples.
    # NOTE(review): no field below declares ``choices=CATEGORY`` -- confirm
    # whether a category field was intended.
    CATEGORY = (
        ('WS', 'Western'),
        ('TB', 'Tibetan'),
        ('NP', 'Nepalese')
    )
    # UUID primary key generated client-side; hidden from edit forms.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    # Short optional free-text excerpt (max 200 chars).
    excerpt = models.TextField(max_length=200, blank=True)
    # Full recipe body (optional).
    content = models.TextField(blank=True)
    # Uploaded image; falls back to the bundled 'default.png'.
    image = models.ImageField(upload_to='', default='default.png', blank=True)
    # Presumably an externally-hosted image URL -- verify against templates.
    image_url = models.CharField(max_length=200, blank=True)
    def __str__(self):
        """Human-readable representation: the recipe title."""
        return str(self.title)
    def get_absolute_url(self):
        """Canonical detail-page URL for this recipe."""
        return reverse('dashboard:recipe_detail', args=[str(self.id)])
| true | true |
f727e34363c19a7e7e17351055f27d2efc065ca3 | 1,932 | py | Python | src/bot/keyboards/claim_parts.py | nchursin/claimant | 890ce1a3ced8db9d2e2fbddb8a3207e82ac05326 | [
"BSD-3-Clause"
] | 3 | 2022-03-03T19:10:25.000Z | 2022-03-03T19:57:15.000Z | src/bot/keyboards/claim_parts.py | nchursin/claimant | 890ce1a3ced8db9d2e2fbddb8a3207e82ac05326 | [
"BSD-3-Clause"
] | 9 | 2022-03-03T18:56:37.000Z | 2022-03-29T18:34:02.000Z | src/bot/keyboards/claim_parts.py | nchursin/claimant | 890ce1a3ced8db9d2e2fbddb8a3207e82ac05326 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T11:59:11.000Z | 2022-03-04T11:59:11.000Z | from typing import List
from aiogram.types import KeyboardButton, ReplyKeyboardMarkup
from keyboards import emojis
from repository import Repository
# Ordered names of the sections a claim document is composed of.
PART_NAMES: List[str] = ["head", "story", "essence", "proofs", "claims", "additions"]
def get_claim_parts_kb(user_id: int) -> ReplyKeyboardMarkup:
    """Build the claim-section keyboard, appending a check mark to every
    section the user has already filled in."""
    status = get_claim_parts_status(user_id)
    # (emoji, visible label, key in the status dict) for each claim section.
    sections = (
        (emojis.top_hat, "шапка", "head"),
        (emojis.speech_balloon, "фабула", "story"),
        (emojis.key, "суть нарушения", "essence"),
        (emojis.page_with_curl, "доказательства", "proofs"),
        (emojis.index_pointing_up, "требования", "claims"),
        (emojis.card_index_dividers, "приложения", "additions"),
    )
    keyboard = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    for icon, label, part in sections:
        done_mark = emojis.check_mark if status[part] is True else ''
        keyboard.add(KeyboardButton(f"{icon} {label} {done_mark}"))
    keyboard.row(KeyboardButton(f"{emojis.left_arrow} к шаблонам"),
                 KeyboardButton(f"{emojis.inbox_tray} получить"))
    return keyboard
def get_claim_parts_status(user_id: int) -> dict:
    """Return ``{part_name: bool}`` marking which claim sections the user
    has already filled in, according to the stored claim data."""
    claim_data: dict = Repository().get_claim_data(user_id)
    if "claim_data" not in claim_data.keys():
        # Nothing stored yet: every section is still empty.
        return {pn: False for pn in PART_NAMES}
    filled_parts = claim_data["claim_data"].keys()
    return {part: part in filled_parts for part in PART_NAMES}
| 49.538462 | 137 | 0.710145 | from typing import List
from aiogram.types import KeyboardButton, ReplyKeyboardMarkup
from keyboards import emojis
from repository import Repository
# Ordered names of the sections a claim document is composed of.
PART_NAMES: List[str] = ["head", "story", "essence", "proofs", "claims", "additions"]
def get_claim_parts_kb(user_id: int) -> ReplyKeyboardMarkup:
    """Build the claim-section keyboard, appending a check mark to every
    section the user has already filled in."""
    status = get_claim_parts_status(user_id)
    # (emoji, visible label, key in the status dict) for each claim section.
    sections = (
        (emojis.top_hat, "шапка", "head"),
        (emojis.speech_balloon, "фабула", "story"),
        (emojis.key, "суть нарушения", "essence"),
        (emojis.page_with_curl, "доказательства", "proofs"),
        (emojis.index_pointing_up, "требования", "claims"),
        (emojis.card_index_dividers, "приложения", "additions"),
    )
    keyboard = ReplyKeyboardMarkup(resize_keyboard=True, one_time_keyboard=True)
    for icon, label, part in sections:
        done_mark = emojis.check_mark if status[part] is True else ''
        keyboard.add(KeyboardButton(f"{icon} {label} {done_mark}"))
    keyboard.row(KeyboardButton(f"{emojis.left_arrow} к шаблонам"),
                 KeyboardButton(f"{emojis.inbox_tray} получить"))
    return keyboard
def get_claim_parts_status(user_id: int) -> dict:
    """Return ``{part_name: bool}`` marking which claim sections the user
    has already filled in, according to the stored claim data."""
    claim_data: dict = Repository().get_claim_data(user_id)
    if "claim_data" not in claim_data.keys():
        # Nothing stored yet: every section is still empty.
        return {pn: False for pn in PART_NAMES}
    filled_parts = claim_data["claim_data"].keys()
    return {part: part in filled_parts for part in PART_NAMES}
| true | true |
f727e3458cf9ec525e06fec4d8066046877248f1 | 5,378 | py | Python | modules/REPORT_RESULTS/scripts/select-motifs.py | gruber-sciencelab/MAPP | 81563f676b284c5b283a193a698ce618c044d3b5 | [
"Apache-2.0"
] | null | null | null | modules/REPORT_RESULTS/scripts/select-motifs.py | gruber-sciencelab/MAPP | 81563f676b284c5b283a193a698ce618c044d3b5 | [
"Apache-2.0"
] | null | null | null | modules/REPORT_RESULTS/scripts/select-motifs.py | gruber-sciencelab/MAPP | 81563f676b284c5b283a193a698ce618c044d3b5 | [
"Apache-2.0"
] | 1 | 2022-01-15T04:39:30.000Z | 2022-01-15T04:39:30.000Z | """
##############################################################################
#
# Select top N distinct motifs with highest (statistically significant)
# activity Z-score (for every site separately)
#
# AUTHOR: Maciej_Bak
# AFFILIATION: University_of_Basel
# AFFILIATION: Swiss_Institute_of_Bioinformatics
# CONTACT: maciej.bak@unibas.ch
# CREATED: 04-06-2020
# LICENSE: Apache_2.0
#
##############################################################################
"""
# imports
import time
import logging
import logging.handlers
from argparse import ArgumentParser, RawTextHelpFormatter
import os
import pandas as pd
def parse_arguments():
    """Build and return the command-line argument parser for this script."""
    parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        "-v",
        "--verbosity",
        dest="verbosity",
        choices=("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
        default="ERROR",
        help="Verbosity/Log level. Defaults to ERROR",
    )
    parser.add_argument(
        "-l", "--logfile", dest="logfile", help="Store log to this file."
    )
    parser.add_argument(
        "--topN-motifs",
        dest="N",
        # Effectively "all statistically significant motifs" by default.
        default=1000000,
        required=False,
        help="Number of top motifs to select.",
    )
    # The six required I/O paths share the same shape; declare them as data.
    path_arguments = (
        ("--infile-splicing-3ss", "results_3ss",
         "Annotated results table (3ss)."),
        ("--infile-splicing-5ss", "results_5ss",
         "Annotated results table (5ss)."),
        ("--infile-polyadenylation-pas", "results_pas",
         "Annotated results table (pas)."),
        ("--outfile-splicing-3ss-motifs", "motifs_3ss",
         "Path for the text file with top motifs (3ss)."),
        ("--outfile-splicing-5ss-motifs", "motifs_5ss",
         "Path for the text file with top motifs (5ss)."),
        ("--outfile-polyadenylation-pas-motifs", "motifs_pas",
         "Path for the text file with top motifs (pas)."),
    )
    for flag, destination, help_text in path_arguments:
        parser.add_argument(flag, dest=destination, required=True, help=help_text)
    return parser
##############################################################################
def main():
"""Main body of the script."""
df = pd.read_csv(options.results_3ss, sep="\t", index_col=0)
df = df[df["significance-marker"]]
motifs = []
for ID, row in df.iterrows():
if len(motifs) == int(options.N):
break
m = ID.split("|")[-1]
if m not in motifs:
motifs.append(m)
with open(options.motifs_3ss, "w") as f:
for m in motifs:
f.write(m + os.linesep)
df = pd.read_csv(options.results_5ss, sep="\t", index_col=0)
df = df[df["significance-marker"]]
motifs = []
for ID, row in df.iterrows():
if len(motifs) == int(options.N):
break
m = ID.split("|")[-1]
if m not in motifs:
motifs.append(m)
with open(options.motifs_5ss, "w") as f:
for m in motifs:
f.write(m + os.linesep)
df = pd.read_csv(options.results_pas, sep="\t", index_col=0)
df = df[df["significance-marker"]]
motifs = []
for ID, row in df.iterrows():
if len(motifs) == int(options.N):
break
m = ID.split("|")[-1]
if m not in motifs:
motifs.append(m)
with open(options.motifs_pas, "w") as f:
for m in motifs:
f.write(m + os.linesep)
##############################################################################
if __name__ == "__main__":
try:
# parse the command-line arguments
options = parse_arguments().parse_args()
# set up logging during the execution
formatter = logging.Formatter(
fmt="[%(asctime)s] %(levelname)s - %(message)s",
datefmt="%d-%b-%Y %H:%M:%S",
)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger = logging.getLogger("logger")
logger.setLevel(logging.getLevelName(options.verbosity))
logger.addHandler(console_handler)
if options.logfile is not None:
logfile_handler = logging.handlers.RotatingFileHandler(
options.logfile, maxBytes=50000, backupCount=2
)
logfile_handler.setFormatter(formatter)
logger.addHandler(logfile_handler)
# execute the body of the script
start_time = time.time()
logger.info("Starting script")
main()
seconds = time.time() - start_time
# log the execution time
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
logger.info(
"Successfully finished in {hours}h:{minutes}m:{seconds}s",
hours=int(hours),
minutes=int(minutes),
seconds=int(seconds) if seconds > 1.0 else 1,
)
# log the exception in case it happens
except Exception as e:
logger.exception(str(e))
raise e
| 30.556818 | 86 | 0.554481 |
import time
import logging
import logging.handlers
from argparse import ArgumentParser, RawTextHelpFormatter
import os
import pandas as pd
def parse_arguments():
    """Build and return the command-line argument parser for this script."""
    parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        "-v",
        "--verbosity",
        dest="verbosity",
        choices=("DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
        default="ERROR",
        help="Verbosity/Log level. Defaults to ERROR",
    )
    parser.add_argument(
        "-l", "--logfile", dest="logfile", help="Store log to this file."
    )
    parser.add_argument(
        "--topN-motifs",
        dest="N",
        # Effectively "all statistically significant motifs" by default.
        default=1000000,
        required=False,
        help="Number of top motifs to select.",
    )
    # The six required I/O paths share the same shape; declare them as data.
    path_arguments = (
        ("--infile-splicing-3ss", "results_3ss",
         "Annotated results table (3ss)."),
        ("--infile-splicing-5ss", "results_5ss",
         "Annotated results table (5ss)."),
        ("--infile-polyadenylation-pas", "results_pas",
         "Annotated results table (pas)."),
        ("--outfile-splicing-3ss-motifs", "motifs_3ss",
         "Path for the text file with top motifs (3ss)."),
        ("--outfile-splicing-5ss-motifs", "motifs_5ss",
         "Path for the text file with top motifs (5ss)."),
        ("--outfile-polyadenylation-pas-motifs", "motifs_pas",
         "Path for the text file with top motifs (pas)."),
    )
    for flag, destination, help_text in path_arguments:
        parser.add_argument(flag, dest=destination, required=True, help=help_text)
    return parser
| true | true |
f727e3880049704ad4f93c71f9dada6ffc47feb8 | 1,530 | py | Python | top10losers/top10losers.py | KevBarbour/cryptobot | 57239c83ca5dd84d2a0e273f20782cf608ce99ba | [
"MIT"
] | null | null | null | top10losers/top10losers.py | KevBarbour/cryptobot | 57239c83ca5dd84d2a0e273f20782cf608ce99ba | [
"MIT"
] | null | null | null | top10losers/top10losers.py | KevBarbour/cryptobot | 57239c83ca5dd84d2a0e273f20782cf608ce99ba | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import requests
from bs4 import BeautifulSoup
import sys
from twython import Twython
import numpy as np

# Twitter API credentials.
# NOTE(review): placeholders committed to source -- load these from the
# environment or a config file instead of hard-coding real keys here.
apiKey = '...'
apiSecret = '...'
accessToken = '...'
accessTokenSecret = '...'

# Scrape the top-100 currency table from coinmarketcap.com.
url = 'https://coinmarketcap.com'
soup = BeautifulSoup(requests.get(url).text, 'lxml')

cells = []      # flattened cell texts, 7 columns per currency row
rows_seen = 0
for tr in soup.select('#currencies tr'):
    if not tr.select('td'):
        continue  # skip rows without data cells (e.g. the header)
    for td in tr.select('td')[:7]:
        txt = (td.text.replace('\n', ' ').replace('*', '')
               .replace('%', '').replace('.com', '')
               .replace('chain', '').replace('coin', '').strip())
        cells.append(txt)
    rows_seen += 1
    if rows_seen > 99:  # read exactly 100 currency rows
        break

# Keep only the name (original column 1) and 24h percent change (column 6),
# then sort ascending by change so the biggest losers come first.
A = np.reshape(cells, (100, 7))[:, [1, 6]]
A = sorted(A, key=lambda row: float(row[1]))[:10]

# Build the tweet table once, persist it, and tweet it.  The original
# wrote the file and immediately re-read it through a file handle that
# was never closed; building the string directly fixes the leak.
table = "".join("#" + " ".join(row) + "%" + "\n" for row in A)
with open("output10losers.txt", "w") as txt_file:
    txt_file.write(table)

tweetStr = "Top 10 #Crypto Losers 24hrs:" + "\n" + table
api = Twython(apiKey, apiSecret, accessToken, accessTokenSecret)
api.update_status(status=tweetStr)
print("Tweeted: " + tweetStr)
| 26.37931 | 138 | 0.62549 |
import requests
from bs4 import BeautifulSoup
import sys
from twython import Twython
import numpy as np
# Twitter API credentials (placeholders -- should not be hard-coded).
apiKey = '...'
apiSecret = '...'
accessToken = '...'
accessTokenSecret = '...'
# Scrape the top-100 currency table from coinmarketcap.com.
url = 'https://coinmarketcap.com'
soup = BeautifulSoup(requests.get(url).text, 'lxml')
L=[]
F=0
for tr in soup.select('#currencies tr'):
    if not tr.select('td'):
        continue
    for i, td in enumerate(tr.select('td')[:7]) :
        txt = td.text.replace('\n',' ').replace('*', '').replace('%','').replace('.com','').replace('chain','').replace('coin','').strip()
        L.append(txt)
    # F counts currency rows read; stop after 100 rows.
    F=F+1
    if F>99:
        break
# Reorder columns, then keep only name and 24h percent change.
A = np.reshape(L, (100,7))
Perm = [1,3,6,2,4,5,0]
A = A[:, Perm]
A = np.delete(A, (1,3,4,5,6), 1)
# Sort ascending by percent change: biggest losers first; keep the top 10.
A = sorted(A,key=lambda x: (float(x[1])))
A = A[:10]
# Write the table to a text file, then read it back as the tweet body.
with open("output10losers.txt", "w") as txt_file:
    for line in A:
        txt_file.write("#" + " ".join(line) + "%" + "\n" )
# NOTE(review): this handle is never closed -- prefer a ``with`` block.
T = open("output10losers.txt", "r")
finaltweet = T.read()
tweetStr = "Top 10 #Crypto Losers 24hrs:" + "\n" + finaltweet
# Post the tweet via the Twitter API.
api = Twython(apiKey,apiSecret,accessToken,accessTokenSecret)
api.update_status(status=tweetStr)
print("Tweeted: " + tweetStr)
| true | true |
f727e3d280dbac1f16c6fe5d8782bb4bd6564767 | 59,646 | py | Python | allennlp/nn/util.py | threefoldo/allennlp | 983db284cb46fd18c898dd3b0e04eed6cb932768 | [
"Apache-2.0"
] | 3 | 2019-06-17T21:09:07.000Z | 2022-03-18T05:19:31.000Z | allennlp/nn/util.py | alisdairv/allennlp | 9fcc79566cc148cce9f967a7962ac03bc300f011 | [
"Apache-2.0"
] | null | null | null | allennlp/nn/util.py | alisdairv/allennlp | 9fcc79566cc148cce9f967a7962ac03bc300f011 | [
"Apache-2.0"
] | 1 | 2020-03-12T06:53:53.000Z | 2020-03-12T06:53:53.000Z | """
Assorted utilities for working with neural networks in AllenNLP.
"""
# pylint: disable=too-many-lines
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar
import logging
import math
import warnings
import torch
from allennlp.common.checks import ConfigurationError
# Module-level logger for these utilities.
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
# Generic placeholder type used in annotations in this module.
T = TypeVar('T')
def has_tensor(obj) -> bool:
    """
    Recursively check whether ``obj`` -- a possibly nested structure of
    dicts, lists and tuples -- contains a ``torch.Tensor`` anywhere.
    """
    if isinstance(obj, torch.Tensor):
        return True
    # Recurse into containers; every other type cannot hold a tensor.
    if isinstance(obj, dict):
        children = obj.values()
    elif isinstance(obj, (list, tuple)):
        children = obj
    else:
        return False
    return any(has_tensor(child) for child in children)
def move_to_device(obj, cuda_device: int):
    """
    Recursively move every ``torch.Tensor`` inside ``obj`` (a tensor or a
    nested dict/list/tuple of them) onto the given GPU.  When
    ``cuda_device`` is negative, or ``obj`` contains no tensors at all,
    the input is returned unchanged.
    """
    # Cheap short-circuit: CPU target or tensor-free structure.
    if cuda_device < 0 or not has_tensor(obj):
        return obj
    if isinstance(obj, torch.Tensor):
        return obj.cuda(cuda_device)
    if isinstance(obj, dict):
        return {name: move_to_device(child, cuda_device) for name, child in obj.items()}
    if isinstance(obj, list):
        return [move_to_device(child, cuda_device) for child in obj]
    if isinstance(obj, tuple):
        return tuple(move_to_device(child, cuda_device) for child in obj)
    return obj
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],
                       remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:
    """
    Batch a list of tensor dictionaries (assumed to share the same keys)
    into a single dictionary mapping each key to the stacked tensors.

    Parameters
    ----------
    tensor_dicts : ``List[Dict[str, torch.Tensor]]``
        The per-instance tensor dictionaries to combine.
    remove_trailing_dimension : ``bool``
        When ``True``, a trailing dimension of size 1 shared by *every*
        tensor under a key is squeezed away after stacking.
    """
    grouped: Dict[str, List[torch.Tensor]] = defaultdict(list)
    for single_dict in tensor_dicts:
        for name, tensor in single_dict.items():
            grouped[name].append(tensor)
    result: Dict[str, torch.Tensor] = {}
    for name, tensors in grouped.items():
        stacked = torch.stack(tensors)
        squeeze = remove_trailing_dimension and all(t.size(-1) == 1 for t in tensors)
        result[name] = stacked.squeeze(-1) if squeeze else stacked
    return result
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
    """
    Compute the unpadded length of every sequence in a batch from its
    binary mask.

    Parameters
    ----------
    mask : torch.Tensor, required.
        A 2D binary mask of shape ``(batch_size, sequence_length)``.

    Returns
    -------
    A torch.LongTensor of shape ``(batch_size,)`` holding the number of
    unmasked positions in each row.
    """
    return torch.sum(mask.long(), dim=-1)
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
    """
    Turn per-instance sequence lengths into a ``(batch_size, max_length)``
    binary mask.  For lengths ``[2, 2, 3]`` with ``max_length`` 4 the
    result is ``[[1, 1, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0]]``.

    ``max_length`` is taken as an argument (rather than computed from
    ``sequence_lengths``) to avoid finding the max and copying that value
    from the GPU to the CPU just to size a new tensor.
    """
    # Row of running positions [1, 2, ..., max_length], broadcast against
    # the column of lengths; position <= length means "inside the sequence".
    positions = sequence_lengths.new_ones(sequence_lengths.size(0), max_length).cumsum(dim=1)
    return (sequence_lengths.unsqueeze(1) >= positions).long()
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
    """
    Sort a batch-first tensor by decreasing sequence length.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A batch-first tensor to reorder along dimension 0.
    sequence_lengths : torch.LongTensor, required.
        The length associated with each batch element.

    Returns
    -------
    sorted_tensor : torch.FloatTensor
        ``tensor`` reordered so lengths are non-increasing.
    sorted_sequence_lengths : torch.LongTensor
        ``sequence_lengths`` in decreasing order.
    restoration_indices : torch.LongTensor
        Indices that undo the sort:
        ``sorted_tensor.index_select(0, restoration_indices) == tensor``.
    permutation_index : torch.LongTensor
        The permutation applied; reusable to sort other tensors the same way.

    Raises
    ------
    ConfigurationError
        If either argument is not a ``torch.Tensor``.
    """
    if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
        raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")

    sorted_lengths, permutation = sequence_lengths.sort(0, descending=True)
    reordered = tensor.index_select(0, permutation)

    # Sorting the permutation itself gives, for each original position,
    # where its row ended up -- i.e. the mapping that restores the
    # original batch order.
    positions = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))
    _, inverse = permutation.sort(0, descending=False)
    restoration = positions.index_select(0, inverse)
    return reordered, sorted_lengths, restoration, permutation
def get_final_encoder_states(encoder_outputs: torch.Tensor,
                             mask: torch.Tensor,
                             bidirectional: bool = False) -> torch.Tensor:
    """
    Extract the final hidden state for each batch element from the output of a
    ``Seq2SeqEncoder`` of shape ``(batch_size, sequence_length, encoding_dim)``,
    producing a tensor of shape ``(batch_size, encoding_dim)``. Because sequences
    can have different lengths, we can't just take ``encoder_outputs[:, -1]``;
    instead we use the ``(batch_size, sequence_length)`` mask to locate the last
    real token of each instance.

    If ``bidirectional`` is ``True``, the last dimension is treated as two
    halves: the first half is the forward direction (read at each sequence's
    last token) and the second half is the backward direction (read at token 0).
    The two halves are concatenated, i.e. ``encoder_outputs[:, -1, :dim/2]``
    concated with ``encoder_outputs[:, 0, dim/2:]``.
    """
    # Index of each sequence's last real token (length - 1); assumes right padding.
    last_token_index = mask.sum(1).long() - 1  # Shape: (batch_size,)
    batch_size, _, output_dim = encoder_outputs.size()
    gather_index = last_token_index.view(-1, 1, 1).expand(batch_size, 1, output_dim)
    # Gather the final timestep for every instance, then drop the length axis.
    final_state = encoder_outputs.gather(1, gather_index).squeeze(1)  # (batch_size, output_dim)
    if not bidirectional:
        return final_state
    half = output_dim // 2
    forward_part = final_state[:, :half]
    backward_part = encoder_outputs[:, 0, half:]
    return torch.cat([forward_part, backward_part], dim=-1)
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
    """
    Compute an element-wise dropout mask for a tensor, where each element is
    dropped with probability ``dropout_probability``. The mask is NOT applied
    here — ``tensor_for_masking`` is only used to get the right size and CUDA
    tensor type for the mask.

    Parameters
    ----------
    dropout_probability : float, required.
        Probability of dropping each element of the input.
    tensor_for_masking : torch.Tensor, required.

    Returns
    -------
    A torch.FloatTensor binary mask scaled by ``1 / (1 - dropout_probability)``,
    so that applying it leaves the expected value and variance of the masked
    tensor unchanged.
    """
    keep = torch.rand(tensor_for_masking.size()) > dropout_probability
    binary_mask = tensor_for_masking.new_tensor(keep)
    # Scale by 1/keep_prob so E[mask * x] == E[x].
    return binary_mask.float().div(1.0 - dropout_probability)
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """
    Softmax over only the unmasked entries of ``vector``. With ``mask=None``
    this is an ordinary ``torch.nn.functional.softmax``.

    ``vector`` may have any number of dimensions; ``mask`` need only be
    broadcastable to it. A mask with fewer dimensions than ``vector`` is
    unsqueezed on dimension 1 until the ranks match; do any other unsqueezing
    yourself before calling.

    If every entry is masked, the result is all ``0.0`` — beware that this can
    produce ``NaN`` downstream if fed into a categorical cross-entropy loss.
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)
    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # Zeroing masked logits first keeps very large masked values from blowing
    # up the exponentials; the renormalization below removes their residue.
    probabilities = torch.nn.functional.softmax(vector * mask, dim=dim)
    probabilities = probabilities * mask
    return probabilities / (probabilities.sum(dim=dim, keepdim=True) + 1e-13)
def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """
    Log-softmax over only the unmasked entries of ``vector``. With
    ``mask=None`` this is an ordinary ``torch.nn.functional.log_softmax``.

    ``mask`` need only be broadcastable to ``vector``; a lower-rank mask is
    unsqueezed on dimension 1 until the ranks match.

    For a fully-masked vector the return value is arbitrary but not ``nan`` —
    you should be masking the downstream result in that case anyway. This
    relies on single-precision floats; half-precision inputs with fully-masked
    rows will likely produce ``nan``. Likewise, if all of your logits are
    extremely negative (max below roughly -50), the additive masking used here
    can distort the result.
    """
    if mask is None:
        return torch.nn.functional.log_softmax(vector, dim=dim)
    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # Adding (mask + 1e-45).log() zeroes out masked logits in log space
    # without producing nans on fully-masked rows: log(1 + 1e-45) is
    # effectively 0, and 1e-45 is the smallest float that doesn't underflow
    # to exactly zero.
    masked_vector = vector + (mask + 1e-45).log()
    return torch.nn.functional.log_softmax(masked_vector, dim=dim)
def masked_max(vector: torch.Tensor,
               mask: torch.Tensor,
               dim: int,
               keepdim: bool = False,
               min_val: float = -1e7) -> torch.Tensor:
    """
    Max along a dimension, ignoring masked-out positions.

    Parameters
    ----------
    vector : ``torch.Tensor``
        The values to take the max over.
    mask : ``torch.Tensor``
        Mask broadcastable with ``vector``; positions where it is 0 are ignored.
    dim : ``int``
        The dimension to reduce.
    keepdim : ``bool``
        Whether to retain the reduced dimension with size 1.
    min_val : ``float``
        The value substituted at masked positions before reducing.

    Returns
    -------
    A ``torch.Tensor`` of the maximum values.
    """
    # Fill masked positions with min_val so they can never win the max.
    padded = vector.masked_fill((1.0 - mask).byte(), min_val)
    maximum, _ = padded.max(dim=dim, keepdim=keepdim)
    return maximum
def masked_mean(vector: torch.Tensor,
                mask: torch.Tensor,
                dim: int,
                keepdim: bool = False,
                eps: float = 1e-8) -> torch.Tensor:
    """
    Mean along a dimension, ignoring masked-out positions.

    Parameters
    ----------
    vector : ``torch.Tensor``
        The values to average.
    mask : ``torch.Tensor``
        Mask broadcastable with ``vector``; positions where it is 0 are ignored.
    dim : ``int``
        The dimension to reduce.
    keepdim : ``bool``
        Whether to retain the reduced dimension with size 1.
    eps : ``float``
        Lower bound on the divisor, guarding against division by zero when a
        slice is fully masked.

    Returns
    -------
    A ``torch.Tensor`` of the mean values.
    """
    zeroed = vector.masked_fill((1.0 - mask).byte(), 0.0)
    total = torch.sum(zeroed, dim=dim, keepdim=keepdim)
    count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)
    return total / count.clamp(min=eps)
def viterbi_decode(tag_sequence: torch.Tensor,
                   transition_matrix: torch.Tensor,
                   tag_observations: Optional[List[int]] = None):
    """
    Perform Viterbi decoding in log space over a sequence given a transition matrix
    specifying pairwise (transition) potentials between tags and a matrix of shape
    (sequence_length, num_tags) specifying unary potentials for possible tags per
    timestep.
    Parameters
    ----------
    tag_sequence : torch.Tensor, required.
        A tensor of shape (sequence_length, num_tags) representing scores for
        a set of tags over a given sequence.
    transition_matrix : torch.Tensor, required.
        A tensor of shape (num_tags, num_tags) representing the binary potentials
        for transitioning between a given pair of tags.
    tag_observations : Optional[List[int]], optional, (default = None)
        A list of length ``sequence_length`` containing the class ids of observed
        elements in the sequence, with unobserved elements being set to -1. Note that
        it is possible to provide evidence which results in degenerate labellings if
        the sequences of tags you provide as evidence cannot transition between each
        other, or those transitions are extremely unlikely. In this situation we log a
        warning, but the responsibility for providing self-consistent evidence ultimately
        lies with the user.
    Returns
    -------
    viterbi_path : List[int]
        The tag indices of the maximum likelihood tag sequence.
    viterbi_score : torch.Tensor
        The score of the viterbi path.
    """
    sequence_length, num_tags = list(tag_sequence.size())
    if tag_observations:
        if len(tag_observations) != sequence_length:
            raise ConfigurationError("Observations were provided, but they were not the same length "
                                     "as the sequence. Found sequence of length: {} and evidence: {}"
                                     .format(sequence_length, tag_observations))
    else:
        # No evidence: treat every timestep as unobserved.
        tag_observations = [-1 for _ in range(sequence_length)]
    # path_scores[t] holds the best score of any path ending in each tag at
    # timestep t; path_indices[t-1] holds the argmax backpointers into t-1.
    path_scores = []
    path_indices = []
    if tag_observations[0] != -1:
        # An observed first tag is forced by giving it a huge unary score
        # (100000) relative to everything else, rather than -inf elsewhere.
        one_hot = torch.zeros(num_tags)
        one_hot[tag_observations[0]] = 100000.
        path_scores.append(one_hot)
    else:
        path_scores.append(tag_sequence[0, :])
    # Evaluate the scores for all possible paths.
    for timestep in range(1, sequence_length):
        # Add pairwise potentials to current scores.
        # Shape: (num_tags, num_tags) — row = previous tag, column = current tag.
        summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
        scores, paths = torch.max(summed_potentials, 0)
        # If we have an observation for this timestep, use it
        # instead of the distribution over tags.
        observation = tag_observations[timestep]
        # Warn the user if they have passed
        # invalid/extremely unlikely evidence.
        if tag_observations[timestep - 1] != -1:
            if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
                logger.warning("The pairwise potential between tags you have passed as "
                               "observations is extremely unlikely. Double check your evidence "
                               "or transition potentials!")
        if observation != -1:
            # Force the observed tag with the same 100000 one-hot trick as above.
            one_hot = torch.zeros(num_tags)
            one_hot[observation] = 100000.
            path_scores.append(one_hot)
        else:
            path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
        path_indices.append(paths.squeeze())
    # Construct the most likely sequence backwards.
    viterbi_score, best_path = torch.max(path_scores[-1], 0)
    viterbi_path = [int(best_path.numpy())]
    # Follow the backpointers from the final timestep back to the start.
    for backward_timestep in reversed(path_indices):
        viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
    # Reverse the backward path.
    viterbi_path.reverse()
    return viterbi_path, viterbi_score
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],
                        num_wrapping_dims: int = 0) -> torch.LongTensor:
    """
    Build a padding mask (0 = padding, 1 = real token) from the dictionary of
    tensors a ``TextField`` produces. ``TextFields`` wrapped by ``ListFields``
    are handled via ``num_wrapping_dims``: with ``num_wrapping_dims == 0`` the
    mask has shape ``(batch_size, num_tokens)``; each extra wrapping dim adds a
    dimension, giving ``(batch_size, ..., num_tokens)``.

    The dictionary may contain tensors of different shapes (word ids, character
    ids, ...). We pick the tensor with the fewest dimensions; after subtracting
    ``num_wrapping_dims``, a 2-dimensional tensor is assumed to be token ids
    (``(batch_size, ..., num_tokens)``) and is compared against 0 directly,
    while a 3-dimensional tensor is assumed to be a per-token feature tensor
    such as character ids (``(batch_size, ..., num_tokens, num_features)``) and
    is reduced over its last dimension.

    If ``text_field_tensors`` contains a "mask" key, that tensor is returned
    as-is instead of inferring anything.
    TODO(joelgrus): can we change this?

    NOTE: we return torch.LongTensors rather than torch.ByteTensors because
    byte masks overflow easily under manipulation such as summing to get
    sequence lengths — an 8-bit sum of 260 ones is 4, not 260.
    """
    if "mask" in text_field_tensors:
        return text_field_tensors["mask"]
    dim_tensor_pairs = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]
    lowest_dim, smallest_tensor = min(dim_tensor_pairs, key=lambda pair: pair[0])
    effective_dim = lowest_dim - num_wrapping_dims
    if effective_dim == 2:
        # Token-id tensor: padding is id 0.
        return (smallest_tensor != 0).long()
    if effective_dim == 3:
        # Character-id tensor: a token is real if any of its character ids is nonzero.
        return ((smallest_tensor > 0).long().sum(dim=-1) > 0).long()
    raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(effective_dim))
def last_dim_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Masked softmax over the last dimension of a tensor with 3 or more
    dimensions, assumed to be ``(batch_size, ..., sequence_length)`` with a
    mask (if given) of shape ``(batch_size, sequence_length)``.
    .. deprecated:: 0.6.1
       ``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` in version
       0.6.1. It will be removed in version 0.8.
    """
    message = ("``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` "
               "in version 0.6.1. It will be removed in version 0.8.")
    warnings.warn(message, DeprecationWarning)
    return masked_softmax(tensor, mask, dim=-1)
def last_dim_log_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Masked log-softmax over the last dimension of a tensor with 3 or more
    dimensions, assumed to be ``(batch_size, ..., sequence_length)`` with a
    mask (if given) of shape ``(batch_size, sequence_length)``.
    .. deprecated:: 0.6.1
       ``last_dim_log_softmax`` was deprecated in favor of just using ``masked_log_softmax`` in
       version 0.6.1. It will be removed in version 0.8.
    """
    message = ("``last_dim_log_softmax`` was deprecated in favor of just using "
               "``masked_log_softmax`` in version 0.6.1. It will be removed in version 0.8.")
    warnings.warn(message, DeprecationWarning)
    return masked_log_softmax(tensor, mask, dim=-1)
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
    """
    Compute a weighted sum of the rows of ``matrix`` using the weights in
    ``attention`` — the typical computation performed after an attention
    mechanism.

    Despite the names, both arguments may be higher-order tensors. We always
    sum over the second-to-last dimension of the "matrix", and all dimensions
    of the "matrix" before its last dimension must be matched in the "vector";
    any extra dimensions in the "vector" must come `directly after the batch
    dimension`.

    For example, with a "matrix" of shape ``(batch_size, num_queries,
    num_words, embedding_dim)``, valid attention shapes include:
    - ``(batch_size, num_queries, num_words)`` (distribution over words for
      each query), giving ``(batch_size, num_queries, embedding_dim)``;
    - ``(batch_size, num_documents, num_queries, num_words)`` (distribution
      over words in a query for each document), giving
      ``(batch_size, num_documents, num_queries, embedding_dim)``.
    """
    # Fast paths that map directly onto batched matrix multiplication.
    if attention.dim() == 2 and matrix.dim() == 3:
        return attention.unsqueeze(1).bmm(matrix).squeeze(1)
    if attention.dim() == 3 and matrix.dim() == 3:
        return attention.bmm(matrix)

    if matrix.dim() - 1 < attention.dim():
        # Insert and expand singleton dims so the matrix broadcasts against
        # the extra leading dims of the attention tensor.
        expanded_size = list(matrix.size())
        for axis in range(attention.dim() - matrix.dim() + 1):
            matrix = matrix.unsqueeze(1)
            expanded_size.insert(axis + 1, attention.size(axis + 1))
        matrix = matrix.expand(*expanded_size)
    weighted = attention.unsqueeze(-1).expand_as(matrix) * matrix
    return weighted.sum(dim=-2)
def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
                                       targets: torch.LongTensor,
                                       weights: torch.FloatTensor,
                                       batch_average: bool = None,
                                       average: str = "batch",
                                       label_smoothing: float = None) -> torch.FloatTensor:
    """
    Computes the cross entropy loss of a sequence, weighted with respect to
    some user provided weights. Note that the weighting here is not the same as
    in the :func:`torch.nn.CrossEntropyLoss()` criterion, which is weighting
    classes; here we are weighting the loss contribution from particular elements
    in the sequence. This allows loss computations for models which use padding.

    Parameters
    ----------
    logits : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch_size, sequence_length, num_classes)
        which contains the unnormalized probability for each class.
    targets : ``torch.LongTensor``, required.
        A ``torch.LongTensor`` of size (batch, sequence_length) which contains the
        index of the true class for each corresponding step.
    weights : ``torch.FloatTensor``, required.
        A ``torch.FloatTensor`` of size (batch, sequence_length)
    batch_average : bool, optional, (default = None).
        A bool indicating whether the loss should be averaged across the batch,
        or returned as a vector of losses per batch element.
        .. deprecated:: 0.6.2
           ``batch_average`` was deprecated and replaced with
           the more general ``average`` in version 0.6.2. It will be removed
           in version 0.8.
    average: str, optional (default = "batch")
        If "batch", average the loss across the batches. If "token", average
        the loss across each item in the input. If ``None``, return a vector
        of losses per batch element.
    label_smoothing : ``float``, optional (default = None)
        Whether or not to apply label smoothing to the cross-entropy loss.
        For example, with a label smoothing value of 0.2, a 4 class classifcation
        target would look like ``[0.05, 0.05, 0.85, 0.05]`` if the 3rd class was
        the correct label.

    Returns
    -------
    A torch.FloatTensor representing the cross entropy loss.
    If ``average=="batch"`` or ``average=="token"``, the returned loss is a scalar.
    If ``average is None``, the returned loss is a vector of shape (batch_size,).

    Raises
    ------
    ValueError
        If ``average`` is not one of ``None``, ``"token"`` or ``"batch"``.
    """
    if batch_average is not None:
        # Maintain old behavior
        if batch_average:
            warnings.warn("batch_average=True was deprecated and replaced "
                          "with average='batch' in version 0.6.2. It will be "
                          "removed in version 0.8.", DeprecationWarning)
            average = "batch"
        else:
            warnings.warn("batch_average=False was deprecated and replaced "
                          "with average=None in version 0.6.2. It will be "
                          "removed in version 0.8.", DeprecationWarning)
            average = None
    if average not in {None, "token", "batch"}:
        # BUG FIX: the original message was a plain string containing the
        # literal text "f{average}" — the ``f`` prefix was inside the quotes,
        # so the offending value was never interpolated. Use a real f-string.
        raise ValueError(f"Got average {average}, expected one of "
                         "None, 'token', or 'batch'")

    # shape : (batch * sequence_length, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # shape : (batch * sequence_length, num_classes)
    log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
    # shape : (batch * max_len, 1)
    targets_flat = targets.view(-1, 1).long()

    if label_smoothing is not None and label_smoothing > 0.0:
        num_classes = logits.size(-1)
        smoothing_value = label_smoothing / num_classes
        # Fill all the correct indices with 1 - smoothing value.
        one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
        smoothed_targets = one_hot_targets + smoothing_value
        negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
        negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
    else:
        # Contribution to the negative log likelihood only comes from the exact indices
        # of the targets, as the target distributions are one-hot. Here we use torch.gather
        # to extract the indices of the num_classes dimension which contribute to the loss.
        # shape : (batch * sequence_length, 1)
        negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood * weights.float()

    if average == "batch":
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
        num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
        return per_batch_loss.sum() / num_non_empty_sequences
    elif average == "token":
        return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
    else:
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
        return per_batch_loss
def replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:
    """
    Replace every masked-out value in ``tensor`` (positions where ``mask`` is
    0) with ``replace_with``. ``mask`` must be broadcastable to ``tensor`` and
    we require ``tensor.dim() == mask.dim()``, since otherwise we would not
    know which mask dimensions to unsqueeze.

    This is just ``tensor.masked_fill()`` with the mask inverted — pytorch
    fills where the mask is 1, whereas we want to fill where it is 0. The
    equivalent inline form is ``tensor.masked_fill((1 - mask).byte(), replace_with)``.
    """
    if tensor.dim() != mask.dim():
        raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
    fill_positions = (1 - mask).byte()
    return tensor.masked_fill(fill_positions, replace_with)
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
    """
    Value-based equality check for tensors: same shape, and every entry within
    ``tolerance``. Lists, tuples and dictionaries are compared recursively,
    position by position / key by key; any non-tensor leaves fall back to
    their own ``==``.

    This is a catch-all helper meant to make ``__eq__`` implementations easy
    to write, and is really only intended for use in tests.
    """
    # pylint: disable=too-many-return-statements
    if isinstance(tensor1, (list, tuple)):
        if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
            return False
        return all(tensors_equal(sub1, sub2, tolerance) for sub1, sub2 in zip(tensor1, tensor2))
    if isinstance(tensor1, dict):
        if not isinstance(tensor2, dict) or tensor1.keys() != tensor2.keys():
            return False
        return all(tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1)
    if isinstance(tensor1, torch.Tensor):
        if not isinstance(tensor2, torch.Tensor) or tensor1.size() != tensor2.size():
            return False
        return ((tensor1 - tensor2).abs().float() < tolerance).all()
    try:
        return tensor1 == tensor2
    except RuntimeError:
        print(type(tensor1), type(tensor2))
        raise
def device_mapping(cuda_device: int):
    """
    Build the ``map_location`` callable that ``torch.load()`` needs in order to
    load a GPU-trained model onto the CPU (or onto a specific GPU). Pass the
    desired ``cuda_device`` (-1 for CPU) and hand the returned function to
    ``torch.load()``.
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        if cuda_device < 0:
            return storage
        return storage.cuda(cuda_device)
    return inner_device_mapping
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
    """
    Combine a list of tensors using element-wise operations and concatenation,
    driven by a ``combination`` string of (1-indexed) positions into the input
    list, e.g. ``"1,2,1+2,3-1"``.

    Supported pieces are ``x``, ``x*y``, ``x+y``, ``x-y`` and ``x/y``, where
    ``x`` and ``y`` are positive integers no larger than ``len(tensors)``; the
    binary operations are element-wise, and ``x``/``y`` may also be written
    literally in place of ``1``/``2``. The pieces are concatenated along the
    last dimension, so ``"1,2,1*2"`` yields ``[1;2;1*2]``.

    If the combination you need is fixed and known, just write it inline
    (e.g. ``torch.cat([x, y, x * y])``); this helper only earns its complexity
    when the combination must be `configurable`. Tensors participating in an
    element-wise operation must share the same shape.
    """
    if len(tensors) > 9:
        raise ConfigurationError("Double-digit tensor lists not currently supported")
    normalized = combination.replace('x', '1').replace('y', '2')
    pieces = [_get_combination(piece, tensors) for piece in normalized.split(',')]
    return torch.cat(pieces, dim=-1)
def _rindex(sequence: Sequence[T], obj: T) -> int:
    """
    Return the zero-based index of the `last` item in ``sequence`` equal to
    ``obj``.

    Parameters
    ----------
    sequence : ``Sequence[T]``
    obj : ``T``

    Returns
    -------
    Zero-based index of the last occurrence of ``obj``.

    Raises
    ------
    ValueError
        If ``obj`` does not occur in ``sequence``.
    """
    for index in reversed(range(len(sequence))):
        if sequence[index] == obj:
            return index
    raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
    # Resolve one piece of a combination string: either a bare (1-indexed)
    # tensor reference like "2", or a three-character binary expression like
    # "1*2" applied element-wise.
    if combination.isdigit():
        return tensors[int(combination) - 1]
    if len(combination) != 3:
        raise ConfigurationError("Invalid combination: " + combination)
    left = _get_combination(combination[0], tensors)
    right = _get_combination(combination[2], tensors)
    operation = combination[1]
    if operation == '*':
        return left * right
    if operation == '/':
        return left / right
    if operation == '+':
        return left + right
    if operation == '-':
        return left - right
    raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(combination: str,
                                 tensors: List[torch.Tensor],
                                 weights: torch.nn.Parameter) -> torch.Tensor:
    """
    Like :func:`combine_tensors`, but applies a weighted (linear)
    multiplication while combining. This exists separately from
    ``combine_tensors`` so we can avoid materializing large intermediate
    tensors — we know the result is ultimately multiplied by a weight vector.

    Parameters
    ----------
    combination : ``str``
        Same as in :func:`combine_tensors`.
    tensors : ``List[torch.Tensor]``
        The tensors to combine; integers in ``combination`` are (1-indexed)
        positions into this list. Each tensor is expected to have three or
        four dimensions, with the last one an embedding; a four-dimensional
        tensor must have some dimension of length 1.
    weights : ``torch.nn.Parameter``
        Weight vector of shape (combined_dim,), as computed by
        :func:`get_combined_dim`.
    """
    if len(tensors) > 9:
        raise ConfigurationError("Double-digit tensor lists not currently supported")
    combination = combination.replace('x', '1').replace('y', '2')
    pieces = combination.split(',')
    tensor_dims = [tensor.size(-1) for tensor in tensors]

    # Walk the weight vector, handing each piece its own slice, and sum the
    # per-piece products as we go.
    result = None
    offset = 0
    for piece in pieces:
        piece_dim = _get_combination_dim(piece, tensor_dims)
        piece_weight = weights[offset:offset + piece_dim]
        offset += piece_dim
        term = _get_combination_and_multiply(piece, tensors, piece_weight)
        result = term if result is None else result + term
    return result
def _get_combination_and_multiply(combination: str,
                                  tensors: List[torch.Tensor],
                                  weight: torch.nn.Parameter) -> torch.Tensor:
    # Resolve one piece of a combination string and multiply it by ``weight``,
    # without materializing the full combined tensor where avoidable.
    if combination.isdigit():
        return torch.matmul(tensors[int(combination) - 1], weight)
    if len(combination) != 3:
        raise ConfigurationError("Invalid combination: " + combination)
    first_tensor = _get_combination(combination[0], tensors)
    second_tensor = _get_combination(combination[2], tensors)
    operation = combination[1]
    if operation in ('*', '/'):
        if first_tensor.dim() > 4 or second_tensor.dim() > 4:
            raise ValueError("Tensors with dim > 4 not currently supported")
        # A four-dimensional operand must have a length-1 axis; squeeze the
        # last such axis so the matmul below sees at most three dimensions.
        if first_tensor.dim() == 4:
            first_tensor = first_tensor.squeeze(_rindex(first_tensor.size(), 1))
        if second_tensor.dim() == 4:
            second_tensor = second_tensor.squeeze(_rindex(second_tensor.size(), 1))
        # Division is multiplication by the element-wise reciprocal.
        if operation == '/':
            second_tensor = second_tensor.pow(-1)
        intermediate = first_tensor * weight
        return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)
    if operation == '+':
        return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
    if operation == '-':
        return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
    raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
    """
    For use with :func:`combine_tensors`: compute the size of the last
    dimension produced by ``combine_tensors(combination, tensors)`` given the
    input dimensions. This is what you need to size weight matrices in models
    that use ``combine_tensors``.

    Parameters
    ----------
    combination : ``str``
        A comma-separated list of combination pieces, like ``"1,2,1*2"``,
        specified identically to ``combination`` in :func:`combine_tensors`.
    tensor_dims : ``List[int]``
        The sizes of the `last axis` of each tensor that will be passed to
        :func:`combine_tensors`.
    """
    if len(tensor_dims) > 9:
        raise ConfigurationError("Double-digit tensor lists not currently supported")
    normalized = combination.replace('x', '1').replace('y', '2')
    return sum(_get_combination_dim(piece, tensor_dims) for piece in normalized.split(','))
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
    # Size of the last dimension produced by one piece of a combination
    # string. Element-wise operations require both operands to share a size.
    if combination.isdigit():
        return tensor_dims[int(combination) - 1]
    if len(combination) != 3:
        raise ConfigurationError("Invalid combination: " + combination)
    left_dim = _get_combination_dim(combination[0], tensor_dims)
    right_dim = _get_combination_dim(combination[2], tensor_dims)
    operation = combination[1]
    if left_dim != right_dim:
        raise ConfigurationError("Tensor dims must match for operation \"{}\"".format(operation))
    return left_dim
def logsumexp(tensor: torch.Tensor,
              dim: int = -1,
              keepdim: bool = False) -> torch.Tensor:
    """
    Numerically stable log-sum-exp, mathematically equivalent to
    ``tensor.exp().sum(dim, keepdim=keepdim).log()``.  Typically used for
    summing log probabilities without overflow.

    Parameters
    ----------
    tensor : torch.FloatTensor, required.
        A tensor of arbitrary size.
    dim : int, optional (default = -1)
        The dimension to reduce over.
    keepdim : bool, optional (default = False)
        Whether to retain a size-one dimension at the reduced axis.
    """
    # Subtract the per-slice maximum before exponentiating so exp() cannot
    # overflow; the maximum is added back outside the log.
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    offset = max_score if keepdim else max_score.unsqueeze(dim)
    return max_score + (tensor - offset).exp().sum(dim, keepdim=keepdim).log()
def get_device_of(tensor: torch.Tensor) -> int:
    """
    Return the CUDA device index of ``tensor``, or -1 if it lives on the CPU.
    """
    return tensor.get_device() if tensor.is_cuda else -1
def flatten_and_batch_shift_indices(indices: torch.Tensor,
                                    sequence_length: int) -> torch.Tensor:
    """
    Subroutine for :func:`~batched_index_select`.  Given ``indices`` of shape
    ``(batch_size, d_1, ..., d_n)`` that index into dimension 2 of a target of
    shape ``(batch_size, sequence_length, embedding_size)``, return a flat
    vector that indexes correctly into the target once it has been flattened
    to ``(batch_size * sequence_length, embedding_size)``.

    Each batch entry's indices are shifted by ``batch_index * sequence_length``
    so they land on the right rows of the flattened target.

    Parameters
    ----------
    indices : ``torch.LongTensor``, required.
    sequence_length : ``int``, required.
        Size of dimension 1 of the target tensor.

    Returns
    -------
    offset_indices : ``torch.LongTensor``
        Shape ``(batch_size * d_1 * ... * d_n,)``.
    """
    # Per-batch row offset into the flattened target; shape (batch_size,).
    offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
    # Append singleton dimensions so the offsets broadcast over d_1 ... d_n.
    while offsets.dim() < indices.dim():
        offsets = offsets.unsqueeze(-1)
    # Shift, then flatten to a single index vector.
    return (indices + offsets).view(-1)
def batched_index_select(target: torch.Tensor,
                         indices: torch.LongTensor,
                         flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:
    """
    Select entries along the sequence dimension (dim 1) of ``target`` using a
    per-batch index tensor of arbitrary shape.

    ``indices`` has shape ``(batch_size, d_1, ..., d_n)`` and indexes into a
    ``target`` of shape ``(batch_size, sequence_length, embedding_size)``; the
    result has shape ``(batch_size, d_1, ..., d_n, embedding_size)``.  A
    typical use is looking up span start/end embeddings in a sequence tensor,
    where the number of index dimensions is not known a-priori.

    Parameters
    ----------
    target : ``torch.Tensor``, required.
        Shape ``(batch_size, sequence_length, embedding_size)``.
    indices : ``torch.LongTensor``
        Shape ``(batch_size, ...)``; values index the sequence dimension.
    flattened_indices : Optional[torch.Tensor], optional (default = None)
        Precomputed result of :func:`~flatten_and_batch_shift_indices` on
        ``indices``, for callers that reuse the same indices across batches.

    Returns
    -------
    selected_targets : ``torch.Tensor``
        Shape ``[indices.size(), target.size(-1)]``.
    """
    if flattened_indices is None:
        # Shape: (batch_size * d_1 * ... * d_n)
        flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
    # Collapse batch and sequence dims, gather rows, then restore index shape.
    flat_target = target.view(-1, target.size(-1))
    flat_selected = flat_target.index_select(0, flattened_indices)
    output_shape = list(indices.size()) + [target.size(-1)]
    return flat_selected.view(*output_shape)
def flattened_index_select(target: torch.Tensor,
                           indices: torch.LongTensor) -> torch.Tensor:
    """
    For each of the ``set_size`` rows of ``indices`` (shape ``(set_size,
    subset_size)``), select those sequence positions from every batch entry of
    ``target`` (shape ``(batch_size, sequence_length, embedding_size)``),
    producing a tensor of shape ``(batch_size, set_size, subset_size,
    embedding_size)``.  All index values must be < ``sequence_length``.
    """
    if indices.dim() != 2:
        raise ConfigurationError("Indices passed to flattened_index_select had shape {} but "
                                 "only 2 dimensional inputs are supported.".format(indices.size()))
    # One index_select over the flattened indices, then reshape back out.
    gathered = target.index_select(1, indices.view(-1))
    return gathered.view(target.size(0), indices.size(0), indices.size(1), -1)
def get_range_vector(size: int, device: int) -> torch.Tensor:
    """
    Return a LongTensor ``[0, 1, ..., size - 1]`` on the given device
    (``device`` is a CUDA index, or -1 for CPU).  The CUDA branch builds the
    range on the GPU directly, avoiding a host-to-device copy.
    """
    if device <= -1:
        return torch.arange(0, size, dtype=torch.long)
    return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
def bucket_values(distances: torch.Tensor,
                  num_identity_buckets: int = 4,
                  num_total_buckets: int = 10) -> torch.Tensor:
    """
    Place values (designed for distances) into ``num_total_buckets``
    semi-logscale buckets, the first ``num_identity_buckets`` of which each
    hold a single value.  With the defaults the buckets are
    [0, 1, 2, 3, 4, 5-7, 8-15, 16-31, 32-63, 64+].

    Parameters
    ----------
    distances : ``torch.Tensor``, required.
        Tensor of any shape to bucket.
    num_identity_buckets : int, optional (default = 4).
        Number of buckets holding exactly one value each.
    num_total_buckets : int, (default = 10)
        Total number of buckets.

    Returns
    -------
    A tensor of the same shape as the input containing bucket indices.
    """
    # Semi-logscale index: log base 2 keeps small values granular; the offset
    # makes logscale indices start after the identity buckets.
    logspace_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)
    # Values <= num_identity_buckets keep their own bucket; the rest use the
    # logscale index.
    use_identity_mask = (distances <= num_identity_buckets).long()
    use_buckets_mask = 1 - use_identity_mask
    combined_index = use_identity_mask * distances + use_buckets_mask * logspace_index
    # Anything past the last bucket lands in the final one.
    return combined_index.clamp(0, num_total_buckets - 1)
def add_sentence_boundary_token_ids(tensor: torch.Tensor,
                                    mask: torch.Tensor,
                                    sentence_begin_token: Any,
                                    sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Prepend a begin-of-sentence token and append an end-of-sentence token to
    each sequence in the batch.

    Given ``tensor`` of shape ``(batch_size, timesteps)`` or ``(batch_size,
    timesteps, dim)``, return a tensor of shape ``(batch_size, timesteps + 2)``
    or ``(batch_size, timesteps + 2, dim)`` plus an updated mask.

    Parameters
    ----------
    tensor : ``torch.Tensor``
        2D or 3D input as described above.
    mask : ``torch.Tensor``
        Shape ``(batch_size, timesteps)``.
    sentence_begin_token : Any
        For 2D input, a scalar id; for 3D input, a tensor of length ``dim``.
    sentence_end_token : Any
        Same shape conventions as ``sentence_begin_token``.

    Returns
    -------
    tensor_with_boundary_tokens : ``torch.Tensor``
        The input with boundary tokens added at position 0 and just past each
        sequence's last real token.
    new_mask : ``torch.Tensor``
        Mask accounting for the two extra positions.
    """
    # TODO: matthewp, profile this transfer
    sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
    input_shape = list(tensor.data.shape)
    output_shape = list(input_shape)
    output_shape[1] = input_shape[1] + 2
    tensor_with_boundary_tokens = tensor.new_zeros(*output_shape)
    if len(input_shape) == 2:
        tensor_with_boundary_tokens[:, 1:-1] = tensor
        tensor_with_boundary_tokens[:, 0] = sentence_begin_token
        for row, length in enumerate(sequence_lengths):
            tensor_with_boundary_tokens[row, length + 1] = sentence_end_token
        # Token id 0 is treated as padding when rebuilding the mask.
        new_mask = (tensor_with_boundary_tokens != 0).long()
    elif len(input_shape) == 3:
        tensor_with_boundary_tokens[:, 1:-1, :] = tensor
        for row, length in enumerate(sequence_lengths):
            tensor_with_boundary_tokens[row, 0, :] = sentence_begin_token
            tensor_with_boundary_tokens[row, length + 1, :] = sentence_end_token
        # A position is real if any of its features is positive.
        new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()
    else:
        raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
    return tensor_with_boundary_tokens, new_mask
def remove_sentence_boundaries(tensor: torch.Tensor,
                               mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Strip the begin/end-of-sentence positions added by
    ``add_sentence_boundary_token_ids`` (this function is its inverse).

    ``tensor`` has shape ``(batch_size, timesteps, dim)``; the result has
    shape ``(batch_size, timesteps - 2, dim)``.  Sequences are assumed to be
    right-padded with the first real token at index 0 (``mask[:, 0]`` is 1).

    Parameters
    ----------
    tensor : ``torch.Tensor``
        Shape ``(batch_size, timesteps, dim)``.
    mask : ``torch.Tensor``
        Shape ``(batch_size, timesteps)``.

    Returns
    -------
    tensor_without_boundary_tokens : ``torch.Tensor``
        Shape ``(batch_size, timesteps - 2, dim)``.
    new_mask : ``torch.Tensor``
        Shape ``(batch_size, timesteps - 2)``.
    """
    # TODO: matthewp, profile this transfer
    sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
    input_shape = list(tensor.data.shape)
    output_shape = list(input_shape)
    output_shape[1] = input_shape[1] - 2
    tensor_without_boundary_tokens = tensor.new_zeros(*output_shape)
    new_mask = tensor.new_zeros((output_shape[0], output_shape[1]), dtype=torch.long)
    for row, length in enumerate(sequence_lengths):
        if length > 2:
            # Drop position 0 (<S>) and position length - 1 (</S>).
            tensor_without_boundary_tokens[row, :(length - 2), :] = tensor[row, 1:(length - 1), :]
            new_mask[row, :(length - 2)] = 1
    return tensor_without_boundary_tokens, new_mask
def add_positional_features(tensor: torch.Tensor,
                            min_timescale: float = 1.0,
                            max_timescale: float = 1.0e4):
    # pylint: disable=line-too-long
    """
    Add the frequency-based (sinusoidal) positional encodings of
    "Attention is all you Need" (Vaswani et al., 2017) to ``tensor``.

    For each of ``hidden_dim / 2`` geometrically spaced timescales in
    ``(min_timescale, max_timescale)``, the signals ``sin(t / timescale)`` and
    ``cos(t / timescale)`` are computed per timestep ``t`` and concatenated
    along the hidden dimension, letting attention heads use absolute and
    relative positions.

    Parameters
    ----------
    tensor : ``torch.Tensor``
        Shape ``(batch_size, timesteps, hidden_dim)``.
    min_timescale : ``float``, optional (default = 1.0)
        The smallest timescale.
    max_timescale : ``float``, optional (default = 1.0e4)
        The largest timescale.

    Returns
    -------
    The input tensor with the sinusoidal features added.
    """
    _, timesteps, hidden_dim = tensor.size()
    timestep_range = get_range_vector(timesteps, get_device_of(tensor)).data.float()
    # Half of the hidden size carries sin signals, the other half cos.
    num_timescales = hidden_dim // 2
    timescale_range = get_range_vector(num_timescales, get_device_of(tensor)).data.float()
    log_increment = math.log(float(max_timescale) / float(min_timescale)) / float(num_timescales - 1)
    inverse_timescales = min_timescale * torch.exp(timescale_range * -log_increment)
    # Broadcasted outer product - shape (timesteps, num_timescales).
    scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
    # Shape (timesteps, 2 * num_timescales).
    sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
    if hidden_dim % 2 != 0:
        # Odd hidden size: the sin/cos halves cover only hidden_dim - 1
        # columns, so pad one zero column to make shapes line up.
        sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
    return tensor + sinusoids.unsqueeze(0)
| 46.634871 | 130 | 0.669349 |
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar
import logging
import math
import warnings
import torch
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__)
T = TypeVar('T')
def has_tensor(obj) -> bool:
    """
    Return True if ``obj`` is a ``torch.Tensor`` or a (possibly nested)
    dict/list/tuple that contains one anywhere inside it.
    """
    if isinstance(obj, torch.Tensor):
        return True
    if isinstance(obj, dict):
        return any(has_tensor(value) for value in obj.values())
    if isinstance(obj, (list, tuple)):
        return any(has_tensor(item) for item in obj)
    return False
def move_to_device(obj, cuda_device: int):
    """
    Recursively move every tensor inside ``obj`` (a tensor, dict, list, tuple,
    or anything else) onto ``cuda_device``.  If ``cuda_device`` is negative or
    ``obj`` contains no tensors, ``obj`` is returned unchanged.
    """
    if cuda_device < 0 or not has_tensor(obj):
        return obj
    if isinstance(obj, torch.Tensor):
        return obj.cuda(cuda_device)
    if isinstance(obj, dict):
        return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
    if isinstance(obj, list):
        return [move_to_device(item, cuda_device) for item in obj]
    if isinstance(obj, tuple):
        return tuple(move_to_device(item, cuda_device) for item in obj)
    # Non-container, non-tensor leaves pass through untouched.
    return obj
def batch_tensor_dicts(tensor_dicts: List[Dict[str, torch.Tensor]],
                       remove_trailing_dimension: bool = False) -> Dict[str, torch.Tensor]:
    """
    Stack a list of per-instance tensor dictionaries (sharing the same keys)
    into one dictionary of batched tensors.

    Parameters
    ----------
    tensor_dicts : ``List[Dict[str, torch.Tensor]]``
        The dictionaries to batch together, keyed identically.
    remove_trailing_dimension : ``bool``
        If True, squeeze a trailing size-1 dimension from a batched tensor
        when every input tensor under that key ends in a size-1 dimension.
    """
    grouped: Dict[str, List[torch.Tensor]] = defaultdict(list)
    for tensor_dict in tensor_dicts:
        for key, tensor in tensor_dict.items():
            grouped[key].append(tensor)
    batched = {}
    for key, tensors in grouped.items():
        stacked = torch.stack(tensors)
        if remove_trailing_dimension and all(t.size(-1) == 1 for t in tensors):
            stacked = stacked.squeeze(-1)
        batched[key] = stacked
    return batched
def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):
    """
    Compute per-row sequence lengths from a binary mask of shape
    ``(batch_size, sequence_length)`` by summing over the last dimension.
    """
    return torch.sum(mask.long(), dim=-1)
def get_mask_from_sequence_lengths(sequence_lengths: torch.Tensor, max_length: int) -> torch.Tensor:
    """
    Turn a vector of sequence lengths into a binary mask of shape
    ``(batch_size, max_length)``, with 1s in the first ``length`` positions of
    each row.  The position index is built with a cumulative sum of ones so it
    lives on the same device as ``sequence_lengths``.
    """
    # Positions 1..max_length in every row, same device/dtype as the lengths.
    positions = sequence_lengths.new_ones(sequence_lengths.size(0), max_length).cumsum(dim=1)
    return (sequence_lengths.unsqueeze(1) >= positions).long()
def sort_batch_by_length(tensor: torch.Tensor, sequence_lengths: torch.Tensor):
    """
    Sort a batch-first tensor by decreasing sequence length.

    Parameters
    ----------
    tensor : ``torch.Tensor``, required.
        Batch-first tensor to reorder along dimension 0.
    sequence_lengths : ``torch.Tensor``, required.
        1D tensor of per-row sequence lengths.

    Returns
    -------
    sorted_tensor, sorted_sequence_lengths, restoration_indices, permutation_index
        The reordered tensor and lengths, the indices that undo the sort
        (``sorted_tensor.index_select(0, restoration_indices)`` recovers the
        original order), and the permutation that produced the sort.
    """
    if not isinstance(tensor, torch.Tensor) or not isinstance(sequence_lengths, torch.Tensor):
        raise ConfigurationError("Both the tensor and sequence lengths must be torch.Tensors.")
    sorted_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
    sorted_tensor = tensor.index_select(0, permutation_index)
    # Invert the permutation: for each original row, find where it ended up.
    index_range = sequence_lengths.new_tensor(torch.arange(0, len(sequence_lengths)))
    _, reverse_mapping = permutation_index.sort(0, descending=False)
    restoration_indices = index_range.index_select(0, reverse_mapping)
    return sorted_tensor, sorted_lengths, restoration_indices, permutation_index
def get_final_encoder_states(encoder_outputs: torch.Tensor,
                             mask: torch.Tensor,
                             bidirectional: bool = False) -> torch.Tensor:
    """
    Pull out the encoder state at the last unmasked timestep of each sequence,
    producing a ``(batch_size, encoder_output_dim)`` tensor from inputs of
    shape ``(batch_size, sequence_length, encoder_output_dim)``.

    If ``bidirectional``, the output dimension is assumed to be a forward half
    followed by a backward half; the forward half is taken from the final
    timestep and the backward half from timestep 0, then concatenated.
    """
    # Index of the final unmasked position for every batch row.
    last_word_indices = mask.sum(1).long() - 1
    batch_size, _, encoder_output_dim = encoder_outputs.size()
    gather_indices = last_word_indices.view(-1, 1, 1).expand(batch_size, 1, encoder_output_dim)
    final_encoder_output = encoder_outputs.gather(1, gather_indices).squeeze(1)
    if bidirectional:
        half = encoder_output_dim // 2
        forward_part = final_encoder_output[:, :half]
        backward_part = encoder_outputs[:, 0, half:]
        final_encoder_output = torch.cat([forward_part, backward_part], dim=-1)
    return final_encoder_output
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.Tensor):
    """
    Build an inverted-dropout mask shaped like ``tensor_for_masking``: each
    element is 0 with probability ``dropout_probability`` and
    ``1 / (1 - dropout_probability)`` otherwise, so applying the mask
    preserves the expected value of the input.
    """
    keep = torch.rand(tensor_for_masking.size()) > dropout_probability
    binary_mask = tensor_for_masking.new_tensor(keep)
    # Scale kept entries up so the expectation is unchanged.
    return binary_mask.float().div(1.0 - dropout_probability)
def masked_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """
    Softmax over ``vector`` along ``dim`` where ``mask`` marks valid entries.

    With ``mask`` None this is a plain softmax.  Otherwise masked positions
    receive zero probability and the rest are renormalized to sum to 1.  Note
    masked logits are multiplied by zero *before* the softmax rather than set
    to ``-inf``, so a fully-masked slice yields all zeros, not a uniform
    distribution.  The mask is broadcast by unsqueezing dimension 1 until its
    rank matches the vector's.
    """
    if mask is None:
        return torch.nn.functional.softmax(vector, dim=dim)
    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # Zero masked logits, softmax, re-mask, then renormalize (epsilon avoids
    # division by zero for fully-masked slices).
    probabilities = torch.nn.functional.softmax(vector * mask, dim=dim) * mask
    return probabilities / (probabilities.sum(dim=dim, keepdim=True) + 1e-13)
def masked_log_softmax(vector: torch.Tensor, mask: torch.Tensor, dim: int = -1) -> torch.Tensor:
    """
    ``log_softmax`` over ``vector`` along ``dim``, with masked entries pushed
    toward ``-inf`` by adding ``log(mask + 1e-45)`` to the logits.  The tiny
    epsilon keeps the log finite at exactly-zero mask entries; values produced
    at masked positions are effectively ``-inf`` and should not be used.  With
    ``mask`` None this is a plain ``log_softmax``.
    """
    if mask is None:
        return torch.nn.functional.log_softmax(vector, dim=dim)
    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # log(1) == 0 leaves valid logits unchanged; log(~0) sinks masked ones.
    masked_vector = vector + (mask + 1e-45).log()
    return torch.nn.functional.log_softmax(masked_vector, dim=dim)
def masked_max(vector: torch.Tensor,
               mask: torch.Tensor,
               dim: int,
               keepdim: bool = False,
               min_val: float = -1e7) -> torch.Tensor:
    """
    Max over ``dim`` considering only positions where ``mask`` is nonzero.
    Masked entries are replaced with ``min_val`` before the reduction, so a
    fully-masked slice yields ``min_val``.
    """
    masked_out = (1.0 - mask).byte()
    candidates = vector.masked_fill(masked_out, min_val)
    return candidates.max(dim=dim, keepdim=keepdim)[0]
def masked_mean(vector: torch.Tensor,
                mask: torch.Tensor,
                dim: int,
                keepdim: bool = False,
                eps: float = 1e-8) -> torch.Tensor:
    """
    Mean over ``dim`` considering only positions where ``mask`` is nonzero.
    The denominator is clamped to at least ``eps`` so a fully-masked slice
    returns 0 instead of NaN.
    """
    masked_out = (1.0 - mask).byte()
    zeroed = vector.masked_fill(masked_out, 0.0)
    total = torch.sum(zeroed, dim=dim, keepdim=keepdim)
    count = torch.sum(mask.float(), dim=dim, keepdim=keepdim)
    return total / count.clamp(min=eps)
def viterbi_decode(tag_sequence: torch.Tensor,
                   transition_matrix: torch.Tensor,
                   tag_observations: Optional[List[int]] = None):
    """
    Perform Viterbi decoding (in log space) of the most likely tag sequence
    given per-timestep tag scores and a matrix of pairwise transition
    potentials.

    Parameters
    ----------
    tag_sequence : ``torch.Tensor``, required.
        Shape ``(sequence_length, num_tags)`` of per-timestep tag scores.
    transition_matrix : ``torch.Tensor``, required.
        Shape ``(num_tags, num_tags)``; entry (i, j) scores transitioning
        from tag i to tag j.
    tag_observations : ``Optional[List[int]]``, optional (default = None)
        Optional per-timestep observed tags; -1 means unobserved.  Observed
        timesteps are forced by giving the observed tag a very large score
        (100000) in that timestep's path scores.

    Returns
    -------
    viterbi_path : ``List[int]``
        The most likely sequence of tags.
    viterbi_score : ``torch.Tensor``
        The (unnormalized) score of that sequence.
    """
    sequence_length, num_tags = list(tag_sequence.size())
    if tag_observations:
        if len(tag_observations) != sequence_length:
            raise ConfigurationError("Observations were provided, but they were not the same length "
                                     "as the sequence. Found sequence of length: {} and evidence: {}"
                                     .format(sequence_length, tag_observations))
    else:
        # No evidence: treat every timestep as unobserved.
        tag_observations = [-1 for _ in range(sequence_length)]
    # path_scores[t] holds the best cumulative score for each tag at timestep
    # t; path_indices[t-1] holds the backpointers used to recover the path.
    path_scores = []
    path_indices = []
    if tag_observations[0] != -1:
        # Force the observed tag by giving it an overwhelmingly large score.
        one_hot = torch.zeros(num_tags)
        one_hot[tag_observations[0]] = 100000.
        path_scores.append(one_hot)
    else:
        path_scores.append(tag_sequence[0, :])
    for timestep in range(1, sequence_length):
        # Best previous-tag score for every (previous, current) tag pair.
        summed_potentials = path_scores[timestep - 1].unsqueeze(-1) + transition_matrix
        scores, paths = torch.max(summed_potentials, 0)
        observation = tag_observations[timestep]
        # Warn if consecutive observed tags have a near-impossible transition.
        if tag_observations[timestep - 1] != -1:
            if transition_matrix[tag_observations[timestep - 1], observation] < -10000:
                logger.warning("The pairwise potential between tags you have passed as "
                               "observations is extremely unlikely. Double check your evidence "
                               "or transition potentials!")
        if observation != -1:
            one_hot = torch.zeros(num_tags)
            one_hot[observation] = 100000.
            path_scores.append(one_hot)
        else:
            path_scores.append(tag_sequence[timestep, :] + scores.squeeze())
        path_indices.append(paths.squeeze())
    # Best final tag, then walk the backpointers in reverse to recover the path.
    viterbi_score, best_path = torch.max(path_scores[-1], 0)
    viterbi_path = [int(best_path.numpy())]
    for backward_timestep in reversed(path_indices):
        viterbi_path.append(int(backward_timestep[viterbi_path[-1]]))
    viterbi_path.reverse()
    return viterbi_path, viterbi_score
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],
                        num_wrapping_dims: int = 0) -> torch.LongTensor:
    """
    Derive a binary padding mask from a dictionary of text-field tensors.

    If a "mask" entry is present it is returned as-is.  Otherwise the tensor
    with the fewest dimensions is used: after discounting
    ``num_wrapping_dims`` extra leading dimensions, a 2D tensor is treated as
    token ids (nonzero means a real token) and a 3D tensor as per-token
    character ids (a token is real if any of its character ids is positive).
    """
    if "mask" in text_field_tensors:
        return text_field_tensors["mask"]
    ranked = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]
    ranked.sort(key=lambda pair: pair[0])
    smallest_dim = ranked[0][0] - num_wrapping_dims
    if smallest_dim == 2:
        token_tensor = ranked[0][1]
        return (token_tensor != 0).long()
    if smallest_dim == 3:
        character_tensor = ranked[0][1]
        return ((character_tensor > 0).long().sum(dim=-1) > 0).long()
    raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim))
def last_dim_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Deprecated alias for ``masked_softmax(tensor, mask, dim=-1)``; emits a
    ``DeprecationWarning`` and is slated for removal in version 0.8.
    """
    warnings.warn("``last_dim_softmax`` was deprecated in favor of just using ``masked_softmax`` "
                  "in version 0.6.1. It will be removed in version 0.8.", DeprecationWarning)
    return masked_softmax(tensor, mask, dim=-1)
def last_dim_log_softmax(tensor: torch.Tensor, mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Deprecated alias for ``masked_log_softmax(tensor, mask, dim=-1)``; emits a
    ``DeprecationWarning`` and is slated for removal in version 0.8.
    """
    warnings.warn("``last_dim_log_softmax`` was deprecated in favor of just using "
                  "``masked_log_softmax`` in version 0.6.1. It will be removed in version 0.8.",
                  DeprecationWarning)
    return masked_log_softmax(tensor, mask, dim=-1)
def weighted_sum(matrix: torch.Tensor, attention: torch.Tensor) -> torch.Tensor:
    """
    Take a weighted sum of the rows of ``matrix`` using ``attention`` as the
    weights, summing over the second-to-last dimension.

    Typical case: ``matrix`` is ``(batch_size, num_rows, embedding_dim)`` and
    ``attention`` is ``(batch_size, num_rows)``, producing a result of shape
    ``(batch_size, embedding_dim)``.  Attention tensors with extra leading
    dimensions are handled by expanding the matrix to match before
    multiplying.
    """
    # Special-case the common 2D/3D shapes, for which there are efficient
    # operations in pytorch that already do the computation we need.
    if attention.dim() == 2 and matrix.dim() == 3:
        return attention.unsqueeze(1).bmm(matrix).squeeze(1)
    if attention.dim() == 3 and matrix.dim() == 3:
        return attention.bmm(matrix)
    if matrix.dim() - 1 < attention.dim():
        # Insert singleton dims after the batch axis and expand the matrix so
        # it lines up with the attention tensor's extra leading dimensions.
        expanded_size = list(matrix.size())
        for i in range(attention.dim() - matrix.dim() + 1):
            matrix = matrix.unsqueeze(1)
            expanded_size.insert(i + 1, attention.size(i + 1))
        matrix = matrix.expand(*expanded_size)
    intermediate = attention.unsqueeze(-1).expand_as(matrix) * matrix
    return intermediate.sum(dim=-2)
def sequence_cross_entropy_with_logits(logits: torch.FloatTensor,
                                       targets: torch.LongTensor,
                                       weights: torch.FloatTensor,
                                       batch_average: bool = None,
                                       average: str = "batch",
                                       label_smoothing: float = None) -> torch.FloatTensor:
    """
    Cross entropy over a sequence of class logits with per-token weights
    (normally a padding mask).

    Parameters
    ----------
    logits : ``torch.FloatTensor``
        Shape ``(batch_size, sequence_length, num_classes)`` of unnormalized
        class scores.
    targets : ``torch.LongTensor``
        Shape ``(batch_size, sequence_length)`` of gold class indices.
    weights : ``torch.FloatTensor``
        Shape ``(batch_size, sequence_length)``; each token's loss is scaled
        by its weight (0 for padding).
    batch_average : ``bool``, optional
        Deprecated; ``True`` maps to ``average="batch"`` and ``False`` to
        ``average=None``.
    average : ``str``, optional (default = "batch")
        ``"batch"`` returns the mean of per-sequence average losses;
        ``"token"`` returns the loss averaged over all weighted tokens;
        ``None`` returns an unreduced per-sequence loss of shape
        ``(batch_size,)``.
    label_smoothing : ``float``, optional
        If positive, distribute this much probability mass uniformly over the
        classes instead of using hard one-hot targets.

    Returns
    -------
    A scalar loss, or a ``(batch_size,)`` vector when ``average`` is None.

    Raises
    ------
    ValueError
        If ``average`` is not one of ``None``, ``"token"``, or ``"batch"``.
    """
    if batch_average is not None:
        # Maintain old behavior
        if batch_average:
            warnings.warn("batch_average=True was deprecated and replaced "
                          "with average='batch' in version 0.6.2. It will be "
                          "removed in version 0.8.", DeprecationWarning)
            average = "batch"
        else:
            warnings.warn("batch_average=False was deprecated and replaced "
                          "with average=None in version 0.6.2. It will be "
                          "removed in version 0.8.", DeprecationWarning)
            average = None
    if average not in {None, "token", "batch"}:
        # Bug fix: the original message was missing the f-string prefix and
        # printed the literal text "f{average}" instead of the bad value.
        raise ValueError(f"Got average {average}, expected one of "
                         "None, 'token', or 'batch'")
    # shape : (batch * sequence_length, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # shape : (batch * sequence_length, num_classes)
    log_probs_flat = torch.nn.functional.log_softmax(logits_flat, dim=-1)
    # shape : (batch * max_len, 1)
    targets_flat = targets.view(-1, 1).long()
    if label_smoothing is not None and label_smoothing > 0.0:
        num_classes = logits.size(-1)
        smoothing_value = label_smoothing / num_classes
        # Fill all the correct indices with 1 - smoothing value.
        one_hot_targets = torch.zeros_like(log_probs_flat).scatter_(-1, targets_flat, 1.0 - label_smoothing)
        smoothed_targets = one_hot_targets + smoothing_value
        negative_log_likelihood_flat = - log_probs_flat * smoothed_targets
        negative_log_likelihood_flat = negative_log_likelihood_flat.sum(-1, keepdim=True)
    else:
        # Contribution to the negative log likelihood only comes from the exact indices
        # of the targets, as the target distributions are one-hot. Here we use torch.gather
        # to extract the indices of the num_classes dimension which contribute to the loss.
        # shape : (batch * sequence_length, 1)
        negative_log_likelihood_flat = - torch.gather(log_probs_flat, dim=1, index=targets_flat)
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood_flat.view(*targets.size())
    # shape : (batch, sequence_length)
    negative_log_likelihood = negative_log_likelihood * weights.float()
    if average == "batch":
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
        num_non_empty_sequences = ((weights.sum(1) > 0).float().sum() + 1e-13)
        return per_batch_loss.sum() / num_non_empty_sequences
    elif average == "token":
        return negative_log_likelihood.sum() / (weights.sum().float() + 1e-13)
    else:
        # shape : (batch_size,)
        per_batch_loss = negative_log_likelihood.sum(1) / (weights.sum(1).float() + 1e-13)
        return per_batch_loss
def replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:
    """
    Return a copy of ``tensor`` with positions where ``mask`` is 0 replaced by
    ``replace_with``.  ``mask`` must have the same number of dimensions as
    ``tensor``.
    """
    if tensor.dim() != mask.dim():
        raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
    inverted_mask = (1 - mask).byte()
    return tensor.masked_fill(inverted_mask, replace_with)
def tensors_equal(tensor1: torch.Tensor, tensor2: torch.Tensor, tolerance: float = 1e-12) -> bool:
    """
    Structural equality check that works on tensors, and on lists, tuples,
    and dicts containing tensors, comparing tensor elements with an absolute
    tolerance.  Falls back to ``==`` for any other types.
    """
    # pylint: disable=too-many-return-statements
    if isinstance(tensor1, (list, tuple)):
        if not isinstance(tensor2, (list, tuple)) or len(tensor1) != len(tensor2):
            return False
        return all(tensors_equal(t1, t2, tolerance) for t1, t2 in zip(tensor1, tensor2))
    if isinstance(tensor1, dict):
        if not isinstance(tensor2, dict) or tensor1.keys() != tensor2.keys():
            return False
        return all(tensors_equal(tensor1[key], tensor2[key], tolerance) for key in tensor1)
    if isinstance(tensor1, torch.Tensor):
        if not isinstance(tensor2, torch.Tensor) or tensor1.size() != tensor2.size():
            return False
        return ((tensor1 - tensor2).abs().float() < tolerance).all()
    try:
        return tensor1 == tensor2
    except RuntimeError:
        print(type(tensor1), type(tensor2))
        raise
def device_mapping(cuda_device: int):
    """
    Build a mapping callable ``(storage, location) -> storage`` that moves
    storages onto ``cuda_device`` when it is non-negative and leaves them
    untouched otherwise (the ``map_location`` signature used by
    ``torch.load`` — NOTE(review): confirm against callers).
    """
    def inner_device_mapping(storage: torch.Storage, location) -> torch.Storage:  # pylint: disable=unused-argument
        return storage.cuda(cuda_device) if cuda_device >= 0 else storage
    return inner_device_mapping
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
    """
    Combine a list of tensors along their last axis according to a
    combination string such as ``"1,2,1*2"``: each comma-separated piece is
    either a 1-based tensor index (``x``/``y`` alias 1/2) or two indices
    joined by an elementwise operator (``*``, ``/``, ``+``, ``-``), and the
    resulting pieces are concatenated on the last dimension.
    """
    if len(tensors) > 9:
        raise ConfigurationError("Double-digit tensor lists not currently supported")
    normalized = combination.replace('x', '1').replace('y', '2')
    pieces = [_get_combination(piece, tensors) for piece in normalized.split(',')]
    return torch.cat(pieces, dim=-1)
def _rindex(sequence: Sequence[T], obj: T) -> int:
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
return first_tensor - second_tensor
else:
raise ConfigurationError("Invalid operation: " + operation)
def combine_tensors_and_multiply(combination: str,
                                 tensors: List[torch.Tensor],
                                 weights: torch.nn.Parameter) -> torch.Tensor:
    """
    Combine tensors according to ``combination`` and multiply by ``weights``
    without materializing the full combined tensor: each comma-separated
    piece is multiplied by its own slice of the weight vector and the partial
    products are summed.
    """
    if len(tensors) > 9:
        raise ConfigurationError("Double-digit tensor lists not currently supported")
    pieces = combination.replace('x', '1').replace('y', '2').split(',')
    tensor_dims = [tensor.size(-1) for tensor in tensors]
    combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
    # Walk through the weight vector, handing each piece its own slice.
    partial_products = []
    offset = 0
    for piece, width in zip(pieces, combination_dims):
        weight_slice = weights[offset:offset + width]
        offset += width
        partial_products.append(_get_combination_and_multiply(piece, tensors, weight_slice))
    result = partial_products[0]
    for partial in partial_products[1:]:
        result = result + partial
    return result
def _get_combination_and_multiply(combination: str,
                                  tensors: List[torch.Tensor],
                                  weight: torch.nn.Parameter) -> torch.Tensor:
    """
    Compute the product of a single combination piece with ``weight`` without
    materializing the combined tensor.  For a bare digit this is a plain
    ``matmul``; for ``+``/``-`` pieces the multiplication distributes over
    the two operands; for ``*``/``/`` the weight is folded into the first
    operand and combined with the (transposed) second via ``matmul``.
    """
    if combination.isdigit():
        index = int(combination) - 1
        return torch.matmul(tensors[index], weight)
    else:
        if len(combination) != 3:
            raise ConfigurationError("Invalid combination: " + combination)
        first_tensor = _get_combination(combination[0], tensors)
        second_tensor = _get_combination(combination[2], tensors)
        operation = combination[1]
        if operation == '*':
            if first_tensor.dim() > 4 or second_tensor.dim() > 4:
                raise ValueError("Tensors with dim > 4 not currently supported")
            # 4D operands: drop the last size-1 dimension (presumably added
            # for broadcasting — _rindex finds it) so matmul shapes line up.
            if first_tensor.dim() == 4:
                expanded_dim = _rindex(first_tensor.size(), 1)
                first_tensor = first_tensor.squeeze(expanded_dim)
            if second_tensor.dim() == 4:
                expanded_dim = _rindex(second_tensor.size(), 1)
                second_tensor = second_tensor.squeeze(expanded_dim)
            # (first * weight) @ second^T == (first * second) @ weight
            intermediate = first_tensor * weight
            return torch.matmul(intermediate, second_tensor.transpose(-1, -2)).squeeze(-1)
        elif operation == '/':
            if first_tensor.dim() > 4 or second_tensor.dim() > 4:
                raise ValueError("Tensors with dim > 4 not currently supported")
            if first_tensor.dim() == 4:
                expanded_dim = _rindex(first_tensor.size(), 1)
                first_tensor = first_tensor.squeeze(expanded_dim)
            if second_tensor.dim() == 4:
                expanded_dim = _rindex(second_tensor.size(), 1)
                second_tensor = second_tensor.squeeze(expanded_dim)
            # Division uses the elementwise reciprocal of the second operand.
            intermediate = first_tensor * weight
            return torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2)).squeeze(-1)
        elif operation == '+':
            return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
        elif operation == '-':
            return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
        else:
            raise ConfigurationError("Invalid operation: " + operation)
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
    """
    Compute the resulting last-axis dimension of
    ``combine_tensors(combination, tensors)`` given the last-axis dimensions
    of the input tensors; useful for sizing weight matrices in advance.
    """
    if len(tensor_dims) > 9:
        raise ConfigurationError("Double-digit tensor lists not currently supported")
    combination = combination.replace('x', '1').replace('y', '2')
    return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
if len(combination) != 3:
raise ConfigurationError("Invalid combination: " + combination)
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
if first_tensor_dim != second_tensor_dim:
raise ConfigurationError("Tensor dims must match for operation \"{}\"".format(operation))
return first_tensor_dim
def logsumexp(tensor: torch.Tensor,
              dim: int = -1,
              keepdim: bool = False) -> torch.Tensor:
    """
    Numerically stable ``log(sum(exp(tensor), dim))``: the per-slice maximum
    is subtracted before exponentiating and added back afterwards.
    """
    max_score, _ = tensor.max(dim, keepdim=keepdim)
    # Make the max broadcastable against the original tensor when keepdim=False.
    broadcastable_max = max_score if keepdim else max_score.unsqueeze(dim)
    stable_vec = tensor - broadcastable_max
    return max_score + stable_vec.exp().sum(dim, keepdim=keepdim).log()
def get_device_of(tensor: torch.Tensor) -> int:
    """Return the CUDA device index holding ``tensor``, or ``-1`` for CPU tensors."""
    return tensor.get_device() if tensor.is_cuda else -1
def flatten_and_batch_shift_indices(indices: torch.Tensor,
                                    sequence_length: int) -> torch.Tensor:
    """
    Turn per-batch indices of shape (batch_size, d_1, ..., d_n) into a flat
    1D index vector into a (batch_size * sequence_length, ...) view of the
    target tensor, by offsetting batch ``b`` by ``b * sequence_length``.
    """
    # Shape: (batch_size,) -- element b holds b * sequence_length.
    per_batch_offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
    # Add trailing singleton dims so the offsets broadcast over every
    # index dimension after the batch dimension.
    for _ in range(indices.dim() - 1):
        per_batch_offsets = per_batch_offsets.unsqueeze(1)
    # Shape: (batch_size * d_1 * ... * d_n,)
    return (indices + per_batch_offsets).view(-1)
def batched_index_select(target: torch.Tensor,
                         indices: torch.LongTensor,
                         flattened_indices: Optional[torch.LongTensor] = None) -> torch.Tensor:
    """
    Select entries of ``target`` (batch_size, sequence_length, embedding_size)
    at per-batch positions ``indices`` (batch_size, d_1, ..., d_n), returning
    a tensor of shape (batch_size, d_1, ..., d_n, embedding_size).

    ``flattened_indices`` may be supplied to reuse a precomputed result of
    ``flatten_and_batch_shift_indices``.
    """
    if flattened_indices is None:
        # Shape: (batch_size * d_1 * ... * d_n)
        flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
    # Collapse batch and sequence dims so a single index_select suffices.
    # Shape: (batch_size * sequence_length, embedding_size)
    flat_target = target.view(-1, target.size(-1))
    # Shape: (batch_size * d_1 * ... * d_n, embedding_size)
    picked = flat_target.index_select(0, flattened_indices)
    # Restore the index dimensions, with the embedding dim appended.
    result_shape = list(indices.size()) + [target.size(-1)]
    return picked.view(*result_shape)
def flattened_index_select(target: torch.Tensor,
                           indices: torch.LongTensor) -> torch.Tensor:
    """
    Select along dim 1 of ``target`` (batch_size, sequence_length, embedding_size)
    with a 2D (set_size, subset_size) index tensor, returning a
    (batch_size, set_size, subset_size, embedding_size) tensor.
    """
    if indices.dim() != 2:
        raise ConfigurationError("Indices passed to flattened_index_select had shape {} but "
                                 "only 2 dimensional inputs are supported.".format(indices.size()))
    # One flat select over (set_size * subset_size,) indices...
    picked = target.index_select(1, indices.view(-1))
    # ...then un-flatten back into separate set and subset dimensions.
    return picked.view(target.size(0), indices.size(0), indices.size(1), -1)
def get_range_vector(size: int, device: int) -> torch.Tensor:
    """
    Return ``[0, 1, ..., size - 1]`` as a LongTensor on the given device
    index, where ``-1`` means CPU.
    """
    if device <= -1:
        return torch.arange(0, size, dtype=torch.long)
    # Build directly on the GPU (cumsum of ones, shifted to start at zero)
    # rather than creating on the CPU and copying over.
    return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
def bucket_values(distances: torch.Tensor,
                  num_identity_buckets: int = 4,
                  num_total_buckets: int = 10) -> torch.Tensor:
    """
    Map distances into ``num_total_buckets`` semi-logscale buckets: values up
    to ``num_identity_buckets`` each keep their own bucket, larger values fall
    into log2-spaced buckets, and the result is clamped into the valid range.
    """
    # floor(log2(d)) picks the logscale bucket; shifting by
    # (num_identity_buckets - 1) places it after the identity buckets.
    logscale_index = (distances.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)
    # Per-element selector: 1 where the raw distance keeps its own bucket.
    keep_identity = (distances <= num_identity_buckets).long()
    use_logscale = 1 - keep_identity
    bucketed = keep_identity * distances + use_logscale * logscale_index
    # Anything beyond the last bucket collapses into it.
    return bucketed.clamp(0, num_total_buckets - 1)
def add_sentence_boundary_token_ids(tensor: torch.Tensor,
                                    mask: torch.Tensor,
                                    sentence_begin_token: Any,
                                    sentence_end_token: Any) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Add begin- and end-of-sentence markers around each sequence in a batch.

    Accepts either a 2D tensor of token ids (batch_size, timesteps) or a 3D
    tensor of vector-valued tokens (batch_size, timesteps, dim), plus a
    (batch_size, timesteps) mask whose row sums give the true sequence
    lengths.  Returns the padded tensor of shape (batch_size, timesteps + 2,
    ...) together with a freshly computed mask, and raises ValueError for any
    other input rank.
    """
    # TODO: matthewp, profile this transfer
    sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
    tensor_shape = list(tensor.data.shape)
    new_shape = list(tensor_shape)
    # Two extra timesteps: one for the begin marker, one for the end marker.
    new_shape[1] = tensor_shape[1] + 2
    tensor_with_boundary_tokens = tensor.new_zeros(*new_shape)
    if len(tensor_shape) == 2:
        # Token-id input: shift the sequence right by one, write the begin
        # marker at position 0 and the end marker just past each true length.
        tensor_with_boundary_tokens[:, 1:-1] = tensor
        tensor_with_boundary_tokens[:, 0] = sentence_begin_token
        for i, j in enumerate(sequence_lengths):
            tensor_with_boundary_tokens[i, j + 1] = sentence_end_token
        # NOTE(review): the recomputed mask treats every zero entry as padding,
        # which assumes 0 is never a real token id -- confirm with callers.
        new_mask = (tensor_with_boundary_tokens != 0).long()
    elif len(tensor_shape) == 3:
        tensor_with_boundary_tokens[:, 1:-1, :] = tensor
        for i, j in enumerate(sequence_lengths):
            tensor_with_boundary_tokens[i, 0, :] = sentence_begin_token
            tensor_with_boundary_tokens[i, j + 1, :] = sentence_end_token
        # A timestep counts as real if any feature in its vector is positive.
        new_mask = ((tensor_with_boundary_tokens > 0).long().sum(dim=-1) > 0).long()
    else:
        raise ValueError("add_sentence_boundary_token_ids only accepts 2D and 3D input")
    return tensor_with_boundary_tokens, new_mask
def remove_sentence_boundaries(tensor: torch.Tensor,
                               mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Inverse of ``add_sentence_boundary_token_ids`` for 3D input: strip the
    first and last real timestep of every sequence in the batch.

    ``tensor`` is (batch_size, timesteps, dim) and ``mask`` is
    (batch_size, timesteps); returns the trimmed tensor of shape
    (batch_size, timesteps - 2, dim) and its matching mask.  Sequences of
    length 2 or less come back all zeros / fully masked.
    """
    # TODO: matthewp, profile this transfer
    sequence_lengths = mask.sum(dim=1).detach().cpu().numpy()
    tensor_shape = list(tensor.data.shape)
    new_shape = list(tensor_shape)
    new_shape[1] = tensor_shape[1] - 2
    tensor_without_boundary_tokens = tensor.new_zeros(*new_shape)
    new_mask = tensor.new_zeros((new_shape[0], new_shape[1]), dtype=torch.long)
    for i, j in enumerate(sequence_lengths):
        # Only sequences longer than the two boundary tokens have content left.
        if j > 2:
            tensor_without_boundary_tokens[i, :(j - 2), :] = tensor[i, 1:(j - 1), :]
            new_mask[i, :(j - 2)] = 1
    return tensor_without_boundary_tokens, new_mask
def add_positional_features(tensor: torch.Tensor,
                            min_timescale: float = 1.0,
                            max_timescale: float = 1.0e4):
    # pylint: disable=line-too-long
    """
    Add sinusoidal positional encodings (Vaswani et al., 2017) to a
    (batch_size, timesteps, hidden_dim) tensor and return the result.

    Each timestep gets ``hidden_dim // 2`` sine channels followed by the
    matching cosine channels at geometrically spaced frequencies between
    ``min_timescale`` and ``max_timescale``; an odd ``hidden_dim`` is padded
    with one zero channel.
    """
    _, timesteps, hidden_dim = tensor.size()
    # Build position / frequency index vectors directly on the input's device
    # so no host-to-device copy is needed for CUDA tensors.
    timestep_range = torch.arange(timesteps, dtype=torch.float, device=tensor.device)
    # We're generating both cos and sin frequencies,
    num_timescales = hidden_dim // 2
    timescale_range = torch.arange(num_timescales, dtype=torch.float, device=tensor.device)
    # Bug fix: guard the denominator with max(..., 1).  hidden_dim of 2 or 3
    # yields a single timescale, and dividing by (num_timescales - 1) == 0
    # previously raised ZeroDivisionError.  This mirrors tensor2tensor's
    # get_timing_signal_1d reference implementation.
    log_timescale_increments = (math.log(float(max_timescale) / float(min_timescale)) /
                                max(float(num_timescales - 1), 1.0))
    inverse_timescales = min_timescale * torch.exp(timescale_range * -log_timescale_increments)
    # Shape: (timesteps, num_timescales)
    scaled_time = timestep_range.unsqueeze(1) * inverse_timescales.unsqueeze(0)
    # Sine channels first, then cosine channels.
    sinusoids = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 1)
    if hidden_dim % 2 != 0:
        # Odd hidden size: pad one zero feature so the shapes line up.
        sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)
    return tensor + sinusoids.unsqueeze(0)
| true | true |
f727e5f78b42c3a1e0eaf2ad37a23824a5767c53 | 828 | py | Python | build_scripts/generate_rc_build_number.py | MATTHEWFRAZER/trochilidae | 35e907ba9dcb1f283f79f4f32d61db6b53a1ca97 | [
"MIT"
] | null | null | null | build_scripts/generate_rc_build_number.py | MATTHEWFRAZER/trochilidae | 35e907ba9dcb1f283f79f4f32d61db6b53a1ca97 | [
"MIT"
] | null | null | null | build_scripts/generate_rc_build_number.py | MATTHEWFRAZER/trochilidae | 35e907ba9dcb1f283f79f4f32d61db6b53a1ca97 | [
"MIT"
] | 1 | 2021-11-12T18:49:15.000Z | 2021-11-12T18:49:15.000Z | #!/usr/bin/python
import subprocess
# subprocess.DEVNULL exists on Python 3; fall back to an os.devnull handle on
# Python 2 so git's stdout/stderr can be discarded either way.
try:
    from subprocess import DEVNULL # py3k
except ImportError:
    import os
    DEVNULL = open(os.devnull, 'wb')
version_path = "version.txt"
build_type = "rc"
# NOTE(review): readline keeps a trailing newline if version.txt ends with
# one, which would leak into the tag -- presumably the file has no trailing
# newline; confirm.
with open(version_path, "r") as f:
    version = f.readline()
build_number = 0
tag = "{0}-{1}.{2}".format(version, build_type, build_number)
# TODO: revisit
# while ideally, I would like separation between one liners and scripts in our build process
# this was the simplest way I could think of to implement this
# because it is done this way, it forces us to pay attention to the order in which we call into this script
# Probe existing git tags: keep bumping the build number until a tag name is
# found that does not yet exist ("git rev-parse --verify" exits non-zero for
# unknown refs).
while subprocess.call(["git", "rev-parse", "--verify", tag], stdout=DEVNULL, stderr=DEVNULL) == 0:
    build_number += 1
    tag = "{0}-{1}.{2}".format(version, build_type, build_number)
print(tag) | 28.551724 | 107 | 0.704106 |
import subprocess
try:
from subprocess import DEVNULL
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
version_path = "version.txt"
build_type = "rc"
with open(version_path, "r") as f:
version = f.readline()
build_number = 0
tag = "{0}-{1}.{2}".format(version, build_type, build_number)
while subprocess.call(["git", "rev-parse", "--verify", tag], stdout=DEVNULL, stderr=DEVNULL) == 0:
build_number += 1
tag = "{0}-{1}.{2}".format(version, build_type, build_number)
print(tag) | true | true |
f727e839bc9f60bee5264f963b12f7ba8265d8ef | 6,935 | py | Python | dsmr_parser/parsers.py | Smeedy/dsmr_parser | 1ab4cb4b11eec41c559a33d73e70c211216854d1 | [
"MIT"
] | null | null | null | dsmr_parser/parsers.py | Smeedy/dsmr_parser | 1ab4cb4b11eec41c559a33d73e70c211216854d1 | [
"MIT"
] | null | null | null | dsmr_parser/parsers.py | Smeedy/dsmr_parser | 1ab4cb4b11eec41c559a33d73e70c211216854d1 | [
"MIT"
] | null | null | null | import logging
import re
from PyCRC.CRC16 import CRC16
from dsmr_parser.objects import MBusObject, CosemObject
from dsmr_parser.exceptions import ParseContentError, InvalidChecksumError, NoChecksumError
logger = logging.getLogger(__name__)
class TelegramParser(object):
    """Parses a full DSMR telegram string into a dict of parsed line objects."""

    def __init__(self, telegram_specification, apply_checksum_validation=True):
        """
        :param telegram_specification: determines how the telegram is parsed
        :param apply_checksum_validation: validate checksum if applicable for
            telegram DSMR version (v4 and up).
        :type telegram_specification: dict
        """
        self.telegram_specification = telegram_specification
        self.apply_checksum_validation = apply_checksum_validation

    def parse(self, telegram_data):
        """
        Parse telegram from string to dict.
        The telegram str type makes python 2.x integration easier.

        :param str telegram_data: full telegram from start ('/') to checksum
            ('!ABCD') including line endings in between the telegram's lines
        :rtype: dict
        :returns: Shortened example:
            {
                ..
                r'\d-\d:96\.1\.1.+?\r\n': <CosemObject>,  # EQUIPMENT_IDENTIFIER
                r'\d-\d:1\.8\.1.+?\r\n': <CosemObject>,  # ELECTRICITY_USED_TARIFF_1
                r'\d-\d:24\.3\.0.+?\r\n.+?\r\n': <MBusObject>,  # GAS_METER_READING
                ..
            }
        :raises ParseError:
        :raises InvalidChecksumError:
        """
        # CRC validation only applies to telegram versions that carry a
        # checksum (DSMR v4 and up, per the specification flag).
        if self.apply_checksum_validation \
                and self.telegram_specification['checksum_support']:
            self.validate_checksum(telegram_data)
        telegram = {}
        for signature, parser in self.telegram_specification['objects'].items():
            match = re.search(signature, telegram_data, re.DOTALL)
            # Some signatures are optional and may not be present,
            # so only parse lines that match
            if match:
                telegram[signature] = parser.parse(match.group(0))
        return telegram

    @staticmethod
    def validate_checksum(telegram):
        """
        Verify the telegram's CRC16 trailer, raising when it is missing or
        does not match the telegram contents.

        :param str telegram:
        :raises ParseError:
        :raises InvalidChecksumError:
        """
        # Extract the part for which the checksum applies.
        checksum_contents = re.search(r'\/.+\!', telegram, re.DOTALL)
        # Extract the hexadecimal checksum value itself.
        # The line ending '\r\n' for the checksum line can be ignored.
        checksum_hex = re.search(r'((?<=\!)[0-9A-Z]{4})+', telegram)
        if not checksum_contents:
            raise ParseContentError(
                'Failed to perform CRC validation because the telegram is '
                'incomplete: The content value is missing.'
            )
        elif checksum_contents and not checksum_hex:
            raise NoChecksumError(
                'Failed to perform CRC validation because the telegram is '
                'incomplete: The CRC is missing.'
            )
        # The CRC16 covers everything from '/' up to and including '!'.
        calculated_crc = CRC16().calculate(checksum_contents.group(0))
        expected_crc = int(checksum_hex.group(0), base=16)
        if calculated_crc != expected_crc:
            raise InvalidChecksumError(
                "Invalid telegram. The CRC checksum '{}' does not match the "
                "expected '{}'".format(
                    calculated_crc,
                    expected_crc
                )
            )
class DSMRObjectParser(object):
    """
    Parses an object (can also be seen as a 'line') from a telegram.

    ``value_formats`` are the per-value parsers (e.g. ValueParser instances)
    applied positionally to the parenthesized value groups of the line.
    """
    def __init__(self, *value_formats):
        self.value_formats = value_formats

    def _parse(self, line):
        # Match value groups, but exclude the parentheses
        pattern = re.compile(r'((?<=\()[0-9a-zA-Z\.\*]{0,}(?=\)))+')
        values = re.findall(pattern, line)
        # Convert empty value groups to None for clarity.
        values = [None if value == '' else value for value in values]
        if not values or len(values) != len(self.value_formats):
            # Bug fix: this previously raised the undefined name `ParseError`
            # (which surfaced as a NameError) and never interpolated the
            # %-style placeholders into the message.
            raise ParseContentError(
                "Invalid '{0}' line for '{1}'".format(line, self)
            )
        return [self.value_formats[i].parse(value)
                for i, value in enumerate(values)]
class MBusParser(DSMRObjectParser):
    """
    Gas meter value parser.

    These are lines with a timestamp and gas meter value.

    Line format:
    'ID (TST) (Mv1*U1)'

     1  2     3  4

    1) OBIS Reduced ID-code
    2) Time Stamp (TST) of capture time of measurement value
    3) Measurement value 1 (most recent entry of buffer attribute without unit)
    4) Unit of measurement values (Unit of capture objects attribute)
    """
    def parse(self, line):
        # Wrap the positional value groups in an MBusObject for typed access.
        return MBusObject(self._parse(line))
class CosemParser(DSMRObjectParser):
    """
    Cosem object parser.

    These are data objects with a single value that optionally have a unit of
    measurement.

    Line format:
    ID (Mv*U)

    1  23 45

    1) OBIS Reduced ID-code
    2) Separator "(", ASCII 28h
    3) COSEM object attribute value
    4) Unit of measurement values (Unit of capture objects attribute) - only if
       applicable
    5) Separator ")", ASCII 29h
    """
    def parse(self, line):
        # Wrap the single parsed value group in a CosemObject.
        return CosemObject(self._parse(line))
class ProfileGenericParser(DSMRObjectParser):
    """
    Power failure log parser.

    These are data objects with multiple repeating groups of values.

    Line format:
    ID (z) (ID1) (TST) (Bv1*U1) (TST) (Bvz*Uz)

    1  2   3     4     5   6    7     8   9

    1) OBIS Reduced ID-code
    2) Number of values z (max 10).
    3) Identifications of buffer values (OBIS Reduced ID codes of capture objects attribute)
    4) Time Stamp (TST) of power failure end time
    5) Buffer value 1 (most recent entry of buffer attribute without unit)
    6) Unit of buffer values (Unit of capture objects attribute)
    7) Time Stamp (TST) of power failure end time
    8) Buffer value 2 (oldest entry of buffer attribute without unit)
    9) Unit of buffer values (Unit of capture objects attribute)
    """
    def parse(self, line):
        # Parsing of the repeating buffer groups is not implemented yet.
        raise NotImplementedError()
class ValueParser(object):
    """
    Parses a single value group from a DSMRObject line.

    The value may carry a unit after a '*' separator, and is coerced with
    ``coerce_type`` when present.

    Example with coerce_type being int:
        (002*A) becomes {'value': 2, 'unit': 'A'}
    Example with coerce_type being str:
        (42) becomes {'value': '42', 'unit': None}
    """
    def __init__(self, coerce_type):
        self.coerce_type = coerce_type

    def parse(self, value):
        unit = None
        if value and '*' in value:
            value, unit = value.split('*')
        # A missing value group stays None and must not be coerced.
        coerced = value if value is None else self.coerce_type(value)
        return {
            'value': coerced,
            'unit': unit
        }
| 31.098655 | 92 | 0.617159 | import logging
import re
from PyCRC.CRC16 import CRC16
from dsmr_parser.objects import MBusObject, CosemObject
from dsmr_parser.exceptions import ParseContentError, InvalidChecksumError, NoChecksumError
logger = logging.getLogger(__name__)
class TelegramParser(object):
def __init__(self, telegram_specification, apply_checksum_validation=True):
self.telegram_specification = telegram_specification
self.apply_checksum_validation = apply_checksum_validation
def parse(self, telegram_data):
if self.apply_checksum_validation \
and self.telegram_specification['checksum_support']:
self.validate_checksum(telegram_data)
telegram = {}
for signature, parser in self.telegram_specification['objects'].items():
match = re.search(signature, telegram_data, re.DOTALL)
if match:
telegram[signature] = parser.parse(match.group(0))
return telegram
@staticmethod
def validate_checksum(telegram):
checksum_contents = re.search(r'\/.+\!', telegram, re.DOTALL)
checksum_hex = re.search(r'((?<=\!)[0-9A-Z]{4})+', telegram)
if not checksum_contents:
raise ParseContentError(
'Failed to perform CRC validation because the telegram is '
'incomplete: The content value is missing.'
)
elif checksum_contents and not checksum_hex:
raise NoChecksumError(
'Failed to perform CRC validation because the telegram is '
'incomplete: The CRC is missing.'
)
calculated_crc = CRC16().calculate(checksum_contents.group(0))
expected_crc = int(checksum_hex.group(0), base=16)
if calculated_crc != expected_crc:
raise InvalidChecksumError(
"Invalid telegram. The CRC checksum '{}' does not match the "
"expected '{}'".format(
calculated_crc,
expected_crc
)
)
class DSMRObjectParser(object):
def __init__(self, *value_formats):
self.value_formats = value_formats
def _parse(self, line):
pattern = re.compile(r'((?<=\()[0-9a-zA-Z\.\*]{0,}(?=\)))+')
values = re.findall(pattern, line)
values = [None if value == '' else value for value in values]
if not values or len(values) != len(self.value_formats):
raise ParseError("Invalid '%s' line for '%s'", line, self)
return [self.value_formats[i].parse(value)
for i, value in enumerate(values)]
class MBusParser(DSMRObjectParser):
def parse(self, line):
return MBusObject(self._parse(line))
class CosemParser(DSMRObjectParser):
def parse(self, line):
return CosemObject(self._parse(line))
class ProfileGenericParser(DSMRObjectParser):
def parse(self, line):
raise NotImplementedError()
class ValueParser(object):
def __init__(self, coerce_type):
self.coerce_type = coerce_type
def parse(self, value):
unit_of_measurement = None
if value and '*' in value:
value, unit_of_measurement = value.split('*')
value = self.coerce_type(value) if value is not None else value
return {
'value': value,
'unit': unit_of_measurement
}
| true | true |
f727e89ef20c8fb9be92012ad38858b527ba3813 | 579 | py | Python | src/bot/discord_ext.py | MycroftKang/mulgyeol-mkbot | 77bcfc5c93e02dbc983d2e6a137ddf835d450c29 | [
"MIT"
] | null | null | null | src/bot/discord_ext.py | MycroftKang/mulgyeol-mkbot | 77bcfc5c93e02dbc983d2e6a137ddf835d450c29 | [
"MIT"
] | null | null | null | src/bot/discord_ext.py | MycroftKang/mulgyeol-mkbot | 77bcfc5c93e02dbc983d2e6a137ddf835d450c29 | [
"MIT"
] | null | null | null | discord_extensions = (
"core.controllers.discord.delete",
"core.controllers.discord.join",
"core.controllers.discord.leave",
"core.controllers.discord.logout",
"core.controllers.discord.ping",
"core.controllers.discord.tts",
"core.controllers.discord.poll",
"core.controllers.discord.roulette",
"core.controllers.discord.translate",
"core.controllers.discord.music",
"core.controllers.discord.highlighter",
"core.controllers.discord.tic_tac_toe",
"core.controllers.discord.feedback",
"core.controllers.discord.timezone",
)
| 34.058824 | 43 | 0.721934 | discord_extensions = (
"core.controllers.discord.delete",
"core.controllers.discord.join",
"core.controllers.discord.leave",
"core.controllers.discord.logout",
"core.controllers.discord.ping",
"core.controllers.discord.tts",
"core.controllers.discord.poll",
"core.controllers.discord.roulette",
"core.controllers.discord.translate",
"core.controllers.discord.music",
"core.controllers.discord.highlighter",
"core.controllers.discord.tic_tac_toe",
"core.controllers.discord.feedback",
"core.controllers.discord.timezone",
)
| true | true |
f727e976fa0a94180dd9383f9dc5be94b4aa493a | 8,537 | py | Python | train.py | DylanHooz/uestc_yolov3 | 72ed60aaf68a0ab2dbc8d4dfad7bddffce826dde | [
"MIT"
] | null | null | null | train.py | DylanHooz/uestc_yolov3 | 72ed60aaf68a0ab2dbc8d4dfad7bddffce826dde | [
"MIT"
] | null | null | null | train.py | DylanHooz/uestc_yolov3 | 72ed60aaf68a0ab2dbc8d4dfad7bddffce826dde | [
"MIT"
] | null | null | null | """
Retrain the YOLO model for your own dataset.
"""
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    """Train YOLOv3 (or tiny-YOLOv3) on the helmet dataset in two phases:
    first with the backbone frozen for stability, then end to end with all
    layers trainable.  Weights and TensorBoard logs go under ``log_dir``.
    """
    annotation_path = '2007_trainval.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/helmet_classes.txt'
    anchors_path = 'model_data/helmet_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (416,416) # multiple of 32, hw
    # Six anchors selects the tiny network layout; nine selects full YOLOv3.
    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
    # Callbacks: TensorBoard logging plus periodic best-only checkpoints.
    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    # Fixed seed so the train/validation split is reproducible across runs.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})
        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=50,
                initial_epoch=0,
                callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')
        batch_size = 16 # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=100,
                initial_epoch=50,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    # Further training if needed.
def get_classes(classes_path):
    """Read class names from a text file, one stripped name per line."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle.readlines()]
def get_anchors(anchors_path):
    """Read comma-separated anchor values from the file's first line and
    reshape them into an (N, 2) array of (width, height) pairs."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/yolo_weights.h5'):
    '''Build the trainable YOLOv3 graph: the detection body plus a Lambda
    layer that computes yolo_loss from three per-scale y_true inputs.
    freeze_body=1 freezes the first 185 layers (the backbone), 2 freezes all
    but the three output layers; other values leave everything trainable.
    '''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # One ground-truth input per detection scale (strides 32, 16 and 8).
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]

    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    # The loss is computed inside the graph: the model's single output IS the loss.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5'):
    '''Build the trainable Tiny YOLOv3 graph: the two-scale detection body
    plus a Lambda layer computing yolo_loss from two y_true inputs.
    freeze_body=1 freezes the first 20 layers, 2 freezes all but the two
    output layers; other values leave everything trainable.
    '''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # One ground-truth input per detection scale (strides 32 and 16).
    y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
        num_anchors//2, num_classes+5)) for l in range(2)]

    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers)-2)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))

    # The loss is computed inside the graph: the model's single output IS the loss.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''data generator for fit_generator: endlessly yields augmented image
    batches and the per-scale ground-truth tensors expected by the in-graph
    yolo_loss.  The second tuple element is a dummy target because the model's
    output already is the loss value.
    '''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i==0:
                # Reshuffle at the start of every pass over the annotations.
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i+1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Guarded entry to data_generator: returns None when there are no
    annotation lines or the batch size is not positive."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
# Script entry point: run the two-phase training when executed directly.
if __name__ == '__main__':
    _main()
| 44.463542 | 130 | 0.660536 |
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
annotation_path = '2007_trainval.txt'
log_dir = 'logs/000/'
classes_path = 'model_data/helmet_classes.txt'
anchors_path = 'model_data/helmet_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
input_shape = (416,416)
is_tiny_version = len(anchors)==6
if is_tiny_version:
model = create_tiny_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
else:
model = create_model(input_shape, anchors, num_classes,
freeze_body=2, weights_path='model_data/yolo_weights.h5')
logging = TensorBoard(log_dir=log_dir)
checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
val_split = 0.1
with open(annotation_path) as f:
lines = f.readlines()
np.random.seed(10101)
np.random.shuffle(lines)
np.random.seed(None)
num_val = int(len(lines)*val_split)
num_train = len(lines) - num_val
if True:
model.compile(optimizer=Adam(lr=1e-3), loss={
'yolo_loss': lambda y_true, y_pred: y_pred})
batch_size = 32
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=50,
initial_epoch=0,
callbacks=[logging, checkpoint])
model.save_weights(log_dir + 'trained_weights_stage_1.h5')
if True:
for i in range(len(model.layers)):
model.layers[i].trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
print('Unfreeze all of the layers.')
batch_size = 16
print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train//batch_size),
validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
validation_steps=max(1, num_val//batch_size),
epochs=100,
initial_epoch=50,
callbacks=[logging, checkpoint, reduce_lr, early_stopping])
model.save_weights(log_dir + 'trained_weights_final.h5')
def get_classes(classes_path):
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
K.clear_session()
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/tiny_yolo_weights.h5'):
K.clear_session()
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16}[l], w//{0:32, 1:16}[l], \
num_anchors//2, num_classes+5)) for l in range(2)]
model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
num = (20, len(model_body.layers)-2)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Endlessly yield training batches of (inputs, dummy_targets).

    Each yielded item is ``([image_batch, *y_true], zeros(batch_size))``;
    the zeros are dummy targets because the model's output already IS the loss.
    Annotation lines are reshuffled every time a full pass completes.
    """
    total = len(annotation_lines)
    cursor = 0
    while True:
        images = []
        boxes = []
        for _ in range(batch_size):
            if cursor == 0:
                # New epoch over the annotation list: reshuffle in place.
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[cursor], input_shape, random=True)
            images.append(image)
            boxes.append(box)
            cursor = (cursor + 1) % total
        image_batch = np.array(images)
        box_batch = np.array(boxes)
        y_true = preprocess_true_boxes(box_batch, input_shape, anchors, num_classes)
        yield [image_batch, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Validate arguments, then delegate to data_generator.

    Returns None (instead of a generator) when there are no annotation
    lines or the batch size is non-positive.
    """
    if len(annotation_lines) == 0 or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
# Script entry point: run training via _main() (defined elsewhere in this module).
if __name__ == '__main__':
    _main()
| true | true |
f727ea71d77bc5a71cddd8eb8f7f9da14738cd39 | 165 | py | Python | src/timeatlas/generators/anomaly_generator/__init__.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | 10 | 2020-08-25T09:23:02.000Z | 2021-01-12T14:00:35.000Z | src/timeatlas/generators/anomaly_generator/__init__.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | 140 | 2020-06-30T11:59:47.000Z | 2021-08-23T20:58:43.000Z | src/timeatlas/generators/anomaly_generator/__init__.py | fredmontet/timeatlas | 9a439a913ef9a8a1ef9833b42e5fb4e988d7e35e | [
"MIT"
] | null | null | null | from .anomalies import AnomalyABC
from .labeler import AnomalySetLabeler
from .anomaly_generator import AnomalyGenerator
from .config import AnomalyGeneratorTemplate | 41.25 | 47 | 0.884848 | from .anomalies import AnomalyABC
from .labeler import AnomalySetLabeler
from .anomaly_generator import AnomalyGenerator
from .config import AnomalyGeneratorTemplate | true | true |
f727eacf052868602d35b1841a9c93d769a8fe24 | 4,382 | py | Python | packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py | jackbandy/PySyft | 0e20e90abab6a7a7ca672d6eedfa1e7f83c4981b | [
"Apache-2.0"
] | null | null | null | # stdlib
from typing import Dict
from typing import Optional
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
# syft absolute
import syft as sy
# relative
from ..... import lib
from .....proto.core.node.common.action.get_enum_attribute_pb2 import (
GetEnumAttributeAction as GetEnumAttributeAction_PB,
)
from ....common.serde.serializable import serializable
from ....common.uid import UID
from ....io.address import Address
from ....store.storeable_object import StorableObject
from ...abstract.node import AbstractNode
from .common import ImmediateActionWithoutReply
from .run_class_method_action import RunClassMethodAction
@serializable()
class EnumAttributeAction(ImmediateActionWithoutReply):
    """Action that resolves an enum attribute path on a remote node's
    library AST and stores the resulting primitive in the node's store."""

    def __init__(
        self,
        path: str,
        id_at_location: UID,
        address: Address,
        msg_id: Optional[UID] = None,
    ):
        """Record the AST *path* to resolve and the UID to store the result under."""
        super().__init__(address, msg_id=msg_id)
        self.path = path
        self.id_at_location = id_at_location

    def intersect_keys(
        self,
        left: Dict[VerifyKey, Optional[UID]],
        right: Dict[VerifyKey, Optional[UID]],
    ) -> Dict[VerifyKey, Optional[UID]]:
        """Delegate key intersection to RunClassMethodAction's implementation."""
        return RunClassMethodAction.intersect_keys(left, right)

    def execute_action(self, node: AbstractNode, verify_key: VerifyKey) -> None:
        """Resolve the enum attribute and store it under ``id_at_location``."""
        ast_node = node.lib_ast.query(self.path)
        raw_value = ast_node.solve_get_enum_attribute().value
        # Wrap the raw enum value in a syft primitive carrying the target UID.
        primitive = lib.python.primitive_factory.PrimitiveFactory.generate_primitive(
            value=raw_value, id=self.id_at_location
        )
        node.store[self.id_at_location] = StorableObject(
            id=self.id_at_location,
            data=primitive,
        )

    def _object2proto(self) -> GetEnumAttributeAction_PB:
        """Serialize this action into its protobuf message."""
        return GetEnumAttributeAction_PB(
            path=self.path,
            id_at_location=sy.serialize(self.id_at_location),
            address=sy.serialize(self.address),
            msg_id=sy.serialize(self.id),
        )

    @staticmethod
    def _proto2object(
        proto: GetEnumAttributeAction_PB,
    ) -> "EnumAttributeAction":
        """Rebuild an EnumAttributeAction from its protobuf message."""
        return EnumAttributeAction(
            path=proto.path,
            id_at_location=sy.deserialize(blob=proto.id_at_location),
            address=sy.deserialize(blob=proto.address),
            msg_id=sy.deserialize(blob=proto.msg_id),
        )

    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        """Return the protobuf message type used for (de)serialization."""
        return GetEnumAttributeAction_PB
| 38.104348 | 95 | 0.689411 |
from typing import Dict
from typing import Optional
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import VerifyKey
import syft as sy
from ..... import lib
from .....proto.core.node.common.action.get_enum_attribute_pb2 import (
GetEnumAttributeAction as GetEnumAttributeAction_PB,
)
from ....common.serde.serializable import serializable
from ....common.uid import UID
from ....io.address import Address
from ....store.storeable_object import StorableObject
from ...abstract.node import AbstractNode
from .common import ImmediateActionWithoutReply
from .run_class_method_action import RunClassMethodAction
@serializable()
class EnumAttributeAction(ImmediateActionWithoutReply):
    """Action that resolves an enum attribute path on the node's library AST
    and stores the resulting primitive under ``id_at_location``."""
    def __init__(
        self,
        path: str,
        id_at_location: UID,
        address: Address,
        msg_id: Optional[UID] = None,
    ):
        super().__init__(address, msg_id=msg_id)
        self.id_at_location = id_at_location
        self.path = path
    def intersect_keys(
        self,
        left: Dict[VerifyKey, Optional[UID]],
        right: Dict[VerifyKey, Optional[UID]],
    ) -> Dict[VerifyKey, Optional[UID]]:
        """Delegate key intersection to RunClassMethodAction's implementation."""
        return RunClassMethodAction.intersect_keys(left, right)
    def execute_action(self, node: AbstractNode, verify_key: VerifyKey) -> None:
        """Look up the enum attribute and store its primitive value on the node."""
        enum_attribute = node.lib_ast.query(self.path)
        result = enum_attribute.solve_get_enum_attribute().value
        # Wrap the raw enum value in a syft primitive carrying the target UID.
        result = lib.python.primitive_factory.PrimitiveFactory.generate_primitive(
            value=result, id=self.id_at_location
        )
        result = StorableObject(
            id=self.id_at_location,
            data=result,
        )
        node.store[self.id_at_location] = result
    def _object2proto(self) -> GetEnumAttributeAction_PB:
        """Serialize this action into its protobuf message."""
        return GetEnumAttributeAction_PB(
            path=self.path,
            id_at_location=sy.serialize(self.id_at_location),
            address=sy.serialize(self.address),
            msg_id=sy.serialize(self.id),
        )
    @staticmethod
    def _proto2object(
        proto: GetEnumAttributeAction_PB,
    ) -> "EnumAttributeAction":
        """Rebuild an EnumAttributeAction from its protobuf message."""
        return EnumAttributeAction(
            path=proto.path,
            id_at_location=sy.deserialize(blob=proto.id_at_location),
            address=sy.deserialize(blob=proto.address),
            msg_id=sy.deserialize(blob=proto.msg_id),
        )
    @staticmethod
    def get_protobuf_schema() -> GeneratedProtocolMessageType:
        """Return the protobuf message type used for (de)serialization."""
        return GetEnumAttributeAction_PB
| true | true |
f727ec286ffb28b7cbdb0a43ec55f52b2d947849 | 496 | py | Python | package_maker/py2exe/single_file/setup.py | thanhkaist/Qt-Python-Binding-Examples | 25b3313fd03e396014cce0e8f7eec8823b3ebd29 | [
"BSD-3-Clause"
] | 2 | 2019-10-20T05:40:51.000Z | 2019-10-31T17:26:27.000Z | package_maker/py2exe/single_file/setup.py | thanhkaist/Qt-Python-Binding-Examples | 25b3313fd03e396014cce0e8f7eec8823b3ebd29 | [
"BSD-3-Clause"
] | null | null | null | package_maker/py2exe/single_file/setup.py | thanhkaist/Qt-Python-Binding-Examples | 25b3313fd03e396014cce0e8f7eec8823b3ebd29 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""py2exe build script: bundle btn.py into a single windowed executable."""
import os
import glob

from distutils.core import setup
import py2exe

# Importing py2exe registers the "py2exe" distutils command; fail fast if the
# import silently produced None. Use identity comparison per PEP 8.
assert py2exe is not None

osp = os.path

# GUI (windowed) targets: one executable built from btn.py with an embedded icon.
windows = [
    {
        "script": "btn.py",
        "icon_resources": [(1, "gui.ico")],
    }
]

options = {
    "py2exe": {
        "includes": ["PyQt4", "sip"],      # force-include the Qt bindings
        "dll_excludes": ["MSVCP90.dll"],   # ship the MSVC runtime separately
        "bundle_files": 1,                 # bundle everything into the exe (see py2exe docs)
    }
}

setup(
    name="foo",
    windows=windows,
    options=options,
    zipfile=None,  # no separate library.zip; with bundle_files=1 yields a single file
)
"""py2exe build script: bundle btn.py into a single windowed executable."""
import os
import glob

from distutils.core import setup
import py2exe

# Importing py2exe registers the "py2exe" distutils command; fail fast if the
# import silently produced None. Use identity comparison per PEP 8.
assert py2exe is not None

osp = os.path

# GUI (windowed) targets: one executable built from btn.py with an embedded icon.
windows = [
    {
        "script": "btn.py",
        "icon_resources": [(1, "gui.ico")],
    }
]

options = {
    "py2exe": {
        "includes": ["PyQt4", "sip"],      # force-include the Qt bindings
        "dll_excludes": ["MSVCP90.dll"],   # ship the MSVC runtime separately
        "bundle_files": 1,                 # bundle everything into the exe (see py2exe docs)
    }
}

setup(
    name="foo",
    windows=windows,
    options=options,
    zipfile=None,  # no separate library.zip; with bundle_files=1 yields a single file
)
f727ecacbdc8f8fdc864e9a7692a7f8384a45b5b | 763 | py | Python | account/forms.py | mateuszwwwrobel/Expense_Tracker_Django | e84bda82433427608e026faa00a634c46a433179 | [
"MIT"
] | null | null | null | account/forms.py | mateuszwwwrobel/Expense_Tracker_Django | e84bda82433427608e026faa00a634c46a433179 | [
"MIT"
] | null | null | null | account/forms.py | mateuszwwwrobel/Expense_Tracker_Django | e84bda82433427608e026faa00a634c46a433179 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SignUpForm(UserCreationForm):
    """Custom signup form with additional fields."""
    # Optional profile fields layered on top of UserCreationForm's
    # username/password1/password2.
    first_name = forms.CharField(max_length=50, required=False,
                                 help_text='Optional. 50 characters or fewer.')
    last_name = forms.CharField(max_length=50, required=False,
                                help_text='Optional. 50 characters or fewer.')
    # Email is required here (EmailField defaults to required=True).
    # NOTE(review): help_text typo "will be send" -> "will be sent"; fixing it
    # changes a user-facing string, so do it as a deliberate copy change.
    email = forms.EmailField(max_length=254,
                             help_text='Required. Email confirmation will be send.')
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2',)
| 40.157895 | 92 | 0.646134 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class SignUpForm(UserCreationForm):
    """Signup form adding optional name fields and a required email to
    Django's stock UserCreationForm."""
    first_name = forms.CharField(max_length=50, required=False,
                                 help_text='Optional. 50 characters or fewer.')
    last_name = forms.CharField(max_length=50, required=False,
                                help_text='Optional. 50 characters or fewer.')
    # NOTE(review): help_text typo "will be send" -> "will be sent"; user-facing
    # string, change deliberately rather than in a doc pass.
    email = forms.EmailField(max_length=254,
                             help_text='Required. Email confirmation will be send.')
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2',)
| true | true |
f727ed1dd0b38515ab9c60f47bb34d9f08bb15ec | 253 | py | Python | test.py | essethon/Python_HC_SR501 | 2ccae65651d82875a449e51b77aa5077ae78c19a | [
"MIT"
] | 4 | 2017-12-27T08:00:57.000Z | 2020-07-12T08:32:58.000Z | test.py | essethon/Python_HC_SR501 | 2ccae65651d82875a449e51b77aa5077ae78c19a | [
"MIT"
] | null | null | null | test.py | essethon/Python_HC_SR501 | 2ccae65651d82875a449e51b77aa5077ae78c19a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import time
import motion_detector
if __name__ == "__main__":
    # Poll the motion sensor once per second and report its current state.
    while True:
        status = "Somebody is closing" if motion_detector.motion_detect() else "Nobody"
        print(status)
        time.sleep(1)
| 18.071429 | 43 | 0.596838 |
import time
import motion_detector
if __name__ == "__main__":
    # Poll the motion sensor once per second and print whether motion is seen.
    while True:
        if motion_detector.motion_detect():
            print("Somebody is closing")
        else:
            print("Nobody")
        time.sleep(1)
| true | true |
f727ed746f16c529677c000a761dd7cf38da957f | 8,929 | py | Python | .history/pages/intro_20220304123207.py | rypaik/Streamlit_Ref | 5ce11cecbe8307238463c126b88b3beed66c99fa | [
"MIT"
] | null | null | null | .history/pages/intro_20220304123207.py | rypaik/Streamlit_Ref | 5ce11cecbe8307238463c126b88b3beed66c99fa | [
"MIT"
] | null | null | null | .history/pages/intro_20220304123207.py | rypaik/Streamlit_Ref | 5ce11cecbe8307238463c126b88b3beed66c99fa | [
"MIT"
] | null | null | null | """
Off Multipage Cheatsheet
https://github.com/daniellewisDL/streamlit-cheat-sheet
@daniellewisDL : https://github.com/daniellewisDL
"""
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
# Initial page config
# Configure the page at import time, before any page content is rendered.
st.set_page_config(
    page_title='Code Compendium Intro Page',
    layout="wide"
    # initial_sidebar_state="expanded",
)
# col2.title("Table of contents")
# col2.write("http://localhost:8502/#display-progress-and-status")
# toc.header("Header 1")
# toc.header("Header 2")
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# Thanks to streamlitopedia for the following code snippet
def img_to_bytes(img_path):
    """Read the file at *img_path* and return its contents base64-encoded as str."""
    return base64.b64encode(Path(img_path).read_bytes()).decode()
# sidebar
# def cs_sidebar():
# st.sidebar.markdown('''[<img src='data:image/png;base64,{}' class='img-fluid' width=32 height=32>](https://streamlit.io/)'''.format(img_to_bytes("logomark_website.png")), unsafe_allow_html=True)
# st.sidebar.header('Streamlit cheat sheet')
# st.sidebar.markdown('''
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.markdown('__How to install and import__')
# st.sidebar.code('$ pip install streamlit')
# st.sidebar.markdown('Import convention')
# st.sidebar.code('>>> import streamlit as st')
# st.sidebar.markdown('__Add widgets to sidebar__')
# st.sidebar.code('''
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# st.sidebar.markdown('__Command line__')
# st.sidebar.code('''
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# st.sidebar.markdown('__Pre-release features__')
# st.sidebar.markdown('[Beta and experimental features](https://docs.streamlit.io/en/stable/api.html#beta-and-experimental-features)')
# st.sidebar.code('''
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
# st.sidebar.markdown('''<small>[st.cheat_sheet v1.0.0](https://github.com/daniellewisDL/streamlit-cheat-sheet) | Oct 2021</small>''', unsafe_allow_html=True)
# return None
##########################
# Main body of cheat sheet
##########################
def cs_body():
    """Render the main body of the intro page: title, quote, and bio markdown."""
    # col1 = st.columns(1)
    st.title("Ryan Paik's Coding Compendium")
    st.markdown('''
----------
#### “*You don't learn to walk by following rules. You learn by doing, and by falling over.*”
##### ~ Richard Branson
--------
''')
    st.subheader("Welcome to my Code Compendium.")
    # NOTE(review): the markdown below contains a stray "i" line and the phrase
    # "for of all" — both look like typos, but they are user-facing strings, so
    # confirm before editing the copy.
    st.markdown('''
This website/webapp is my personal cheatsheet for of all the code snippets that I have needed over the past 2 years. This ended up being a quick detour into Streamlit while I was building flask api's.  
-----
#### **Programming is only as deep as you want to dive in.**
i
This webapp features the basic code snippets from all the "googling" from programming I have done.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-------
##### **Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign.
I am working Nights on my Bachelor's of Science in Systems Engineering and Design
##### **Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
##### **Currently Working On:**
##### Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
##### Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
##### When I can get up for Air:
- React, swift(ios), Rust, GO!!
- Find a team to get a paper In Arxiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
    ''')
# col2.subheader('Display interactive widgets')
# col2.code('''
# st.button('Hit me')
# st.download_button('On the dl', data)
# st.checkbox('Check me out')
# st.radio('Radio', [1,2,3])
# st.selectbox('Select', [1,2,3])
# st.multiselect('Multiselect', [1,2,3])
# st.slider('Slide me', min_value=0, max_value=10)
# st.select_slider('Slide to select', options=[1,'2'])
# st.text_input('Enter some text')
# st.number_input('Enter a number')
# st.text_area('Area for textual entry')
# st.date_input('Date input')
# st.time_input('Time entry')
# st.file_uploader('File uploader')
# st.color_picker('Pick a color')
# ''')
# col2.write('Use widgets\' returned values in variables:')
# col2.code('''
# >>> for i in range(int(st.number_input('Num:'))): foo()
# >>> if st.sidebar.selectbox('I:',['f']) == 'f': b()
# >>> my_slider_val = st.slider('Quinn Mallory', 1, 88)
# >>> st.write(slider_val)
# ''')
# # Control flow
# col2.subheader('Control flow')
# col2.code('''
# st.stop()
# ''')
# # Lay out your app
# col2.subheader('Lay out your app')
# col2.code('''
# st.form('my_form_identifier')
# st.form_submit_button('Submit to me')
# st.container()
# st.columns(spec)
# >>> col1, col2 = st.columns(2)
# >>> col1.subheader('Columnisation')
# st.expander('Expander')
# >>> with st.expander('Expand'):
# >>> st.write('Juicy deets')
# ''')
# col2.write('Batch widgets together in a form:')
# col2.code('''
# >>> with st.form(key='my_form'):
# >>> text_input = st.text_input(label='Enter some text')
# >>> submit_button = st.form_submit_button(label='Submit')
# ''')
# # Display code
# col2.subheader('Display code')
# col2.code('''
# st.echo()
# >>> with st.echo():
# >>> st.write('Code will be executed and printed')
# ''')
# # Display progress and status
# col2.subheader('Display progress and status')
# col2.code('''
# st.progress(progress_variable_1_to_100)
# st.spinner()
# >>> with st.spinner(text='In progress'):
# >>> time.sleep(5)
# >>> st.success('Done')
# st.balloons()
# st.error('Error message')
# st.warning('Warning message')
# st.info('Info message')
# st.success('Success message')
# st.exception(e)
# ''')
# # Placeholders, help, and options
# col2.subheader('Placeholders, help, and options')
# col2.code('''
# st.empty()
# >>> my_placeholder = st.empty()
# >>> my_placeholder.text('Replaced!')
# st.help(pandas.DataFrame)
# st.get_option(key)
# st.set_option(key, value)
# st.set_page_config(layout='wide')
# ''')
# # Mutate data
# col2.subheader('Mutate data')
# col2.code('''
# DeltaGenerator.add_rows(data)
# >>> my_table = st.table(df1)
# >>> my_table.add_rows(df2)
# >>> my_chart = st.line_chart(df1)
# >>> my_chart.add_rows(df2)
# ''')
# # Optimize performance
# col2.subheader('Optimize performance')
# col2.code('''
# @st.cache
# >>> @st.cache
# ... def fetch_and_clean_data(url):
# ... # Mutate data at url
# ... return data
# >>> # Executes d1 as first time
# >>> d1 = fetch_and_clean_data(ref1)
# >>> # Does not execute d1; returns cached value, d1==d2
# >>> d2 = fetch_and_clean_data(ref1)
# >>> # Different arg, so function d1 executes
# >>> d3 = fetch_and_clean_data(ref2)
# ''')
# col2.subheader('Other key parts of the API')
# col2.markdown('''
# <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
# <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
# <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
# <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
# ''', unsafe_allow_html=True)
# Column 3 TOC Generator
# col3.subheader('test')
# toc = Toc(col3)
# # col2.title("Table of contents")
# col3.write("http://localhost:8502/#display-progress-and-status", unsafe_allow_html=True)
# toc.header("Header 1")
# toc.header("Header 2")
# toc.generate()
# toc.subheader("Subheader 1")
# toc.subheader("Subheader 2")
# toc.generate()
# return None
# Run main()
# if __name__ == '__main__':
# main()
# def main():
def app():
    """Page entry point: draw the main body (sidebar rendering is disabled)."""
    # cs_sidebar()
    cs_body()
    return None
| 28.527157 | 206 | 0.652593 |
import streamlit as st
from pathlib import Path
import base64
from modules.toc import *
st.set_page_config(
page_title='Code Compendium Intro Page',
layout="wide"
)
def img_to_bytes(img_path):
    """Return the base64 (ASCII) encoding of the file at *img_path*."""
    payload = Path(img_path).read_bytes()
    return base64.b64encode(payload).decode()
# <small>Summary of the [docs](https://docs.streamlit.io/en/stable/api.html), as of [Streamlit v1.0.0](https://www.streamlit.io/).</small>
# ''', unsafe_allow_html=True)
# st.sidebar.<widget>
# >>> a = st.sidebar.radio(\'R:\',[1,2])
# ''')
# $ streamlit --help
# $ streamlit run your_script.py
# $ streamlit hello
# $ streamlit config show
# $ streamlit cache clear
# $ streamlit docs
# $ streamlit --version
# ''')
# pip uninstall streamlit
# pip install streamlit-nightly --upgrade
# ''')
code snippets from all the "googling" from programming I have done.
I have taken the plunge and have created my own markdown notebooks organizing information from quick solution tidbits to documentation for programming languages.
Please visit my github for practical code and my research notebooks:
*[rypaik (Ryan Paik) · GitHub](https://github.com/rypaik)*
If you would like access to my Gist please email me.
ryanpaik@protonmail.com
-------
##### **Bio:**
Currently a Sophomore at University of Illinois at Urbana-Champaign.
I am working Nights on my Bachelor's of Science in Systems Engineering and Design
##### **Hobbies:**
Trying to become a real guitar hero minus the game system, playing Valorant with the St Mark's crew, getting interesting eats no matter where I am, and playing toss with my baseball field rat of a cousin.
The newest hobby is figuring out what I can build with all the new breakthroughs in technology.
##### **Currently Working On:**
##### Frameworks and Languages:
- Flask, Django, FastAPI, PyTorch, Streamlit, OpenCV, shell scripting, Python, C++
##### Databases:
- Postgres, Redis, MongoDB, and applicable ORMs
##### When I can get up for Air:
- React, swift(ios), Rust, GO!!
- Find a team to get a paper In Arxiv
**This site will be constantly updated as long as I program. Feel free to pass on the URL.**
''')
# col2.subheader('Display interactive widgets')
# col2.code('''
# st.button('Hit me')
# st.download_button('On the dl', data)
# st.checkbox('Check me out')
# st.radio('Radio', [1,2,3])
# st.selectbox('Select', [1,2,3])
# st.multiselect('Multiselect', [1,2,3])
# st.slider('Slide me', min_value=0, max_value=10)
# st.select_slider('Slide to select', options=[1,'2'])
# st.text_input('Enter some text')
# st.number_input('Enter a number')
# st.text_area('Area for textual entry')
# st.date_input('Date input')
# st.time_input('Time entry')
# st.file_uploader('File uploader')
# st.color_picker('Pick a color')
# ''')
# col2.write('Use widgets\' returned values in variables:')
# >>> for i in range(int(st.number_input('Num:'))): foo()
# >>> if st.sidebar.selectbox('I:',['f']) == 'f': b()
# >>> my_slider_val = st.slider('Quinn Mallory', 1, 88)
# >>> st.write(slider_val)
# ''')
)
# ''')
form_identifier')
# st.form_submit_button('Submit to me')
# st.container()
# st.columns(spec)
# >>> col1, col2 = st.columns(2)
# >>> col1.subheader('Columnisation')
# st.expander('Expander')
# >>> with st.expander('Expand'):
# >>> st.write('Juicy deets')
# ''')
# >>> with st.form(key='my_form'):
# >>> text_input = st.text_input(label='Enter some text')
# >>> submit_button = st.form_submit_button(label='Submit')
# ''')
)
# >>> with st.echo():
# >>> st.write('Code will be executed and printed')
# ''')
riable_1_to_100)
# st.spinner()
# >>> with st.spinner(text='In progress'):
# >>> time.sleep(5)
# >>> st.success('Done')
# st.balloons()
# st.error('Error message')
# st.warning('Warning message')
# st.info('Info message')
# st.success('Success message')
# st.exception(e)
# ''')
lder = st.empty()
# >>> my_placeholder.text('Replaced!')
# st.help(pandas.DataFrame)
# st.get_option(key)
# st.set_option(key, value)
# st.set_page_config(layout='wide')
# ''')
nerator.add_rows(data)
# >>> my_table = st.table(df1)
# >>> my_table.add_rows(df2)
# >>> my_chart = st.line_chart(df1)
# >>> my_chart.add_rows(df2)
# ''')
@st.cache
# ... def fetch_and_clean_data(url):
# ... # Mutate data at url
# ... return data
# >>> # Executes d1 as first time
# >>> d1 = fetch_and_clean_data(ref1)
# >>> # Does not execute d1; returns cached value, d1==d2
# >>> d2 = fetch_and_clean_data(ref1)
# >>> # Different arg, so function d1 executes
# >>> d3 = fetch_and_clean_data(ref2)
# ''')
# <small>[State API](https://docs.streamlit.io/en/stable/session_state_api.html)</small><br>
# <small>[Theme option reference](https://docs.streamlit.io/en/stable/theme_options.html)</small><br>
# <small>[Components API reference](https://docs.streamlit.io/en/stable/develop_streamlit_components.html)</small><br>
# <small>[API cheat sheet](https://share.streamlit.io/daniellewisdl/streamlit-cheat-sheet/app.py)</small><br>
# ''', unsafe_allow_html=True)
def app():
    """Page entry point: render the main body content."""
    cs_body()
    return None
| true | true |
f727ee1e7b53a6244678c9e08fe4eea5eec42c76 | 509 | py | Python | src/waldur_ansible/playbook_jobs/extension.py | opennode/waldur-ansible | c81c5f0491be02fa9a55a6d5bf9d845750fd1ba9 | [
"MIT"
] | 1 | 2017-09-05T08:09:47.000Z | 2017-09-05T08:09:47.000Z | src/waldur_ansible/playbook_jobs/extension.py | opennode/waldur-ansible | c81c5f0491be02fa9a55a6d5bf9d845750fd1ba9 | [
"MIT"
] | null | null | null | src/waldur_ansible/playbook_jobs/extension.py | opennode/waldur-ansible | c81c5f0491be02fa9a55a6d5bf9d845750fd1ba9 | [
"MIT"
] | 3 | 2017-09-24T03:13:19.000Z | 2018-08-12T07:44:38.000Z | from waldur_core.core import WaldurExtension
class PlaybookJobsExtension(WaldurExtension):
    """Waldur extension descriptor for the Ansible playbook-jobs Django app."""
    class Settings:
        # Extension-specific settings exposed to the main Django settings.
        WALDUR_PLAYBOOK_JOBS = {
            'PLAYBOOKS_DIR_NAME': 'ansible_playbooks',
            'PLAYBOOK_ICON_SIZE': (64, 64),
        }
    @staticmethod
    def django_app():
        """Dotted path of the Django app this extension provides."""
        return 'waldur_ansible.playbook_jobs'
    @staticmethod
    def rest_urls():
        """Return the callable that registers this extension's REST routes."""
        from .urls import register_in
        return register_in
    @staticmethod
    def is_assembly():
        # NOTE(review): presumably flags this extension as an "assembly" to
        # waldur_core — confirm semantics against WaldurExtension docs.
        return True
| 22.130435 | 54 | 0.650295 | from waldur_core.core import WaldurExtension
class PlaybookJobsExtension(WaldurExtension):
    """Registers the Ansible playbook-jobs app as a Waldur extension."""

    class Settings:
        # Extension-specific settings exposed to the main Django settings.
        WALDUR_PLAYBOOK_JOBS = {
            'PLAYBOOKS_DIR_NAME': 'ansible_playbooks',
            'PLAYBOOK_ICON_SIZE': (64, 64),
        }

    @staticmethod
    def django_app():
        """Dotted path of the Django app this extension provides."""
        return 'waldur_ansible.playbook_jobs'

    @staticmethod
    def is_assembly():
        return True

    @staticmethod
    def rest_urls():
        """Return the callable that registers this extension's REST routes."""
        from .urls import register_in
        return register_in
| true | true |
f727eea3db865e0f61cecbb74cf9356d9badf3b1 | 558 | py | Python | pythonaem/error.py | mbloch1986/pythonaem | ce3ac1cb045a3cae912e7a76148130f645f61b91 | [
"Apache-2.0"
] | 3 | 2017-09-18T18:02:42.000Z | 2021-05-19T06:47:46.000Z | pythonaem/error.py | mbloch1986/pythonaem | ce3ac1cb045a3cae912e7a76148130f645f61b91 | [
"Apache-2.0"
] | 1 | 2021-05-19T01:49:04.000Z | 2021-05-19T01:49:04.000Z | pythonaem/error.py | mbloch1986/pythonaem | ce3ac1cb045a3cae912e7a76148130f645f61b91 | [
"Apache-2.0"
] | 5 | 2017-07-13T11:31:38.000Z | 2021-05-19T01:12:47.000Z | """
PythonAEM error, contains a message and PythonAEM Result object
"""
class Error(RuntimeError):
    """
    PythonAEM error, contains a message and PythonAEM Result object
    useful for debugging the result and response when an error occurs
    """
    def __init__(self, message, result):
        """
        Initialise the error.

        :param message: human-readable error message
        :param result: PythonAEM Result describing the failed call
        :return: PythonAEM Error instance
        """
        # Pass the message to RuntimeError so str(err) and err.args carry it
        # (previously super().__init__() was called with no args, making
        # str(err) an empty string in logs and tracebacks).
        super().__init__(message)
        self.message = message
        self.result = result
| 24.26087 | 69 | 0.636201 |
class Error(RuntimeError):
    """PythonAEM error carrying a message and the PythonAEM Result object
    that triggered it, for debugging the failed call."""
    def __init__(self, message, result):
        # RuntimeError is initialised without args here; message and result
        # are exposed as attributes for callers to inspect.
        super().__init__()
        self.message = message
        self.result = result
| true | true |
f727eee784d7ad52a67a2cd8eda72f4b1d6284ef | 407 | py | Python | app/run/migrations/0015_alter_run_pipeline_command.py | Masado/django-app-api-3 | 88def27f1cd8974c62dead282cd04d1384054888 | [
"MIT"
] | null | null | null | app/run/migrations/0015_alter_run_pipeline_command.py | Masado/django-app-api-3 | 88def27f1cd8974c62dead282cd04d1384054888 | [
"MIT"
] | null | null | null | app/run/migrations/0015_alter_run_pipeline_command.py | Masado/django-app-api-3 | 88def27f1cd8974c62dead282cd04d1384054888 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-11-25 10:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make run.pipeline_command optional (blank=True, null=True)."""
    dependencies = [
        ('run', '0014_run_duration'),
    ]
    operations = [
        # Auto-generated AlterField; keep byte-stable once applied to databases.
        migrations.AlterField(
            model_name='run',
            name='pipeline_command',
            field=models.CharField(blank=True, max_length=600, null=True),
        ),
    ]
| 21.421053 | 74 | 0.604423 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make run.pipeline_command optional (blank=True, null=True)."""
    dependencies = [
        ('run', '0014_run_duration'),
    ]
    operations = [
        # Auto-generated AlterField; keep byte-stable once applied to databases.
        migrations.AlterField(
            model_name='run',
            name='pipeline_command',
            field=models.CharField(blank=True, max_length=600, null=True),
        ),
    ]
| true | true |
f727f0ca02c8ddd7cc426b246a89148e585b2b62 | 1,181 | py | Python | daemon/examples/api/switch_inject.py | shanv82/core | 70abb8cc1426ffceb53a03e84edc26f56f9ed4c0 | [
"BSD-2-Clause"
] | null | null | null | daemon/examples/api/switch_inject.py | shanv82/core | 70abb8cc1426ffceb53a03e84edc26f56f9ed4c0 | [
"BSD-2-Clause"
] | null | null | null | daemon/examples/api/switch_inject.py | shanv82/core | 70abb8cc1426ffceb53a03e84edc26f56f9ed4c0 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
from core import load_logging_config
from core.emulator.emudata import IpPrefixes
from core.enumerations import NodeTypes, EventTypes
load_logging_config()
def example(nodes):
    """Create a session with one switch and *nodes* attached nodes, then start it.

    :param nodes: number of nodes to create and link to the switch
    """
    # ip generator for example
    prefixes = IpPrefixes("10.83.0.0/16")

    # create emulator instance for creating sessions and utility methods
    # (presumably injected into globals by the CORE script runner — confirm)
    coreemu = globals()["coreemu"]
    session = coreemu.create_session()

    # must be in configuration state for nodes to start, when using "node_add" below
    session.set_state(EventTypes.CONFIGURATION_STATE)

    # create switch network node
    switch = session.add_node(_type=NodeTypes.SWITCH)

    # create nodes; range() (not xrange) keeps this working on Python 3 as well
    for _ in range(nodes):
        node = session.add_node()
        interface = prefixes.create_interface(node)
        session.add_link(node.objid, switch.objid, interface_one=interface)

    # instantiate session
    session.instantiate()


if __name__ in {"__main__", "__builtin__"}:
    example(2)
| 29.525 | 84 | 0.731583 |
from core import load_logging_config
from core.emulator.emudata import IpPrefixes
from core.enumerations import NodeTypes, EventTypes
load_logging_config()
def example(nodes):
    """Build and start a session: one switch with *nodes* nodes linked to it."""
    prefixes = IpPrefixes("10.83.0.0/16")
    # presumably injected into globals by the CORE script runner — confirm
    coreemu = globals()["coreemu"]
    session = coreemu.create_session()
    # nodes may only be added while the session is in configuration state
    session.set_state(EventTypes.CONFIGURATION_STATE)
    switch = session.add_node(_type=NodeTypes.SWITCH)
    # range() instead of xrange so the example also runs under Python 3
    for _ in range(nodes):
        node = session.add_node()
        interface = prefixes.create_interface(node)
        session.add_link(node.objid, switch.objid, interface_one=interface)
    session.instantiate()


if __name__ in {"__main__", "__builtin__"}:
    example(2)
| true | true |
f727f10e73f0dd4907a634b339ec124db37c3bc9 | 20,980 | py | Python | makepanda/makewheel.py | sean5470/panda3d | ea2d4fecd4af1d4064c5fe2ae2a902ef4c9b903d | [
"PHP-3.0",
"PHP-3.01"
] | null | null | null | makepanda/makewheel.py | sean5470/panda3d | ea2d4fecd4af1d4064c5fe2ae2a902ef4c9b903d | [
"PHP-3.0",
"PHP-3.01"
] | null | null | null | makepanda/makewheel.py | sean5470/panda3d | ea2d4fecd4af1d4064c5fe2ae2a902ef4c9b903d | [
"PHP-3.0",
"PHP-3.01"
] | null | null | null | """
Generates a wheel (.whl) file from the output of makepanda.
Since the wheel requires special linking, this will only work if compiled with
the `--wheel` parameter.
Please keep this file work with Panda3D 1.9 until that reaches EOL.
"""
from __future__ import print_function, unicode_literals
from distutils.util import get_platform
import json
import sys
import os
from os.path import join
import shutil
import zipfile
import hashlib
import tempfile
import subprocess
from distutils.sysconfig import get_config_var
from optparse import OptionParser
from makepandacore import ColorText, LocateBinary, ParsePandaVersion, GetExtensionSuffix, SetVerbose, GetVerbose, GetMetadataValue
from base64 import urlsafe_b64encode
default_platform = get_platform()
if default_platform.startswith("linux-"):
    # Is this manylinux1?
    # Heuristic: glibc 2.5 plus /opt/python is presumably the manylinux1
    # docker build image -- if so, advertise the manylinux1 platform tag.
    if os.path.isfile("/lib/libc-2.5.so") and os.path.isdir("/opt/python"):
        default_platform = default_platform.replace("linux", "manylinux1")
def get_abi_tag():
    """Return the wheel ABI tag (e.g. 'cp37m') for the running interpreter.

    On Python 3 the SOABI config variable is preferred; otherwise the tag
    is reconstructed from the version plus the d/m/u build flags.
    """
    if sys.version_info >= (3, 0):
        soabi = get_config_var('SOABI')
        if soabi:
            if soabi.startswith('cpython-'):
                # 'cpython-37m-x86_64-...' -> 'cp37m'-style short tag.
                return 'cp' + soabi.split('-')[1]
            # Non-CPython SOABI: sanitize into tag-safe characters.
            return soabi.replace('.', '_').replace('-', '_')

    # Python 2, or Python 3 without SOABI: build the tag by hand.
    tag = 'cp%d%d' % (sys.version_info[:2])

    # 'd' flag: debug build (Py_DEBUG, or refcount tracing as a fallback).
    debug_flag = get_config_var('Py_DEBUG')
    if (debug_flag is None and hasattr(sys, 'gettotalrefcount')) or debug_flag:
        tag += 'd'

    # 'm' flag: pymalloc build (assumed on when the variable is absent).
    malloc_flag = get_config_var('WITH_PYMALLOC')
    if malloc_flag is None or malloc_flag:
        tag += 'm'

    # 'u' flag: wide-unicode build (only meaningful before Python 3.3).
    if sys.version_info < (3, 3):
        usize = get_config_var('Py_UNICODE_SIZE')
        if (usize is None and sys.maxunicode == 0x10ffff) or usize == 4:
            tag += 'u'

    return tag
def is_exe_file(path):
    """Return True if *path* is an existing file named with a .exe suffix
    (case-insensitive)."""
    if not os.path.isfile(path):
        return False
    return path.lower().endswith('.exe')
def is_elf_file(path):
    """Return True if *path* is an existing, extension-less file that starts
    with the 4-byte ELF magic number."""
    name = os.path.basename(path)
    if '.' in name or not os.path.isfile(path):
        return False
    return open(path, 'rb').read(4) == b'\x7FELF'
def is_mach_o_file(path):
    """Return True if *path* is an existing, extension-less file whose first
    four bytes are one of the Mach-O magic numbers.

    Fixes a typo in the original: the little-endian fat magic was written
    b'\\xBE\\xBA\\xFE\\bCA', where '\\b' is a backspace character -- a 6-byte
    literal that could never match a 4-byte header.  The intended value is
    b'\\xBE\\xBA\\xFE\\xCA' (FAT_CIGAM).
    """
    base = os.path.basename(path)
    return os.path.isfile(path) and '.' not in base and \
           open(path, 'rb').read(4) in (b'\xCA\xFE\xBA\xBE',  # fat, big-endian
                                        b'\xBE\xBA\xFE\xCA',  # fat, little-endian
                                        b'\xFE\xED\xFA\xCE',  # 32-bit, big-endian
                                        b'\xCE\xFA\xED\xFE',  # 32-bit, little-endian
                                        b'\xFE\xED\xFA\xCF',  # 64-bit, big-endian
                                        b'\xCF\xFA\xED\xFE')  # 64-bit, little-endian
def is_fat_file(path):
    """Return True if *path* is an existing fat/universal Mach-O file
    (magic CAFEBABE in either byte order).

    Fixes a typo in the original: b'\\xBE\\xBA\\xFE\\bCA' contained a literal
    backspace ('\\b'), making the second magic a 6-byte value that could
    never equal the 4 bytes read from the file.  The intended constant is
    b'\\xBE\\xBA\\xFE\\xCA' (FAT_CIGAM).
    """
    return os.path.isfile(path) and \
           open(path, 'rb').read(4) in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\xCA')
# Pick the per-platform predicate used to decide whether a file in bin/
# is a native executable worth shipping in the wheel.
if sys.platform in ('win32', 'cygwin'):
    is_executable = is_exe_file
elif sys.platform == 'darwin':
    is_executable = is_mach_o_file
else:
    is_executable = is_elf_file
# Other global parameters
# Python implementation/version tag for the wheel filename, e.g. "cp37".
PY_VERSION = "cp{0}{1}".format(*sys.version_info)
ABI_TAG = get_abi_tag()
# File extensions that write_directory() never copies into the wheel.
EXCLUDE_EXT = [".pyc", ".pyo", ".N", ".prebuilt", ".xcf", ".plist", ".vcproj", ".sln"]
# Plug-ins to install.
PLUGIN_LIBS = ["pandagl", "pandagles", "pandagles2", "pandadx9", "p3tinydisplay", "p3ptloader", "p3assimp", "p3ffmpeg", "p3openal_audio", "p3fmod_audio"]
WHEEL_DATA = """Wheel-Version: 1.0
Generator: makepanda
Root-Is-Purelib: false
Tag: {0}-{1}-{2}
"""
METADATA = {
"license": GetMetadataValue('license'),
"name": GetMetadataValue('name'),
"metadata_version": "2.0",
"generator": "makepanda",
"summary": GetMetadataValue('description'),
"extensions": {
"python.details": {
"project_urls": {
"Home": GetMetadataValue('url'),
},
"document_names": {
"license": "LICENSE.txt"
},
"contacts": [
{
"role": "author",
"name": GetMetadataValue('author'),
"email": GetMetadataValue('author_email'),
}
]
}
},
"classifiers": GetMetadataValue('classifiers'),
}
PANDA3D_TOOLS_INIT = """import os, sys
import panda3d
if sys.platform in ('win32', 'cygwin'):
path_var = 'PATH'
elif sys.platform == 'darwin':
path_var = 'DYLD_LIBRARY_PATH'
else:
path_var = 'LD_LIBRARY_PATH'
dir = os.path.dirname(panda3d.__file__)
del panda3d
if not os.environ.get(path_var):
os.environ[path_var] = dir
else:
os.environ[path_var] = dir + os.pathsep + os.environ[path_var]
del os, sys, path_var, dir
def _exec_tool(tool):
import os, sys
from subprocess import Popen
tools_dir = os.path.dirname(__file__)
handle = Popen(sys.argv, executable=os.path.join(tools_dir, tool))
try:
try:
return handle.wait()
except KeyboardInterrupt:
# Give the program a chance to handle the signal gracefully.
return handle.wait()
except:
handle.kill()
handle.wait()
raise
# Register all the executables in this directory as global functions.
{0}
"""
def parse_dependencies_windows(data):
    """Extract DLL names from `dumpbin /dependents` output.

    Scans for the "... has the following dependencies" header, skips an
    optional blank separator line, then collects one stripped filename per
    line until the next blank line (or end of input).  Returns [] when the
    header is absent.
    """
    lines = data.splitlines()
    total = len(lines)
    idx = 0

    # Advance past the header line.
    while idx < total:
        current = lines[idx]
        idx += 1
        if ' has the following dependencies' in current:
            break

    # A single blank line may separate the header from the file list.
    if idx < total and lines[idx].strip() == '':
        idx += 1

    # Collect filenames until a blank line terminates the list.
    deps = []
    while idx < total:
        current = lines[idx].strip()
        idx += 1
        if current == '':
            break
        deps.append(current)
    return deps
def parse_dependencies_unix(data):
    """Extract library names from `otool -XL` or `ldd` output.

    Every line except a literal "statically linked" contributes its first
    space-separated token (the library name or path).  Note that blank
    lines therefore contribute an empty string, matching the original
    behavior.
    """
    deps = []
    for raw in data.splitlines():
        stripped = raw.strip()
        if stripped == "statically linked":
            continue
        deps.append(stripped.split(' ', 1)[0])
    return deps
def scan_dependencies(pathname):
    """Return the list of shared libraries that *pathname* links against.

    Runs `otool -XL` on macOS, `dumpbin /dependents` on Windows/cygwin and
    `ldd` elsewhere, then parses the tool's output.  Raises
    subprocess.CalledProcessError if the tool exits non-zero.
    """
    if sys.platform == "darwin":
        command = ['otool', '-XL', pathname]
    elif sys.platform in ("win32", "cygwin"):
        command = ['dumpbin', '/dependents', pathname]
    else:
        command = ['ldd', pathname]
    process = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        raise subprocess.CalledProcessError(retcode, command[0], output=output)
    filenames = None
    if sys.platform in ("win32", "cygwin"):
        filenames = parse_dependencies_windows(output)
    else:
        filenames = parse_dependencies_unix(output)
    if filenames is None:
        sys.exit("Unable to determine dependencies from %s" % (pathname))
    if sys.platform == "darwin" and len(filenames) > 0:
        # Filter out the library ID: otool lists the library's own install
        # name first, so drop entry 0 when its stem matches the file's stem.
        if os.path.basename(filenames[0]).split('.', 1)[0] == os.path.basename(pathname).split('.', 1)[0]:
            del filenames[0]
    return filenames
class WheelFile(object):
    """Incrementally assembles a .whl (zip) archive.

    Every file added is hashed so a PEP 376 RECORD manifest can be written
    by close().  Native binaries have their dependencies resolved against
    `lib_path` and pulled into the wheel alongside them.
    """

    def __init__(self, name, version, platform):
        # name/version/platform make up the PEP 427 wheel filename.
        self.name = name
        self.version = version
        self.platform = platform
        wheel_name = "{0}-{1}-{2}-{3}-{4}.whl".format(
            name, version, PY_VERSION, ABI_TAG, platform)
        print("Writing %s" % (wheel_name))
        self.zip_file = zipfile.ZipFile(wheel_name, 'w', zipfile.ZIP_DEFLATED)
        # RECORD lines ("path,sha256=...,size"), flushed by close().
        self.records = []
        # Used to locate dependency libraries.
        self.lib_path = []
        # Maps dependency name -> target path inside the wheel, or None
        # when it was considered and rejected/not found.
        self.dep_paths = {}

    def consider_add_dependency(self, target_path, dep, search_path=None):
        """Considers adding a dependency library.

        Returns the target_path if it was added, which may be different from
        target_path if it was already added earlier, or None if it wasn't."""
        if dep in self.dep_paths:
            # Already considered this.
            return self.dep_paths[dep]
        self.dep_paths[dep] = None
        if dep.lower().startswith("python") or os.path.basename(dep).startswith("libpython"):
            # Don't include the Python library.
            return
        if sys.platform == "darwin" and dep.endswith(".so"):
            # Temporary hack for 1.9, which had link deps on modules.
            return
        source_path = None
        if search_path is None:
            search_path = self.lib_path
        for lib_dir in search_path:
            # Ignore static stuff.
            path = os.path.join(lib_dir, dep)
            if os.path.isfile(path):
                source_path = os.path.normpath(path)
                break
        if not source_path:
            # Couldn't find library in the panda3d lib dir.
            #print("Ignoring %s" % (dep))
            return
        self.dep_paths[dep] = target_path
        self.write_file(target_path, source_path)
        return target_path

    def write_file(self, target_path, source_path):
        """Adds the given file to the .whl file.

        Native binaries are copied to a temp file first so their rpath /
        install names can be rewritten, and their dependencies are pulled
        in recursively via consider_add_dependency()."""
        # If this is a .so file, we should set the rpath appropriately.
        temp = None
        ext = os.path.splitext(source_path)[1]
        if ext in ('.so', '.dylib') or '.so.' in os.path.basename(source_path) or \
            (not ext and is_executable(source_path)):
            # Scan and add Unix dependencies.
            deps = scan_dependencies(source_path)
            for dep in deps:
                # Only include dependencies with relative path.  Otherwise we
                # end up overwriting system files like /lib/ld-linux.so.2!
                # Yes, it happened to me.
                if '/' not in dep:
                    target_dep = os.path.dirname(target_path) + '/' + dep
                    self.consider_add_dependency(target_dep, dep)
            suffix = ''
            if '.so' in os.path.basename(source_path):
                suffix = '.so'
            elif ext == '.dylib':
                suffix = '.dylib'
            temp = tempfile.NamedTemporaryFile(suffix=suffix, prefix='whl', delete=False)
            # On macOS, if no fat wheel was requested, extract the right architecture.
            if sys.platform == "darwin" and is_fat_file(source_path) and not self.platform.endswith("_intel"):
                if self.platform.endswith("_x86_64"):
                    arch = 'x86_64'
                else:
                    arch = self.platform.split('_')[-1]
                subprocess.call(['lipo', source_path, '-extract', arch, '-output', temp.name])
            else:
                # Otherwise, just copy it over.
                temp.write(open(source_path, 'rb').read())
            # Make the copy executable (0o111 adds the x bits).
            os.fchmod(temp.fileno(), os.fstat(temp.fileno()).st_mode | 0o111)
            temp.close()
            # Fix things like @loader_path/../lib references
            if sys.platform == "darwin":
                loader_path = [os.path.dirname(source_path)]
                for dep in deps:
                    if '@loader_path' not in dep:
                        continue
                    dep_path = dep.replace('@loader_path', '.')
                    target_dep = os.path.dirname(target_path) + '/' + os.path.basename(dep)
                    target_dep = self.consider_add_dependency(target_dep, dep_path, loader_path)
                    if not target_dep:
                        # It won't be included, so no use adjusting the path.
                        continue
                    new_dep = os.path.join('@loader_path', os.path.relpath(target_dep, os.path.dirname(target_path)))
                    subprocess.call(["install_name_tool", "-change", dep, new_dep, temp.name])
            else:
                # Strip symbols and make the library find its siblings next
                # to itself inside the installed wheel.
                subprocess.call(["strip", "-s", temp.name])
                subprocess.call(["patchelf", "--set-rpath", "$ORIGIN", temp.name])
            source_path = temp.name
        ext = ext.lower()
        if ext in ('.dll', '.pyd', '.exe'):
            # Scan and add Win32 dependencies.
            for dep in scan_dependencies(source_path):
                target_dep = os.path.dirname(target_path) + '/' + dep
                self.consider_add_dependency(target_dep, dep)
        # Calculate the SHA-256 hash and size.
        sha = hashlib.sha256()
        fp = open(source_path, 'rb')
        size = 0
        data = fp.read(1024 * 1024)
        while data:
            size += len(data)
            sha.update(data)
            data = fp.read(1024 * 1024)
        fp.close()
        # Save it in PEP-0376 format for writing out later.
        digest = urlsafe_b64encode(sha.digest()).decode('ascii')
        digest = digest.rstrip('=')
        self.records.append("{0},sha256={1},{2}\n".format(target_path, digest, size))
        if GetVerbose():
            print("Adding %s from %s" % (target_path, source_path))
        self.zip_file.write(source_path, target_path)
        #if temp:
        #    os.unlink(temp.name)

    def write_file_data(self, target_path, source_data):
        """Adds the given file from a string."""
        sha = hashlib.sha256()
        sha.update(source_data.encode())
        digest = urlsafe_b64encode(sha.digest()).decode('ascii')
        digest = digest.rstrip('=')
        self.records.append("{0},sha256={1},{2}\n".format(target_path, digest, len(source_data)))
        if GetVerbose():
            print("Adding %s from data" % target_path)
        self.zip_file.writestr(target_path, source_data)

    def write_directory(self, target_dir, source_dir):
        """Adds the given directory recursively to the .whl file."""
        for root, dirs, files in os.walk(source_dir):
            for file in files:
                if os.path.splitext(file)[1] in EXCLUDE_EXT:
                    continue
                source_path = os.path.join(root, file)
                target_path = os.path.join(target_dir, os.path.relpath(source_path, source_dir))
                target_path = target_path.replace('\\', '/')
                self.write_file(target_path, source_path)

    def close(self):
        """Finalizes the archive by appending the RECORD manifest."""
        # Write the RECORD file.
        record_file = "{0}-{1}.dist-info/RECORD".format(self.name, self.version)
        self.records.append(record_file + ",,\n")
        self.zip_file.writestr(record_file, "".join(self.records))
        self.zip_file.close()
def makewheel(version, output_dir, platform=default_platform):
    """Package makepanda's build output in *output_dir* into a wheel.

    `version` is the Panda3D version string used in the wheel name and
    metadata; `platform` is the wheel platform tag (dashes and dots are
    normalized to underscores).  Raises if patchelf is missing on Linux.
    """
    if sys.platform not in ("win32", "darwin") and not sys.platform.startswith("cygwin"):
        if not LocateBinary("patchelf"):
            raise Exception("patchelf is required when building a Linux wheel.")
    # Wheel platform tags may only contain underscores.
    platform = platform.replace('-', '_').replace('.', '_')
    # Global filepaths
    panda3d_dir = join(output_dir, "panda3d")
    pandac_dir = join(output_dir, "pandac")
    direct_dir = join(output_dir, "direct")
    models_dir = join(output_dir, "models")
    etc_dir = join(output_dir, "etc")
    bin_dir = join(output_dir, "bin")
    if sys.platform == "win32":
        libs_dir = join(output_dir, "bin")
    else:
        libs_dir = join(output_dir, "lib")
    license_src = "LICENSE"
    readme_src = "README.md"
    # Update relevant METADATA entries
    METADATA['version'] = version
    version_classifiers = [
        "Programming Language :: Python :: {0}".format(*sys.version_info),
        "Programming Language :: Python :: {0}.{1}".format(*sys.version_info),
    ]
    METADATA['classifiers'].extend(version_classifiers)
    # Build out the metadata
    details = METADATA["extensions"]["python.details"]
    homepage = details["project_urls"]["Home"]
    author = details["contacts"][0]["name"]
    email = details["contacts"][0]["email"]
    metadata = ''.join([
        "Metadata-Version: {metadata_version}\n" \
        "Name: {name}\n" \
        "Version: {version}\n" \
        "Summary: {summary}\n" \
        "License: {license}\n".format(**METADATA),
        "Home-page: {0}\n".format(homepage),
        "Author: {0}\n".format(author),
        "Author-email: {0}\n".format(email),
        "Platform: {0}\n".format(platform),
    ] + ["Classifier: {0}\n".format(c) for c in METADATA['classifiers']])
    # Zip it up and name it the right thing
    whl = WheelFile('panda3d', version, platform)
    whl.lib_path = [libs_dir]
    # Add the trees with Python modules.
    whl.write_directory('direct', direct_dir)
    # Write the panda3d tree. We use a custom empty __init__ since the
    # default one adds the bin directory to the PATH, which we don't have.
    whl.write_file_data('panda3d/__init__.py', '')
    ext_suffix = GetExtensionSuffix()
    for file in os.listdir(panda3d_dir):
        if file == '__init__.py':
            pass
        elif file.endswith(ext_suffix) or file.endswith('.py'):
            source_path = os.path.join(panda3d_dir, file)
            if file.endswith('.pyd') and platform.startswith('cygwin'):
                # Rename it to .dll for cygwin Python to be able to load it.
                target_path = 'panda3d/' + os.path.splitext(file)[0] + '.dll'
            else:
                target_path = 'panda3d/' + file
            whl.write_file(target_path, source_path)
    # Add plug-ins.  Only those actually built are shipped.
    for lib in PLUGIN_LIBS:
        plugin_name = 'lib' + lib
        if sys.platform in ('win32', 'cygwin'):
            plugin_name += '.dll'
        elif sys.platform == 'darwin':
            plugin_name += '.dylib'
        else:
            plugin_name += '.so'
        plugin_path = os.path.join(libs_dir, plugin_name)
        if os.path.isfile(plugin_path):
            whl.write_file('panda3d/' + plugin_name, plugin_path)
    # Add the .data directory, containing additional files.
    data_dir = 'panda3d-{0}.data'.format(version)
    #whl.write_directory(data_dir + '/data/etc', etc_dir)
    #whl.write_directory(data_dir + '/data/models', models_dir)
    # Actually, let's not.  That seems to install the files to the strangest
    # places in the user's filesystem.  Let's instead put them in panda3d.
    whl.write_directory('panda3d/etc', etc_dir)
    whl.write_directory('panda3d/models', models_dir)
    # Add the pandac tree for backward compatibility.
    for file in os.listdir(pandac_dir):
        if file.endswith('.py'):
            whl.write_file('pandac/' + file, os.path.join(pandac_dir, file))
    # Add a panda3d-tools directory containing the executables.
    entry_points = '[console_scripts]\n'
    entry_points += 'eggcacher = direct.directscripts.eggcacher:main\n'
    entry_points += 'pfreeze = direct.showutil.pfreeze:main\n'
    tools_init = ''
    for file in os.listdir(bin_dir):
        basename = os.path.splitext(file)[0]
        if basename in ('eggcacher', 'packpanda'):
            continue
        source_path = os.path.join(bin_dir, file)
        if is_executable(source_path):
            # Put the .exe files inside the panda3d-tools directory.
            whl.write_file('panda3d_tools/' + file, source_path)
            # Tell pip to create a wrapper script.
            funcname = basename.replace('-', '_')
            entry_points += '{0} = panda3d_tools:{1}\n'.format(basename, funcname)
            tools_init += '{0} = lambda: _exec_tool({1!r})\n'.format(funcname, file)
    whl.write_file_data('panda3d_tools/__init__.py', PANDA3D_TOOLS_INIT.format(tools_init))
    # Add the dist-info directory last.
    info_dir = 'panda3d-{0}.dist-info'.format(version)
    whl.write_file_data(info_dir + '/entry_points.txt', entry_points)
    whl.write_file_data(info_dir + '/metadata.json', json.dumps(METADATA, indent=4, separators=(',', ': ')))
    whl.write_file_data(info_dir + '/METADATA', metadata)
    whl.write_file_data(info_dir + '/WHEEL', WHEEL_DATA.format(PY_VERSION, ABI_TAG, platform))
    whl.write_file(info_dir + '/LICENSE.txt', license_src)
    whl.write_file(info_dir + '/README.md', readme_src)
    whl.write_file_data(info_dir + '/top_level.txt', 'direct\npanda3d\npandac\npanda3d_tools\n')
    whl.close()
if __name__ == "__main__":
    # Default the version to what dtool/PandaVersion.pp declares.
    version = ParsePandaVersion("dtool/PandaVersion.pp")
    parser = OptionParser()
    parser.add_option('', '--version', dest = 'version', help = 'Panda3D version number (default: %s)' % (version), default = version)
    parser.add_option('', '--outputdir', dest = 'outputdir', help = 'Makepanda\'s output directory (default: built)', default = 'built')
    parser.add_option('', '--verbose', dest = 'verbose', help = 'Enable verbose output', action = 'store_true', default = False)
    # NOTE(review): the help text advertises default_platform (which may be
    # manylinux1), but the actual default is get_platform() -- so the
    # manylinux1 detection is bypassed unless --platform is given.  Confirm
    # whether that mismatch is intentional.
    parser.add_option('', '--platform', dest = 'platform', help = 'Override platform tag (default: %s)' % (default_platform), default = get_platform())
    (options, args) = parser.parse_args()
    SetVerbose(options.verbose)
    makewheel(options.version, options.outputdir, options.platform)
| 35.863248 | 153 | 0.60715 | from __future__ import print_function, unicode_literals
from distutils.util import get_platform
import json
import sys
import os
from os.path import join
import shutil
import zipfile
import hashlib
import tempfile
import subprocess
from distutils.sysconfig import get_config_var
from optparse import OptionParser
from makepandacore import ColorText, LocateBinary, ParsePandaVersion, GetExtensionSuffix, SetVerbose, GetVerbose, GetMetadataValue
from base64 import urlsafe_b64encode
default_platform = get_platform()
if default_platform.startswith("linux-"):
if os.path.isfile("/lib/libc-2.5.so") and os.path.isdir("/opt/python"):
default_platform = default_platform.replace("linux", "manylinux1")
def get_abi_tag():
if sys.version_info >= (3, 0):
soabi = get_config_var('SOABI')
if soabi and soabi.startswith('cpython-'):
return 'cp' + soabi.split('-')[1]
elif soabi:
return soabi.replace('.', '_').replace('-', '_')
soabi = 'cp%d%d' % (sys.version_info[:2])
debug_flag = get_config_var('Py_DEBUG')
if (debug_flag is None and hasattr(sys, 'gettotalrefcount')) or debug_flag:
soabi += 'd'
malloc_flag = get_config_var('WITH_PYMALLOC')
if malloc_flag is None or malloc_flag:
soabi += 'm'
if sys.version_info < (3, 3):
usize = get_config_var('Py_UNICODE_SIZE')
if (usize is None and sys.maxunicode == 0x10ffff) or usize == 4:
soabi += 'u'
return soabi
def is_exe_file(path):
return os.path.isfile(path) and path.lower().endswith('.exe')
def is_elf_file(path):
base = os.path.basename(path)
return os.path.isfile(path) and '.' not in base and \
open(path, 'rb').read(4) == b'\x7FELF'
def is_mach_o_file(path):
base = os.path.basename(path)
return os.path.isfile(path) and '.' not in base and \
open(path, 'rb').read(4) in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\bCA',
b'\xFE\xED\xFA\xCE', b'\xCE\xFA\xED\xFE',
b'\xFE\xED\xFA\xCF', b'\xCF\xFA\xED\xFE')
def is_fat_file(path):
return os.path.isfile(path) and \
open(path, 'rb').read(4) in (b'\xCA\xFE\xBA\xBE', b'\xBE\xBA\xFE\bCA')
if sys.platform in ('win32', 'cygwin'):
is_executable = is_exe_file
elif sys.platform == 'darwin':
is_executable = is_mach_o_file
else:
is_executable = is_elf_file
PY_VERSION = "cp{0}{1}".format(*sys.version_info)
ABI_TAG = get_abi_tag()
EXCLUDE_EXT = [".pyc", ".pyo", ".N", ".prebuilt", ".xcf", ".plist", ".vcproj", ".sln"]
PLUGIN_LIBS = ["pandagl", "pandagles", "pandagles2", "pandadx9", "p3tinydisplay", "p3ptloader", "p3assimp", "p3ffmpeg", "p3openal_audio", "p3fmod_audio"]
WHEEL_DATA = """Wheel-Version: 1.0
Generator: makepanda
Root-Is-Purelib: false
Tag: {0}-{1}-{2}
"""
METADATA = {
"license": GetMetadataValue('license'),
"name": GetMetadataValue('name'),
"metadata_version": "2.0",
"generator": "makepanda",
"summary": GetMetadataValue('description'),
"extensions": {
"python.details": {
"project_urls": {
"Home": GetMetadataValue('url'),
},
"document_names": {
"license": "LICENSE.txt"
},
"contacts": [
{
"role": "author",
"name": GetMetadataValue('author'),
"email": GetMetadataValue('author_email'),
}
]
}
},
"classifiers": GetMetadataValue('classifiers'),
}
PANDA3D_TOOLS_INIT = """import os, sys
import panda3d
if sys.platform in ('win32', 'cygwin'):
path_var = 'PATH'
elif sys.platform == 'darwin':
path_var = 'DYLD_LIBRARY_PATH'
else:
path_var = 'LD_LIBRARY_PATH'
dir = os.path.dirname(panda3d.__file__)
del panda3d
if not os.environ.get(path_var):
os.environ[path_var] = dir
else:
os.environ[path_var] = dir + os.pathsep + os.environ[path_var]
del os, sys, path_var, dir
def _exec_tool(tool):
import os, sys
from subprocess import Popen
tools_dir = os.path.dirname(__file__)
handle = Popen(sys.argv, executable=os.path.join(tools_dir, tool))
try:
try:
return handle.wait()
except KeyboardInterrupt:
# Give the program a chance to handle the signal gracefully.
return handle.wait()
except:
handle.kill()
handle.wait()
raise
# Register all the executables in this directory as global functions.
{0}
"""
def parse_dependencies_windows(data):
lines = data.splitlines()
li = 0
while li < len(lines):
line = lines[li]
li += 1
if line.find(' has the following dependencies') != -1:
break
if li < len(lines):
line = lines[li]
if line.strip() == '':
li += 1
filenames = []
while li < len(lines):
line = lines[li]
li += 1
line = line.strip()
if line == '':
# We're done.
return filenames
filenames.append(line)
return filenames
def parse_dependencies_unix(data):
lines = data.splitlines()
filenames = []
for l in lines:
l = l.strip()
if l != "statically linked":
filenames.append(l.split(' ', 1)[0])
return filenames
def scan_dependencies(pathname):
if sys.platform == "darwin":
command = ['otool', '-XL', pathname]
elif sys.platform in ("win32", "cygwin"):
command = ['dumpbin', '/dependents', pathname]
else:
command = ['ldd', pathname]
process = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
raise subprocess.CalledProcessError(retcode, command[0], output=output)
filenames = None
if sys.platform in ("win32", "cygwin"):
filenames = parse_dependencies_windows(output)
else:
filenames = parse_dependencies_unix(output)
if filenames is None:
sys.exit("Unable to determine dependencies from %s" % (pathname))
if sys.platform == "darwin" and len(filenames) > 0:
if os.path.basename(filenames[0]).split('.', 1)[0] == os.path.basename(pathname).split('.', 1)[0]:
del filenames[0]
return filenames
class WheelFile(object):
def __init__(self, name, version, platform):
self.name = name
self.version = version
self.platform = platform
wheel_name = "{0}-{1}-{2}-{3}-{4}.whl".format(
name, version, PY_VERSION, ABI_TAG, platform)
print("Writing %s" % (wheel_name))
self.zip_file = zipfile.ZipFile(wheel_name, 'w', zipfile.ZIP_DEFLATED)
self.records = []
self.lib_path = []
self.dep_paths = {}
def consider_add_dependency(self, target_path, dep, search_path=None):
if dep in self.dep_paths:
return self.dep_paths[dep]
self.dep_paths[dep] = None
if dep.lower().startswith("python") or os.path.basename(dep).startswith("libpython"):
return
if sys.platform == "darwin" and dep.endswith(".so"):
# Temporary hack for 1.9, which had link deps on modules.
return
source_path = None
if search_path is None:
search_path = self.lib_path
for lib_dir in search_path:
# Ignore static stuff.
path = os.path.join(lib_dir, dep)
if os.path.isfile(path):
source_path = os.path.normpath(path)
break
if not source_path:
# Couldn't find library in the panda3d lib dir.
return
self.dep_paths[dep] = target_path
self.write_file(target_path, source_path)
return target_path
def write_file(self, target_path, source_path):
temp = None
ext = os.path.splitext(source_path)[1]
if ext in ('.so', '.dylib') or '.so.' in os.path.basename(source_path) or \
(not ext and is_executable(source_path)):
deps = scan_dependencies(source_path)
for dep in deps:
if '/' not in dep:
target_dep = os.path.dirname(target_path) + '/' + dep
self.consider_add_dependency(target_dep, dep)
suffix = ''
if '.so' in os.path.basename(source_path):
suffix = '.so'
elif ext == '.dylib':
suffix = '.dylib'
temp = tempfile.NamedTemporaryFile(suffix=suffix, prefix='whl', delete=False)
if sys.platform == "darwin" and is_fat_file(source_path) and not self.platform.endswith("_intel"):
if self.platform.endswith("_x86_64"):
arch = 'x86_64'
else:
arch = self.platform.split('_')[-1]
subprocess.call(['lipo', source_path, '-extract', arch, '-output', temp.name])
else:
temp.write(open(source_path, 'rb').read())
os.fchmod(temp.fileno(), os.fstat(temp.fileno()).st_mode | 0o111)
temp.close()
if sys.platform == "darwin":
loader_path = [os.path.dirname(source_path)]
for dep in deps:
if '@loader_path' not in dep:
continue
dep_path = dep.replace('@loader_path', '.')
target_dep = os.path.dirname(target_path) + '/' + os.path.basename(dep)
target_dep = self.consider_add_dependency(target_dep, dep_path, loader_path)
if not target_dep:
continue
new_dep = os.path.join('@loader_path', os.path.relpath(target_dep, os.path.dirname(target_path)))
subprocess.call(["install_name_tool", "-change", dep, new_dep, temp.name])
else:
subprocess.call(["strip", "-s", temp.name])
subprocess.call(["patchelf", "--set-rpath", "$ORIGIN", temp.name])
source_path = temp.name
ext = ext.lower()
if ext in ('.dll', '.pyd', '.exe'):
# Scan and add Win32 dependencies.
for dep in scan_dependencies(source_path):
target_dep = os.path.dirname(target_path) + '/' + dep
self.consider_add_dependency(target_dep, dep)
# Calculate the SHA-256 hash and size.
sha = hashlib.sha256()
fp = open(source_path, 'rb')
size = 0
data = fp.read(1024 * 1024)
while data:
size += len(data)
sha.update(data)
data = fp.read(1024 * 1024)
fp.close()
# Save it in PEP-0376 format for writing out later.
digest = urlsafe_b64encode(sha.digest()).decode('ascii')
digest = digest.rstrip('=')
self.records.append("{0},sha256={1},{2}\n".format(target_path, digest, size))
if GetVerbose():
print("Adding %s from %s" % (target_path, source_path))
self.zip_file.write(source_path, target_path)
#if temp:
# os.unlink(temp.name)
def write_file_data(self, target_path, source_data):
sha = hashlib.sha256()
sha.update(source_data.encode())
digest = urlsafe_b64encode(sha.digest()).decode('ascii')
digest = digest.rstrip('=')
self.records.append("{0},sha256={1},{2}\n".format(target_path, digest, len(source_data)))
if GetVerbose():
print("Adding %s from data" % target_path)
self.zip_file.writestr(target_path, source_data)
def write_directory(self, target_dir, source_dir):
for root, dirs, files in os.walk(source_dir):
for file in files:
if os.path.splitext(file)[1] in EXCLUDE_EXT:
continue
source_path = os.path.join(root, file)
target_path = os.path.join(target_dir, os.path.relpath(source_path, source_dir))
target_path = target_path.replace('\\', '/')
self.write_file(target_path, source_path)
def close(self):
# Write the RECORD file.
record_file = "{0}-{1}.dist-info/RECORD".format(self.name, self.version)
self.records.append(record_file + ",,\n")
self.zip_file.writestr(record_file, "".join(self.records))
self.zip_file.close()
def makewheel(version, output_dir, platform=default_platform):
if sys.platform not in ("win32", "darwin") and not sys.platform.startswith("cygwin"):
if not LocateBinary("patchelf"):
raise Exception("patchelf is required when building a Linux wheel.")
platform = platform.replace('-', '_').replace('.', '_')
# Global filepaths
panda3d_dir = join(output_dir, "panda3d")
pandac_dir = join(output_dir, "pandac")
direct_dir = join(output_dir, "direct")
models_dir = join(output_dir, "models")
etc_dir = join(output_dir, "etc")
bin_dir = join(output_dir, "bin")
if sys.platform == "win32":
libs_dir = join(output_dir, "bin")
else:
libs_dir = join(output_dir, "lib")
license_src = "LICENSE"
readme_src = "README.md"
# Update relevant METADATA entries
METADATA['version'] = version
version_classifiers = [
"Programming Language :: Python :: {0}".format(*sys.version_info),
"Programming Language :: Python :: {0}.{1}".format(*sys.version_info),
]
METADATA['classifiers'].extend(version_classifiers)
# Build out the metadata
details = METADATA["extensions"]["python.details"]
homepage = details["project_urls"]["Home"]
author = details["contacts"][0]["name"]
email = details["contacts"][0]["email"]
metadata = ''.join([
"Metadata-Version: {metadata_version}\n" \
"Name: {name}\n" \
"Version: {version}\n" \
"Summary: {summary}\n" \
"License: {license}\n".format(**METADATA),
"Home-page: {0}\n".format(homepage),
"Author: {0}\n".format(author),
"Author-email: {0}\n".format(email),
"Platform: {0}\n".format(platform),
] + ["Classifier: {0}\n".format(c) for c in METADATA['classifiers']])
# Zip it up and name it the right thing
whl = WheelFile('panda3d', version, platform)
whl.lib_path = [libs_dir]
# Add the trees with Python modules.
whl.write_directory('direct', direct_dir)
# Write the panda3d tree. We use a custom empty __init__ since the
# default one adds the bin directory to the PATH, which we don't have.
whl.write_file_data('panda3d/__init__.py', '')
ext_suffix = GetExtensionSuffix()
for file in os.listdir(panda3d_dir):
if file == '__init__.py':
pass
elif file.endswith(ext_suffix) or file.endswith('.py'):
source_path = os.path.join(panda3d_dir, file)
if file.endswith('.pyd') and platform.startswith('cygwin'):
target_path = 'panda3d/' + os.path.splitext(file)[0] + '.dll'
else:
target_path = 'panda3d/' + file
whl.write_file(target_path, source_path)
for lib in PLUGIN_LIBS:
plugin_name = 'lib' + lib
if sys.platform in ('win32', 'cygwin'):
plugin_name += '.dll'
elif sys.platform == 'darwin':
plugin_name += '.dylib'
else:
plugin_name += '.so'
plugin_path = os.path.join(libs_dir, plugin_name)
if os.path.isfile(plugin_path):
whl.write_file('panda3d/' + plugin_name, plugin_path)
data_dir = 'panda3d-{0}.data'.format(version)
# places in the user's filesystem. Let's instead put them in panda3d.
whl.write_directory('panda3d/etc', etc_dir)
whl.write_directory('panda3d/models', models_dir)
# Add the pandac tree for backward compatibility.
for file in os.listdir(pandac_dir):
if file.endswith('.py'):
whl.write_file('pandac/' + file, os.path.join(pandac_dir, file))
# Add a panda3d-tools directory containing the executables.
entry_points = '[console_scripts]\n'
entry_points += 'eggcacher = direct.directscripts.eggcacher:main\n'
entry_points += 'pfreeze = direct.showutil.pfreeze:main\n'
tools_init = ''
for file in os.listdir(bin_dir):
basename = os.path.splitext(file)[0]
if basename in ('eggcacher', 'packpanda'):
continue
source_path = os.path.join(bin_dir, file)
if is_executable(source_path):
# Put the .exe files inside the panda3d-tools directory.
whl.write_file('panda3d_tools/' + file, source_path)
# Tell pip to create a wrapper script.
funcname = basename.replace('-', '_')
entry_points += '{0} = panda3d_tools:{1}\n'.format(basename, funcname)
tools_init += '{0} = lambda: _exec_tool({1!r})\n'.format(funcname, file)
whl.write_file_data('panda3d_tools/__init__.py', PANDA3D_TOOLS_INIT.format(tools_init))
# Add the dist-info directory last.
info_dir = 'panda3d-{0}.dist-info'.format(version)
whl.write_file_data(info_dir + '/entry_points.txt', entry_points)
whl.write_file_data(info_dir + '/metadata.json', json.dumps(METADATA, indent=4, separators=(',', ': ')))
whl.write_file_data(info_dir + '/METADATA', metadata)
whl.write_file_data(info_dir + '/WHEEL', WHEEL_DATA.format(PY_VERSION, ABI_TAG, platform))
whl.write_file(info_dir + '/LICENSE.txt', license_src)
whl.write_file(info_dir + '/README.md', readme_src)
whl.write_file_data(info_dir + '/top_level.txt', 'direct\npanda3d\npandac\npanda3d_tools\n')
whl.close()
if __name__ == "__main__":
version = ParsePandaVersion("dtool/PandaVersion.pp")
parser = OptionParser()
parser.add_option('', '--version', dest = 'version', help = 'Panda3D version number (default: %s)' % (version), default = version)
parser.add_option('', '--outputdir', dest = 'outputdir', help = 'Makepanda\'s output directory (default: built)', default = 'built')
parser.add_option('', '--verbose', dest = 'verbose', help = 'Enable verbose output', action = 'store_true', default = False)
parser.add_option('', '--platform', dest = 'platform', help = 'Override platform tag (default: %s)' % (default_platform), default = get_platform())
(options, args) = parser.parse_args()
SetVerbose(options.verbose)
makewheel(options.version, options.outputdir, options.platform)
| true | true |
f727f2af04e77662cb1b105b6ea82138d5057c4d | 435 | py | Python | sigmoid.py | tyburam/python-machine-learning | 7cb346c99d24e959c1af63532603dd118558b16f | [
"MIT"
] | 1 | 2021-04-28T05:40:59.000Z | 2021-04-28T05:40:59.000Z | sigmoid.py | tyburam/python-machine-learning | 7cb346c99d24e959c1af63532603dd118558b16f | [
"MIT"
] | null | null | null | sigmoid.py | tyburam/python-machine-learning | 7cb346c99d24e959c1af63532603dd118558b16f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import matplotlib.pyplot as plt
import numpy as np
def sigmoid(z):
    """Logistic function: map any real input (scalar or array) into (0, 1)."""
    exp_neg_z = np.exp(-z)
    return 1.0 / (1.0 + exp_neg_z)
# Plot the logistic (sigmoid) curve over [-7, 7).
z = np.arange(-7, 7, 0.01)
phi_z = sigmoid(z)

plt.plot(z, phi_z)
plt.axvline(0.0, color = 'k')
plt.axhspan(0.0, 1.0, facecolor = '1.0', alpha = 1.0, ls = 'dotted')
plt.axhline(0.5, ls = 'dotted', color = 'k')
plt.yticks([0.0, 0.5, 1.0])
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
# Raw string: '\p' is an invalid escape sequence in a plain string literal
# (DeprecationWarning today, SyntaxWarning from Python 3.12).
plt.ylabel(r'$\phi (z)$')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
def sigmoid(z):
    """Return the logistic sigmoid of ``z``; works on scalars and arrays."""
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator
# Plot the logistic (sigmoid) curve over [-7, 7).
z = np.arange(-7, 7, 0.01)
phi_z = sigmoid(z)

plt.plot(z, phi_z)
plt.axvline(0.0, color = 'k')
plt.axhspan(0.0, 1.0, facecolor = '1.0', alpha = 1.0, ls = 'dotted')
plt.axhline(0.5, ls = 'dotted', color = 'k')
plt.yticks([0.0, 0.5, 1.0])
plt.ylim(-0.1, 1.1)
plt.xlabel('z')
# Raw string: '\p' is an invalid escape sequence in a plain string literal
# (DeprecationWarning today, SyntaxWarning from Python 3.12).
plt.ylabel(r'$\phi (z)$')
plt.show()
f727f32f70eef26c04e670bca0235c128dc1f09b | 11,477 | py | Python | simplejson/simplejson/tests/test_decoder.py | gollum18/simplejson-test-suite | 3fea15709adb79ef33d7e020e38ec29bf82f2269 | [
"MIT"
] | null | null | null | simplejson/simplejson/tests/test_decoder.py | gollum18/simplejson-test-suite | 3fea15709adb79ef33d7e020e38ec29bf82f2269 | [
"MIT"
] | null | null | null | simplejson/simplejson/tests/test_decoder.py | gollum18/simplejson-test-suite | 3fea15709adb79ef33d7e020e38ec29bf82f2269 | [
"MIT"
] | null | null | null | # Name: test_decoder.py
# Since: April 13th, 2020
# Author: Christen Ford
# Purpose: Implementes unit tests for simplejson.decoder.
from unittest import TestCase
import simplejson.decoder as decoder
import simplejson.errors as errors
class TestDecoder(TestCase):
    """Unit tests for the simplejson.decoder module.

    These test cases make sane attempts at testing each class and
    method found in the decoder module, but they are not exhaustive.
    All expected failures assert ``errors.JSONDecodeError`` (decoder
    re-exports the same class, so using one reference consistently
    keeps the suite uniform).
    """

    def test_scanstring_correct(self):
        """py_scanstring() decodes valid input (TEST-0000).

        Returns the decoded string content and the index just past the
        closing quote of the first embedded string literal.
        """
        test_input = '"{"abc": 0, "def": 1, "ghi": 2}"'
        decoded_str, last_char_index = decoder.py_scanstring(
            s=test_input,
            end=1
        )
        self.assertEqual(decoded_str, "{")
        self.assertEqual(last_char_index, 3)

    def test_scanstring_malformed(self):
        """py_scanstring() raises on malformed input (TEST-0001)."""
        test_inputs = ('{]', '[}')
        for test_input in test_inputs:
            self.assertRaises(
                errors.JSONDecodeError,
                decoder.py_scanstring,
                s=test_input,
                end=1
            )

    def test_scanstring_empty(self):
        """py_scanstring() raises on an empty string (TEST-0002)."""
        test_input = ''
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.py_scanstring,
            s=test_input,
            end=1
        )

    def test_json_object_correct(self):
        """JSONObject() decodes a JSON object to a dict (TEST-0003).

        Input: ('{"abc": 0, "def": 1, "ghi": 2}', 1)
        Output: ({"abc": 0, "def": 1, "ghi": 2}, 30)
        """
        test_input = ('{"abc": 0, "def": 1, "ghi": 2}', 1)
        out_dict = dict()
        out_dict["abc"] = 0
        out_dict["def"] = 1
        out_dict["ghi"] = 2
        test_output = (out_dict, 30)
        dcdr = decoder.JSONDecoder()
        self.assertEqual(
            decoder.JSONObject(
                state=test_input,
                encoding=dcdr.encoding,
                strict=dcdr.strict,
                scan_once=dcdr.scan_once,
                object_hook=dcdr.object_hook,
                object_pairs_hook=dcdr.object_pairs_hook
            ),
            test_output
        )

    def test_json_object_malformed(self):
        """JSONObject() raises on a malformed JSON object (TEST-0004)."""
        test_input = ('\"{"abc": 0, "def": 1, "ghi": 2]\"', 1)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONObject,
            state=test_input,
            encoding=dcdr.encoding,
            strict=dcdr.strict,
            scan_once=dcdr.scan_once,
            object_hook=dcdr.object_hook,
            object_pairs_hook=dcdr.object_pairs_hook
        )

    def test_json_object_empty(self):
        """JSONObject() raises on an empty string (TEST-0005)."""
        test_input = ("", 0)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONObject,
            state=test_input,
            encoding=dcdr.encoding,
            strict=dcdr.strict,
            scan_once=dcdr.scan_once,
            object_hook=dcdr.object_hook,
            object_pairs_hook=dcdr.object_pairs_hook
        )

    def test_json_array_correct(self):
        """JSONArray() decodes a well-formed JSON array (TEST-0006)."""
        test_input = ('["abc", "def", "ghi"]', 1)
        test_output = (['abc', 'def', 'ghi'], 21)
        dcdr = decoder.JSONDecoder()
        self.assertEqual(
            decoder.JSONArray(
                test_input,
                dcdr.scan_once
            ),
            test_output
        )

    def test_json_array_malformed(self):
        """JSONArray() raises on a malformed JSON array (TEST-0007)."""
        test_input = ('["abc", "def", "ghi"}', 1)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONArray,
            test_input,
            dcdr.scan_once
        )

    def test_json_array_empty(self):
        """JSONArray() raises on an empty string (TEST-0008)."""
        test_input = ('', 0)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONArray,
            test_input,
            dcdr.scan_once
        )

    def test_json_decoder_create_utf(self):
        """JSONDecoder accepts the 'utf-8' encoding (TEST-0009)."""
        dcdr = decoder.JSONDecoder(encoding="utf-8")
        self.assertEqual(dcdr.encoding, "utf-8")

    def test_json_decoder_create_unicode(self):
        """JSONDecoder accepts the 'unicode' encoding (TEST-0010)."""
        dcdr = decoder.JSONDecoder(encoding="unicode")
        self.assertEqual(dcdr.encoding, "unicode")

    def test_json_decoder_create_invalid(self):
        """JSONDecoder rejects an unsupported encoding (TEST-0011)."""
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONDecoder,
            encoding="ISO-8859-1"
        )

    def test_json_decoder_decode_correct(self):
        """decode() parses a well-formed JSON document (TEST-0012)."""
        test_input = '{"id": "001", "name": "test-012", "items": ["a", "b", "c"]}'
        test_output = dict()
        test_output["id"] = "001"
        test_output["name"] = "test-012"
        test_output["items"] = ["a", "b", "c"]
        dcdr = decoder.JSONDecoder()
        self.assertEqual(dcdr.decode(test_input), test_output)

    def test_json_decoder_decode_malformed(self):
        """decode() raises on a malformed JSON document (TEST-0013)."""
        test_input = '{"id": "001", "name": "test-012", "items": ["a", "b", "c"]]]'
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.decode,
            test_input
        )

    def test_json_decoder_decoder_empty(self):
        """decode() raises on an empty string (TEST-0014)."""
        test_input = ''
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.decode,
            test_input
        )

    def test_raw_decoder_decode_correct(self):
        """raw_decode() parses a JSON value with trailing text (TEST-0015)."""
        dcdr = decoder.JSONDecoder()
        test_input = '["abc", "def", "ghi"] This is a test!'
        test_output = (['abc', 'def', 'ghi'], 21)
        self.assertEqual(dcdr.raw_decode(test_input), test_output)

    def test_raw_decoder_decode_malformed(self):
        """raw_decode() raises on a malformed embedded value (TEST-0016)."""
        dcdr = decoder.JSONDecoder()
        test_input = '["abc", "def", "ghi"} This is a test!'
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.raw_decode,
            test_input
        )

    def test_raw_decoder_decoder_empty(self):
        """raw_decode() raises on an empty string (TEST-0017)."""
        test_input = ''
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.raw_decode,
            test_input
        )
| 26.87822 | 83 | 0.544393 |
from unittest import TestCase
import simplejson.decoder as decoder
import simplejson.errors as errors
class TestDecoder(TestCase):
    """Unit tests for the simplejson.decoder module.

    Expected failures consistently assert ``errors.JSONDecodeError``
    (decoder re-exports the same class).
    """

    def test_scanstring_correct(self):
        """py_scanstring() decodes valid string content."""
        test_input = '"{"abc": 0, "def": 1, "ghi": 2}"'
        decoded_str, last_char_index = decoder.py_scanstring(
            s=test_input,
            end=1
        )
        self.assertEqual(decoded_str, "{")
        self.assertEqual(last_char_index, 3)

    def test_scanstring_malformed(self):
        """py_scanstring() raises on malformed input."""
        test_inputs = ('{]', '[}')
        for test_input in test_inputs:
            self.assertRaises(
                errors.JSONDecodeError,
                decoder.py_scanstring,
                s=test_input,
                end=1
            )

    def test_scanstring_empty(self):
        """py_scanstring() raises on an empty string."""
        test_input = ''
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.py_scanstring,
            s=test_input,
            end=1
        )

    def test_json_object_correct(self):
        """JSONObject() decodes a JSON object to a dict."""
        test_input = ('{"abc": 0, "def": 1, "ghi": 2}', 1)
        out_dict = dict()
        out_dict["abc"] = 0
        out_dict["def"] = 1
        out_dict["ghi"] = 2
        test_output = (out_dict, 30)
        dcdr = decoder.JSONDecoder()
        self.assertEqual(
            decoder.JSONObject(
                state=test_input,
                encoding=dcdr.encoding,
                strict=dcdr.strict,
                scan_once=dcdr.scan_once,
                object_hook=dcdr.object_hook,
                object_pairs_hook=dcdr.object_pairs_hook
            ),
            test_output
        )

    def test_json_object_malformed(self):
        """JSONObject() raises on a malformed JSON object."""
        test_input = ('\"{"abc": 0, "def": 1, "ghi": 2]\"', 1)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONObject,
            state=test_input,
            encoding=dcdr.encoding,
            strict=dcdr.strict,
            scan_once=dcdr.scan_once,
            object_hook=dcdr.object_hook,
            object_pairs_hook=dcdr.object_pairs_hook
        )

    def test_json_object_empty(self):
        """JSONObject() raises on an empty string."""
        test_input = ("", 0)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONObject,
            state=test_input,
            encoding=dcdr.encoding,
            strict=dcdr.strict,
            scan_once=dcdr.scan_once,
            object_hook=dcdr.object_hook,
            object_pairs_hook=dcdr.object_pairs_hook
        )

    def test_json_array_correct(self):
        """JSONArray() decodes a well-formed JSON array."""
        test_input = ('["abc", "def", "ghi"]', 1)
        test_output = (['abc', 'def', 'ghi'], 21)
        dcdr = decoder.JSONDecoder()
        self.assertEqual(
            decoder.JSONArray(
                test_input,
                dcdr.scan_once
            ),
            test_output
        )

    def test_json_array_malformed(self):
        """JSONArray() raises on a malformed JSON array."""
        test_input = ('["abc", "def", "ghi"}', 1)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONArray,
            test_input,
            dcdr.scan_once
        )

    def test_json_array_empty(self):
        """JSONArray() raises on an empty string."""
        test_input = ('', 0)
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONArray,
            test_input,
            dcdr.scan_once
        )

    def test_json_decoder_create_utf(self):
        """JSONDecoder accepts the 'utf-8' encoding."""
        dcdr = decoder.JSONDecoder(encoding="utf-8")
        self.assertEqual(dcdr.encoding, "utf-8")

    def test_json_decoder_create_unicode(self):
        """JSONDecoder accepts the 'unicode' encoding."""
        dcdr = decoder.JSONDecoder(encoding="unicode")
        self.assertEqual(dcdr.encoding, "unicode")

    def test_json_decoder_create_invalid(self):
        """JSONDecoder rejects an unsupported encoding."""
        self.assertRaises(
            errors.JSONDecodeError,
            decoder.JSONDecoder,
            encoding="ISO-8859-1"
        )

    def test_json_decoder_decode_correct(self):
        """decode() parses a well-formed JSON document."""
        test_input = '{"id": "001", "name": "test-012", "items": ["a", "b", "c"]}'
        test_output = dict()
        test_output["id"] = "001"
        test_output["name"] = "test-012"
        test_output["items"] = ["a", "b", "c"]
        dcdr = decoder.JSONDecoder()
        self.assertEqual(dcdr.decode(test_input), test_output)

    def test_json_decoder_decode_malformed(self):
        """decode() raises on a malformed JSON document."""
        test_input = '{"id": "001", "name": "test-012", "items": ["a", "b", "c"]]]'
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.decode,
            test_input
        )

    def test_json_decoder_decoder_empty(self):
        """decode() raises on an empty string."""
        test_input = ''
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.decode,
            test_input
        )

    def test_raw_decoder_decode_correct(self):
        """raw_decode() parses a JSON value followed by trailing text."""
        dcdr = decoder.JSONDecoder()
        test_input = '["abc", "def", "ghi"] This is a test!'
        test_output = (['abc', 'def', 'ghi'], 21)
        self.assertEqual(dcdr.raw_decode(test_input), test_output)

    def test_raw_decoder_decode_malformed(self):
        """raw_decode() raises on a malformed embedded value."""
        dcdr = decoder.JSONDecoder()
        test_input = '["abc", "def", "ghi"} This is a test!'
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.raw_decode,
            test_input
        )

    def test_raw_decoder_decoder_empty(self):
        """raw_decode() raises on an empty string."""
        test_input = ''
        dcdr = decoder.JSONDecoder()
        self.assertRaises(
            errors.JSONDecodeError,
            dcdr.raw_decode,
            test_input
        )
| true | true |
f727f373f944a10554dfcea5975a2aeb12ec2e24 | 21,937 | py | Python | src/pyrobot/locobot/camera.py | kalyanvasudev/pyrobot | 839ab89a5b3cdd6af9b1e884fa8e8f0007497e32 | [
"MIT"
] | 1 | 2021-12-22T04:14:08.000Z | 2021-12-22T04:14:08.000Z | src/pyrobot/locobot/camera.py | kalyanvasudev/pyrobot | 839ab89a5b3cdd6af9b1e884fa8e8f0007497e32 | [
"MIT"
] | 16 | 2020-01-28T22:49:47.000Z | 2022-03-11T23:51:24.000Z | src/pyrobot/locobot/camera.py | kalyanvasudev/pyrobot | 839ab89a5b3cdd6af9b1e884fa8e8f0007497e32 | [
"MIT"
] | 1 | 2020-09-30T15:14:19.000Z | 2020-09-30T15:14:19.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import rospkg
import threading
import yaml
from copy import deepcopy
import message_filters
import numpy as np
import pyrobot.util as prutil
import rospy
from cv_bridge import CvBridge, CvBridgeError
from pyrobot.core import Camera
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from tf import TransformListener
def constrain_within_range(value, MIN, MAX):
    """Clamp ``value`` into [MIN, MAX] (equivalent to min(max(v, MIN), MAX))."""
    bounded_below = max(value, MIN)
    return min(bounded_below, MAX)
def is_within_range(value, MIN, MAX):
    """Return True iff ``value`` lies in the closed interval [MIN, MAX]."""
    return MIN <= value <= MAX
class SimpleCamera(Camera):
    """
    This is camera class that interfaces with the Realsense
    camera on the locobot and locobot-lite.
    This class does not have the pan and tilt actuation
    capabilities for the camera.
    """

    def __init__(self, configs):
        """
        Constructor of the SimpleCamera class.

        :param configs: Camera specific configuration object

        :type configs: YACS CfgNode
        """
        super(SimpleCamera, self).__init__(configs=configs)
        self.cv_bridge = CvBridge()
        # These locks guard image/intrinsics state that is written from
        # ROS subscriber callback threads and read from user threads.
        self.camera_info_lock = threading.RLock()
        self.camera_img_lock = threading.RLock()
        self._tf_listener = TransformListener()
        # Latest synchronized frames and intrinsics; None until the
        # first messages arrive.
        self.rgb_img = None
        self.depth_img = None
        self.camera_info = None
        self.camera_P = None
        rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,
                         CameraInfo,
                         self._camera_info_callback)
        rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM
        self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)
        depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM
        self.depth_sub = message_filters.Subscriber(depth_topic, Image)
        img_subs = [self.rgb_sub, self.depth_sub]
        # Pair RGB and depth frames whose timestamps differ by at most
        # `slop` seconds.
        self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,
                                                                queue_size=10,
                                                                slop=0.2)
        self.sync.registerCallback(self._sync_callback)
        depth_threshold = (self.configs.BASE.VSLAM.DEPTH_MIN,
                           self.configs.BASE.VSLAM.DEPTH_MAX)
        cfg_filename = self.configs.BASE.VSLAM.CFG_FILENAME
        self.depth_cam = DepthImgProcessor(subsample_pixs=1,
                                           depth_threshold=depth_threshold,
                                           cfg_filename=cfg_filename)
        self.cam_cf = self.configs.BASE.VSLAM.RGB_CAMERA_CENTER_FRAME
        self.base_f = self.configs.BASE.VSLAM.VSLAM_BASE_FRAME

    def _sync_callback(self, rgb, depth):
        """ROS callback: store the latest synchronized RGB/depth pair."""
        self.camera_img_lock.acquire()
        try:
            self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, "bgr8")
            # Flip channel order from BGR (OpenCV convention) to RGB.
            self.rgb_img = self.rgb_img[:, :, ::-1]
            self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, "passthrough")
        except CvBridgeError as e:
            rospy.logerr(e)
        self.camera_img_lock.release()

    def _camera_info_callback(self, msg):
        """ROS callback: cache the CameraInfo message and its 3x4 P matrix."""
        self.camera_info_lock.acquire()
        self.camera_info = msg
        self.camera_P = np.array(msg.P).reshape((3, 4))
        self.camera_info_lock.release()

    def get_rgb(self):
        """
        This function returns the RGB image perceived by the camera.

        :rtype: np.ndarray or None
        """
        self.camera_img_lock.acquire()
        rgb = deepcopy(self.rgb_img)
        self.camera_img_lock.release()
        return rgb

    def get_depth(self):
        """
        This function returns the depth image perceived by the camera.

        :rtype: np.ndarray or None
        """
        self.camera_img_lock.acquire()
        depth = deepcopy(self.depth_img)
        self.camera_img_lock.release()
        return depth

    def get_rgb_depth(self):
        """
        This function returns both the RGB and depth
        images perceived by the camera.

        :rtype: np.ndarray or None
        """
        # Both frames are copied under one lock acquisition so the pair
        # stays consistent with each other.
        self.camera_img_lock.acquire()
        rgb = deepcopy(self.rgb_img)
        depth = deepcopy(self.depth_img)
        self.camera_img_lock.release()
        return rgb, depth

    def get_intrinsics(self):
        """
        This function returns the camera intrinsics
        (the 3x3 upper-left block of the projection matrix P),
        or None if no CameraInfo message has been received yet.

        :rtype: np.ndarray
        """
        if self.camera_P is None:
            return self.camera_P
        self.camera_info_lock.acquire()
        P = deepcopy(self.camera_P)
        self.camera_info_lock.release()
        return P[:3, :3]

    def get_current_pcd(self, in_cam=True):
        """
        Return the point cloud at current time step (one frame only)

        :param in_cam: return points in camera frame,
                       otherwise, return points in base frame

        :type in_cam: bool
        :returns: tuple (pts, colors)

                  pts: point coordinates in world frame (shape: :math:`[N, 3]`)

                  colors: rgb values for pts_in_cam (shape: :math:`[N, 3]`)
        :rtype: tuple(np.ndarray, np.ndarray)
        """
        trans, rot, T = self.get_link_transform(self.cam_cf,
                                                self.base_f)
        base2cam_trans = np.array(trans).reshape(-1, 1)
        base2cam_rot = np.array(rot)
        rgb_im, depth_im = self.get_rgb_depth()
        pcd_in_cam, colors = self.depth_cam.get_pcd_ic(depth_im=depth_im,
                                                       rgb_im=rgb_im)
        pts = pcd_in_cam[:3, :].T
        if in_cam:
            return pts, colors
        # Transform homogeneous camera-frame points into the base frame.
        pts = np.dot(pts, base2cam_rot.T)
        pts = pts + base2cam_trans.T
        return pts, colors

    def pix_to_3dpt(self, rs, cs, in_cam=False):
        """
        Get the 3D points of the pixels in RGB images.

        :param rs: rows of interest in the RGB image.
                   It can be a list or 1D numpy array
                   which contains the row indices.
        :param cs: columns of interest in the RGB image.
                   It can be a list or 1D numpy array
                   which contains the column indices.
        :param in_cam: return points in camera frame,
                       otherwise, return points in base frame

        :type rs: list or np.ndarray
        :type cs: list or np.ndarray
        :type in_cam: bool

        :returns: tuple (pts, colors)

                  pts: point coordinates in world frame
                  (shape: :math:`[N, 3]`)

                  colors: rgb values for pts_in_cam
                  (shape: :math:`[N, 3]`)

        :rtype: tuple(np.ndarray, np.ndarray)
        """
        trans, rot, T = self.get_link_transform(self.cam_cf,
                                                self.base_f)
        base2cam_trans = np.array(trans).reshape(-1, 1)
        base2cam_rot = np.array(rot)
        rgb_im, depth_im = self.get_rgb_depth()
        pcd_in_cam = self.depth_cam.get_pix_3dpt(depth_im=depth_im,
                                                 rs=rs,
                                                 cs=cs)
        pts = pcd_in_cam[:3, :].T
        colors = rgb_im[rs, cs].reshape(-1, 3)
        if in_cam:
            return pts, colors
        # Transform camera-frame points into the base frame.
        pts = np.dot(pts, base2cam_rot.T)
        pts = pts + base2cam_trans.T
        return pts, colors

    def get_link_transform(self, src, tgt):
        """
        Returns the latest transformation from the
        target_frame to the source frame,
        i.e., the transform of source frame w.r.t
        target frame. If the returned
        transform is applied to data, it will transform
        data in the source_frame into
        the target_frame

        For more information, please refer to
        http://wiki.ros.org/tf/Overview/Using%20Published%20Transforms

        :param src: source frame
        :param tgt: target frame

        :type src: string
        :type tgt: string

        :returns: tuple(trans, rot, T)

                  trans: translational vector (shape: :math:`[3,]`)

                  rot: rotation matrix (shape: :math:`[3, 3]`)

                  T: transofrmation matrix (shape: :math:`[4, 4]`)
        :rtype: tuple(np.ndarray, np.ndarray, np.ndarray)
        """
        trans, quat = prutil.get_tf_transform(self._tf_listener,
                                              tgt,
                                              src)
        rot = prutil.quat_to_rot_mat(quat)
        # Pack translation and rotation into a 4x4 homogeneous matrix.
        T = np.eye(4)
        T[:3, :3] = rot
        T[:3, 3] = trans
        return trans, rot, T
class LoCoBotCamera(SimpleCamera):
    """
    This is camera class that interfaces with the Realsense
    camera and the pan and tilt joints on the robot.
    """

    def __init__(self, configs):
        """
        Constructor of the LoCoBotCamera class.

        :param configs: Object containing configurations for camera,
                        pan joint and tilt joint.

        :type configs: YACS CfgNode
        """
        use_camera = rospy.get_param('use_camera', False)
        use_sim = rospy.get_param('use_sim', False)
        use_camera = use_camera or use_sim
        if not use_camera:
            # Fixed the double negative in the original message
            # ('Neither ..., nor ..., is not set').
            rospy.logwarn('Neither use_camera nor use_sim is set'
                          ' to True when the LoCoBot driver is launched.'
                          ' You may not be able to command the camera'
                          ' correctly using PyRobot!!!')
            return
        super(LoCoBotCamera, self).__init__(configs=configs)

        rospy.Subscriber(self.configs.ARM.ROSTOPIC_JOINT_STATES,
                         JointState,
                         self._camera_pose_callback)

        self.set_pan_pub = rospy.Publisher(
            self.configs.CAMERA.ROSTOPIC_SET_PAN, Float64, queue_size=1)
        self.set_tilt_pub = rospy.Publisher(
            self.configs.CAMERA.ROSTOPIC_SET_TILT, Float64, queue_size=1)
        # Latest joint angles, updated by _camera_pose_callback;
        # None until the first joint-state message arrives.
        self.pan = None
        self.tilt = None
        # Tolerance (radians) used when waiting for a commanded motion.
        self.tol = 0.01

    def _camera_pose_callback(self, msg):
        """ROS callback: cache pan/tilt angles from the joint-state stream."""
        if 'head_pan_joint' in msg.name:
            pan_id = msg.name.index('head_pan_joint')
            self.pan = msg.position[pan_id]
        if 'head_tilt_joint' in msg.name:
            tilt_id = msg.name.index('head_tilt_joint')
            self.tilt = msg.position[tilt_id]

    def _clamp_angle(self, angle, min_angle, max_angle):
        """Wrap ``angle`` into (-pi, pi] and clamp it to the joint limits."""
        wrapped = np.mod(angle + np.pi, 2 * np.pi) - np.pi
        return constrain_within_range(wrapped, min_angle, max_angle)

    def _wait_until(self, reached, n_tries=30, dt=0.1):
        """Poll ``reached()`` every ``dt`` seconds, giving up after ``n_tries``."""
        for _ in range(n_tries):
            rospy.sleep(dt)
            if reached():
                break

    @property
    def state(self):
        """
        Return the current pan and tilt joint angles of the robot camera.

        :return:
                pan_tilt: A list the form [pan angle, tilt angle]
        :rtype: list
        """
        return self.get_state()

    def get_state(self):
        """
        Return the current pan and tilt joint angles of the robot camera.

        :return:
                pan_tilt: A list the form [pan angle, tilt angle]
        :rtype: list
        """
        return [self.pan, self.tilt]

    def get_pan(self):
        """
        Return the current pan joint angle of the robot camera.

        :return:
                pan: Pan joint angle
        :rtype: float
        """
        return self.pan

    def get_tilt(self):
        """
        Return the current tilt joint angle of the robot camera.

        :return:
                tilt: Tilt joint angle
        :rtype: float
        """
        return self.tilt

    def set_pan(self, pan, wait=True):
        """
        Sets the pan joint angle to the specified value.

        :param pan: value to be set for pan joint
        :param wait: wait until the pan angle is set to
                     the target angle.

        :type pan: float
        :type wait: bool
        """
        pan = self._clamp_angle(pan,
                                self.configs.CAMERA.MIN_PAN,
                                self.configs.CAMERA.MAX_PAN)
        self.set_pan_pub.publish(pan)
        if wait:
            self._wait_until(
                lambda: np.fabs(self.get_pan() - pan) < self.tol)

    def set_tilt(self, tilt, wait=True):
        """
        Sets the tilt joint angle to the specified value.

        :param tilt: value to be set for the tilt joint
        :param wait: wait until the tilt angle is set to
                     the target angle.

        :type tilt: float
        :type wait: bool
        """
        tilt = self._clamp_angle(tilt,
                                 self.configs.CAMERA.MIN_TILT,
                                 self.configs.CAMERA.MAX_TILT)
        self.set_tilt_pub.publish(tilt)
        if wait:
            self._wait_until(
                lambda: np.fabs(self.get_tilt() - tilt) < self.tol)

    def set_pan_tilt(self, pan, tilt, wait=True):
        """
        Sets both the pan and tilt joint angles to the specified values.

        :param pan: value to be set for pan joint
        :param tilt: value to be set for the tilt joint
        :param wait: wait until the pan and tilt angles are set to
                     the target angles.

        :type pan: float
        :type tilt: float
        :type wait: bool
        """
        pan = self._clamp_angle(pan,
                                self.configs.CAMERA.MIN_PAN,
                                self.configs.CAMERA.MAX_PAN)
        tilt = self._clamp_angle(tilt,
                                 self.configs.CAMERA.MIN_TILT,
                                 self.configs.CAMERA.MAX_TILT)
        self.set_pan_pub.publish(pan)
        self.set_tilt_pub.publish(tilt)
        if wait:
            self._wait_until(
                lambda: np.fabs(self.get_pan() - pan) < self.tol and
                        np.fabs(self.get_tilt() - tilt) < self.tol)

    def reset(self):
        """
        This function resets the pan and tilt joints by actuating
        them to their home configuration.
        """
        self.set_pan_tilt(self.configs.CAMERA.RESET_PAN,
                          self.configs.CAMERA.RESET_TILT)
class DepthImgProcessor:
    """
    This class transforms the depth image and rgb image to point cloud
    """

    def __init__(self, subsample_pixs=1, depth_threshold=(0, 1.5),
                 cfg_filename='realsense_d435.yaml'):
        """
        The constructor for :class:`DepthImgProcessor` class.

        :param subsample_pixs: sample rows and columns for the images
        :param depth_threshold: minimum and maximum of valid depth values
        :param cfg_filename: configuration file name for ORB-SLAM2

        :type subsample_pixs: int
        :type depth_threshold: tuple
        :type cfg_filename: string
        """
        assert (isinstance(depth_threshold, tuple) and
                0 < len(depth_threshold) < 3) or \
               (depth_threshold is None)
        self.subsample_pixs = subsample_pixs
        self.depth_threshold = depth_threshold
        self.cfg_data = self.read_cfg(cfg_filename)
        self.intrinsic_mat = self.get_intrinsic()
        self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
        # Precompute the homogeneous pixel coordinates and camera rays
        # for the (subsampled) image grid once, so per-frame point cloud
        # generation reduces to a multiplication by the depth values.
        img_pixs = np.mgrid[0: self.cfg_data['Camera.height']: subsample_pixs,
                            0: self.cfg_data['Camera.width']: subsample_pixs]
        img_pixs = img_pixs.reshape(2, -1)
        img_pixs[[0, 1], :] = img_pixs[[1, 0], :]  # (row, col) -> (u, v)
        self.uv_one = np.concatenate((img_pixs,
                                      np.ones((1, img_pixs.shape[1]))))
        self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)

    def get_pix_3dpt(self, depth_im, rs, cs):
        """
        Back-project selected pixels into 3D camera coordinates.

        :param depth_im: depth image (shape: :math:`[H, W]`)
        :param rs: rows of interest. It can be an int, a list or a
                   1D numpy array of row indices.
        :param cs: columns of interest. It can be an int, a list or a
                   1D numpy array of column indices.

        :type depth_im: np.ndarray
        :type rs: int, list or np.ndarray
        :type cs: int, list or np.ndarray

        :return: 3D point coordinates of the pixels in
                 camera frame (shape: :math:`[4, N]`, homogeneous)
        :rtype: np.ndarray
        """
        assert isinstance(rs, (int, list, np.ndarray))
        assert isinstance(cs, (int, list, np.ndarray))
        if isinstance(rs, int):
            rs = [rs]
        if isinstance(cs, int):
            cs = [cs]
        if isinstance(rs, np.ndarray):
            rs = rs.flatten()
        if isinstance(cs, np.ndarray):
            cs = cs.flatten()
        depth_im = depth_im[rs, cs]
        # Convert raw depth units to meters.
        depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])
        img_pixs = np.stack((rs, cs)).reshape(2, -1)
        img_pixs[[0, 1], :] = img_pixs[[1, 0], :]  # (row, col) -> (u, v)
        uv_one = np.concatenate((img_pixs,
                                 np.ones((1, img_pixs.shape[1]))))
        uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)
        pts_in_cam = np.multiply(uv_one_in_cam, depth)
        pts_in_cam = np.concatenate((pts_in_cam,
                                     np.ones((1, pts_in_cam.shape[1]))),
                                    axis=0)
        return pts_in_cam

    def get_pcd_ic(self, depth_im, rgb_im=None):
        """
        Returns the point cloud (filtered by minimum
        and maximum depth threshold)
        in camera's coordinate frame

        :param depth_im: depth image (shape: :math:`[H, W]`)
        :param rgb_im: rgb image (shape: :math:`[H, W, 3]`), optional

        :type depth_im: np.ndarray
        :type rgb_im: np.ndarray

        :returns: tuple (pts_in_cam, rgb)

                  pts_in_cam: point coordinates in
                  camera frame (shape: :math:`[4, N]`)

                  rgb: rgb values for pts_in_cam (shape: :math:`[N, 3]`),
                  or None when no rgb image was given
        :rtype: tuple(np.ndarray, np.ndarray)
        """
        # pcd in camera from depth
        depth_im = depth_im[0::self.subsample_pixs, 0::self.subsample_pixs]
        # Bug fix: the original unconditionally subsampled/reshaped
        # rgb_im, which raised a TypeError whenever the documented
        # default rgb_im=None was used.
        rgb = None
        if rgb_im is not None:
            rgb_im = rgb_im[0::self.subsample_pixs, 0::self.subsample_pixs]
            rgb = rgb_im.reshape(-1, 3)
        depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])
        if self.depth_threshold is not None:
            valid = depth > self.depth_threshold[0]
            if len(self.depth_threshold) > 1:
                valid = np.logical_and(valid,
                                       depth < self.depth_threshold[1])
            uv_one_in_cam = self.uv_one_in_cam[:, valid]
            depth = depth[valid]
            if rgb is not None:
                rgb = rgb[valid]
        else:
            uv_one_in_cam = self.uv_one_in_cam
        pts_in_cam = np.multiply(uv_one_in_cam, depth)
        pts_in_cam = np.concatenate((pts_in_cam,
                                     np.ones((1, pts_in_cam.shape[1]))),
                                    axis=0)
        return pts_in_cam, rgb

    def get_pcd_iw(self, pts_in_cam, extrinsic_mat):
        """
        Returns the point cloud in the world coordinate frame

        :param pts_in_cam: point coordinates in
               camera frame (shape: :math:`[4, N]`)
        :param extrinsic_mat: extrinsic matrix for
               the camera (shape: :math:`[4, 4]`)

        :type pts_in_cam: np.ndarray
        :type extrinsic_mat: np.ndarray

        :return: point coordinates in
                 ORB-SLAM2's world frame (shape: :math:`[N, 3]`)
        :rtype: np.ndarray
        """
        # pcd in world
        pts_in_world = np.dot(extrinsic_mat, pts_in_cam)
        pts_in_world = pts_in_world[:3, :].T
        return pts_in_world

    def read_cfg(self, cfg_filename):
        """
        Reads the ORB-SLAM2 configuration file.

        :param cfg_filename: configuration file name for ORB-SLAM2

        :type cfg_filename: string
        :return: configurations in the configuration file
        :rtype: dict
        """
        rospack = rospkg.RosPack()
        slam_pkg_path = rospack.get_path('orb_slam2_ros')
        cfg_path = os.path.join(slam_pkg_path,
                                'cfg',
                                cfg_filename)
        with open(cfg_path, 'r') as f:
            # The first line is the '%YAML:1.0' directive, which PyYAML
            # cannot parse, so skip it before loading.
            f.readline()
            # safe_load replaces the deprecated yaml.load(f) form, which
            # permits arbitrary Python object construction.
            cfg_data = yaml.safe_load(f)
        return cfg_data

    def get_intrinsic(self):
        """
        Returns the instrinsic matrix of the camera

        :return: the intrinsic matrix (shape: :math:`[3, 3]`)
        :rtype: np.ndarray
        """
        fx = self.cfg_data['Camera.fx']
        fy = self.cfg_data['Camera.fy']
        cx = self.cfg_data['Camera.cx']
        cy = self.cfg_data['Camera.cy']
        Itc = np.array([[fx, 0, cx],
                        [0, fy, cy],
                        [0, 0, 1]])
        return Itc
| 35.962295 | 79 | 0.548161 |
import os
import rospkg
import threading
import yaml
from copy import deepcopy
import message_filters
import numpy as np
import pyrobot.util as prutil
import rospy
from cv_bridge import CvBridge, CvBridgeError
from pyrobot.core import Camera
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64
from tf import TransformListener
def constrain_within_range(value, MIN, MAX):
    """Clamp ``value`` so it never falls below MIN nor rises above MAX."""
    at_least_min = max(value, MIN)
    at_most_max = min(at_least_min, MAX)
    return at_most_max
def is_within_range(value, MIN, MAX):
    """Return True iff ``value`` is inside the closed interval [MIN, MAX]."""
    return MIN <= value <= MAX
class SimpleCamera(Camera):
    """ROS camera wrapper that buffers approximately time-synchronized
    RGB-D frames and converts them to 3D point clouds.

    Subscribes to the RGB, depth and camera-info topics named in
    *configs* and keeps only the most recent synchronized pair; all
    buffers are guarded by locks so the getters are safe to call from
    any thread while ROS callbacks run.
    """

    def __init__(self, configs):
        """Set up subscribers, image buffers and the depth processor.

        :param configs: pyrobot configuration tree; camera topics are
            read from ``configs.CAMERA`` and VSLAM settings from
            ``configs.BASE.VSLAM``.
        """
        super(SimpleCamera, self).__init__(configs=configs)
        self.cv_bridge = CvBridge()
        # Separate locks: camera info and image pairs arrive via
        # independent ROS callbacks.
        self.camera_info_lock = threading.RLock()
        self.camera_img_lock = threading.RLock()
        self._tf_listener = TransformListener()
        self.rgb_img = None        # latest RGB frame (numpy, RGB order)
        self.depth_img = None      # latest depth frame (raw encoding)
        self.camera_info = None    # latest CameraInfo message
        self.camera_P = None       # 3x4 projection matrix from CameraInfo
        rospy.Subscriber(self.configs.CAMERA.ROSTOPIC_CAMERA_INFO_STREAM,
                         CameraInfo,
                         self._camera_info_callback)
        rgb_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_RGB_STREAM
        self.rgb_sub = message_filters.Subscriber(rgb_topic, Image)
        depth_topic = self.configs.CAMERA.ROSTOPIC_CAMERA_DEPTH_STREAM
        self.depth_sub = message_filters.Subscriber(depth_topic, Image)
        img_subs = [self.rgb_sub, self.depth_sub]
        # Pair RGB and depth messages whose stamps differ by <= 0.2 s.
        self.sync = message_filters.ApproximateTimeSynchronizer(img_subs,
                                                                queue_size=10,
                                                                slop=0.2)
        self.sync.registerCallback(self._sync_callback)
        depth_threshold = (self.configs.BASE.VSLAM.DEPTH_MIN,
                           self.configs.BASE.VSLAM.DEPTH_MAX)
        cfg_filename = self.configs.BASE.VSLAM.CFG_FILENAME
        self.depth_cam = DepthImgProcessor(subsample_pixs=1,
                                           depth_threshold=depth_threshold,
                                           cfg_filename=cfg_filename)
        # TF frame names used when transforming points out of the camera.
        self.cam_cf = self.configs.BASE.VSLAM.RGB_CAMERA_CENTER_FRAME
        self.base_f = self.configs.BASE.VSLAM.VSLAM_BASE_FRAME

    def _sync_callback(self, rgb, depth):
        """Store the latest synchronized RGB/depth pair (ROS callback)."""
        self.camera_img_lock.acquire()
        try:
            self.rgb_img = self.cv_bridge.imgmsg_to_cv2(rgb, "bgr8")
            # cv_bridge delivers BGR; flip the channel axis to get RGB.
            self.rgb_img = self.rgb_img[:, :, ::-1]
            self.depth_img = self.cv_bridge.imgmsg_to_cv2(depth, "passthrough")
        except CvBridgeError as e:
            rospy.logerr(e)
        self.camera_img_lock.release()

    def _camera_info_callback(self, msg):
        """Cache the CameraInfo message and its projection matrix."""
        self.camera_info_lock.acquire()
        self.camera_info = msg
        self.camera_P = np.array(msg.P).reshape((3, 4))
        self.camera_info_lock.release()

    def get_rgb(self):
        """Return a copy of the most recent RGB frame (None if none yet)."""
        self.camera_img_lock.acquire()
        rgb = deepcopy(self.rgb_img)
        self.camera_img_lock.release()
        return rgb

    def get_depth(self):
        """Return a copy of the most recent depth frame (None if none yet)."""
        self.camera_img_lock.acquire()
        depth = deepcopy(self.depth_img)
        self.camera_img_lock.release()
        return depth

    def get_rgb_depth(self):
        """Return copies of the latest RGB and depth frames as a pair."""
        self.camera_img_lock.acquire()
        rgb = deepcopy(self.rgb_img)
        depth = deepcopy(self.depth_img)
        self.camera_img_lock.release()
        return rgb, depth

    def get_intrinsics(self):
        """Return the 3x3 intrinsic matrix, or None before any CameraInfo."""
        if self.camera_P is None:
            # No CameraInfo received yet; nothing meaningful to return.
            return self.camera_P
        self.camera_info_lock.acquire()
        P = deepcopy(self.camera_P)
        self.camera_info_lock.release()
        return P[:3, :3]

    def get_current_pcd(self, in_cam=True):
        """Return the current point cloud and per-point colors.

        :param in_cam: if True, points are in the camera frame;
            otherwise they are transformed into the VSLAM base frame.
        :returns: (pts, colors) where pts is N x 3.
        """
        trans, rot, T = self.get_link_transform(self.cam_cf,
                                                self.base_f)
        base2cam_trans = np.array(trans).reshape(-1, 1)
        base2cam_rot = np.array(rot)
        rgb_im, depth_im = self.get_rgb_depth()
        pcd_in_cam, colors = self.depth_cam.get_pcd_ic(depth_im=depth_im,
                                                       rgb_im=rgb_im)
        pts = pcd_in_cam[:3, :].T
        if in_cam:
            return pts, colors
        # Rotate then translate the points into the base frame.
        pts = np.dot(pts, base2cam_rot.T)
        pts = pts + base2cam_trans.T
        return pts, colors

    def pix_to_3dpt(self, rs, cs, in_cam=False):
        """Back-project the pixels at rows *rs* / columns *cs* to 3D.

        :param rs: row index/indices into the depth image.
        :param cs: column index/indices into the depth image.
        :param in_cam: if True, return points in the camera frame,
            otherwise in the VSLAM base frame.
        :returns: (pts, colors) with one row per requested pixel.
        """
        trans, rot, T = self.get_link_transform(self.cam_cf,
                                                self.base_f)
        base2cam_trans = np.array(trans).reshape(-1, 1)
        base2cam_rot = np.array(rot)
        rgb_im, depth_im = self.get_rgb_depth()
        pcd_in_cam = self.depth_cam.get_pix_3dpt(depth_im=depth_im,
                                                 rs=rs,
                                                 cs=cs)
        pts = pcd_in_cam[:3, :].T
        colors = rgb_im[rs, cs].reshape(-1, 3)
        if in_cam:
            return pts, colors
        pts = np.dot(pts, base2cam_rot.T)
        pts = pts + base2cam_trans.T
        return pts, colors

    def get_link_transform(self, src, tgt):
        """Look up the TF transform from frame *src* to frame *tgt*.

        :returns: (trans, rot, T) — translation vector, 3x3 rotation
            matrix and the corresponding 4x4 homogeneous transform.
        """
        trans, quat = prutil.get_tf_transform(self._tf_listener,
                                              tgt,
                                              src)
        rot = prutil.quat_to_rot_mat(quat)
        T = np.eye(4)
        T[:3, :3] = rot
        T[:3, 3] = trans
        return trans, rot, T
class LoCoBotCamera(SimpleCamera):
    """SimpleCamera extension that also commands the LoCoBot pan/tilt
    head via ROS topics and tracks its current joint positions."""

    def __init__(self, configs):
        """Initialize the camera and the pan/tilt command publishers.

        If neither the ``use_camera`` nor ``use_sim`` ROS parameter is
        set, initialization is skipped entirely (only a warning is
        logged) and the instance is left unusable.
        """
        use_camera = rospy.get_param('use_camera', False)
        use_sim = rospy.get_param('use_sim', False)
        use_camera = use_camera or use_sim
        if not use_camera:
            rospy.logwarn('Neither use_camera, nor use_sim, is not set'
                          ' to True when the LoCoBot driver is launched.'
                          'You may not be able to command the camera'
                          ' correctly using PyRobot!!!')
            # Bail out: no subscribers/publishers are created.
            return
        super(LoCoBotCamera, self).__init__(configs=configs)

        rospy.Subscriber(self.configs.ARM.ROSTOPIC_JOINT_STATES,
                         JointState,
                         self._camera_pose_callback)

        self.set_pan_pub = rospy.Publisher(
            self.configs.CAMERA.ROSTOPIC_SET_PAN, Float64, queue_size=1)
        self.set_tilt_pub = rospy.Publisher(
            self.configs.CAMERA.ROSTOPIC_SET_TILT, Float64, queue_size=1)
        self.pan = None   # latest pan joint angle (rad), from JointState
        self.tilt = None  # latest tilt joint angle (rad), from JointState
        self.tol = 0.01   # angular tolerance (rad) for "motion finished"

    def _camera_pose_callback(self, msg):
        """Update cached pan/tilt angles from a JointState message."""
        if 'head_pan_joint' in msg.name:
            pan_id = msg.name.index('head_pan_joint')
            self.pan = msg.position[pan_id]
        if 'head_tilt_joint' in msg.name:
            tilt_id = msg.name.index('head_tilt_joint')
            self.tilt = msg.position[tilt_id]

    @property
    def state(self):
        """Current [pan, tilt] state (same as :meth:`get_state`)."""
        return self.get_state()

    def get_state(self):
        """Return the current [pan, tilt] joint angles."""
        return [self.pan, self.tilt]

    def get_pan(self):
        """Return the current pan angle (rad), or None if unknown."""
        return self.pan

    def get_tilt(self):
        """Return the current tilt angle (rad), or None if unknown."""
        return self.tilt

    def set_pan(self, pan, wait=True):
        """Command the pan joint to *pan* radians.

        The angle is wrapped to (-pi, pi] and clamped to the configured
        pan limits before publishing.  When *wait* is True, poll for up
        to 3 s until the joint is within tolerance of the target.
        """
        pan = constrain_within_range(np.mod(pan + np.pi,
                                            2 * np.pi) - np.pi,
                                     self.configs.CAMERA.MIN_PAN,
                                     self.configs.CAMERA.MAX_PAN)
        self.set_pan_pub.publish(pan)
        if wait:
            for i in range(30):
                rospy.sleep(0.1)
                if np.fabs(self.get_pan() - pan) < self.tol:
                    break

    def set_tilt(self, tilt, wait=True):
        """Command the tilt joint to *tilt* radians.

        Same wrapping/clamping and optional waiting as :meth:`set_pan`.
        """
        tilt = constrain_within_range(np.mod(tilt + np.pi,
                                             2 * np.pi) - np.pi,
                                      self.configs.CAMERA.MIN_TILT,
                                      self.configs.CAMERA.MAX_TILT)
        self.set_tilt_pub.publish(tilt)
        if wait:
            for i in range(30):
                rospy.sleep(0.1)
                if np.fabs(self.get_tilt() - tilt) < self.tol:
                    break

    def set_pan_tilt(self, pan, tilt, wait=True):
        """Command pan and tilt together; optionally wait for both."""
        pan = constrain_within_range(np.mod(pan + np.pi,
                                            2 * np.pi) - np.pi,
                                     self.configs.CAMERA.MIN_PAN,
                                     self.configs.CAMERA.MAX_PAN)
        tilt = constrain_within_range(np.mod(tilt + np.pi,
                                             2 * np.pi) - np.pi,
                                      self.configs.CAMERA.MIN_TILT,
                                      self.configs.CAMERA.MAX_TILT)
        self.set_pan_pub.publish(pan)
        self.set_tilt_pub.publish(tilt)
        if wait:
            for i in range(30):
                rospy.sleep(0.1)
                if np.fabs(self.get_pan() - pan) < self.tol and \
                        np.fabs(self.get_tilt() - tilt) < self.tol:
                    break

    def reset(self):
        """Move the head back to its configured reset pan/tilt pose."""
        self.set_pan_tilt(self.configs.CAMERA.RESET_PAN,
                          self.configs.CAMERA.RESET_TILT)
class DepthImgProcessor:
    """Converts depth (and optionally RGB) images into 3D point clouds.

    Camera intrinsics are read from an ORB-SLAM2-style YAML config
    file; per-pixel camera rays are precomputed once in ``__init__`` so
    each frame only needs a multiplication by the depth values.
    """

    def __init__(self, subsample_pixs=1, depth_threshold=(0, 1.5),
                 cfg_filename='realsense_d435.yaml'):
        """
        :param subsample_pixs: keep every n-th pixel in each direction.
        :param depth_threshold: ``(min,)`` or ``(min, max)`` depth range
            used to filter points, or None to disable filtering.
        :param cfg_filename: YAML file under ``orb_slam2_ros/cfg``
            providing the intrinsics and ``DepthMapFactor``.
        """
        assert (type(depth_threshold) is tuple and
                0 < len(depth_threshold) < 3) or \
            (depth_threshold is None)
        self.subsample_pixs = subsample_pixs
        self.depth_threshold = depth_threshold
        self.cfg_data = self.read_cfg(cfg_filename)
        self.intrinsic_mat = self.get_intrinsic()
        self.intrinsic_mat_inv = np.linalg.inv(self.intrinsic_mat)
        # Precompute homogeneous pixel coordinates (u, v, 1) for the
        # subsampled grid, then the unit-depth rays in the camera frame.
        img_pixs = np.mgrid[0: self.cfg_data['Camera.height']: subsample_pixs,
                            0: self.cfg_data['Camera.width']: subsample_pixs]
        img_pixs = img_pixs.reshape(2, -1)
        img_pixs[[0, 1], :] = img_pixs[[1, 0], :]  # (row, col) -> (u, v)
        self.uv_one = np.concatenate((img_pixs,
                                      np.ones((1, img_pixs.shape[1]))))
        self.uv_one_in_cam = np.dot(self.intrinsic_mat_inv, self.uv_one)

    def get_pix_3dpt(self, depth_im, rs, cs):
        """Back-project the pixels at (rs, cs) of *depth_im*.

        :param depth_im: full-resolution depth image.
        :param rs: row index/indices (int, list, or np.ndarray).
        :param cs: column index/indices (int, list, or np.ndarray).
        :returns: 4 x N array of homogeneous points in the camera frame.
        """
        assert isinstance(rs, (int, list, np.ndarray))
        assert isinstance(cs, (int, list, np.ndarray))
        if isinstance(rs, int):
            rs = [rs]
        if isinstance(cs, int):
            cs = [cs]
        if isinstance(rs, np.ndarray):
            rs = rs.flatten()
        if isinstance(cs, np.ndarray):
            cs = cs.flatten()
        depth_im = depth_im[rs, cs]
        depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])
        img_pixs = np.stack((rs, cs)).reshape(2, -1)
        img_pixs[[0, 1], :] = img_pixs[[1, 0], :]  # (row, col) -> (u, v)
        uv_one = np.concatenate((img_pixs,
                                 np.ones((1, img_pixs.shape[1]))))
        uv_one_in_cam = np.dot(self.intrinsic_mat_inv, uv_one)
        pts_in_cam = np.multiply(uv_one_in_cam, depth)
        pts_in_cam = np.concatenate((pts_in_cam,
                                     np.ones((1, pts_in_cam.shape[1]))),
                                    axis=0)
        return pts_in_cam

    def get_pcd_ic(self, depth_im, rgb_im=None):
        """Point cloud in the camera frame, with matching colors.

        :param depth_im: depth image (subsampled internally).
        :param rgb_im: optional RGB image aligned with *depth_im*.
        :returns: (pts_in_cam, rgb) — 4 x N homogeneous points and an
            N x 3 color array, or ``rgb=None`` when no RGB was given.
        """
        depth_im = depth_im[0::self.subsample_pixs, 0::self.subsample_pixs]
        depth = depth_im.reshape(-1) / float(self.cfg_data['DepthMapFactor'])
        rgb = None
        if rgb_im is not None:
            # Bug fix: only subsample/reshape the RGB image when it is
            # provided; the original indexed a possibly-None array and
            # raised TypeError for rgb_im=None.
            rgb_im = rgb_im[0::self.subsample_pixs, 0::self.subsample_pixs]
            rgb = rgb_im.reshape(-1, 3)
        if self.depth_threshold is not None:
            valid = depth > self.depth_threshold[0]
            if len(self.depth_threshold) > 1:
                valid = np.logical_and(valid,
                                       depth < self.depth_threshold[1])
            uv_one_in_cam = self.uv_one_in_cam[:, valid]
            depth = depth[valid]
            if rgb is not None:
                # Bug fix: the original did rgb[valid] even when rgb was
                # None (no RGB image supplied), raising TypeError.
                rgb = rgb[valid]
        else:
            uv_one_in_cam = self.uv_one_in_cam
        pts_in_cam = np.multiply(uv_one_in_cam, depth)
        pts_in_cam = np.concatenate((pts_in_cam,
                                     np.ones((1, pts_in_cam.shape[1]))),
                                    axis=0)
        return pts_in_cam, rgb

    def get_pcd_iw(self, pts_in_cam, extrinsic_mat):
        """Transform homogeneous camera-frame points into the world frame.

        :param pts_in_cam: 4 x N homogeneous points.
        :param extrinsic_mat: 4 x 4 camera-to-world transform.
        :returns: N x 3 points in the world frame.
        """
        pts_in_world = np.dot(extrinsic_mat, pts_in_cam)
        pts_in_world = pts_in_world[:3, :].T
        return pts_in_world

    def read_cfg(self, cfg_filename):
        """Load the ORB-SLAM2 YAML config shipped with orb_slam2_ros.

        The first line of the file (the ``%YAML`` directive, which
        PyYAML cannot parse) is skipped before loading.
        """
        rospack = rospkg.RosPack()
        slam_pkg_path = rospack.get_path('orb_slam2_ros')
        cfg_path = os.path.join(slam_pkg_path,
                                'cfg',
                                cfg_filename)
        with open(cfg_path, 'r') as f:
            for i in range(1):
                f.readline()
            # NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary Python objects; the cfg file ships
            # with the ROS package so this is trusted input, but
            # consider yaml.safe_load if the cfg format allows it.
            cfg_data = yaml.load(f)
        return cfg_data

    def get_intrinsic(self):
        """Build the 3x3 intrinsic matrix from the loaded config."""
        fx = self.cfg_data['Camera.fx']
        fy = self.cfg_data['Camera.fy']
        cx = self.cfg_data['Camera.cx']
        cy = self.cfg_data['Camera.cy']
        Itc = np.array([[fx, 0, cx],
                        [0, fy, cy],
                        [0, 0, 1]])
        return Itc
| true | true |
f727f3abb30dc023a71e5a4bb0a7f4af4c4e0c66 | 8,529 | py | Python | pyglet/input/evdev_constants.py | qbektrix/pyglet | ab3f73dfd37abf75041e86310416b18138c34c33 | [
"BSD-3-Clause"
] | 2 | 2015-06-02T19:14:37.000Z | 2017-09-17T03:49:19.000Z | pyglet/input/evdev_constants.py | qbektrix/pyglet | ab3f73dfd37abf75041e86310416b18138c34c33 | [
"BSD-3-Clause"
] | 1 | 2018-08-27T22:31:16.000Z | 2018-08-27T22:31:16.000Z | pyglet/input/evdev_constants.py | qbektrix/pyglet | ab3f73dfd37abf75041e86310416b18138c34c33 | [
"BSD-3-Clause"
] | 2 | 2016-07-28T18:45:57.000Z | 2020-12-05T06:13:00.000Z | #!/usr/bin/env python
'''Event constants from /usr/include/linux/input.h
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
# Synchronization events.
SYN_REPORT = 0
SYN_CONFIG = 1
# Keys and buttons
KEY_RESERVED = 0
KEY_ESC = 1
KEY_1 = 2
KEY_2 = 3
KEY_3 = 4
KEY_4 = 5
KEY_5 = 6
KEY_6 = 7
KEY_7 = 8
KEY_8 = 9
KEY_9 = 10
KEY_0 = 11
KEY_MINUS = 12
KEY_EQUAL = 13
KEY_BACKSPACE = 14
KEY_TAB = 15
KEY_Q = 16
KEY_W = 17
KEY_E = 18
KEY_R = 19
KEY_T = 20
KEY_Y = 21
KEY_U = 22
KEY_I = 23
KEY_O = 24
KEY_P = 25
KEY_LEFTBRACE = 26
KEY_RIGHTBRACE = 27
KEY_ENTER = 28
KEY_LEFTCTRL = 29
KEY_A = 30
KEY_S = 31
KEY_D = 32
KEY_F = 33
KEY_G = 34
KEY_H = 35
KEY_J = 36
KEY_K = 37
KEY_L = 38
KEY_SEMICOLON = 39
KEY_APOSTROPHE = 40
KEY_GRAVE = 41
KEY_LEFTSHIFT = 42
KEY_BACKSLASH = 43
KEY_Z = 44
KEY_X = 45
KEY_C = 46
KEY_V = 47
KEY_B = 48
KEY_N = 49
KEY_M = 50
KEY_COMMA = 51
KEY_DOT = 52
KEY_SLASH = 53
KEY_RIGHTSHIFT = 54
KEY_KPASTERISK = 55
KEY_LEFTALT = 56
KEY_SPACE = 57
KEY_CAPSLOCK = 58
KEY_F1 = 59
KEY_F2 = 60
KEY_F3 = 61
KEY_F4 = 62
KEY_F5 = 63
KEY_F6 = 64
KEY_F7 = 65
KEY_F8 = 66
KEY_F9 = 67
KEY_F10 = 68
KEY_NUMLOCK = 69
KEY_SCROLLLOCK = 70
KEY_KP7 = 71
KEY_KP8 = 72
KEY_KP9 = 73
KEY_KPMINUS = 74
KEY_KP4 = 75
KEY_KP5 = 76
KEY_KP6 = 77
KEY_KPPLUS = 78
KEY_KP1 = 79
KEY_KP2 = 80
KEY_KP3 = 81
KEY_KP0 = 82
KEY_KPDOT = 83
KEY_ZENKAKUHANKAKU = 85
KEY_102ND = 86
KEY_F11 = 87
KEY_F12 = 88
KEY_RO = 89
KEY_KATAKANA = 90
KEY_HIRAGANA = 91
KEY_HENKAN = 92
KEY_KATAKANAHIRAGANA = 93
KEY_MUHENKAN = 94
KEY_KPJPCOMMA = 95
KEY_KPENTER = 96
KEY_RIGHTCTRL = 97
KEY_KPSLASH = 98
KEY_SYSRQ = 99
KEY_RIGHTALT = 100
KEY_LINEFEED = 101
KEY_HOME = 102
KEY_UP = 103
KEY_PAGEUP = 104
KEY_LEFT = 105
KEY_RIGHT = 106
KEY_END = 107
KEY_DOWN = 108
KEY_PAGEDOWN = 109
KEY_INSERT = 110
KEY_DELETE = 111
KEY_MACRO = 112
KEY_MUTE = 113
KEY_VOLUMEDOWN = 114
KEY_VOLUMEUP = 115
KEY_POWER = 116
KEY_KPEQUAL = 117
KEY_KPPLUSMINUS = 118
KEY_PAUSE = 119
KEY_KPCOMMA = 121
KEY_HANGUEL = 122
KEY_HANJA = 123
KEY_YEN = 124
KEY_LEFTMETA = 125
KEY_RIGHTMETA = 126
KEY_COMPOSE = 127
KEY_STOP = 128
KEY_AGAIN = 129
KEY_PROPS = 130
KEY_UNDO = 131
KEY_FRONT = 132
KEY_COPY = 133
KEY_OPEN = 134
KEY_PASTE = 135
KEY_FIND = 136
KEY_CUT = 137
KEY_HELP = 138
KEY_MENU = 139
KEY_CALC = 140
KEY_SETUP = 141
KEY_SLEEP = 142
KEY_WAKEUP = 143
KEY_FILE = 144
KEY_SENDFILE = 145
KEY_DELETEFILE = 146
KEY_XFER = 147
KEY_PROG1 = 148
KEY_PROG2 = 149
KEY_WWW = 150
KEY_MSDOS = 151
KEY_COFFEE = 152
KEY_DIRECTION = 153
KEY_CYCLEWINDOWS = 154
KEY_MAIL = 155
KEY_BOOKMARKS = 156
KEY_COMPUTER = 157
KEY_BACK = 158
KEY_FORWARD = 159
KEY_CLOSECD = 160
KEY_EJECTCD = 161
KEY_EJECTCLOSECD = 162
KEY_NEXTSONG = 163
KEY_PLAYPAUSE = 164
KEY_PREVIOUSSONG = 165
KEY_STOPCD = 166
KEY_RECORD = 167
KEY_REWIND = 168
KEY_PHONE = 169
KEY_ISO = 170
KEY_CONFIG = 171
KEY_HOMEPAGE = 172
KEY_REFRESH = 173
KEY_EXIT = 174
KEY_MOVE = 175
KEY_EDIT = 176
KEY_SCROLLUP = 177
KEY_SCROLLDOWN = 178
KEY_KPLEFTPAREN = 179
KEY_KPRIGHTPAREN = 180
KEY_F13 = 183
KEY_F14 = 184
KEY_F15 = 185
KEY_F16 = 186
KEY_F17 = 187
KEY_F18 = 188
KEY_F19 = 189
KEY_F20 = 190
KEY_F21 = 191
KEY_F22 = 192
KEY_F23 = 193
KEY_F24 = 194
KEY_PLAYCD = 200
KEY_PAUSECD = 201
KEY_PROG3 = 202
KEY_PROG4 = 203
KEY_SUSPEND = 205
KEY_CLOSE = 206
KEY_PLAY = 207
KEY_FASTFORWARD = 208
KEY_BASSBOOST = 209
KEY_PRINT = 210
KEY_HP = 211
KEY_CAMERA = 212
KEY_SOUND = 213
KEY_QUESTION = 214
KEY_EMAIL = 215
KEY_CHAT = 216
KEY_SEARCH = 217
KEY_CONNECT = 218
KEY_FINANCE = 219
KEY_SPORT = 220
KEY_SHOP = 221
KEY_ALTERASE = 222
KEY_CANCEL = 223
KEY_BRIGHTNESSDOWN = 224
KEY_BRIGHTNESSUP = 225
KEY_MEDIA = 226
KEY_UNKNOWN = 240
BTN_MISC = 0x100
BTN_0 = 0x100
BTN_1 = 0x101
BTN_2 = 0x102
BTN_3 = 0x103
BTN_4 = 0x104
BTN_5 = 0x105
BTN_6 = 0x106
BTN_7 = 0x107
BTN_8 = 0x108
BTN_9 = 0x109
BTN_MOUSE = 0x110
BTN_LEFT = 0x110
BTN_RIGHT = 0x111
BTN_MIDDLE = 0x112
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
BTN_FORWARD = 0x115
BTN_BACK = 0x116
BTN_TASK = 0x117
BTN_JOYSTICK = 0x120
BTN_TRIGGER = 0x120
BTN_THUMB = 0x121
BTN_THUMB2 = 0x122
BTN_TOP = 0x123
BTN_TOP2 = 0x124
BTN_PINKIE = 0x125
BTN_BASE = 0x126
BTN_BASE2 = 0x127
BTN_BASE3 = 0x128
BTN_BASE4 = 0x129
BTN_BASE5 = 0x12a
BTN_BASE6 = 0x12b
BTN_DEAD = 0x12f
BTN_GAMEPAD = 0x130
BTN_A = 0x130
BTN_B = 0x131
BTN_C = 0x132
BTN_X = 0x133
BTN_Y = 0x134
BTN_Z = 0x135
BTN_TL = 0x136
BTN_TR = 0x137
BTN_TL2 = 0x138
BTN_TR2 = 0x139
BTN_SELECT = 0x13a
BTN_START = 0x13b
BTN_MODE = 0x13c
BTN_THUMBL = 0x13d
BTN_THUMBR = 0x13e
BTN_DIGI = 0x140
BTN_TOOL_PEN = 0x140
BTN_TOOL_RUBBER = 0x141
BTN_TOOL_BRUSH = 0x142
BTN_TOOL_PENCIL = 0x143
BTN_TOOL_AIRBRUSH = 0x144
BTN_TOOL_FINGER = 0x145
BTN_TOOL_MOUSE = 0x146
BTN_TOOL_LENS = 0x147
BTN_TOUCH = 0x14a
BTN_STYLUS = 0x14b
BTN_STYLUS2 = 0x14c
BTN_TOOL_DOUBLETAP = 0x14d
BTN_TOOL_TRIPLETAP = 0x14e
BTN_WHEEL = 0x150
BTN_GEAR_DOWN = 0x150
BTN_GEAR_UP = 0x151
KEY_OK = 0x160
KEY_SELECT = 0x161
KEY_GOTO = 0x162
KEY_CLEAR = 0x163
KEY_POWER2 = 0x164
KEY_OPTION = 0x165
KEY_INFO = 0x166
KEY_TIME = 0x167
KEY_VENDOR = 0x168
KEY_ARCHIVE = 0x169
KEY_PROGRAM = 0x16a
KEY_CHANNEL = 0x16b
KEY_FAVORITES = 0x16c
KEY_EPG = 0x16d
KEY_PVR = 0x16e
KEY_MHP = 0x16f
KEY_LANGUAGE = 0x170
KEY_TITLE = 0x171
KEY_SUBTITLE = 0x172
KEY_ANGLE = 0x173
KEY_ZOOM = 0x174
KEY_MODE = 0x175
KEY_KEYBOARD = 0x176
KEY_SCREEN = 0x177
KEY_PC = 0x178
KEY_TV = 0x179
KEY_TV2 = 0x17a
KEY_VCR = 0x17b
KEY_VCR2 = 0x17c
KEY_SAT = 0x17d
KEY_SAT2 = 0x17e
KEY_CD = 0x17f
KEY_TAPE = 0x180
KEY_RADIO = 0x181
KEY_TUNER = 0x182
KEY_PLAYER = 0x183
KEY_TEXT = 0x184
KEY_DVD = 0x185
KEY_AUX = 0x186
KEY_MP3 = 0x187
KEY_AUDIO = 0x188
KEY_VIDEO = 0x189
KEY_DIRECTORY = 0x18a
KEY_LIST = 0x18b
KEY_MEMO = 0x18c
KEY_CALENDAR = 0x18d
KEY_RED = 0x18e
KEY_GREEN = 0x18f
KEY_YELLOW = 0x190
KEY_BLUE = 0x191
KEY_CHANNELUP = 0x192
KEY_CHANNELDOWN = 0x193
KEY_FIRST = 0x194
KEY_LAST = 0x195
KEY_AB = 0x196
KEY_NEXT = 0x197
KEY_RESTART = 0x198
KEY_SLOW = 0x199
KEY_SHUFFLE = 0x19a
KEY_BREAK = 0x19b
KEY_PREVIOUS = 0x19c
KEY_DIGITS = 0x19d
KEY_TEEN = 0x19e
KEY_TWEN = 0x19f
KEY_DEL_EOL = 0x1c0
KEY_DEL_EOS = 0x1c1
KEY_INS_LINE = 0x1c2
KEY_DEL_LINE = 0x1c3
KEY_FN = 0x1d0
KEY_FN_ESC = 0x1d1
KEY_FN_F1 = 0x1d2
KEY_FN_F2 = 0x1d3
KEY_FN_F3 = 0x1d4
KEY_FN_F4 = 0x1d5
KEY_FN_F5 = 0x1d6
KEY_FN_F6 = 0x1d7
KEY_FN_F7 = 0x1d8
KEY_FN_F8 = 0x1d9
KEY_FN_F9 = 0x1da
KEY_FN_F10 = 0x1db
KEY_FN_F11 = 0x1dc
KEY_FN_F12 = 0x1dd
KEY_FN_1 = 0x1de
KEY_FN_2 = 0x1df
KEY_FN_D = 0x1e0
KEY_FN_E = 0x1e1
KEY_FN_F = 0x1e2
KEY_FN_S = 0x1e3
KEY_FN_B = 0x1e4
KEY_MAX = 0x1ff
# Relative axes
REL_X = 0x00
REL_Y = 0x01
REL_Z = 0x02
REL_RX = 0x03
REL_RY = 0x04
REL_RZ = 0x05
REL_HWHEEL = 0x06
REL_DIAL = 0x07
REL_WHEEL = 0x08
REL_MISC = 0x09
REL_MAX = 0x0f
# Absolute axes
ABS_X = 0x00
ABS_Y = 0x01
ABS_Z = 0x02
ABS_RX = 0x03
ABS_RY = 0x04
ABS_RZ = 0x05
ABS_THROTTLE = 0x06
ABS_RUDDER = 0x07
ABS_WHEEL = 0x08
ABS_GAS = 0x09
ABS_BRAKE = 0x0a
ABS_HAT0X = 0x10
ABS_HAT0Y = 0x11
ABS_HAT1X = 0x12
ABS_HAT1Y = 0x13
ABS_HAT2X = 0x14
ABS_HAT2Y = 0x15
ABS_HAT3X = 0x16
ABS_HAT3Y = 0x17
ABS_PRESSURE = 0x18
ABS_DISTANCE = 0x19
ABS_TILT_X = 0x1a
ABS_TILT_Y = 0x1b
ABS_TOOL_WIDTH = 0x1c
ABS_VOLUME = 0x20
ABS_MISC = 0x28
ABS_MAX = 0x3f
# Misc events
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
# LEDs
LED_NUML = 0x00
LED_CAPSL = 0x01
LED_SCROLLL = 0x02
LED_COMPOSE = 0x03
LED_KANA = 0x04
LED_SLEEP = 0x05
LED_SUSPEND = 0x06
LED_MUTE = 0x07
LED_MISC = 0x08
LED_MAIL = 0x09
LED_CHARGING = 0x0a
LED_MAX = 0x0f
# Autorepeat values
REP_DELAY = 0x00
REP_PERIOD = 0x01
REP_MAX = 0x01
# Sounds
SND_CLICK = 0x00
SND_BELL = 0x01
SND_TONE = 0x02
SND_MAX = 0x07
# IDs.
ID_BUS = 0
ID_VENDOR = 1
ID_PRODUCT = 2
ID_VERSION = 3
BUS_PCI = 0x01
BUS_ISAPNP = 0x02
BUS_USB = 0x03
BUS_HIL = 0x04
BUS_BLUETOOTH = 0x05
BUS_ISA = 0x10
BUS_I8042 = 0x11
BUS_XTKBD = 0x12
BUS_RS232 = 0x13
BUS_GAMEPORT = 0x14
BUS_PARPORT = 0x15
BUS_AMIGA = 0x16
BUS_ADB = 0x17
BUS_I2C = 0x18
BUS_HOST = 0x19
# Values describing the status of an effect
FF_STATUS_STOPPED = 0x00
FF_STATUS_PLAYING = 0x01
FF_STATUS_MAX = 0x01
# Reverse-lookup tables mapping raw event codes back to their symbolic
# names, built from the constants defined above in this module.
_rel_raw_names = {}
_abs_raw_names = {}
_key_raw_names = {}
# Iterate over a snapshot of locals(): assigning into the three dicts
# above mutates the module namespace while we scan it, so a copy is
# required.  Constants that share a value (e.g. BTN_MISC and BTN_0)
# collapse to whichever name happens to be seen last.
for _name, _val in locals().copy().items():
    if _name.startswith('REL_'):
        _rel_raw_names[_val] = _name
    elif _name.startswith('ABS_'):
        _abs_raw_names[_val] = _name
    elif _name.startswith('KEY_') or _name.startswith('BTN_'):
        _key_raw_names[_val] = _name
| 15.736162 | 62 | 0.749326 |
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
SYN_REPORT = 0
SYN_CONFIG = 1
KEY_RESERVED = 0
KEY_ESC = 1
KEY_1 = 2
KEY_2 = 3
KEY_3 = 4
KEY_4 = 5
KEY_5 = 6
KEY_6 = 7
KEY_7 = 8
KEY_8 = 9
KEY_9 = 10
KEY_0 = 11
KEY_MINUS = 12
KEY_EQUAL = 13
KEY_BACKSPACE = 14
KEY_TAB = 15
KEY_Q = 16
KEY_W = 17
KEY_E = 18
KEY_R = 19
KEY_T = 20
KEY_Y = 21
KEY_U = 22
KEY_I = 23
KEY_O = 24
KEY_P = 25
KEY_LEFTBRACE = 26
KEY_RIGHTBRACE = 27
KEY_ENTER = 28
KEY_LEFTCTRL = 29
KEY_A = 30
KEY_S = 31
KEY_D = 32
KEY_F = 33
KEY_G = 34
KEY_H = 35
KEY_J = 36
KEY_K = 37
KEY_L = 38
KEY_SEMICOLON = 39
KEY_APOSTROPHE = 40
KEY_GRAVE = 41
KEY_LEFTSHIFT = 42
KEY_BACKSLASH = 43
KEY_Z = 44
KEY_X = 45
KEY_C = 46
KEY_V = 47
KEY_B = 48
KEY_N = 49
KEY_M = 50
KEY_COMMA = 51
KEY_DOT = 52
KEY_SLASH = 53
KEY_RIGHTSHIFT = 54
KEY_KPASTERISK = 55
KEY_LEFTALT = 56
KEY_SPACE = 57
KEY_CAPSLOCK = 58
KEY_F1 = 59
KEY_F2 = 60
KEY_F3 = 61
KEY_F4 = 62
KEY_F5 = 63
KEY_F6 = 64
KEY_F7 = 65
KEY_F8 = 66
KEY_F9 = 67
KEY_F10 = 68
KEY_NUMLOCK = 69
KEY_SCROLLLOCK = 70
KEY_KP7 = 71
KEY_KP8 = 72
KEY_KP9 = 73
KEY_KPMINUS = 74
KEY_KP4 = 75
KEY_KP5 = 76
KEY_KP6 = 77
KEY_KPPLUS = 78
KEY_KP1 = 79
KEY_KP2 = 80
KEY_KP3 = 81
KEY_KP0 = 82
KEY_KPDOT = 83
KEY_ZENKAKUHANKAKU = 85
KEY_102ND = 86
KEY_F11 = 87
KEY_F12 = 88
KEY_RO = 89
KEY_KATAKANA = 90
KEY_HIRAGANA = 91
KEY_HENKAN = 92
KEY_KATAKANAHIRAGANA = 93
KEY_MUHENKAN = 94
KEY_KPJPCOMMA = 95
KEY_KPENTER = 96
KEY_RIGHTCTRL = 97
KEY_KPSLASH = 98
KEY_SYSRQ = 99
KEY_RIGHTALT = 100
KEY_LINEFEED = 101
KEY_HOME = 102
KEY_UP = 103
KEY_PAGEUP = 104
KEY_LEFT = 105
KEY_RIGHT = 106
KEY_END = 107
KEY_DOWN = 108
KEY_PAGEDOWN = 109
KEY_INSERT = 110
KEY_DELETE = 111
KEY_MACRO = 112
KEY_MUTE = 113
KEY_VOLUMEDOWN = 114
KEY_VOLUMEUP = 115
KEY_POWER = 116
KEY_KPEQUAL = 117
KEY_KPPLUSMINUS = 118
KEY_PAUSE = 119
KEY_KPCOMMA = 121
KEY_HANGUEL = 122
KEY_HANJA = 123
KEY_YEN = 124
KEY_LEFTMETA = 125
KEY_RIGHTMETA = 126
KEY_COMPOSE = 127
KEY_STOP = 128
KEY_AGAIN = 129
KEY_PROPS = 130
KEY_UNDO = 131
KEY_FRONT = 132
KEY_COPY = 133
KEY_OPEN = 134
KEY_PASTE = 135
KEY_FIND = 136
KEY_CUT = 137
KEY_HELP = 138
KEY_MENU = 139
KEY_CALC = 140
KEY_SETUP = 141
KEY_SLEEP = 142
KEY_WAKEUP = 143
KEY_FILE = 144
KEY_SENDFILE = 145
KEY_DELETEFILE = 146
KEY_XFER = 147
KEY_PROG1 = 148
KEY_PROG2 = 149
KEY_WWW = 150
KEY_MSDOS = 151
KEY_COFFEE = 152
KEY_DIRECTION = 153
KEY_CYCLEWINDOWS = 154
KEY_MAIL = 155
KEY_BOOKMARKS = 156
KEY_COMPUTER = 157
KEY_BACK = 158
KEY_FORWARD = 159
KEY_CLOSECD = 160
KEY_EJECTCD = 161
KEY_EJECTCLOSECD = 162
KEY_NEXTSONG = 163
KEY_PLAYPAUSE = 164
KEY_PREVIOUSSONG = 165
KEY_STOPCD = 166
KEY_RECORD = 167
KEY_REWIND = 168
KEY_PHONE = 169
KEY_ISO = 170
KEY_CONFIG = 171
KEY_HOMEPAGE = 172
KEY_REFRESH = 173
KEY_EXIT = 174
KEY_MOVE = 175
KEY_EDIT = 176
KEY_SCROLLUP = 177
KEY_SCROLLDOWN = 178
KEY_KPLEFTPAREN = 179
KEY_KPRIGHTPAREN = 180
KEY_F13 = 183
KEY_F14 = 184
KEY_F15 = 185
KEY_F16 = 186
KEY_F17 = 187
KEY_F18 = 188
KEY_F19 = 189
KEY_F20 = 190
KEY_F21 = 191
KEY_F22 = 192
KEY_F23 = 193
KEY_F24 = 194
KEY_PLAYCD = 200
KEY_PAUSECD = 201
KEY_PROG3 = 202
KEY_PROG4 = 203
KEY_SUSPEND = 205
KEY_CLOSE = 206
KEY_PLAY = 207
KEY_FASTFORWARD = 208
KEY_BASSBOOST = 209
KEY_PRINT = 210
KEY_HP = 211
KEY_CAMERA = 212
KEY_SOUND = 213
KEY_QUESTION = 214
KEY_EMAIL = 215
KEY_CHAT = 216
KEY_SEARCH = 217
KEY_CONNECT = 218
KEY_FINANCE = 219
KEY_SPORT = 220
KEY_SHOP = 221
KEY_ALTERASE = 222
KEY_CANCEL = 223
KEY_BRIGHTNESSDOWN = 224
KEY_BRIGHTNESSUP = 225
KEY_MEDIA = 226
KEY_UNKNOWN = 240
BTN_MISC = 0x100
BTN_0 = 0x100
BTN_1 = 0x101
BTN_2 = 0x102
BTN_3 = 0x103
BTN_4 = 0x104
BTN_5 = 0x105
BTN_6 = 0x106
BTN_7 = 0x107
BTN_8 = 0x108
BTN_9 = 0x109
BTN_MOUSE = 0x110
BTN_LEFT = 0x110
BTN_RIGHT = 0x111
BTN_MIDDLE = 0x112
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
BTN_FORWARD = 0x115
BTN_BACK = 0x116
BTN_TASK = 0x117
BTN_JOYSTICK = 0x120
BTN_TRIGGER = 0x120
BTN_THUMB = 0x121
BTN_THUMB2 = 0x122
BTN_TOP = 0x123
BTN_TOP2 = 0x124
BTN_PINKIE = 0x125
BTN_BASE = 0x126
BTN_BASE2 = 0x127
BTN_BASE3 = 0x128
BTN_BASE4 = 0x129
BTN_BASE5 = 0x12a
BTN_BASE6 = 0x12b
BTN_DEAD = 0x12f
BTN_GAMEPAD = 0x130
BTN_A = 0x130
BTN_B = 0x131
BTN_C = 0x132
BTN_X = 0x133
BTN_Y = 0x134
BTN_Z = 0x135
BTN_TL = 0x136
BTN_TR = 0x137
BTN_TL2 = 0x138
BTN_TR2 = 0x139
BTN_SELECT = 0x13a
BTN_START = 0x13b
BTN_MODE = 0x13c
BTN_THUMBL = 0x13d
BTN_THUMBR = 0x13e
BTN_DIGI = 0x140
BTN_TOOL_PEN = 0x140
BTN_TOOL_RUBBER = 0x141
BTN_TOOL_BRUSH = 0x142
BTN_TOOL_PENCIL = 0x143
BTN_TOOL_AIRBRUSH = 0x144
BTN_TOOL_FINGER = 0x145
BTN_TOOL_MOUSE = 0x146
BTN_TOOL_LENS = 0x147
BTN_TOUCH = 0x14a
BTN_STYLUS = 0x14b
BTN_STYLUS2 = 0x14c
BTN_TOOL_DOUBLETAP = 0x14d
BTN_TOOL_TRIPLETAP = 0x14e
BTN_WHEEL = 0x150
BTN_GEAR_DOWN = 0x150
BTN_GEAR_UP = 0x151
KEY_OK = 0x160
KEY_SELECT = 0x161
KEY_GOTO = 0x162
KEY_CLEAR = 0x163
KEY_POWER2 = 0x164
KEY_OPTION = 0x165
KEY_INFO = 0x166
KEY_TIME = 0x167
KEY_VENDOR = 0x168
KEY_ARCHIVE = 0x169
KEY_PROGRAM = 0x16a
KEY_CHANNEL = 0x16b
KEY_FAVORITES = 0x16c
KEY_EPG = 0x16d
KEY_PVR = 0x16e
KEY_MHP = 0x16f
KEY_LANGUAGE = 0x170
KEY_TITLE = 0x171
KEY_SUBTITLE = 0x172
KEY_ANGLE = 0x173
KEY_ZOOM = 0x174
KEY_MODE = 0x175
KEY_KEYBOARD = 0x176
KEY_SCREEN = 0x177
KEY_PC = 0x178
KEY_TV = 0x179
KEY_TV2 = 0x17a
KEY_VCR = 0x17b
KEY_VCR2 = 0x17c
KEY_SAT = 0x17d
KEY_SAT2 = 0x17e
KEY_CD = 0x17f
KEY_TAPE = 0x180
KEY_RADIO = 0x181
KEY_TUNER = 0x182
KEY_PLAYER = 0x183
KEY_TEXT = 0x184
KEY_DVD = 0x185
KEY_AUX = 0x186
KEY_MP3 = 0x187
KEY_AUDIO = 0x188
KEY_VIDEO = 0x189
KEY_DIRECTORY = 0x18a
KEY_LIST = 0x18b
KEY_MEMO = 0x18c
KEY_CALENDAR = 0x18d
KEY_RED = 0x18e
KEY_GREEN = 0x18f
KEY_YELLOW = 0x190
KEY_BLUE = 0x191
KEY_CHANNELUP = 0x192
KEY_CHANNELDOWN = 0x193
KEY_FIRST = 0x194
KEY_LAST = 0x195
KEY_AB = 0x196
KEY_NEXT = 0x197
KEY_RESTART = 0x198
KEY_SLOW = 0x199
KEY_SHUFFLE = 0x19a
KEY_BREAK = 0x19b
KEY_PREVIOUS = 0x19c
KEY_DIGITS = 0x19d
KEY_TEEN = 0x19e
KEY_TWEN = 0x19f
KEY_DEL_EOL = 0x1c0
KEY_DEL_EOS = 0x1c1
KEY_INS_LINE = 0x1c2
KEY_DEL_LINE = 0x1c3
KEY_FN = 0x1d0
KEY_FN_ESC = 0x1d1
KEY_FN_F1 = 0x1d2
KEY_FN_F2 = 0x1d3
KEY_FN_F3 = 0x1d4
KEY_FN_F4 = 0x1d5
KEY_FN_F5 = 0x1d6
KEY_FN_F6 = 0x1d7
KEY_FN_F7 = 0x1d8
KEY_FN_F8 = 0x1d9
KEY_FN_F9 = 0x1da
KEY_FN_F10 = 0x1db
KEY_FN_F11 = 0x1dc
KEY_FN_F12 = 0x1dd
KEY_FN_1 = 0x1de
KEY_FN_2 = 0x1df
KEY_FN_D = 0x1e0
KEY_FN_E = 0x1e1
KEY_FN_F = 0x1e2
KEY_FN_S = 0x1e3
KEY_FN_B = 0x1e4
KEY_MAX = 0x1ff
REL_X = 0x00
REL_Y = 0x01
REL_Z = 0x02
REL_RX = 0x03
REL_RY = 0x04
REL_RZ = 0x05
REL_HWHEEL = 0x06
REL_DIAL = 0x07
REL_WHEEL = 0x08
REL_MISC = 0x09
REL_MAX = 0x0f
ABS_X = 0x00
ABS_Y = 0x01
ABS_Z = 0x02
ABS_RX = 0x03
ABS_RY = 0x04
ABS_RZ = 0x05
ABS_THROTTLE = 0x06
ABS_RUDDER = 0x07
ABS_WHEEL = 0x08
ABS_GAS = 0x09
ABS_BRAKE = 0x0a
ABS_HAT0X = 0x10
ABS_HAT0Y = 0x11
ABS_HAT1X = 0x12
ABS_HAT1Y = 0x13
ABS_HAT2X = 0x14
ABS_HAT2Y = 0x15
ABS_HAT3X = 0x16
ABS_HAT3Y = 0x17
ABS_PRESSURE = 0x18
ABS_DISTANCE = 0x19
ABS_TILT_X = 0x1a
ABS_TILT_Y = 0x1b
ABS_TOOL_WIDTH = 0x1c
ABS_VOLUME = 0x20
ABS_MISC = 0x28
ABS_MAX = 0x3f
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
LED_NUML = 0x00
LED_CAPSL = 0x01
LED_SCROLLL = 0x02
LED_COMPOSE = 0x03
LED_KANA = 0x04
LED_SLEEP = 0x05
LED_SUSPEND = 0x06
LED_MUTE = 0x07
LED_MISC = 0x08
LED_MAIL = 0x09
LED_CHARGING = 0x0a
LED_MAX = 0x0f
REP_DELAY = 0x00
REP_PERIOD = 0x01
REP_MAX = 0x01
SND_CLICK = 0x00
SND_BELL = 0x01
SND_TONE = 0x02
SND_MAX = 0x07
ID_BUS = 0
ID_VENDOR = 1
ID_PRODUCT = 2
ID_VERSION = 3
BUS_PCI = 0x01
BUS_ISAPNP = 0x02
BUS_USB = 0x03
BUS_HIL = 0x04
BUS_BLUETOOTH = 0x05
BUS_ISA = 0x10
BUS_I8042 = 0x11
BUS_XTKBD = 0x12
BUS_RS232 = 0x13
BUS_GAMEPORT = 0x14
BUS_PARPORT = 0x15
BUS_AMIGA = 0x16
BUS_ADB = 0x17
BUS_I2C = 0x18
BUS_HOST = 0x19
FF_STATUS_STOPPED = 0x00
FF_STATUS_PLAYING = 0x01
FF_STATUS_MAX = 0x01
_rel_raw_names = {}
_abs_raw_names = {}
_key_raw_names = {}
for _name, _val in locals().copy().items():
if _name.startswith('REL_'):
_rel_raw_names[_val] = _name
elif _name.startswith('ABS_'):
_abs_raw_names[_val] = _name
elif _name.startswith('KEY_') or _name.startswith('BTN_'):
_key_raw_names[_val] = _name
| true | true |
f727f3b193a37188b7de74a5fd1ff300f301659a | 1,439 | py | Python | MoneyChangeAlgorithm/moneychange.py | HeavyWolfPL/SchoolTools | d665d5cbecae7a7d1e7c2406b08471cf0242ed7f | [
"MIT"
] | null | null | null | MoneyChangeAlgorithm/moneychange.py | HeavyWolfPL/SchoolTools | d665d5cbecae7a7d1e7c2406b08471cf0242ed7f | [
"MIT"
] | null | null | null | MoneyChangeAlgorithm/moneychange.py | HeavyWolfPL/SchoolTools | d665d5cbecae7a7d1e7c2406b08471cf0242ed7f | [
"MIT"
] | null | null | null |
# Greedy change-making over a limited multiset of coins.
# Reads the coins the user actually owns and a target amount, then
# repeatedly spends the largest remaining coin that still fits.
# NOTE: greedy may fail to find a combination even when one exists
# (e.g. coins 3,3,4 for amount 6) -- inherent to this algorithm.
monety = input("Podaj posiadane monety, dzieląc je przecinkiem: ")
monety = [int(m) for m in monety.replace(" ", "").split(",")]
suma_monet = sum(monety)  # total value owned, reported on failure
reszta = int(input("Jaką reszte chcesz wydać? "))
historia = []  # coins paid out so far, in order

while reszta > 0:
    # Largest remaining coin that does not exceed the remainder.
    nominal = 0
    for moneta in monety:
        if moneta <= reszta and moneta > nominal:
            nominal = moneta
    if nominal == 0:
        # No remaining coin fits: the exact amount cannot be paid.
        print("Nie udało się wydać pełnej reszty. Można wydać tylko " + str(suma_monet) + " zł.")
        exit()
    # Bug fix: each owned coin can be spent only once; the original
    # never removed used coins, so a single coin could be paid out
    # repeatedly.
    monety.remove(nominal)
    reszta -= nominal
    historia.append(nominal)

licznik = len(historia)
historia = str(historia).replace("[", "").replace("]", "")
if licznik != 1:
    print("Użyto " + str(licznik) + " monet(y): " + str(historia))
else:
    print("Resztę można wydać monetą: " + str(historia) + " zł")
| 24.389831 | 98 | 0.549687 |
monety = input("Podaj posiadane monety, dzieląc je przecinkiem: ")
monety = monety.replace(" ", "")
monety = monety.split(",")
monety = list(map(int, monety))
ilosc = len(monety)
reszta = int(input("Jaką reszte chcesz wydać? "))
licznik = 0
historia = []
while reszta > 0:
if licznik >= ilosc:
print("Nie udało się wydać pełnej reszty. Można wydać tylko " + str(sum(monety)) + " zł.")
exit()
else:
nominal = 0
for i in range(ilosc):
if (monety[i] <= reszta) and (monety[i] > nominal):
nominal = monety[i]
reszta = reszta - nominal
historia.append(nominal)
licznik += 1
historia = str(historia).replace("[", "").replace("]", "")
if licznik != 1:
print("Użyto " + str(licznik) + " monet(y): " + str(historia))
else:
print("Resztę można wydać monetą: " + str(historia) + " zł")
| true | true |
f727f4410396c7d32520f3d46daef479d465926c | 991 | py | Python | coto/clients/signin/__init__.py | wvanheerde/coto | d7eeb2e98a24b743d879ef5e2da9cbbacc417d8d | [
"Apache-2.0"
] | 42 | 2018-04-13T18:02:04.000Z | 2022-03-30T00:21:26.000Z | coto/clients/signin/__init__.py | wvanheerde/coto | d7eeb2e98a24b743d879ef5e2da9cbbacc417d8d | [
"Apache-2.0"
] | 18 | 2019-02-08T11:50:46.000Z | 2022-03-29T10:12:03.000Z | coto/clients/signin/__init__.py | wvanheerde/coto | d7eeb2e98a24b743d879ef5e2da9cbbacc417d8d | [
"Apache-2.0"
] | 19 | 2019-02-04T14:57:46.000Z | 2022-03-24T13:39:21.000Z | from .. import BaseClient
class Client(BaseClient):
    """Sign-in front end that routes authentication by account type.

    "Decoupled" (AWS) accounts go through the ``signin_aws`` client,
    Amazon-coupled accounts through ``signin_amazon``.
    """

    REQUIRES_AUTHENTICATION = False

    def __init__(self, session):
        super().__init__(session)
        self._signin_aws = self.session().client('signin_aws')
        self._signin_amazon = self.session().client('signin_amazon')

    def signin(self, email, password, mfa_secret=None):
        """Authenticate *email*, delegating to the matching backend.

        Raises for unknown/inactive or unsupported account types.
        """
        account_type = self._signin_aws.get_account_type(email)
        if account_type == 'Decoupled':
            backend = self._signin_aws
        elif account_type == 'Coupled':
            backend = self._signin_amazon
        elif account_type == 'Unknown':
            raise Exception("account {0} not active".format(email))
        else:
            raise Exception("unsupported account type {0}".format(email))
        return backend.signin(
            email,
            password,
            mfa_secret,
        )
| 30.030303 | 73 | 0.576186 | from .. import BaseClient
class Client(BaseClient):
REQUIRES_AUTHENTICATION = False
def __init__(self, session):
super().__init__(session)
self._signin_aws = self.session().client('signin_aws')
self._signin_amazon = self.session().client('signin_amazon')
def signin(self, email, password, mfa_secret=None):
account_type = self._signin_aws.get_account_type(email)
if account_type == 'Decoupled':
return self._signin_aws.signin(
email,
password,
mfa_secret,
)
elif account_type == 'Coupled':
return self._signin_amazon.signin(
email,
password,
mfa_secret,
)
elif account_type == 'Unknown':
raise Exception("account {0} not active".format(email))
else:
raise Exception("unsupported account type {0}".format(email))
| true | true |
f727f4f0204f36cf8008b3978cac8641ec0e174c | 216 | py | Python | beneficiaries/beneficiaries/doctype/beneficiary_aid_type/test_beneficiary_aid_type.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/beneficiary_aid_type/test_beneficiary_aid_type.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | null | null | null | beneficiaries/beneficiaries/doctype/beneficiary_aid_type/test_beneficiary_aid_type.py | baidalala/beneficiaries | b7299e0a7da91e90c607e70d76994ec0aebae402 | [
"MIT"
] | 1 | 2021-08-31T18:47:58.000Z | 2021-08-31T18:47:58.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Baida and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestBeneficiaryAidType(unittest.TestCase):
    """Placeholder test case for the Beneficiary Aid Type doctype.

    No custom behavior exists to verify yet, so no test methods are
    defined.
    """
    pass
| 19.636364 | 48 | 0.768519 |
from __future__ import unicode_literals
import unittest
class TestBeneficiaryAidType(unittest.TestCase):
pass
| true | true |
f727f5054ef0bf3d339c999db4daac4c53effb65 | 3,466 | py | Python | enas/cifar10/data_utils.py | j-varun/enas | 1a19ccbd7c06168ae51e0de2986b30ea01cce070 | [
"Apache-2.0"
] | 10 | 2018-05-07T15:59:55.000Z | 2021-04-18T12:51:14.000Z | enas/cifar10/data_utils.py | j-varun/enas | 1a19ccbd7c06168ae51e0de2986b30ea01cce070 | [
"Apache-2.0"
] | 4 | 2018-06-03T16:46:57.000Z | 2018-08-08T21:48:05.000Z | enas/cifar10/data_utils.py | j-varun/enas | 1a19ccbd7c06168ae51e0de2986b30ea01cce070 | [
"Apache-2.0"
] | 4 | 2018-05-25T04:39:56.000Z | 2019-04-29T05:08:25.000Z | import os
import sys
try:
import cPickle as pickle
except ImportError:
import _pickle as pickle
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def _read_data(data_path, train_files):
"""Reads CIFAR-10 format data. Always returns NHWC format.
Returns:
images: np tensor of size [N, H, W, C]
labels: np tensor of size [N]
"""
images, labels = [], []
for file_name in train_files:
print(file_name)
full_name = os.path.join(data_path, file_name)
with open(full_name, 'rb') as finp:
data = pickle.load(finp, encoding='bytes')
batch_images = data[b"data"].astype(np.float32) / 255.0
batch_labels = np.array(data[b"labels"], dtype=np.int32)
images.append(batch_images)
labels.append(batch_labels)
images = np.concatenate(images, axis=0)
labels = np.concatenate(labels, axis=0)
images = np.reshape(images, [-1, 3, 32, 32])
images = np.transpose(images, [0, 2, 3, 1])
return images, labels
def _read_fmnist_data(data_path):
  """Reads Fashion-Mnist data via the TF tutorial loader. Returns NHWC format.

  Returns:
    images: dict with "train"/"test" np tensors of size [N, H, W, C]
    labels: dict with "train"/"test" int32 np tensors of size [N]
  """
  dataset = input_data.read_data_sets(data_path)
  # NOTE(review): read_data_sets already scales images to [0, 1]; the extra
  # /255.0 below mirrors the original code — confirm it is intended.
  images = {
      "train": np.transpose(dataset.train.images.reshape(-1, 1, 28, 28) / 255.0, [0, 2, 3, 1]),
      "test": np.transpose(dataset.test.images.reshape(-1, 1, 28, 28) / 255.0, [0, 2, 3, 1]),
  }
  labels = {
      "train": np.array(dataset.train.labels, dtype=np.int32),
      "test": np.array(dataset.test.labels, dtype=np.int32),
  }
  print("Read and processed data..")
  print(labels["test"])
  return images, labels
def valid_split_data(images, labels, num_valids=5000):
  """Carve the last `num_valids` training examples out as a "valid" split.

  Mutates and returns the `images`/`labels` dicts; the "valid" entries are
  None when `num_valids` is falsy.
  """
  if not num_valids:
    images["valid"], labels["valid"] = None, None
    return images, labels
  images["valid"] = images["train"][-num_valids:]
  labels["valid"] = labels["train"][-num_valids:]
  images["train"] = images["train"][:-num_valids]
  labels["train"] = labels["train"][:-num_valids]
  return images, labels
def read_data(data_path, num_valids=5000, dataset="cifar"):
  """Loads a dataset by name.

  Args:
    data_path: directory containing the raw dataset files.
    num_valids: number of training examples carved out as a validation split.
    dataset: one of "cifar" (default), "fmnist", or "stacking".

  Returns:
    (images, labels) dicts. For "cifar"/"fmnist" they hold "train"/"valid"/
    "test" splits; for "stacking" only images["path"] is populated and
    labels stays empty.
  """
  print("-" * 80)
  print("Reading data")
  print(os.getcwd())

  images, labels = {}, {}
  if dataset == "fmnist":
    print("Fashion-Mnist")
    images, labels = _read_fmnist_data(data_path)
    images, labels = valid_split_data(images, labels, num_valids)
    return images, labels

  if dataset == "stacking":
    # callers only need to know where the data lives
    images["path"] = data_path
    return images, labels

  # default: CIFAR-10 pickle batches
  train_files = [
      "data_batch_1",
      "data_batch_2",
      "data_batch_3",
      "data_batch_4",
      "data_batch_5",
  ]
  test_files = ["test_batch"]
  images["train"], labels["train"] = _read_data(data_path, train_files)
  images, labels = valid_split_data(images, labels, num_valids)
  images["test"], labels["test"] = _read_data(data_path, test_files)

  # normalize every split with the *training* mean/std
  print("Preprocess: [subtract mean], [divide std]")
  mean = np.mean(images["train"], axis=(0, 1, 2), keepdims=True)
  std = np.std(images["train"], axis=(0, 1, 2), keepdims=True)

  print("mean: {}".format(np.reshape(mean * 255.0, [-1])))
  print("std: {}".format(np.reshape(std * 255.0, [-1])))

  images["train"] = (images["train"] - mean) / std
  if num_valids:
    images["valid"] = (images["valid"] - mean) / std
  images["test"] = (images["test"] - mean) / std
  return images, labels
| 29.372881 | 73 | 0.644547 | import os
import sys
try:
import cPickle as pickle
except ImportError:
import _pickle as pickle
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def _read_data(data_path, train_files):
  """Read CIFAR-10 style pickled batches. Always returns NHWC format.

  Args:
    data_path: directory containing the pickle batch files.
    train_files: batch file names to read, in order.

  Returns:
    images: float32 np tensor of size [N, H, W, C], scaled to [0, 1]
    labels: int32 np tensor of size [N]
  """
  images, labels = [], []
  for file_name in train_files:
    print(file_name)
    full_name = os.path.join(data_path, file_name)
    with open(full_name, 'rb') as finp:
      # encoding='bytes' keeps the Python-2 era pickle keys as bytes
      data = pickle.load(finp, encoding='bytes')
      batch_images = data[b"data"].astype(np.float32) / 255.0
      batch_labels = np.array(data[b"labels"], dtype=np.int32)
    images.append(batch_images)
    labels.append(batch_labels)
  images = np.concatenate(images, axis=0)
  labels = np.concatenate(labels, axis=0)
  # flat rows of 3072 values -> NCHW -> NHWC
  images = np.reshape(images, [-1, 3, 32, 32])
  images = np.transpose(images, [0, 2, 3, 1])
  return images, labels
def _read_fmnist_data(data_path):
  """Read Fashion-MNIST via the TF tutorial loader. Returns NHWC format.

  Returns:
    images: dict with "train"/"test" np tensors of size [N, H, W, C]
    labels: dict with "train"/"test" int32 np tensors of size [N]
  """
  images, labels = {},{}
  data = input_data.read_data_sets(data_path)
  # NOTE(review): read_data_sets already scales images to [0, 1]; the extra
  # /255.0 here looks double-applied — confirm it is intended.
  images["train"] = data.train.images.reshape(-1, 1, 28, 28) / 255.0
  images["test"] = data.test.images.reshape(-1, 1, 28, 28) / 255.0
  images["train"] = np.transpose(images["train"], [0, 2, 3, 1])
  images["test"] = np.transpose(images["test"], [0, 2, 3, 1])
  labels["train"] = np.array(data.train.labels, dtype = np.int32)
  labels["test"] = np.array(data.test.labels, dtype = np.int32)
  print("Read and processed data..")
  print(labels["test"])
  return images, labels
def valid_split_data(images, labels, num_valids=5000):
  """Carve the last `num_valids` training examples out as a "valid" split.

  Mutates and returns the `images`/`labels` dicts; the "valid" entries are
  None when `num_valids` is falsy.
  """
  if num_valids:
    images["valid"] = images["train"][-num_valids:]
    labels["valid"] = labels["train"][-num_valids:]
    images["train"] = images["train"][:-num_valids]
    labels["train"] = labels["train"][:-num_valids]
  else:
    images["valid"], labels["valid"] = None, None
  return images, labels
def read_data(data_path, num_valids=5000, dataset = "cifar"):
  """Loads a dataset by name.

  Args:
    data_path: directory containing the raw dataset files.
    num_valids: number of training examples carved out as a validation split.
    dataset: one of "cifar" (default), "fmnist", or "stacking".

  Returns:
    (images, labels) dicts. For "cifar"/"fmnist" they hold "train"/"valid"/
    "test" splits; for "stacking" only images["path"] is populated and
    labels stays empty.
  """
  print("-" * 80)
  print("Reading data")
  print(os.getcwd())
  images, labels = {}, {}
  if(dataset == "fmnist"):
    print("Fashion-Mnist")
    images, labels = _read_fmnist_data(data_path)
    images, labels = valid_split_data(images, labels, num_valids)
    return images, labels
  if dataset == "stacking":
    # callers only need to know where the data lives
    images["path"] = data_path
    return images, labels
  else:
    train_files = [
      "data_batch_1",
      "data_batch_2",
      "data_batch_3",
      "data_batch_4",
      "data_batch_5",
    ]
    test_file = [
      "test_batch",
    ]
    images["train"], labels["train"] = _read_data(data_path, train_files)
    images, labels = valid_split_data(images, labels, num_valids)
    images["test"], labels["test"] = _read_data(data_path, test_file)
    # normalize every split with the *training* mean/std
    # (sic: "Prepropcess" typo kept in the log message)
    print("Prepropcess: [subtract mean], [divide std]")
    mean = np.mean(images["train"], axis=(0, 1, 2), keepdims=True)
    std = np.std(images["train"], axis=(0, 1, 2), keepdims=True)
    print("mean: {}".format(np.reshape(mean * 255.0, [-1])))
    print("std: {}".format(np.reshape(std * 255.0, [-1])))
    images["train"] = (images["train"] - mean) / std
    if num_valids:
      images["valid"] = (images["valid"] - mean) / std
    images["test"] = (images["test"] - mean) / std
    return images, labels
| true | true |
f727f57947e671ebd44b45237afa3f439dc6b3c3 | 8,633 | py | Python | conans/client/rest/auth_manager.py | amatoshka/conan | c2726e8c255adb120b5f7bdee9e3ec0bc90f1d7a | [
"MIT"
] | null | null | null | conans/client/rest/auth_manager.py | amatoshka/conan | c2726e8c255adb120b5f7bdee9e3ec0bc90f1d7a | [
"MIT"
] | null | null | null | conans/client/rest/auth_manager.py | amatoshka/conan | c2726e8c255adb120b5f7bdee9e3ec0bc90f1d7a | [
"MIT"
] | null | null | null | """
Collaborate with RestApiClient to make remote anonymous and authenticated calls.
Uses user_io to request user's login and password and obtain a token for calling authenticated
methods if receives AuthenticationException from RestApiClient.
Flow:
Directly invoke a REST method in RestApiClient, example: get_conan.
if receives AuthenticationException (not open method) will ask user for login and password
and will invoke RestApiClient.get_token() (with LOGIN_RETRIES retries) and retry to call
get_conan with the new token.
"""
import hashlib
from uuid import getnode as get_mac
from conans.client.cmd.user import update_localdb
from conans.errors import AuthenticationException, ConanException, ForbiddenException
from conans.util.log import logger
def input_credentials_if_unauthorized(func):
    """Decorator. Handles AuthenticationException and requests the user
    to input a user and a password.

    `wrapper` and `retry_with_new_token` are mutually recursive: a failed
    call falls into `retry_with_new_token`, which on success re-enters
    `wrapper` to repeat the original REST call with the new token.
    """
    LOGIN_RETRIES = 3
    def wrapper(self, *args, **kwargs):
        try:
            # Set custom headers of mac_digest and username
            self.set_custom_headers(self.user)
            ret = func(self, *args, **kwargs)
            return ret
        except ForbiddenException:
            raise ForbiddenException("Permission denied for user: '%s'" % self.user)
        except AuthenticationException:
            # User valid but not enough permissions
            if self.user is None or self._rest_client.token is None:
                # token is None when you change user with user command
                # Anonymous is not enough, ask for a user
                remote = self.remote
                self._user_io.out.info('Please log in to "%s" to perform this action. '
                                       'Execute "conan user" command.' % remote.name)
                if "bintray" in remote.url:
                    self._user_io.out.info('If you don\'t have an account sign up here: '
                                           'https://bintray.com/signup/oss')
                return retry_with_new_token(self, *args, **kwargs)
            else:
                # Token expired or not valid, so clean the token and repeat the call
                # (will be anonymous call but exporting who is calling)
                logger.info("Token expired or not valid, cleaning the saved token and retrying")
                self._store_login((self.user, None))
                self._rest_client.token = None
                # Set custom headers of mac_digest and username
                self.set_custom_headers(self.user)
                return wrapper(self, *args, **kwargs)
    def retry_with_new_token(self, *args, **kwargs):
        """Try LOGIN_RETRIES to obtain a password from user input for which
        we can get a valid token from api_client. If a token is returned,
        credentials are stored in localdb and rest method is called"""
        for _ in range(LOGIN_RETRIES):
            user, password = self._user_io.request_login(self._remote.name, self.user)
            try:
                token, _, _, _ = self.authenticate(user, password)
            except AuthenticationException:
                if self.user is None:
                    self._user_io.out.error('Wrong user or password')
                else:
                    self._user_io.out.error(
                        'Wrong password for user "%s"' % self.user)
                    self._user_io.out.info(
                        'You can change username with "conan user <username>"')
            else:
                logger.debug("Got token: %s" % str(token))
                self._rest_client.token = token
                self.user = user
                # Set custom headers of mac_digest and username
                self.set_custom_headers(user)
                return wrapper(self, *args, **kwargs)
        raise AuthenticationException("Too many failed login attempts, bye!")
    return wrapper
class ConanApiAuthManager(object):
    """Runs REST calls through a RestApiClient; on authentication failure the
    `input_credentials_if_unauthorized` decorator prompts for credentials and
    retries. Credentials/tokens are cached per remote in `localdb`."""

    def __init__(self, rest_client, user_io, localdb):
        self._user_io = user_io
        self._rest_client = rest_client
        self._localdb = localdb
        self._remote = None

    @property
    def remote(self):
        return self._remote

    @remote.setter
    def remote(self, remote):
        """Point the rest client at `remote` and load its cached login."""
        self._remote = remote
        self._rest_client.remote_url = remote.url
        self._rest_client.verify_ssl = remote.verify_ssl
        self.user, self._rest_client.token = self._localdb.get_login(remote.url)

    def _store_login(self, login):
        """Persist (user, token) for the current remote; storage failure is non-fatal."""
        try:
            self._localdb.set_login(login, self._remote.url)
        except Exception as e:
            self._user_io.out.error(
                'Your credentials could not be stored in local cache\n')
            self._user_io.out.debug(str(e) + '\n')

    @staticmethod
    def get_mac_digest():
        """Return a SHA-1 hex digest of the machine's MAC address (anonymous machine id)."""
        sha1 = hashlib.sha1()
        sha1.update(str(get_mac()).encode())
        return str(sha1.hexdigest())

    def set_custom_headers(self, username):
        # First identifies our machine, second the username even if it was not
        # authenticated
        custom_headers = self._rest_client.custom_headers
        custom_headers['X-Client-Anonymous-Id'] = self.get_mac_digest()
        custom_headers['X-Client-Id'] = str(username or "")

    # ######### CONAN API METHODS ##########

    @input_credentials_if_unauthorized
    def upload_recipe(self, conan_reference, the_files, retry, retry_wait, policy, remote_manifest):
        return self._rest_client.upload_recipe(conan_reference, the_files, retry, retry_wait,
                                               policy, remote_manifest)

    @input_credentials_if_unauthorized
    def upload_package(self, package_reference, the_files, retry, retry_wait, policy):
        return self._rest_client.upload_package(package_reference, the_files, retry, retry_wait,
                                                policy)

    @input_credentials_if_unauthorized
    def get_conan_manifest(self, conan_reference):
        return self._rest_client.get_conan_manifest(conan_reference)

    @input_credentials_if_unauthorized
    def get_package_manifest(self, package_reference):
        return self._rest_client.get_package_manifest(package_reference)

    @input_credentials_if_unauthorized
    def get_package(self, package_reference, dest_folder):
        return self._rest_client.get_package(package_reference, dest_folder)

    @input_credentials_if_unauthorized
    def get_recipe(self, reference, dest_folder):
        return self._rest_client.get_recipe(reference, dest_folder)

    @input_credentials_if_unauthorized
    def get_recipe_sources(self, reference, dest_folder):
        return self._rest_client.get_recipe_sources(reference, dest_folder)

    @input_credentials_if_unauthorized
    def download_files_to_folder(self, urls, dest_folder):
        return self._rest_client.download_files_to_folder(urls, dest_folder)

    @input_credentials_if_unauthorized
    def get_package_info(self, package_reference):
        return self._rest_client.get_package_info(package_reference)

    @input_credentials_if_unauthorized
    def search(self, pattern, ignorecase):
        return self._rest_client.search(pattern, ignorecase)

    @input_credentials_if_unauthorized
    def search_packages(self, reference, query):
        return self._rest_client.search_packages(reference, query)

    @input_credentials_if_unauthorized
    def remove(self, conan_refernce):
        return self._rest_client.remove_conanfile(conan_refernce)

    @input_credentials_if_unauthorized
    def remove_packages(self, conan_reference, package_ids):
        return self._rest_client.remove_packages(conan_reference, package_ids)

    @input_credentials_if_unauthorized
    def get_path(self, conan_reference, path, package_id):
        return self._rest_client.get_path(conan_reference, path, package_id)

    def authenticate(self, user, password):
        """Get a token for `user` from the remote and cache it in localdb."""
        if user is None:  # The user is already in DB, just need the passwd
            prev_user = self._localdb.get_username(self._remote.url)
            if prev_user is None:
                raise ConanException("User for remote '%s' is not defined" % self._remote.name)
            else:
                user = prev_user
        try:
            token = self._rest_client.authenticate(user, password)
        except UnicodeDecodeError:
            raise ConanException("Password contains not allowed symbols")
        # Store result in DB
        remote_name, prev_user, user = update_localdb(self._localdb, user, token, self._remote)
        return token, remote_name, prev_user, user
| 42.737624 | 100 | 0.664196 |
import hashlib
from uuid import getnode as get_mac
from conans.client.cmd.user import update_localdb
from conans.errors import AuthenticationException, ConanException, ForbiddenException
from conans.util.log import logger
def input_credentials_if_unauthorized(func):
    """Decorator. Handles AuthenticationException and requests the user
    to input a user and a password.

    `wrapper` and `retry_with_new_token` are mutually recursive: a failed
    call falls into `retry_with_new_token`, which on success re-enters
    `wrapper` to repeat the original REST call with the new token.
    """
    LOGIN_RETRIES = 3
    def wrapper(self, *args, **kwargs):
        try:
            # export machine/user identification headers before every call
            self.set_custom_headers(self.user)
            ret = func(self, *args, **kwargs)
            return ret
        except ForbiddenException:
            raise ForbiddenException("Permission denied for user: '%s'" % self.user)
        except AuthenticationException:
            # no user or no cached token: ask for credentials interactively
            if self.user is None or self._rest_client.token is None:
                remote = self.remote
                self._user_io.out.info('Please log in to "%s" to perform this action. '
                                       'Execute "conan user" command.' % remote.name)
                if "bintray" in remote.url:
                    self._user_io.out.info('If you don\'t have an account sign up here: '
                                           'https://bintray.com/signup/oss')
                return retry_with_new_token(self, *args, **kwargs)
            else:
                # Token expired or not valid, so clean the token and repeat the call
                # (will be anonymous call but exporting who is calling)
                logger.info("Token expired or not valid, cleaning the saved token and retrying")
                self._store_login((self.user, None))
                self._rest_client.token = None
                # Set custom headers of mac_digest and username
                self.set_custom_headers(self.user)
                return wrapper(self, *args, **kwargs)
    def retry_with_new_token(self, *args, **kwargs):
        """Try LOGIN_RETRIES to obtain a password from user input for which
        we can get a valid token from api_client. If a token is returned,
        credentials are stored in localdb and the rest method is called."""
        for _ in range(LOGIN_RETRIES):
            user, password = self._user_io.request_login(self._remote.name, self.user)
            try:
                token, _, _, _ = self.authenticate(user, password)
            except AuthenticationException:
                if self.user is None:
                    self._user_io.out.error('Wrong user or password')
                else:
                    self._user_io.out.error(
                        'Wrong password for user "%s"' % self.user)
                    self._user_io.out.info(
                        'You can change username with "conan user <username>"')
            else:
                logger.debug("Got token: %s" % str(token))
                self._rest_client.token = token
                self.user = user
                # Set custom headers of mac_digest and username
                self.set_custom_headers(user)
                return wrapper(self, *args, **kwargs)
        raise AuthenticationException("Too many failed login attempts, bye!")
    return wrapper
class ConanApiAuthManager(object):
    """Runs REST calls through a RestApiClient; on authentication failure the
    `input_credentials_if_unauthorized` decorator prompts for credentials and
    retries. Credentials/tokens are cached per remote in `localdb`."""

    def __init__(self, rest_client, user_io, localdb):
        self._user_io = user_io
        self._rest_client = rest_client
        self._localdb = localdb
        self._remote = None
    @property
    def remote(self):
        return self._remote
    @remote.setter
    def remote(self, remote):
        """Point the rest client at `remote` and load its cached login."""
        self._remote = remote
        self._rest_client.remote_url = remote.url
        self._rest_client.verify_ssl = remote.verify_ssl
        self.user, self._rest_client.token = self._localdb.get_login(remote.url)
    def _store_login(self, login):
        """Persist (user, token) for the current remote; storage failure is non-fatal."""
        try:
            self._localdb.set_login(login, self._remote.url)
        except Exception as e:
            self._user_io.out.error(
                'Your credentials could not be stored in local cache\n')
            self._user_io.out.debug(str(e) + '\n')
    @staticmethod
    def get_mac_digest():
        """Return a SHA-1 hex digest of the machine's MAC address (anonymous machine id)."""
        sha1 = hashlib.sha1()
        sha1.update(str(get_mac()).encode())
        return str(sha1.hexdigest())
    def set_custom_headers(self, username):
        # First identifies our machine, second the username even if it was not
        # authenticated
        custom_headers = self._rest_client.custom_headers
        custom_headers['X-Client-Anonymous-Id'] = self.get_mac_digest()
        custom_headers['X-Client-Id'] = str(username or "")
    # ######### CONAN API METHODS ##########
    @input_credentials_if_unauthorized
    def upload_recipe(self, conan_reference, the_files, retry, retry_wait, policy, remote_manifest):
        return self._rest_client.upload_recipe(conan_reference, the_files, retry, retry_wait,
                                               policy, remote_manifest)
    @input_credentials_if_unauthorized
    def upload_package(self, package_reference, the_files, retry, retry_wait, policy):
        return self._rest_client.upload_package(package_reference, the_files, retry, retry_wait,
                                                policy)
    @input_credentials_if_unauthorized
    def get_conan_manifest(self, conan_reference):
        return self._rest_client.get_conan_manifest(conan_reference)
    @input_credentials_if_unauthorized
    def get_package_manifest(self, package_reference):
        return self._rest_client.get_package_manifest(package_reference)
    @input_credentials_if_unauthorized
    def get_package(self, package_reference, dest_folder):
        return self._rest_client.get_package(package_reference, dest_folder)
    @input_credentials_if_unauthorized
    def get_recipe(self, reference, dest_folder):
        return self._rest_client.get_recipe(reference, dest_folder)
    @input_credentials_if_unauthorized
    def get_recipe_sources(self, reference, dest_folder):
        return self._rest_client.get_recipe_sources(reference, dest_folder)
    @input_credentials_if_unauthorized
    def download_files_to_folder(self, urls, dest_folder):
        return self._rest_client.download_files_to_folder(urls, dest_folder)
    @input_credentials_if_unauthorized
    def get_package_info(self, package_reference):
        return self._rest_client.get_package_info(package_reference)
    @input_credentials_if_unauthorized
    def search(self, pattern, ignorecase):
        return self._rest_client.search(pattern, ignorecase)
    @input_credentials_if_unauthorized
    def search_packages(self, reference, query):
        return self._rest_client.search_packages(reference, query)
    @input_credentials_if_unauthorized
    def remove(self, conan_refernce):
        return self._rest_client.remove_conanfile(conan_refernce)
    @input_credentials_if_unauthorized
    def remove_packages(self, conan_reference, package_ids):
        return self._rest_client.remove_packages(conan_reference, package_ids)
    @input_credentials_if_unauthorized
    def get_path(self, conan_reference, path, package_id):
        return self._rest_client.get_path(conan_reference, path, package_id)
    def authenticate(self, user, password):
        """Get a token for `user` from the remote and cache it in localdb."""
        if user is None:  # The user is already in DB, just need the passwd
            prev_user = self._localdb.get_username(self._remote.url)
            if prev_user is None:
                raise ConanException("User for remote '%s' is not defined" % self._remote.name)
            else:
                user = prev_user
        try:
            token = self._rest_client.authenticate(user, password)
        except UnicodeDecodeError:
            raise ConanException("Password contains not allowed symbols")
        # Store result in DB
        remote_name, prev_user, user = update_localdb(self._localdb, user, token, self._remote)
        return token, remote_name, prev_user, user
| true | true |
f727f68949533cfb5e6250be94f67cb523575870 | 21,471 | py | Python | src/runner/trainer.py | minhhoangbui/PICK-pytorch | c74d2d1e5d1f8c7e837ea9776146bc84a7ecf30a | [
"MIT"
] | null | null | null | src/runner/trainer.py | minhhoangbui/PICK-pytorch | c74d2d1e5d1f8c7e837ea9776146bc84a7ecf30a | [
"MIT"
] | null | null | null | src/runner/trainer.py | minhhoangbui/PICK-pytorch | c74d2d1e5d1f8c7e837ea9776146bc84a7ecf30a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# @Author: Wenwen Yu
# @Created Time: 7/12/2020 9:50 PM
import os
import numpy as np
from numpy import inf
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from src.utils import inf_loop
from src.utils.metrics import MetricTracker, SpanBasedF1MetricTracker
from torch.utils.tensorboard import SummaryWriter
# from src.logger import TensorboardWriter
from src.utils.utils import to_union
class Trainer:
"""
Trainer class
"""
    def __init__(self, model, optimizer, config, data_loader, iob_labels_vocab_cls,
                 valid_data_loader=None, lr_scheduler=None, max_len_step=None):
        """
        :param model: network to train; moved to the configured device and DDP-wrapped when distributed.
        :param optimizer: optimizer stepping the model parameters.
        :param config: parsed config; indexed like a dict and also exposes
            get_logger/save_dir/tensorboard_dir/resume.
        :param iob_labels_vocab_cls: IOB tag vocabulary, consumed by SpanBasedF1MetricTracker.
        :param data_loader: training data loader.
        :param valid_data_loader: optional validation loader; validation only runs when given.
        :param lr_scheduler: optional LR scheduler, stepped once per epoch.
        :param max_len_step: controls number of batches(steps) in each epoch; when set,
            the loader is wrapped into an infinite iterator (iteration-based training).
        """
        self.config = config
        self.iob_labels_vocab_cls = iob_labels_vocab_cls
        self.distributed = config['distributed']
        if self.distributed:
            self.local_master = (config['local_rank'] == 0)
            self.global_master = (dist.get_rank() == 0)
        else:
            self.local_master = True
            self.global_master = True
        # only the local master gets a logger/writer; other ranks keep None
        self.logger = config.get_logger('trainer', config['trainer']['log_verbosity']) if self.local_master else None
        # setup GPU device if available, move model into configured device
        self.device, self.device_ids = self._prepare_device(config['local_rank'], config['local_world_size'])
        self.model = model.to(self.device)
        self.optimizer = optimizer
        cfg_trainer = config['trainer']
        self.epochs = cfg_trainer['epochs']
        self.save_period = cfg_trainer['save_period']
        monitor_open = cfg_trainer['monitor_open']
        if monitor_open:
            self.monitor = cfg_trainer.get('monitor', 'off')
        else:
            self.monitor = 'off'
        # configuration to monitor model performance and save best
        if self.monitor == 'off':
            self.monitor_mode = 'off'
            self.monitor_best = 0
        else:
            # self.monitor is e.g. "max overall-f1": a mode and an entity-metric name
            self.monitor_mode, self.monitor_metric = self.monitor.split()
            assert self.monitor_mode in ['min', 'max']
            self.monitor_best = inf if self.monitor_mode == 'min' else -inf
            self.early_stop = cfg_trainer.get('early_stop', inf)
            self.early_stop = inf if self.early_stop == -1 else self.early_stop
        self.start_epoch = 1
        if self.local_master:
            self.checkpoint_dir = config.save_dir
            # setup visualization writer instance
            # self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
            self.writer = SummaryWriter(config.tensorboard_dir)
        # load checkpoint for resume training
        if config.resume is not None:
            self._resume_checkpoint(config.resume)
        # load checkpoint following load to multi-gpu, avoid 'module.' prefix
        if self.config['trainer']['sync_batch_norm'] and self.distributed:
            self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
        if self.distributed:
            self.model = DDP(self.model, device_ids=self.device_ids, output_device=self.device_ids[0],
                             find_unused_parameters=True)
        self.data_loader = data_loader
        if max_len_step is None:  # max length of iteration step of every epoch
            # epoch-based training
            self.len_step = len(self.data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_step = max_len_step
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        log_step = self.config['trainer']['log_step_interval']
        self.log_step = log_step if log_step != -1 and 0 < log_step < self.len_step else int(
            np.sqrt(data_loader.batch_size))
        self.val_epoch_interval = self.config['trainer']['val_epoch_interval']
        self.gl_loss_lambda = self.config['trainer']['gl_loss_lambda']
        self.train_loss_metrics = MetricTracker('loss', 'gl_loss', 'crf_loss',
                                                writer=self.writer if self.local_master else None)
        self.valid_f1_metrics = SpanBasedF1MetricTracker(iob_labels_vocab_cls)
    def train(self):
        """
        Full training logic, including train and validation.

        When config['evaluate_only'] is set, runs a single validation pass and
        returns. Otherwise trains for self.epochs epochs, logs losses/metrics,
        applies early stopping on the monitored metric, and checkpoints every
        self.save_period epochs (flagging the best model).
        """
        if self.distributed:
            dist.barrier()  # Syncing machines before training
        not_improved_count = 0
        val_result_dict = None
        if self.config['evaluate_only']:
            print("------Evaluation only------")
            val_result_dict = self._valid_epoch(0)
            val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)
            self.logger_info('[Step Validation] Epoch:[{}/{}]] \n{}'.
                             format(0, self.epochs, val_res))
            return
        for epoch in range(self.start_epoch, self.epochs + 1):
            # ensure distribute worker sample different data,
            # set different random seed by passing epoch to sampler
            if self.distributed:
                self.data_loader.sampler.set_epoch(epoch)
            result_dict = self._train_epoch(epoch)
            # print logged information to the screen
            if self.do_validation:
                val_result_dict = result_dict['val_result_dict']
                val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)
            else:
                val_res = ''
            # every epoch log information
            self.logger_info('[Epoch Validation] Epoch:[{}/{}] Total Loss: {:.6f} '
                             'GL_Loss: {:.6f} CRF_Loss: {:.6f} \n{}'.
                             format(epoch, self.epochs, result_dict['loss'],
                                    result_dict['gl_loss'] * self.gl_loss_lambda,
                                    result_dict['crf_loss'], val_res))
            # evaluate model performance according to configured metric, check early stop, and
            # save best checkpoint as model_best
            best = False
            if self.monitor_mode != 'off' and self.do_validation:
                best, not_improved_count = self._is_best_monitor_metric(best, not_improved_count, val_result_dict)
                if not_improved_count > self.early_stop:
                    self.logger_info("Validation performance didn't improve for {} epochs. "
                                     "Training stops.".format(self.early_stop))
                    break
            if epoch % self.save_period == 0:
                self._save_checkpoint(epoch, save_best=best)
def _is_best_monitor_metric(self, best, not_improved_count, val_result_dict):
"""
monitor metric
:param best:
:param not_improved_count:
:param val_result_dict:
:return:
"""
entity_name, metric = self.monitor_metric.split('-')
val_monitor_metric_res = val_result_dict[entity_name][metric]
try:
# check whether model performance improved or not, according to specified metric(monitor_metric)
improved = (self.monitor_mode == 'min' and val_monitor_metric_res <= self.monitor_best) or \
(self.monitor_mode == 'max' and val_monitor_metric_res >= self.monitor_best)
except KeyError:
self.logger_warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.monitor_metric))
self.monitor_mode = 'off'
improved = False
if improved:
self.monitor_best = val_monitor_metric_res
not_improved_count = 0
best = True
else:
not_improved_count += 1
return best, not_improved_count
def _train_epoch(self, epoch):
"""
Training logic for an epoch
:param epoch: Integer, current training epoch.
:return: A log dict that contains average loss and metric in this epoch.
"""
self.model.train()
self.train_loss_metrics.reset()
# step iteration start ##
for step_idx, input_data_item in enumerate(self.data_loader):
step_idx += 1
for key, input_value in input_data_item.items():
if input_value is not None and isinstance(input_value, torch.Tensor):
input_data_item[key] = input_value.to(self.device, non_blocking=True)
if self.config['trainer']['anomaly_detection']:
# This mode will increase the runtime and should only be enabled for debugging
with torch.autograd.detect_anomaly():
self.optimizer.zero_grad()
# model forward
output = self.model(**input_data_item)
# calculate loss
gl_loss = output['gl_loss']
crf_loss = output['crf_loss']
total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)
# backward
total_loss.backward()
# self.average_gradients(self.model)
self.optimizer.step()
else:
self.optimizer.zero_grad()
# model forward
output = self.model(**input_data_item)
# calculate loss
gl_loss = output['gl_loss']
crf_loss = output['crf_loss']
total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)
# backward
total_loss.backward()
# self.average_gradients(self.model)
self.optimizer.step()
# Use a barrier() to make sure that all process have finished forward and backward
if self.distributed:
dist.barrier()
# obtain the sum of all total_loss at all processes
dist.all_reduce(total_loss, op=dist.reduce_op.SUM)
size = dist.get_world_size()
else:
size = 1
gl_loss /= size # averages gl_loss across the whole world
crf_loss /= size # averages crf_loss across the whole world
# calculate average loss across the batch size
avg_gl_loss = torch.mean(gl_loss)
avg_crf_loss = torch.mean(crf_loss)
avg_loss = avg_crf_loss + self.gl_loss_lambda * avg_gl_loss
# update metrics
# self.writer.set_step((epoch - 1) * self.len_step + step_idx - 1) if self.local_master else None
self.train_loss_metrics.update('loss', avg_loss.item(), epoch)
self.train_loss_metrics.update('gl_loss', avg_gl_loss.item() * self.gl_loss_lambda, epoch)
self.train_loss_metrics.update('crf_loss', avg_crf_loss.item(), epoch)
# log messages
if step_idx % self.log_step == 0:
self.logger_info('Train Epoch:[{}/{}] Step:[{}/{}] Total Loss: {:.6f} GL_Loss: {:.6f} CRF_Loss: {:.6f}'.
format(epoch, self.epochs, step_idx, self.len_step,
avg_loss.item(), avg_gl_loss.item() * self.gl_loss_lambda, avg_crf_loss.item()))
# decide whether continue iter
if step_idx == self.len_step + 1:
break
# step iteration end ##
# do validation after val_step_interval iteration
if self.do_validation and epoch % self.val_epoch_interval == 0:
val_result_dict = self._valid_epoch(epoch)
self.logger_info('[Step Validation] Epoch:[{}/{}]] \n{}'.
format(epoch, self.epochs, self.len_step,
SpanBasedF1MetricTracker.dict2str(val_result_dict)))
# check if best metric, if true, then save as model_best checkpoint.
best, not_improved_count = self._is_best_monitor_metric(False, 0, val_result_dict)
if best:
self._save_checkpoint(epoch, best)
# {'loss': avg_loss, 'gl_loss': avg_gl_loss, 'crf_loss': avg_crf_loss}
log = self.train_loss_metrics.result()
# do validation after training an epoch
if self.do_validation:
val_result_dict = self._valid_epoch(epoch)
log['val_result_dict'] = val_result_dict
if self.lr_scheduler is not None:
self.lr_scheduler.step()
self.model.train()
return log
    def _valid_epoch(self, epoch):
        """
        Validate after training an epoch or regular step, this is a time-consuming procedure if validation data is big.
        :param epoch: Integer, current training epoch.
        :return: A dict of span-based F1 results, keyed by entity (includes 'overall').
        """
        self.model.eval()
        self.valid_f1_metrics.reset()
        with torch.no_grad():
            for step_idx, input_data_item in enumerate(self.valid_data_loader):
                # move every tensor input onto the evaluation device
                for key, input_value in input_data_item.items():
                    if input_value is not None and isinstance(input_value, torch.Tensor):
                        input_data_item[key] = input_value.to(self.device, non_blocking=True)
                output = self.model(**input_data_item)
                logits = output['logits']
                new_mask = output['new_mask']
                if hasattr(self.model, 'module'):
                    # List[(List[int], torch.Tensor)] contain the tag indices of the maximum likelihood tag sequence.
                    # and the score of the viterbi path.
                    best_paths = self.model.module.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,
                                                                                  logits_batch_first=True)
                else:
                    best_paths = self.model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,
                                                                           logits_batch_first=True)
                predicted_tags = []
                for path, score in best_paths:
                    predicted_tags.append(path)
                # one-hot encode the viterbi paths so the tracker can compare
                # them against the golden tags; shape (B, N*T, out_dim)
                predicted_tags_hard_prob = logits * 0
                for i, instance_tags in enumerate(predicted_tags):
                    for j, tag_id in enumerate(instance_tags):
                        predicted_tags_hard_prob[i, j, tag_id] = 1
                golden_tags = input_data_item['iob_tags_label']
                mask = input_data_item['mask']
                union_iob_tags = to_union(golden_tags, mask, self.iob_labels_vocab_cls)
                if self.distributed:
                    dist.barrier()  # sync processes before merging metric state
                self.valid_f1_metrics.update(predicted_tags_hard_prob.long(), union_iob_tags, new_mask)
        # add histogram of model parameters to the tensorboard
        # for name, p in self.model.named_parameters():
        #     self.writer.add_histogram(name, p, bins='auto')
        f1_result_dict = self.valid_f1_metrics.result()
        overall_dict = f1_result_dict['overall']
        if self.local_master:
            # only the local master owns a SummaryWriter (see __init__)
            for key, value in overall_dict.items():
                self.writer.add_scalar(key, value, epoch)
        return f1_result_dict
@staticmethod
def average_gradients(model):
"""
Gradient averaging
:param model:
:return:
"""
size = float(dist.get_world_size())
for param in model.parameters():
dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
param.grad.data /= size
def logger_info(self, msg):
self.logger.info(msg) if self.local_master else None
def logger_warning(self, msg):
self.logger.warning(msg) if self.local_master else None
def _prepare_device(self, local_rank, local_world_size):
"""
setup GPU device if available, move model into configured device
:param local_rank:
:param local_world_size:
:return:
"""
if self.distributed:
n_gpu_per_process = torch.cuda.device_count() // local_world_size
device_ids = list(range(local_rank * n_gpu_per_process, (local_rank + 1) * n_gpu_per_process))
if torch.cuda.is_available() and local_rank != -1:
torch.cuda.set_device(device_ids[0]) # device_ids[0] =local_rank if local_world_size = n_gpu per node
device = 'cuda'
self.logger_info(
f"[Process {os.getpid()}] world_size = {dist.get_world_size()}, "
+ f"rank = {dist.get_rank()}, n_gpu/process = {n_gpu_per_process}, device_ids = {device_ids}"
)
else:
self.logger_warning('Training will be using CPU!')
device = 'cpu'
device = torch.device(device)
return device, device_ids
else:
n_gpu = torch.cuda.device_count()
n_gpu_use = local_world_size
if n_gpu_use > 0 and n_gpu == 0:
self.logger_warning("Warning: There\'s no GPU available on this machine,"
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger_warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
list_ids = list(range(n_gpu_use))
if n_gpu_use > 0:
torch.cuda.set_device(list_ids[0]) # only use first available gpu as devices
self.logger_warning(f'Training is using GPU {list_ids[0]}!')
device = 'cuda'
else:
self.logger_warning('Training is using CPU!')
device = 'cpu'
device = torch.device(device)
return device, list_ids
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
:return:
"""
# only local master process do save model
if not self.local_master:
return
if hasattr(self.model, 'module'):
arch = type(self.model.module).__name__
state_dict = self.model.module.state_dict()
else:
arch = type(self.model).__name__
state_dict = self.model.state_dict()
state = {
'arch': arch,
'epoch': epoch,
'state_dict': state_dict,
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.monitor_best,
'config': self.config
}
if save_best:
best_path = str(self.checkpoint_dir / 'model_best.pth')
torch.save(state, best_path)
self.logger_info("Saving current best: model_best.pth ...")
else:
filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger_info("Saving checkpoint: {} ...".format(filename))
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
:return:
"""
resume_path = str(resume_path)
self.logger_info("Loading checkpoint: {} ...".format(resume_path))
# map_location = {'cuda:%d' % 0: 'cuda:%d' % self.config['local_rank']}
checkpoint = torch.load(resume_path, map_location=self.device)
self.start_epoch = checkpoint['epoch'] + 1
self.monitor_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['model_arch'] != self.config['model_arch']:
self.logger_warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
self.model.load_state_dict(checkpoint['state_dict'])
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger_warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger_info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
| 44.088296 | 120 | 0.589912 |
import os
import numpy as np
from numpy import inf
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from src.utils import inf_loop
from src.utils.metrics import MetricTracker, SpanBasedF1MetricTracker
from torch.utils.tensorboard import SummaryWriter
from src.utils.utils import to_union
class Trainer:
    """Training driver for a model whose forward pass yields 'gl_loss' and
    'crf_loss' during training and 'logits'/'new_mask' during validation.

    Supports single-process and DistributedDataParallel runs; logging,
    TensorBoard writing and checkpoint I/O happen only on the local master.
    """
    def __init__(self, model, optimizer, config, data_loader, iob_labels_vocab_cls,
                 valid_data_loader=None, lr_scheduler=None, max_len_step=None):
        """Wire up model/optimizer/data and read trainer settings from *config*.

        :param model: network to train; moved onto the selected device.
        :param optimizer: optimizer for *model*'s parameters.
        :param config: config object supporting item access plus
            ``get_logger`` and ``save_dir``/``tensorboard_dir``/``resume``.
        :param data_loader: training data loader.
        :param iob_labels_vocab_cls: IOB tag vocabulary for the F1 tracker.
        :param valid_data_loader: optional validation loader; enables validation.
        :param lr_scheduler: optional LR scheduler stepped once per epoch.
        :param max_len_step: if set, caps steps per epoch and wraps the loader
            with ``inf_loop`` (iteration-based training).
        """
        self.config = config
        self.iob_labels_vocab_cls = iob_labels_vocab_cls
        self.distributed = config['distributed']
        if self.distributed:
            self.local_master = (config['local_rank'] == 0)
            self.global_master = (dist.get_rank() == 0)
        else:
            self.local_master = True
            self.global_master = True
        # only the local master gets a logger; other ranks hold None
        self.logger = config.get_logger('trainer', config['trainer']['log_verbosity']) if self.local_master else None
        self.device, self.device_ids = self._prepare_device(config['local_rank'], config['local_world_size'])
        self.model = model.to(self.device)
        self.optimizer = optimizer
        cfg_trainer = config['trainer']
        self.epochs = cfg_trainer['epochs']
        self.save_period = cfg_trainer['save_period']
        monitor_open = cfg_trainer['monitor_open']
        if monitor_open:
            self.monitor = cfg_trainer.get('monitor', 'off')
        else:
            self.monitor = 'off'
        if self.monitor == 'off':
            self.monitor_mode = 'off'
            self.monitor_best = 0
        else:
            # monitor is e.g. "max overall-f1": mode + "<entity>-<metric>"
            self.monitor_mode, self.monitor_metric = self.monitor.split()
            assert self.monitor_mode in ['min', 'max']
            self.monitor_best = inf if self.monitor_mode == 'min' else -inf
            self.early_stop = cfg_trainer.get('early_stop', inf)
            self.early_stop = inf if self.early_stop == -1 else self.early_stop
        self.start_epoch = 1
        if self.local_master:
            self.checkpoint_dir = config.save_dir
            self.writer = SummaryWriter(config.tensorboard_dir)
        if config.resume is not None:
            self._resume_checkpoint(config.resume)
        if self.config['trainer']['sync_batch_norm'] and self.distributed:
            self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
        if self.distributed:
            self.model = DDP(self.model, device_ids=self.device_ids, output_device=self.device_ids[0],
                             find_unused_parameters=True)
        self.data_loader = data_loader
        if max_len_step is None:
            self.len_step = len(self.data_loader)
        else:
            # iteration-based training: loop the loader forever, cap the steps
            self.data_loader = inf_loop(data_loader)
            self.len_step = max_len_step
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        log_step = self.config['trainer']['log_step_interval']
        self.log_step = log_step if log_step != -1 and 0 < log_step < self.len_step else int(
            np.sqrt(data_loader.batch_size))
        self.val_epoch_interval = self.config['trainer']['val_epoch_interval']
        self.gl_loss_lambda = self.config['trainer']['gl_loss_lambda']
        self.train_loss_metrics = MetricTracker('loss', 'gl_loss', 'crf_loss',
                                                writer=self.writer if self.local_master else None)
        self.valid_f1_metrics = SpanBasedF1MetricTracker(iob_labels_vocab_cls)
    def train(self):
        """Full training loop with optional evaluate-only mode, monitored
        early stopping and periodic checkpointing."""
        if self.distributed:
            dist.barrier()
        not_improved_count = 0
        val_result_dict = None
        if self.config['evaluate_only']:
            print("------Evaluation only------")
            val_result_dict = self._valid_epoch(0)
            val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)
            self.logger_info('[Step Validation] Epoch:[{}/{}]] \n{}'.
                             format(0, self.epochs, val_res))
            return
        for epoch in range(self.start_epoch, self.epochs + 1):
            if self.distributed:
                # reshuffle shards so each epoch sees a different order
                self.data_loader.sampler.set_epoch(epoch)
            result_dict = self._train_epoch(epoch)
            if self.do_validation:
                val_result_dict = result_dict['val_result_dict']
                val_res = SpanBasedF1MetricTracker.dict2str(val_result_dict)
            else:
                val_res = ''
            self.logger_info('[Epoch Validation] Epoch:[{}/{}] Total Loss: {:.6f} '
                             'GL_Loss: {:.6f} CRF_Loss: {:.6f} \n{}'.
                             format(epoch, self.epochs, result_dict['loss'],
                                    result_dict['gl_loss'] * self.gl_loss_lambda,
                                    result_dict['crf_loss'], val_res))
            best = False
            if self.monitor_mode != 'off' and self.do_validation:
                best, not_improved_count = self._is_best_monitor_metric(best, not_improved_count, val_result_dict)
                if not_improved_count > self.early_stop:
                    self.logger_info("Validation performance didn't improve for {} epochs. "
                                     "Training stops.".format(self.early_stop))
                    break
            if epoch % self.save_period == 0:
                self._save_checkpoint(epoch, save_best=best)
    def _is_best_monitor_metric(self, best, not_improved_count, val_result_dict):
        """Check whether the monitored metric improved and update state.

        :return: tuple (best, not_improved_count) after applying this
            round's validation result.
        """
        entity_name, metric = self.monitor_metric.split('-')
        val_monitor_metric_res = val_result_dict[entity_name][metric]
        try:
            # check whether model performance improved or not, according to specified metric(monitor_metric)
            improved = (self.monitor_mode == 'min' and val_monitor_metric_res <= self.monitor_best) or \
                       (self.monitor_mode == 'max' and val_monitor_metric_res >= self.monitor_best)
        except KeyError:
            self.logger_warning("Warning: Metric '{}' is not found. "
                                "Model performance monitoring is disabled.".format(self.monitor_metric))
            self.monitor_mode = 'off'
            improved = False
        if improved:
            self.monitor_best = val_monitor_metric_res
            not_improved_count = 0
            best = True
        else:
            not_improved_count += 1
        return best, not_improved_count
    def _train_epoch(self, epoch):
        """Run one training epoch.

        :return: dict of averaged losses; when validation is enabled, also
            contains the validation result under 'val_result_dict'.
        """
        self.model.train()
        self.train_loss_metrics.reset()
        # step iteration start ##
        for step_idx, input_data_item in enumerate(self.data_loader):
            step_idx += 1
            for key, input_value in input_data_item.items():
                if input_value is not None and isinstance(input_value, torch.Tensor):
                    input_data_item[key] = input_value.to(self.device, non_blocking=True)
            if self.config['trainer']['anomaly_detection']:
                # This mode will increase the runtime and should only be enabled for debugging
                with torch.autograd.detect_anomaly():
                    self.optimizer.zero_grad()
                    # model forward
                    output = self.model(**input_data_item)
                    # calculate loss
                    gl_loss = output['gl_loss']
                    crf_loss = output['crf_loss']
                    total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)
                    # backward
                    total_loss.backward()
                    # self.average_gradients(self.model)
                    self.optimizer.step()
            else:
                self.optimizer.zero_grad()
                # model forward
                output = self.model(**input_data_item)
                # calculate loss
                gl_loss = output['gl_loss']
                crf_loss = output['crf_loss']
                total_loss = torch.sum(crf_loss) + self.gl_loss_lambda * torch.sum(gl_loss)
                # backward
                total_loss.backward()
                # self.average_gradients(self.model)
                self.optimizer.step()
            # Use a barrier() to make sure that all process have finished forward and backward
            if self.distributed:
                dist.barrier()
                # obtain the sum of all total_loss at all processes
                # NOTE(review): dist.reduce_op is deprecated in newer torch — dist.ReduceOp.SUM
                dist.all_reduce(total_loss, op=dist.reduce_op.SUM)
                size = dist.get_world_size()
            else:
                size = 1
            gl_loss /= size  # averages gl_loss across the whole world
            crf_loss /= size  # averages crf_loss across the whole world
            # calculate average loss across the batch size
            avg_gl_loss = torch.mean(gl_loss)
            avg_crf_loss = torch.mean(crf_loss)
            avg_loss = avg_crf_loss + self.gl_loss_lambda * avg_gl_loss
            # update metrics
            # self.writer.set_step((epoch - 1) * self.len_step + step_idx - 1) if self.local_master else None
            self.train_loss_metrics.update('loss', avg_loss.item(), epoch)
            self.train_loss_metrics.update('gl_loss', avg_gl_loss.item() * self.gl_loss_lambda, epoch)
            self.train_loss_metrics.update('crf_loss', avg_crf_loss.item(), epoch)
            # log messages
            if step_idx % self.log_step == 0:
                self.logger_info('Train Epoch:[{}/{}] Step:[{}/{}] Total Loss: {:.6f} GL_Loss: {:.6f} CRF_Loss: {:.6f}'.
                                 format(epoch, self.epochs, step_idx, self.len_step,
                                        avg_loss.item(), avg_gl_loss.item() * self.gl_loss_lambda, avg_crf_loss.item()))
            # decide whether continue iter
            if step_idx == self.len_step + 1:
                break
        # step iteration end ##
        # do validation after val_step_interval iteration
        if self.do_validation and epoch % self.val_epoch_interval == 0:
            val_result_dict = self._valid_epoch(epoch)
            # NOTE(review): format() receives 4 args for 3 placeholders; the extra is ignored
            self.logger_info('[Step Validation] Epoch:[{}/{}]] \n{}'.
                             format(epoch, self.epochs, self.len_step,
                                    SpanBasedF1MetricTracker.dict2str(val_result_dict)))
            # check if best metric, if true, then save as model_best checkpoint.
            best, not_improved_count = self._is_best_monitor_metric(False, 0, val_result_dict)
            if best:
                self._save_checkpoint(epoch, best)
        # {'loss': avg_loss, 'gl_loss': avg_gl_loss, 'crf_loss': avg_crf_loss}
        log = self.train_loss_metrics.result()
        # do validation after training an epoch
        if self.do_validation:
            val_result_dict = self._valid_epoch(epoch)
            log['val_result_dict'] = val_result_dict
        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        self.model.train()
        return log
    def _valid_epoch(self, epoch):
        """Run validation: Viterbi-decode tag sequences and accumulate span F1.

        :return: nested dict of span-based F1 results (per entity + 'overall').
        """
        self.model.eval()
        self.valid_f1_metrics.reset()
        with torch.no_grad():
            for step_idx, input_data_item in enumerate(self.valid_data_loader):
                for key, input_value in input_data_item.items():
                    if input_value is not None and isinstance(input_value, torch.Tensor):
                        input_data_item[key] = input_value.to(self.device, non_blocking=True)
                output = self.model(**input_data_item)
                logits = output['logits']
                new_mask = output['new_mask']
                if hasattr(self.model, 'module'):
                    # List[(List[int], torch.Tensor)] contain the tag indices of the maximum likelihood tag sequence.
                    # and the score of the viterbi path.
                    best_paths = self.model.module.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,
                                                                                  logits_batch_first=True)
                else:
                    best_paths = self.model.decoder.crf_layer.viterbi_tags(logits, mask=new_mask,
                                                                           logits_batch_first=True)
                predicted_tags = []
                for path, score in best_paths:
                    predicted_tags.append(path)
                # calculate and update f1 metrics
                # one-hot encode decoded tags into the logits' shape (B, N*T, out_dim)
                predicted_tags_hard_prob = logits * 0
                for i, instance_tags in enumerate(predicted_tags):
                    for j, tag_id in enumerate(instance_tags):
                        predicted_tags_hard_prob[i, j, tag_id] = 1
                golden_tags = input_data_item['iob_tags_label']
                mask = input_data_item['mask']
                union_iob_tags = to_union(golden_tags, mask, self.iob_labels_vocab_cls)
                if self.distributed:
                    dist.barrier()
                self.valid_f1_metrics.update(predicted_tags_hard_prob.long(), union_iob_tags, new_mask)
        f1_result_dict = self.valid_f1_metrics.result()
        overall_dict = f1_result_dict['overall']
        if self.local_master:
            for key, value in overall_dict.items():
                self.writer.add_scalar(key, value, epoch)
        return f1_result_dict
    @staticmethod
    def average_gradients(model):
        """All-reduce and average each parameter's gradient across workers."""
        size = float(dist.get_world_size())
        for param in model.parameters():
            dist.all_reduce(param.grad.data, op=dist.reduce_op.SUM)
            param.grad.data /= size
    def logger_info(self, msg):
        """INFO-level log, emitted only on the local master process."""
        self.logger.info(msg) if self.local_master else None
    def logger_warning(self, msg):
        """WARNING-level log, emitted only on the local master process."""
        self.logger.warning(msg) if self.local_master else None
    def _prepare_device(self, local_rank, local_world_size):
        """Select the torch device and GPU-id list for this process.

        :param local_rank: rank of this process on the local node.
        :param local_world_size: processes per node (or requested GPU count).
        :return: tuple of (torch.device, list of GPU ids).
        """
        if self.distributed:
            n_gpu_per_process = torch.cuda.device_count() // local_world_size
            device_ids = list(range(local_rank * n_gpu_per_process, (local_rank + 1) * n_gpu_per_process))
            if torch.cuda.is_available() and local_rank != -1:
                torch.cuda.set_device(device_ids[0])  # device_ids[0] =local_rank if local_world_size = n_gpu per node
                device = 'cuda'
                self.logger_info(
                    f"[Process {os.getpid()}] world_size = {dist.get_world_size()}, "
                    + f"rank = {dist.get_rank()}, n_gpu/process = {n_gpu_per_process}, device_ids = {device_ids}"
                )
            else:
                self.logger_warning('Training will be using CPU!')
                device = 'cpu'
            device = torch.device(device)
            return device, device_ids
        else:
            n_gpu = torch.cuda.device_count()
            n_gpu_use = local_world_size
            if n_gpu_use > 0 and n_gpu == 0:
                self.logger_warning("Warning: There\'s no GPU available on this machine,"
                                    "training will be performed on CPU.")
                n_gpu_use = 0
            if n_gpu_use > n_gpu:
                self.logger_warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
                                    "on this machine.".format(n_gpu_use, n_gpu))
                n_gpu_use = n_gpu
            list_ids = list(range(n_gpu_use))
            if n_gpu_use > 0:
                torch.cuda.set_device(list_ids[0])  # only use first available gpu as devices
                self.logger_warning(f'Training is using GPU {list_ids[0]}!')
                device = 'cuda'
            else:
                self.logger_warning('Training is using CPU!')
                device = 'cpu'
            device = torch.device(device)
            return device, list_ids
    def _save_checkpoint(self, epoch, save_best=False):
        """Persist model/optimizer/trainer state (local master only).

        :param epoch: current epoch number, embedded in the checkpoint name.
        :param save_best: if True, save as 'model_best.pth' instead of the
            per-epoch file.
        """
        # only local master process do save model
        if not self.local_master:
            return
        if hasattr(self.model, 'module'):
            arch = type(self.model.module).__name__
            state_dict = self.model.module.state_dict()
        else:
            arch = type(self.model).__name__
            state_dict = self.model.state_dict()
        state = {
            'arch': arch,
            'epoch': epoch,
            'state_dict': state_dict,
            'optimizer': self.optimizer.state_dict(),
            'monitor_best': self.monitor_best,
            'config': self.config
        }
        if save_best:
            best_path = str(self.checkpoint_dir / 'model_best.pth')
            torch.save(state, best_path)
            self.logger_info("Saving current best: model_best.pth ...")
        else:
            filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
            torch.save(state, filename)
            self.logger_info("Saving checkpoint: {} ...".format(filename))
    def _resume_checkpoint(self, resume_path):
        """Restore training state from the checkpoint at *resume_path*.

        Model weights always reload; optimizer state reloads only when the
        configured optimizer type matches the checkpoint's.
        """
        resume_path = str(resume_path)
        self.logger_info("Loading checkpoint: {} ...".format(resume_path))
        # map_location = {'cuda:%d' % 0: 'cuda:%d' % self.config['local_rank']}
        checkpoint = torch.load(resume_path, map_location=self.device)
        self.start_epoch = checkpoint['epoch'] + 1
        self.monitor_best = checkpoint['monitor_best']
        # load architecture params from checkpoint.
        if checkpoint['config']['model_arch'] != self.config['model_arch']:
            self.logger_warning("Warning: Architecture configuration given in config file is different from that of "
                                "checkpoint. This may yield an exception while state_dict is being loaded.")
        self.model.load_state_dict(checkpoint['state_dict'])
        # load optimizer state from checkpoint only when optimizer type is not changed.
        if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
            self.logger_warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
                                "Optimizer parameters not being resumed.")
        else:
            self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.logger_info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
| true | true |
f727f6cf011e2798bbf44b01f9e61c7dbe87ceb3 | 142 | py | Python | gala/potential/potential/__init__.py | zilishen/gala | f7184e6b09fbc42a349f6b5a2bca6242f1e9936e | [
"MIT"
] | 1 | 2020-11-20T18:27:25.000Z | 2020-11-20T18:27:25.000Z | gala/potential/potential/__init__.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | 3 | 2021-07-26T15:07:25.000Z | 2021-09-13T15:04:27.000Z | gala/potential/potential/__init__.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | 1 | 2018-10-23T23:20:20.000Z | 2018-10-23T23:20:20.000Z | from .core import *
from .cpotential import *
from .ccompositepotential import *
from .builtin import *
from .io import *
from .util import *
| 20.285714 | 34 | 0.746479 | from .core import *
from .cpotential import *
from .ccompositepotential import *
from .builtin import *
from .io import *
from .util import *
| true | true |
f727f7ba434ba285d0316a1377f3b4ae81748ed9 | 22,575 | py | Python | scrap_players.py | Toulik1729231/WebScraping1-Using-Python | 42562c66c905f925ea0848b8ae7dfbca6b5a1afd | [
"MIT"
] | null | null | null | scrap_players.py | Toulik1729231/WebScraping1-Using-Python | 42562c66c905f925ea0848b8ae7dfbca6b5a1afd | [
"MIT"
] | null | null | null | scrap_players.py | Toulik1729231/WebScraping1-Using-Python | 42562c66c905f925ea0848b8ae7dfbca6b5a1afd | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
from logger_impl import *
import MongoDao
import pandas as pd
import time
# Query parameters sent with every requests.get() in scrapData(): a proxy/
# scraper-service API key plus a default target URL.
# NOTE(review): hard-coded API key committed to source — should be moved to an
# environment variable or config file.
payload = {'key': 'ac9e8cf2dec81949d9ee1235ed6ae3fb', 'url':
'https://httpbin.org/ip'}
def scrapData(scorecardSoup, matchId, matchDesc, matchTypeText, pageUrl, season, Date, venue):
    """Scrape per-player batting/bowling stats from an ESPNCricinfo scorecard.

    Walks the scorecard soup innings-by-innings, builds one dict per player
    (batting figures, bowling figures, then bio details fetched live from the
    player's Cricinfo profile page via requests), and returns everything as a
    pandas DataFrame with one column per player.

    :param scorecardSoup: BeautifulSoup of the already-fetched scorecard page.
    :param matchId: Cricinfo match id; combined with the player id into '_id'.
    :param matchDesc: human-readable match description slug.
    :param matchTypeText: match format label; 'Tests' switches on per-innings keys.
    :param pageUrl: scorecard URL (stored on each player, used in error logs).
    :param season: season label copied onto every player dict.
    :param Date: match start date copied onto every player dict.
    :param venue: venue name copied onto every player dict.
    :return: pandas DataFrame keyed by cric-info player id, or None when any
        exception is caught by the outer handler.
    """
    #pageUrl = "http://www.espncricinfo.com/series/11422/scorecard/858491/bangladesh-vs-pakistan-only-t20i-pakistan-tour-of-bangladesh-2015"
    try:
        """page = urllib.request.urlopen(pageUrl)
        ## get match-id and match-name from url
        pageUrlArr = pageUrl.split('/')
        matchId = pageUrlArr[len(pageUrlArr ) - 2]
        matchDesc = pageUrlArr[len(pageUrlArr ) - 1] """
        #soup = BeautifulSoup(page, 'html.parser')
        soup = scorecardSoup
        #print("page html: ", soup.prettify())
        scorecardDiv = soup.find_all('article', class_='sub-module scorecard')
        playerBatsmanDict = {}
        playerBowlerDict = {}
        batsmanScorecardParam = ['run_scored', 'balls_faced','M', '4s', '6s', 'strike_rate']
        bowlerScorecardParam = ['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']
        teamList = []
        teamIDList = []
        inningsTeam = []
        ## print(len(scorecardDiv))
        # creating playing team list (stop once both team names are collected)
        for scorecardVal in scorecardDiv:
            #print(scorecardVal)
            team = scorecardVal.find('h2').get_text()
            if matchTypeText == 'Tests':
                team = str(team).replace('1st Innings', '').replace('2nd Innings', '')
            else:
                team = str(team).replace('Innings', '')
            if team.strip() in teamList:
                break
            teamList.append(team.strip())
        # per-team innings counter (used later to detect a missing 2nd innings)
        count = {teamList[0]:0,teamList[1]:0}
        # derive short team IDs: first 3 letters for one-word names, initials otherwise
        for team in teamList:
            word = team.split(' ')
            if len(word) == 1:
                id_ = team[:3]
                teamIDList.append(id_)
            else:
                id_ = ''
                for x in word:
                    id_ = id_ + x[0]
                teamIDList.append(id_)
        # main pass: one iteration per innings section of the scorecard
        for scorecardVal in scorecardDiv:
            team = scorecardVal.find('h2').get_text()
            inn = ''
            if matchTypeText == 'Tests':
                inn = ' '.join(str(team).split(' ')[-2:])
                team = str(team).replace('1st Innings', '').replace('2nd Innings', '')
            else:
                team = str(team).replace('Innings', '')
            team = team.strip()
            count[team] += 1
            ## print(count)
            logger.info("team: " + team)
            #print("batsman div: ", scorecardVal)
            batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')
            batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')
            ## for bt in batsmanListNotBatted:
            ## print(bt.get('href'))
            ## print(bt.get_text())
            # batsmen who actually batted in this innings
            for batsman in batsmanList:
                batsmanDict = {}
                #print("batsman data: ", batsman)
                batsmanAnchor = batsman.find('div', class_="cell batsmen").find('a')
                batsmanLink = batsmanAnchor.get('href')
                batsmanName = batsmanAnchor.get_text()
                batsmanLinkArr = str(batsmanLink).split('/')
                cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
                cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
                #print("batsman Name: ", batsmanName, " batsmanId: ", cricInfoBatsmanId)
                batsmanDict['short_name'] = batsmanName
                batsmanDict['player_cric_info_link'] = batsmanLink
                batsmanDict['team'] = team
                #print("batsmanDiv: ", batsmanDiv.get_text())
                try:
                    commentry = batsman.find('div', class_="cell commentary").find('a').get_text()
                    batsmanDict['commentry'] = commentry
                except AttributeError as ae:
                    # dismissal commentary cell missing (e.g. not out) -> empty
                    batsmanDict['commentry'] = ''
                #print("batsman commentry: ", commentry)
                #print("commentryDiv: ", commentryDiv.get_text())
                batsmanStatsList = batsman.find_all('div', class_="cell runs")
                ctr = 0
                tempList = []
                for batsmanStats in batsmanStatsList:
                    #print("anchor: ", batsmanStats.get_text())
                    #param = batsmanScorecardParam[ctr]
                    #ctr += 1
                    #batsmanDict[param] = batsmanStats.get_text()
                    tempList.append(batsmanStats.get_text())
                # 6 cells = R/B/M/4s/6s/SR; 5 cells = scorecard without minutes
                if len(tempList) == 6:
                    batsmanDict['run_scored'] = tempList[0]
                    batsmanDict['balls_faced'] = tempList[1]
                    batsmanDict['M'] = tempList[2]
                    batsmanDict['4s'] = tempList[3]
                    batsmanDict['6s'] = tempList[4]
                    batsmanDict['strike_rate'] = tempList[5]
                else:
                    batsmanDict['run_scored'] = tempList[0]
                    batsmanDict['balls_faced'] = tempList[1]
                    batsmanDict['M'] = '-'
                    batsmanDict['4s'] = tempList[2]
                    batsmanDict['6s'] = tempList[3]
                    batsmanDict['strike_rate'] = tempList[4]
                batsmanDict['innings'] = inn
                key = cricInfoBatsmanId# + "_" + team
                if matchTypeText == 'Tests':
                    # Tests: suffix the innings number so the two innings don't collide
                    key = key + inn[0]
                playerBatsmanDict[key] = batsmanDict
                #break
            ## print(batsmanListNotBatted)
            # batsmen listed as "did not bat" get placeholder stats
            for batsmen in batsmanListNotBatted:
                batsmanDict={}
                batsmanLink = batsmen.get('href')
                batsmanName = batsmen.get_text()
                batsmanLinkArr = str(batsmanLink).split('/')
                cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
                cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
                batsmanDict['short_name'] = batsmanName
                batsmanDict['player_cric_info_link'] = batsmanLink
                batsmanDict['team'] = team
                batsmanDict['run_scored'] = '-'
                batsmanDict['balls_faced'] = '-'
                batsmanDict['M'] = '-'
                batsmanDict['4s'] = '-'
                batsmanDict['6s'] = '-'
                batsmanDict['strike_rate'] = '-'
                batsmanDict['innings'] = inn
                key = cricInfoBatsmanId# + "_" + team
                #print('id : ',cricInfoBatsmanId)
                #print('key : ',key)
                #print(batsmanDict)
                if matchTypeText == 'Tests':
                    key = key+inn[0]
                playerBatsmanDict[key] = batsmanDict
                #print('Dict added : ',playerBatsmanDict[key])
            # bowling table for this innings (bowlers belong to the OTHER team)
            bowlersTR = scorecardVal.find('tbody').find_all('tr')
            #print("bowler section: ", bowlersTR)
            for bowlerRow in bowlersTR:
                bowlersTD = bowlerRow.find_all('td')
                bowlerAnchor = bowlersTD[0].find('a')
                bowlerLink = bowlerAnchor.get('href')
                bowlerName = bowlerAnchor.get_text()
                #print("bowler name: ", bowlerName, " link: ", bowlerLink)
                bowlerLinkArr = str(bowlerLink).split('/')
                cricInfoBowlerId = bowlerLinkArr[len(bowlerLinkArr) - 1]
                cricInfoBowlerId = str(cricInfoBowlerId).replace('.html', '')
                logger.info("bowlersTD: " + str(bowlersTD))
                logger.info("length bowlersTD: " + str(len(bowlersTD)))
                # 13-column rows include dots/4s/6s; shorter rows omit them
                if len(bowlersTD) == 13:
                    overs = bowlersTD[2].find(text=True)
                    maidens = bowlersTD[3].find(text=True)
                    runs = bowlersTD[4].find(text=True)
                    wickets = bowlersTD[5].find(text=True)
                    economy = bowlersTD[6].find(text=True)
                    dotBalls = bowlersTD[7].find(text=True)
                    ballerFours = bowlersTD[8].find(text=True)
                    ballerSixes = bowlersTD[9].find(text=True)
                    wideBalls = bowlersTD[10].find(text=True)
                    noBalls = bowlersTD[11].find(text=True)
                else:
                    overs = bowlersTD[2].find(text=True)
                    maidens = bowlersTD[3].find(text=True)
                    runs = bowlersTD[4].find(text=True)
                    wickets = bowlersTD[5].find(text=True)
                    economy = bowlersTD[6].find(text=True)
                    dotBalls = 0
                    ballerFours = 0
                    ballerSixes = 0
                    wideBalls = bowlersTD[7].find(text=True)
                    noBalls = bowlersTD[8].find(text=True)
                ## print('o'+overs)
                ## print(maidens)
                ## print(runs)
                ## print(wickets)
                ## print(economy)
                ## print(dotBalls)
                ## print(ballerFours)
                ## print(ballerSixes)
                ## print(wideBalls)
                ## print(noBalls)
                #['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']
                bowlerDict = {}
                bowlerDict['short_name'] = bowlerName
                bowlerDict['player_cric_info_link'] = bowlerLink
                # convert overs notation ("4.3") into a total ball count
                if '.' in overs:
                    oversArr = overs.split('.')
                    totalBalls: int = int(oversArr[0]) * 6
                    totalBalls += int(oversArr[1])
                else:
                    totalBalls: int = int(overs) * 6
                # getting the bowling team name
                if team == teamList[0]:
                    bowlingTeam = teamList[1]
                else:
                    bowlingTeam = teamList[0]
                bowlerDict['team'] = bowlingTeam
                bowlerDict['balls_bowled'] = totalBalls
                bowlerDict['maiden_overs'] = maidens
                bowlerDict['runs_given'] = runs
                bowlerDict['wicket'] = wickets
                bowlerDict['econ'] = economy
                bowlerDict['dot_delivery'] = dotBalls
                bowlerDict['four_delivery'] = ballerFours
                bowlerDict['six_delivery'] = ballerSixes
                bowlerDict['wide_balls'] = wideBalls
                bowlerDict['no_balls'] = noBalls
                bowlerDict['innings'] = inn
                #print(overs, maidens, runs, wickets, economy, wideBalls, noBalls)
                key = cricInfoBowlerId# + "_" + team
                if matchTypeText == 'Tests':
                    key = key+inn[0]
                playerBowlerDict[key] = bowlerDict
        #print("batsmanDict: ", playerBatsmanDict)
        #print("bowlerDict: ", playerBowlerDict)
        # Tests where one side never got its 2nd innings: synthesize placeholder rows
        if matchTypeText == 'Tests' and ((count[teamList[0]] == 2 and count[teamList[1]] == 1) or (count[teamList[0]] == 1 and count[teamList[1]] == 2)):
            # if
            missing = ''
            if count[teamList[0]] == 1:
                missing = teamList[0]
            elif count[teamList[1]] == 1:
                missing = teamList[1]
            for scorecardVal in scorecardDiv:
                team = scorecardVal.find('h2').get_text()
                inn = ' '.join(str(team).split(' ')[-2:])
                team = str(team).replace('1st Innings', '').replace('2nd Innings', '')
                team = team.strip()
                if team == missing:
                    batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')
                    batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')
                    for batsman in batsmanList:
                        batsmanDict = {}
                        batsmanAnchor = batsman.find('div', class_="cell batsmen").find('a')
                        batsmanLink = batsmanAnchor.get('href')
                        batsmanName = batsmanAnchor.get_text()
                        batsmanLinkArr = str(batsmanLink).split('/')
                        cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
                        cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
                        batsmanDict['short_name'] = batsmanName
                        batsmanDict['player_cric_info_link'] = batsmanLink
                        batsmanDict['team'] = team
                        batsmanDict['run_scored'] = '-'
                        batsmanDict['balls_faced'] = '-'
                        batsmanDict['M'] = '-'
                        batsmanDict['4s'] = '-'
                        batsmanDict['6s'] = '-'
                        batsmanDict['strike_rate'] = '-'
                        batsmanDict['innings'] = '2nd Innings'
                        ## print(batsmanList)
                        key = cricInfoBatsmanId
                        batsmanDict['commentry'] = '-'
                        if matchTypeText == 'Tests':
                            key = key+'2'
                        playerBatsmanDict[key] = batsmanDict
                    # NOTE(review): batsmanDict is NOT re-initialised in this loop, so fields
                    # (e.g. 'commentry') leak from the previous iteration — confirm intended.
                    for batsmen in batsmanListNotBatted:
                        batsmanLink = batsmen.get('href')
                        batsmanName = batsmen.get_text()
                        batsmanLinkArr = str(batsmanLink).split('/')
                        cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
                        cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
                        batsmanDict['short_name'] = batsmanName
                        batsmanDict['player_cric_info_link'] = batsmanLink
                        batsmanDict['team'] = team
                        batsmanDict['run_scored'] = '-'
                        batsmanDict['balls_faced'] = '-'
                        batsmanDict['M'] = '-'
                        batsmanDict['4s'] = '-'
                        batsmanDict['6s'] = '-'
                        batsmanDict['strike_rate'] = '-'
                        batsmanDict['innings'] = '2nd Innings'
                        key = cricInfoBatsmanId# + "_" + team
                        if matchTypeText == 'Tests':
                            key = key+'2'
                        playerBatsmanDict[key] = batsmanDict
        # checking batsman in bowler map, if found add them in playerBatsmanDict
        if matchTypeText == 'Tests':
            for batsmanKey, batsmanValue in playerBatsmanDict.items():
                if batsmanKey in playerBowlerDict:
                    if playerBatsmanDict[batsmanKey]['innings'] == playerBowlerDict[batsmanKey]['innings']:
                        bowlerData = playerBowlerDict[batsmanKey]
                        fianlDict = {**batsmanValue, **bowlerData}
                        playerBatsmanDict[batsmanKey] = fianlDict
                        del playerBowlerDict[batsmanKey]
        else:
            for batsmanKey, batsmanValue in playerBatsmanDict.items():
                if batsmanKey in playerBowlerDict:
                    bowlerData = playerBowlerDict[batsmanKey]
                    fianlDict = {**batsmanValue, **bowlerData}
                    playerBatsmanDict[batsmanKey] = fianlDict
                    del playerBowlerDict[batsmanKey]
        ## print("after merging batsmanDict: ", playerBatsmanDict)
        ## print("after merging bowlerDict: ", playerBowlerDict)
        playerFinalDict = {**playerBatsmanDict, **playerBowlerDict}
        ##
        ## print("Player final dict: ", playerFinalDict)
        ##TODO mark player as 'Batsman', 'Bowler', 'WicketKeeper', 'All rounder'
        # Enrich each player: position, match metadata, then a live fetch of the
        # player's Cricinfo profile. `pno` throttles to batches of 5 requests with
        # a 10s pause between batches; any exception backs off 300s and retries.
        pno = 0
        for playerKey, playerValue in playerFinalDict.items():
            flag = True
            while flag:
                try:
                    pno+=1
                    if pno <= 5:
                        shortName = playerValue['short_name']
                        playerDict = playerFinalDict[playerKey]
                        if '†' in shortName:
                            # checking for WicketKeeper position (dagger marks the keeper)
                            playerDict['Position'] = "WK"
                        elif 'econ' in playerDict:
                            playerDict['Position'] = "Bowler"
                        else:
                            playerDict['Position'] = "Batsman"
                        #print('Pno : ' + str(pno))
                        playerDict['match_id'] = matchId + '_' + playerDict['innings'][:2]
                        playerDict['match_desc'] = matchDesc
                        playerDict['match_type_text'] = matchTypeText +' '+ playerDict['innings']
                        playerDict['season'] = season
                        playerDict['MatchURL'] = pageUrl
                        playerDict['Match_start_Date'] = Date
                        playerDict['Venue'] = venue
                        if playerDict['team'] == teamList[0]:
                            playerDict['TeamID'] = teamIDList[0]
                            playerDict['OpponentID'] = teamIDList[1]
                        else:
                            playerDict['TeamID'] = teamIDList[1]
                            playerDict['OpponentID'] = teamIDList[0]
                        # live fetch of the player's profile page for bio details
                        url = playerDict['player_cric_info_link']
                        page = requests.get(url,params = payload).text
                        soup = BeautifulSoup(page,'html.parser')
                        pees = soup.find_all('p',class_='ciPlayerinformationtxt')
                        val = []
                        key = []
                        for pee in pees:
                            key.append(pee.find('b').get_text())
                            val.append(pee.find('span').get_text())
                        if "Full name" in key:
                            playerDict['Player_Full_Name'] = val[key.index("Full name")]
                        else:
                            playerDict['Player_Full_Name'] = '-'
                        if 'Born' in key:
                            playerDict['date,place_of_birth'] = val[key.index('Born')].replace('\n','').strip()
                        else:
                            playerDict['date,place_of_birth'] = '-'
                        if 'Nickname' in key:
                            playerDict['Player_Nickname'] = val[key.index('Nickname')]
                        else:
                            playerDict['Player_Nickname'] = '-'
                        ## playerDict['Player_Full_Name'] = data[0]
                        ## playerDict['data,place_of_birth'] = data[1][1:]
                        ## if data[4] == None:
                        ## playerDict['Player_Nickname'] = '-'
                        ## else:
                        ## playerDict['Player_Nickname'] = data[4]
                        #DOB_PlaceOB = soup.fin_next('p',class_='ciPlayerinformationtxt').find('span').get_text()
                        # below adding missed parameters in player's dict with default 0 value
                        if not 'run_scored' in playerDict:
                            playerDict['run_scored'] = "-"
                        if not 'balls_faced' in playerDict:
                            playerDict['balls_faced'] = "-"
                        if not 'strike_rate' in playerDict:
                            playerDict['strike_rate'] = "-"
                        if not 'balls_bowled' in playerDict:
                            playerDict['balls_bowled'] = "-"
                        if not 'maiden_overs' in playerDict:
                            playerDict['maiden_overs'] = "-"
                        if not 'runs_given' in playerDict:
                            playerDict['runs_given'] = "-"
                        if not 'wicket' in playerDict:
                            playerDict['wicket'] = "-"
                        if not 'econ' in playerDict:
                            playerDict['econ'] = "-"
                        if not 'wide_balls' in playerDict:
                            playerDict['wide_balls'] = "-"
                        if not 'no_balls' in playerDict:
                            playerDict['no_balls'] = "-"
                        flag = False
                    else:
                        # batch limit hit: reset the counter and rate-limit
                        pno = 0
                        time.sleep(10)
                except Exception as e:
                    print('pausing scrapping for 5 mins : '+str(e))
                    time.sleep(300)
                    flag = True
        # print("Player final dict 2: ", playerFinalDict)
        # stamp the stable ids used as Mongo keys
        for key, val in playerFinalDict.items():
            val['cric_info_id'] = key
            val['_id'] = key + "-" + matchId
            #print(key)
            #MongoDao.insertToPlayerStats(val)
        logger.info("players inserted successfully for url: " + pageUrl)
        #MongoDao.insertToProcessedUrls(pageUrl)
        #print(playerFinalDict.key())
        df = pd.DataFrame(playerFinalDict)
        return df
    except Exception as e:
        # NOTE(review): broad catch swallows all failures and implicitly returns None
        logger.error("ERROR while processing URL: " + pageUrl)
        logger.exception("message")
        print("Scrapping : "+str(e))
        #print(("ERROR while processing URL: " + pageUrl))
#scrapODI_T20Data('', '', '', "T20", '', '')
| 48.340471 | 154 | 0.454707 | import requests
from bs4 import BeautifulSoup
from logger_impl import *
import MongoDao
import pandas as pd
import time
payload = {'key': 'ac9e8cf2dec81949d9ee1235ed6ae3fb', 'url':
'https://httpbin.org/ip'}
def scrapData(scorecardSoup, matchId, matchDesc, matchTypeText, pageUrl, season, Date, venue):
try:
soup = scorecardSoup
scorecardDiv = soup.find_all('article', class_='sub-module scorecard')
playerBatsmanDict = {}
playerBowlerDict = {}
batsmanScorecardParam = ['run_scored', 'balls_faced','M', '4s', '6s', 'strike_rate']
bowlerScorecardParam = ['O', 'M', 'R', 'W', 'Econ', 'WD', 'NB']
teamList = []
teamIDList = []
inningsTeam = []
in scorecardDiv:
team = scorecardVal.find('h2').get_text()
if matchTypeText == 'Tests':
team = str(team).replace('1st Innings', '').replace('2nd Innings', '')
else:
team = str(team).replace('Innings', '')
if team.strip() in teamList:
break
teamList.append(team.strip())
count = {teamList[0]:0,teamList[1]:0}
for team in teamList:
word = team.split(' ')
if len(word) == 1:
id_ = team[:3]
teamIDList.append(id_)
else:
id_ = ''
for x in word:
id_ = id_ + x[0]
teamIDList.append(id_)
for scorecardVal in scorecardDiv:
team = scorecardVal.find('h2').get_text()
inn = ''
if matchTypeText == 'Tests':
inn = ' '.join(str(team).split(' ')[-2:])
team = str(team).replace('1st Innings', '').replace('2nd Innings', '')
else:
team = str(team).replace('Innings', '')
team = team.strip()
count[team] += 1
team: " + team)
batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')
batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')
or = batsman.find('div', class_="cell batsmen").find('a')
batsmanLink = batsmanAnchor.get('href')
batsmanName = batsmanAnchor.get_text()
batsmanLinkArr = str(batsmanLink).split('/')
cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
batsmanDict['short_name'] = batsmanName
batsmanDict['player_cric_info_link'] = batsmanLink
batsmanDict['team'] = team
try:
commentry = batsman.find('div', class_="cell commentary").find('a').get_text()
batsmanDict['commentry'] = commentry
except AttributeError as ae:
batsmanDict['commentry'] = ''
batsmanStatsList = batsman.find_all('div', class_="cell runs")
ctr = 0
tempList = []
for batsmanStats in batsmanStatsList:
tempList.append(batsmanStats.get_text())
if len(tempList) == 6:
batsmanDict['run_scored'] = tempList[0]
batsmanDict['balls_faced'] = tempList[1]
batsmanDict['M'] = tempList[2]
batsmanDict['4s'] = tempList[3]
batsmanDict['6s'] = tempList[4]
batsmanDict['strike_rate'] = tempList[5]
else:
batsmanDict['run_scored'] = tempList[0]
batsmanDict['balls_faced'] = tempList[1]
batsmanDict['M'] = '-'
batsmanDict['4s'] = tempList[2]
batsmanDict['6s'] = tempList[3]
batsmanDict['strike_rate'] = tempList[4]
batsmanDict['innings'] = inn
key = cricInfoBatsmanId
if matchTypeText == 'Tests':
key = key + inn[0]
playerBatsmanDict[key] = batsmanDict
NotBatted:
batsmanDict={}
batsmanLink = batsmen.get('href')
batsmanName = batsmen.get_text()
batsmanLinkArr = str(batsmanLink).split('/')
cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
batsmanDict['short_name'] = batsmanName
batsmanDict['player_cric_info_link'] = batsmanLink
batsmanDict['team'] = team
batsmanDict['run_scored'] = '-'
batsmanDict['balls_faced'] = '-'
batsmanDict['M'] = '-'
batsmanDict['4s'] = '-'
batsmanDict['6s'] = '-'
batsmanDict['strike_rate'] = '-'
batsmanDict['innings'] = inn
key = cricInfoBatsmanId
if matchTypeText == 'Tests':
key = key+inn[0]
playerBatsmanDict[key] = batsmanDict
bowlersTR = scorecardVal.find('tbody').find_all('tr')
for bowlerRow in bowlersTR:
bowlersTD = bowlerRow.find_all('td')
bowlerAnchor = bowlersTD[0].find('a')
bowlerLink = bowlerAnchor.get('href')
bowlerName = bowlerAnchor.get_text()
bowlerLinkArr = str(bowlerLink).split('/')
cricInfoBowlerId = bowlerLinkArr[len(bowlerLinkArr) - 1]
cricInfoBowlerId = str(cricInfoBowlerId).replace('.html', '')
logger.info("bowlersTD: " + str(bowlersTD))
logger.info("length bowlersTD: " + str(len(bowlersTD)))
if len(bowlersTD) == 13:
overs = bowlersTD[2].find(text=True)
maidens = bowlersTD[3].find(text=True)
runs = bowlersTD[4].find(text=True)
wickets = bowlersTD[5].find(text=True)
economy = bowlersTD[6].find(text=True)
dotBalls = bowlersTD[7].find(text=True)
ballerFours = bowlersTD[8].find(text=True)
ballerSixes = bowlersTD[9].find(text=True)
wideBalls = bowlersTD[10].find(text=True)
noBalls = bowlersTD[11].find(text=True)
else:
overs = bowlersTD[2].find(text=True)
maidens = bowlersTD[3].find(text=True)
runs = bowlersTD[4].find(text=True)
wickets = bowlersTD[5].find(text=True)
economy = bowlersTD[6].find(text=True)
dotBalls = 0
ballerFours = 0
ballerSixes = 0
wideBalls = bowlersTD[7].find(text=True)
noBalls = bowlersTD[8].find(text=True)
= int(oversArr[0]) * 6
totalBalls += int(oversArr[1])
else:
totalBalls: int = int(overs) * 6
if team == teamList[0]:
bowlingTeam = teamList[1]
else:
bowlingTeam = teamList[0]
bowlerDict['team'] = bowlingTeam
bowlerDict['balls_bowled'] = totalBalls
bowlerDict['maiden_overs'] = maidens
bowlerDict['runs_given'] = runs
bowlerDict['wicket'] = wickets
bowlerDict['econ'] = economy
bowlerDict['dot_delivery'] = dotBalls
bowlerDict['four_delivery'] = ballerFours
bowlerDict['six_delivery'] = ballerSixes
bowlerDict['wide_balls'] = wideBalls
bowlerDict['no_balls'] = noBalls
bowlerDict['innings'] = inn
key = cricInfoBowlerId
if matchTypeText == 'Tests':
key = key+inn[0]
playerBowlerDict[key] = bowlerDict
if matchTypeText == 'Tests' and ((count[teamList[0]] == 2 and count[teamList[1]] == 1) or (count[teamList[0]] == 1 and count[teamList[1]] == 2)):
missing = ''
if count[teamList[0]] == 1:
missing = teamList[0]
elif count[teamList[1]] == 1:
missing = teamList[1]
for scorecardVal in scorecardDiv:
team = scorecardVal.find('h2').get_text()
inn = ' '.join(str(team).split(' ')[-2:])
team = str(team).replace('1st Innings', '').replace('2nd Innings', '')
team = team.strip()
if team == missing:
batsmanList = scorecardVal.find_all('div', class_='wrap batsmen')
batsmanListNotBatted = scorecardVal.find('div', class_='wrap dnb').find_all('a')
for batsman in batsmanList:
batsmanDict = {}
batsmanAnchor = batsman.find('div', class_="cell batsmen").find('a')
batsmanLink = batsmanAnchor.get('href')
batsmanName = batsmanAnchor.get_text()
batsmanLinkArr = str(batsmanLink).split('/')
cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
batsmanDict['short_name'] = batsmanName
batsmanDict['player_cric_info_link'] = batsmanLink
batsmanDict['team'] = team
batsmanDict['run_scored'] = '-'
batsmanDict['balls_faced'] = '-'
batsmanDict['M'] = '-'
batsmanDict['4s'] = '-'
batsmanDict['6s'] = '-'
batsmanDict['strike_rate'] = '-'
batsmanDict['innings'] = '2nd Innings'
smanId
batsmanDict['commentry'] = '-'
if matchTypeText == 'Tests':
key = key+'2'
playerBatsmanDict[key] = batsmanDict
for batsmen in batsmanListNotBatted:
batsmanLink = batsmen.get('href')
batsmanName = batsmen.get_text()
batsmanLinkArr = str(batsmanLink).split('/')
cricInfoBatsmanId = batsmanLinkArr[len(batsmanLinkArr) - 1]
cricInfoBatsmanId = str(cricInfoBatsmanId).replace('.html', '')
batsmanDict['short_name'] = batsmanName
batsmanDict['player_cric_info_link'] = batsmanLink
batsmanDict['team'] = team
batsmanDict['run_scored'] = '-'
batsmanDict['balls_faced'] = '-'
batsmanDict['M'] = '-'
batsmanDict['4s'] = '-'
batsmanDict['6s'] = '-'
batsmanDict['strike_rate'] = '-'
batsmanDict['innings'] = '2nd Innings'
key = cricInfoBatsmanId
if matchTypeText == 'Tests':
key = key+'2'
playerBatsmanDict[key] = batsmanDict
if matchTypeText == 'Tests':
for batsmanKey, batsmanValue in playerBatsmanDict.items():
if batsmanKey in playerBowlerDict:
if playerBatsmanDict[batsmanKey]['innings'] == playerBowlerDict[batsmanKey]['innings']:
bowlerData = playerBowlerDict[batsmanKey]
fianlDict = {**batsmanValue, **bowlerData}
playerBatsmanDict[batsmanKey] = fianlDict
del playerBowlerDict[batsmanKey]
else:
for batsmanKey, batsmanValue in playerBatsmanDict.items():
if batsmanKey in playerBowlerDict:
bowlerData = playerBowlerDict[batsmanKey]
fianlDict = {**batsmanValue, **bowlerData}
playerBatsmanDict[batsmanKey] = fianlDict
del playerBowlerDict[batsmanKey]
pno+=1
if pno <= 5:
shortName = playerValue['short_name']
playerDict = playerFinalDict[playerKey]
if '†' in shortName:
playerDict['Position'] = "WK"
elif 'econ' in playerDict:
playerDict['Position'] = "Bowler"
else:
playerDict['Position'] = "Batsman"
playerDict['match_id'] = matchId + '_' + playerDict['innings'][:2]
playerDict['match_desc'] = matchDesc
playerDict['match_type_text'] = matchTypeText +' '+ playerDict['innings']
playerDict['season'] = season
playerDict['MatchURL'] = pageUrl
playerDict['Match_start_Date'] = Date
playerDict['Venue'] = venue
if playerDict['team'] == teamList[0]:
playerDict['TeamID'] = teamIDList[0]
playerDict['OpponentID'] = teamIDList[1]
else:
playerDict['TeamID'] = teamIDList[1]
playerDict['OpponentID'] = teamIDList[0]
url = playerDict['player_cric_info_link']
page = requests.get(url,params = payload).text
soup = BeautifulSoup(page,'html.parser')
pees = soup.find_all('p',class_='ciPlayerinformationtxt')
val = []
key = []
for pee in pees:
key.append(pee.find('b').get_text())
val.append(pee.find('span').get_text())
if "Full name" in key:
playerDict['Player_Full_Name'] = val[key.index("Full name")]
else:
playerDict['Player_Full_Name'] = '-'
if 'Born' in key:
playerDict['date,place_of_birth'] = val[key.index('Born')].replace('\n','').strip()
else:
playerDict['date,place_of_birth'] = '-'
if 'Nickname' in key:
playerDict['Player_Nickname'] = val[key.index('Nickname')]
else:
playerDict['Player_Nickname'] = '-'
if not 'run_scored' in playerDict:
playerDict['run_scored'] = "-"
if not 'balls_faced' in playerDict:
playerDict['balls_faced'] = "-"
if not 'strike_rate' in playerDict:
playerDict['strike_rate'] = "-"
if not 'balls_bowled' in playerDict:
playerDict['balls_bowled'] = "-"
if not 'maiden_overs' in playerDict:
playerDict['maiden_overs'] = "-"
if not 'runs_given' in playerDict:
playerDict['runs_given'] = "-"
if not 'wicket' in playerDict:
playerDict['wicket'] = "-"
if not 'econ' in playerDict:
playerDict['econ'] = "-"
if not 'wide_balls' in playerDict:
playerDict['wide_balls'] = "-"
if not 'no_balls' in playerDict:
playerDict['no_balls'] = "-"
flag = False
else:
pno = 0
time.sleep(10)
except Exception as e:
print('pausing scrapping for 5 mins : '+str(e))
time.sleep(300)
flag = True
# print("Player final dict 2: ", playerFinalDict)
for key, val in playerFinalDict.items():
val['cric_info_id'] = key
val['_id'] = key + "-" + matchId
#print(key)
#MongoDao.insertToPlayerStats(val)
logger.info("players inserted successfully for url: " + pageUrl)
#MongoDao.insertToProcessedUrls(pageUrl)
#print(playerFinalDict.key())
df = pd.DataFrame(playerFinalDict)
return df
except Exception as e:
logger.error("ERROR while processing URL: " + pageUrl)
logger.exception("message")
print("Scrapping : "+str(e))
#print(("ERROR while processing URL: " + pageUrl))
#scrapODI_T20Data('', '', '', "T20", '', '')
| true | true |
f727f81a283fdf0533813dc879af7cc5b21230da | 879 | py | Python | armi/meta.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 162 | 2019-11-01T17:35:58.000Z | 2022-03-18T04:22:39.000Z | armi/meta.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 315 | 2019-11-01T17:32:05.000Z | 2022-03-30T03:51:42.000Z | armi/meta.py | keckler/armi | b5f95b4795aa21e00fd6786f6994862a4bdccb16 | [
"Apache-2.0"
] | 55 | 2019-11-01T16:59:59.000Z | 2022-03-25T18:19:06.000Z | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Metadata describing an ARMI distribution.
"""
# duplicating with setup.py for now. This is because in order to import meta.py, we
# need to run armi.__init__, which does a whole heck of a lot of stuff that setup.py
# shouldn't need. We should clean this up in the future.
__version__ = "0.2.0"
| 38.217391 | 84 | 0.753129 |
__version__ = "0.2.0"
| true | true |
f727fa256e9c15830ec9c93a29d55173626569d5 | 4,224 | py | Python | generate_training_commands.py | dumpmemory/academic-budget-bert | ea000838156e3be251699ad6a3c8b1339c76e987 | [
"Apache-2.0"
] | 146 | 2021-08-01T12:51:04.000Z | 2022-03-27T18:34:11.000Z | generate_training_commands.py | dumpmemory/academic-budget-bert | ea000838156e3be251699ad6a3c8b1339c76e987 | [
"Apache-2.0"
] | 14 | 2021-08-01T12:53:27.000Z | 2022-03-24T09:55:53.000Z | generate_training_commands.py | dumpmemory/academic-budget-bert | ea000838156e3be251699ad6a3c8b1339c76e987 | [
"Apache-2.0"
] | 29 | 2021-08-02T12:04:14.000Z | 2022-03-31T03:56:55.000Z | # coding=utf-8
# Copyright 2021 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import random
from itertools import product
import yaml
def get_yaml(file_name):
    """Parse a YAML file and return the loaded document.

    On a parse failure the error is printed and ``None`` is returned
    (callers are expected to receive a dict on success).
    """
    with open(file_name, "r") as fh:
        try:
            return yaml.safe_load(fh)
        except yaml.YAMLError as err:
            print(err)
def get_run_id():
    """Build a run identifier: wall-clock timestamp plus a random suffix.

    Format is ``YYYYmmddHHMMSS-<5-or-6-digit random number>``; the random
    part makes ids from the same second distinct.
    """
    stamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    suffix = random.randint(10000, 100000)
    return "{}-{}".format(stamp, suffix)
def add_run_id_per_command(params_combinations_named):
    """Stamp every combination dict with a fresh ``current_run_id``.

    Mutates the dicts in place and returns the same list for chaining.
    """
    for combination in params_combinations_named:
        combination.update(current_run_id=get_run_id())
    return params_combinations_named
def get_hyper_param_combinations_grid(parameters_json):
    """Expand the ``hyperparameters`` section into the full cartesian grid.

    Each grid point becomes a dict mapping parameter name -> chosen value,
    and every dict is stamped with a unique run id.
    """
    hyper_params = parameters_json["hyperparameters"]
    names = list(hyper_params.keys())
    value_lists = list(hyper_params.values())
    named_combinations = [dict(zip(names, combo)) for combo in product(*value_lists)]
    return add_run_id_per_command(named_combinations)
def get_hyper_param_combinations(parameters_json, search_type="grid"):
    """Dispatch to the search strategy named by *search_type*.

    Only ``"grid"`` is implemented; any unknown strategy silently falls
    back to grid search.
    """
    strategies = {"grid": get_hyper_param_combinations_grid}
    strategy = strategies.get(search_type, strategies["grid"])
    return strategy(parameters_json)
def add_param(key, value):
    """Render one (key, value) pair as a command-line argument fragment.

    Booleans are treated as store_true-style flags: ``True`` renders as
    ``--key`` with no value, and ``False`` renders as an empty string so
    the flag is omitted. (The original emitted ``--key`` for ``False`` as
    well, which would wrongly *enable* the flag for that combination.)

    :param key: argument name without the leading dashes
    :param value: argument value; a bool selects flag rendering
    :return: rendered fragment, or "" when a False flag should be dropped
    """
    # isinstance instead of type(...) == bool: idiomatic and subclass-safe.
    if isinstance(value, bool):
        return f"--{key}" if value else ""
    return f"--{key} {value}"
def get_command_from_params(param_list):
    """Render a dict of parameters as one space-separated argument string."""
    rendered = (add_param(name, value) for name, value in param_list.items())
    return " ".join(rendered)
def append_command(command, addition):
    """Concatenate *addition* onto *command* with a single separating space."""
    return "{} {}".format(command, addition)
def add_default_params(parameters_json, job_name):
    """Inject *job_name* into the config's default parameters (in place)."""
    defaults = parameters_json["default_parameters"]
    defaults["job_name"] = job_name
    return parameters_json
def get_command_per_combination(command_init, parameters_json, params_combinations_named):
    """Build one full launch command per hyperparameter combination.

    Each command is: init command + combination-specific args + shared
    default args. The defaults are rendered once and reused for every
    combination.
    """
    shared_args = get_command_from_params(parameters_json["default_parameters"])
    commands = []
    for combination in params_combinations_named:
        combo_args = get_command_from_params(combination)
        command = append_command(append_command(f"{command_init}", combo_args), shared_args)
        commands.append(command)
    return commands
def create_experiments(command_init, param_file, job_name, search_type="grid"):
    """Load the YAML config, expand the hyperparameter grid, and print one
    complete training command per combination to stdout."""
    config = add_default_params(get_yaml(param_file), job_name)
    combinations = get_hyper_param_combinations(config, search_type)
    for command in get_command_per_combination(command_init, config, combinations):
        print(command)
# CLI entry point: parse options and emit one launch command per grid point.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # YAML file with 'hyperparameters' and 'default_parameters' sections.
    parser.add_argument("--param_file", help="Hyperparameter and configuration yaml", required=True)
    parser.add_argument("--job_name", help="job name", default="bert_large_experiment")
    parser.add_argument(
        "--init_cmd",
        help="initialization command (deepspeed or python directly)",
        default="deepspeed run_pretraining.py",
    )
    # Only "grid" is implemented; unknown values fall back to grid search.
    parser.add_argument("--search_type", help="hyperparameter search method", default="grid")
    args = parser.parse_args()
    create_experiments(args.init_cmd, args.param_file, args.job_name, args.search_type)
| 33.792 | 100 | 0.742661 |
import argparse
import datetime
import random
from itertools import product
import yaml
def get_yaml(file_name):
with open(file_name, "r") as stream:
try:
ym = yaml.safe_load(stream)
return ym
except yaml.YAMLError as e:
print(e)
def get_run_id():
t = datetime.datetime.now()
time_str = t.strftime("%Y%m%d%H%M%S")
random_num = random.randint(10000, 100000)
return f"{time_str}-{random_num}"
def add_run_id_per_command(params_combinations_named):
for comb in params_combinations_named:
comb["current_run_id"] = get_run_id()
return params_combinations_named
def get_hyper_param_combinations_grid(parameters_json):
params = parameters_json["hyperparameters"]
map_index_name = list(params.keys())
all_params_list = [param_values for _, param_values in params.items()]
params_combinations = list(product(*all_params_list))
params_combinations_named = [
{map_index_name[i]: value for i, value in enumerate(comb)} for comb in params_combinations
]
params_combinations_named = add_run_id_per_command(params_combinations_named)
return params_combinations_named
def get_hyper_param_combinations(parameters_json, search_type="grid"):
cases = {"grid": get_hyper_param_combinations_grid}
how_to_get_hyper_param_combinations = cases["grid"]
if search_type in cases:
how_to_get_hyper_param_combinations = cases[search_type]
return how_to_get_hyper_param_combinations(parameters_json)
def add_param(key, value):
if type(value) == bool:
return f"--{key}"
return f"--{key} {value}"
def get_command_from_params(param_list):
return " ".join([add_param(k, v) for k, v in param_list.items()])
def append_command(command, addition):
return f"{command} {addition}"
def add_default_params(parameters_json, job_name):
parameters_json["default_parameters"]["job_name"] = job_name
return parameters_json
def get_command_per_combination(command_init, parameters_json, params_combinations_named):
all_commands = []
command_default = get_command_from_params(parameters_json["default_parameters"])
for comb in params_combinations_named:
command_current = f"{command_init}"
command_current = append_command(command_current, get_command_from_params(comb))
command_current = append_command(command_current, command_default)
all_commands.append(command_current)
return all_commands
def create_experiments(command_init, param_file, job_name, search_type="grid"):
parameters_json = get_yaml(param_file)
parameters_json = add_default_params(parameters_json, job_name)
params_combinations_named = get_hyper_param_combinations(parameters_json, search_type)
all_commands = get_command_per_combination(
command_init, parameters_json, params_combinations_named
)
for command in all_commands:
print(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--param_file", help="Hyperparameter and configuration yaml", required=True)
parser.add_argument("--job_name", help="job name", default="bert_large_experiment")
parser.add_argument(
"--init_cmd",
help="initialization command (deepspeed or python directly)",
default="deepspeed run_pretraining.py",
)
parser.add_argument("--search_type", help="hyperparameter search method", default="grid")
args = parser.parse_args()
create_experiments(args.init_cmd, args.param_file, args.job_name, args.search_type)
| true | true |
f727fa6638c95253d96d2e8f5ab19ff10be0c38a | 655 | py | Python | App/Config/db.py | pyworksasia/pyworks | 01aefa9e7db4c980dc7518f40a84b99d6137f906 | [
"Apache-2.0"
] | null | null | null | App/Config/db.py | pyworksasia/pyworks | 01aefa9e7db4c980dc7518f40a84b99d6137f906 | [
"Apache-2.0"
] | null | null | null | App/Config/db.py | pyworksasia/pyworks | 01aefa9e7db4c980dc7518f40a84b99d6137f906 | [
"Apache-2.0"
] | null | null | null | from App.Config.app import settings
# Database connection registry; all credentials come from the app settings
# module (env-driven). 'default' names the connection used when none is
# given explicitly. NOTE(review): the key set (driver/prefix/log_queries)
# looks like an Orator-style ORM config — confirm against the consumer.
DATABASES = {
    'default': 'mysql',
    # Primary MySQL connection.
    'mysql': {
        'driver': 'mysql',
        'host': settings.DB_HOST,
        'port': settings.DB_PORT,
        'database': settings.DB_DATABASE,
        'user': settings.DB_USER,
        'password': settings.DB_PASSWORD,
        'prefix': settings.DB_PREFIX,
        'log_queries': True
    },
    # Alternative PostgreSQL connection sharing the same settings values.
    'postgres': {
        'driver': 'postgres',
        'host': settings.DB_HOST,
        'database': settings.DB_DATABASE,
        'user': settings.DB_USER,
        'password': settings.DB_PASSWORD,
        'prefix': settings.DB_PREFIX,
        'port': settings.DB_PORT,
    }
} | 27.291667 | 41 | 0.567939 | from App.Config.app import settings
DATABASES = {
'default': 'mysql',
'mysql': {
'driver': 'mysql',
'host': settings.DB_HOST,
'port': settings.DB_PORT,
'database': settings.DB_DATABASE,
'user': settings.DB_USER,
'password': settings.DB_PASSWORD,
'prefix': settings.DB_PREFIX,
'log_queries': True
},
'postgres': {
'driver': 'postgres',
'host': settings.DB_HOST,
'database': settings.DB_DATABASE,
'user': settings.DB_USER,
'password': settings.DB_PASSWORD,
'prefix': settings.DB_PREFIX,
'port': settings.DB_PORT,
}
} | true | true |
f727fb7b85952005743a11740a3245a5d33e2124 | 4,124 | py | Python | scatterauth/views.py | caniko2/django-scatter-auth | 962b352f4deaf91de7daa8a85b8ee82852962032 | [
"MIT"
] | null | null | null | scatterauth/views.py | caniko2/django-scatter-auth | 962b352f4deaf91de7daa8a85b8ee82852962032 | [
"MIT"
] | null | null | null | scatterauth/views.py | caniko2/django-scatter-auth | 962b352f4deaf91de7daa8a85b8ee82852962032 | [
"MIT"
] | null | null | null | import random
import string
import json
from django.shortcuts import render, redirect, reverse
from django.urls.exceptions import NoReverseMatch
from django.contrib.auth import login, authenticate
from django.conf import settings
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse, HttpResponseBadRequest
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django.http.request import split_domain_port
from scatterauth.forms import LoginForm, SignupForm
from scatterauth.settings import app_settings
def get_redirect_url(request):
    """Pick the post-login destination for *request*.

    Precedence: ``next`` in the query string, then ``next`` in the POST
    body, then ``settings.LOGIN_REDIRECT_URL`` — resolved as a URL name
    when possible, otherwise used verbatim. Returns None when none of
    these are set.
    """
    target = request.GET.get('next') or request.POST.get('next')
    if target:
        return target
    if settings.LOGIN_REDIRECT_URL:
        try:
            return reverse(settings.LOGIN_REDIRECT_URL)
        except NoReverseMatch:
            return settings.LOGIN_REDIRECT_URL
@require_http_methods(['POST'])
def login_api(request):
    """Authenticate a Scatter signature and open a session.

    Expects ``public_key``, ``nonce`` and ``res`` (the signed nonce) in
    the POST body; answers JSON with ``success`` plus either
    ``redirect_url`` or ``error``.
    """
    form = LoginForm(request.POST)
    if not form.is_valid():
        return JsonResponse({'success': False, 'error': json.loads(form.errors.as_json())})
    cleaned = form.cleaned_data
    public_key = cleaned.get('public_key')
    nonce = cleaned.get('nonce')
    res = cleaned.get('res')
    if not (public_key and nonce and res):
        return JsonResponse({'error': _(
            'Please pass message, signed message, and public key'),
            'success': False})
    user = authenticate(request, public_key=public_key, nonce=nonce, res=res)
    if user is None:
        error = _("Can't find a user for the provided signature with public key {public_key}").format(
            public_key=public_key)
        return JsonResponse({'success': False, 'error': error})
    login(request, user, 'scatterauth.backend.ScatterAuthBackend')
    return JsonResponse({'success': True, 'redirect_url': get_redirect_url(request)})
@require_http_methods(['POST'])
def signup_api(request):
    """JSON signup endpoint for Scatter-based accounts.

    Mirrors ``signup_view``: checks the signup feature flag, validates the
    posted ``SignupForm``, stores the public key on the configured user
    field, logs the new user in, and reports the redirect URL.

    Bug fix: the original ran ``setattr(user, username, ...)`` with the
    undefined name ``username`` (a guaranteed NameError on any valid
    signup) and read the form field ``'publicKey'``; it now uses
    ``app_settings.SCATTERAUTH_USER_PUBKEY_FIELD`` exactly like
    ``signup_view`` does.
    """
    if not app_settings.SCATTERAUTH_SIGNUP_ENABLED:
        return JsonResponse({'success': False, 'error': _("Sorry, signup's are currently disabled")})
    form = SignupForm(request.POST)
    if form.is_valid():
        user = form.save(commit=False)
        # Store the Scatter public key on the field configured in settings.
        pubkey_field = app_settings.SCATTERAUTH_USER_PUBKEY_FIELD
        setattr(user, pubkey_field, form.cleaned_data[pubkey_field])
        user.save()
        login(request, user, 'scatterauth.backend.ScatterAuthBackend')
        return JsonResponse({'success': True, 'redirect_url': get_redirect_url(request)})
    else:
        return JsonResponse({'success': False, 'error': json.loads(form.errors.as_json())})
@require_http_methods(['GET', 'POST'])
def signup_view(request, template_name='scatterauth/signup.html'):
    '''
    Render and process the Scatter signup form.

    1. Creates an instance of a SignupForm.
    2. Checks whether registration is enabled.
    3. If registration is closed or the form has errors, re-renders the form with errors.
    4. If the form is valid, saves the user without committing to the DB.
    5. Sets the user's public-key field from the form, then saves to the DB.
    6. Logs the user in using scatterauth.backend.ScatterAuthBackend.
    7. Redirects to LOGIN_REDIRECT_URL or 'next' in the GET/POST params.

    Improvement: removed two leftover debug ``print`` calls (the public-key
    field name and the user's email) that leaked to stdout on every signup.

    :param request: Django request
    :param template_name: Template to render
    :return: rendered template with form
    '''
    form = SignupForm()
    if not app_settings.SCATTERAUTH_SIGNUP_ENABLED:
        form.add_error(None, _("Sorry, signup's are currently disabled"))
    else:
        if request.method == 'POST':
            form = SignupForm(request.POST)
            if form.is_valid():
                user = form.save(commit=False)
                pubkey_field = app_settings.SCATTERAUTH_USER_PUBKEY_FIELD
                setattr(user, pubkey_field, form.cleaned_data[pubkey_field])
                user.save()
                login(request, user, 'scatterauth.backend.ScatterAuthBackend')
                return redirect(get_redirect_url(request))
    return render(request, template_name, {'form': form})
| 39.653846 | 102 | 0.690592 | import random
import string
import json
from django.shortcuts import render, redirect, reverse
from django.urls.exceptions import NoReverseMatch
from django.contrib.auth import login, authenticate
from django.conf import settings
from django.views.decorators.http import require_http_methods
from django.http import JsonResponse, HttpResponseBadRequest
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_exempt
from django.http.request import split_domain_port
from scatterauth.forms import LoginForm, SignupForm
from scatterauth.settings import app_settings
def get_redirect_url(request):
if request.GET.get('next'):
return request.GET.get('next')
elif request.POST.get('next'):
return request.POST.get('next')
elif settings.LOGIN_REDIRECT_URL:
try:
url = reverse(settings.LOGIN_REDIRECT_URL)
except NoReverseMatch:
url = settings.LOGIN_REDIRECT_URL
return url
@require_http_methods(['POST'])
def login_api(request):
form = LoginForm(request.POST)
if not form.is_valid():
return JsonResponse({'success': False, 'error': json.loads(form.errors.as_json())})
public_key = form.cleaned_data.get('public_key')
nonce = form.cleaned_data.get('nonce')
res = form.cleaned_data.get('res')
if not nonce or not res or not public_key:
return JsonResponse({'error': _(
'Please pass message, signed message, and public key'),
'success': False})
user = authenticate(request, public_key=public_key, nonce=nonce, res=res)
if user:
login(request, user, 'scatterauth.backend.ScatterAuthBackend')
return JsonResponse({'success': True, 'redirect_url': get_redirect_url(request)})
else:
error = _("Can't find a user for the provided signature with public key {public_key}").format(
public_key=public_key)
return JsonResponse({'success': False, 'error': error})
@require_http_methods(['POST'])
def signup_api(request):
if not app_settings.SCATTERAUTH_SIGNUP_ENABLED:
return JsonResponse({'success': False, 'error': _("Sorry, signup's are currently disabled")})
form = SignupForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
setattr(user, username, form.cleaned_data['publicKey'])
user.save()
login(request, user, 'scatterauth.backend.ScatterAuthBackend')
return JsonResponse({'success': True, 'redirect_url': get_redirect_url(request)})
else:
return JsonResponse({'success': False, 'error': json.loads(form.errors.as_json())})
@require_http_methods(['GET', 'POST'])
def signup_view(request, template_name='scatterauth/signup.html'):
form = SignupForm()
if not app_settings.SCATTERAUTH_SIGNUP_ENABLED:
form.add_error(None, _("Sorry, signup's are currently disabled"))
else:
if request.method == 'POST':
form = SignupForm(request.POST)
if form.is_valid():
user = form.save(commit=False)
pubkey_field = app_settings.SCATTERAUTH_USER_PUBKEY_FIELD
setattr(user, pubkey_field, form.cleaned_data[pubkey_field])
user.save()
print(pubkey_field)
print(user.email)
login(request, user, 'scatterauth.backend.ScatterAuthBackend')
return redirect(get_redirect_url(request))
return render(request, template_name, {'form': form})
| true | true |
f727fd004d9faf9730132464a1939ae041c64ead | 13,745 | py | Python | plugins/modules/nsi_api_v2_0_fres.py | ciena/ciena.mcp | b266a7cbd912c547f6e4877597d67ea9254e5758 | [
"Apache-2.0"
] | 3 | 2021-07-19T23:56:34.000Z | 2021-11-08T14:23:53.000Z | plugins/modules/nsi_api_v2_0_fres.py | ciena/ciena.mcp | b266a7cbd912c547f6e4877597d67ea9254e5758 | [
"Apache-2.0"
] | 1 | 2022-01-19T22:06:49.000Z | 2022-01-24T15:16:53.000Z | plugins/modules/nsi_api_v2_0_fres.py | ciena/ciena.mcp | b266a7cbd912c547f6e4877597d67ea9254e5758 | [
"Apache-2.0"
] | 1 | 2021-11-08T14:25:29.000Z | 2021-11-08T14:25:29.000Z | #!/usr/bin/env python
# Info module template
#############################################
# WARNING #
#############################################
#
# This file is auto generated by
# https://github.com/jgroom33/vmware_rest_code_generator
#
# Do not edit this file manually.
#
# Changes should be made in the swagger used to
# generate this file or in the generator
#
#############################################
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
import json
DOCUMENTATION = """
module: nsi_api_v2_0_fres
short_description: Handle resource of type nsi_api_v2_0_fres
description: Handle resource of type nsi_api_v2_0_fres
options:
childFreId:
description:
- (Optional) The child FRE identifier to return its parents
- Used by I(state=['get'])
type: str
data:
description:
- 'Validate attributes are:'
- ' - C(attributes) (dict): '
- ' - C(id) (str): The unique identifier for the FRE resource'
- ' - C(meta) (dict): A metadata object that contains non-standard meta information'
- ' - C(relationships) (dict): '
- ' - C(type) (str): The FRE resource type'
- Used by I(state=['post'])
type: dict
directionality:
choices:
- bidirectional
- unidirectional
description:
- (Optional) Indicates if unidirectional or bidirectional FREs should be returned
- Used by I(state=['get'])
type: str
endpoint.tpe.concrete:
description:
- Concrete TPE identifier for endpoints
- Used by I(state=['get'])
type: str
exclude:
choices:
- actual
- expectation
description:
- (Optional) The given type would be excluded from get parents call, only combine
with childFreId
- Used by I(state=['get'])
type: str
fields:
description:
- (Optional) List of comma separated fields to be included in the response. Fields
require full path (i.e. data.attributes.field)
- Used by I(state=['get'])
type: str
freExpectations.equipmentIntent.id:
description:
- (Optional) The equipment intent Id
- Used by I(state=['get'])
type: str
freExpectations.serviceIntent.id:
description:
- (Optional) The service intent Id
- Used by I(state=['get'])
type: str
freType:
description:
- 'FRE types in comma separated list The allowed values are: explicitRoute, explicitRouteGroup,
snc, sncGroup'
- Used by I(state=['get'])
type: str
group:
choices:
- dwa
- infrastructure
- packet
- packet_infrastructure
- tdm
description:
- FRE group :<ul><li>dwa for all FREs in OTU4 and all top level FREs in ETHERNET,
DSR, DSR_ETHERNET, OTSi(OCH), ODU2, ODU4<li>infrastructure for all FRE-APs representing
forwarding constructs between ROADM OTS'<li>packet for all L2 nodal and top
level FREs in ETHERNET including infrastructure</ul>
- Used by I(state=['get'])
type: str
identifierKey:
description:
- List of comma separated keys for an identifer object
- Used by I(state=['get'])
type: list
identifierValue:
description:
- List of comma separated values for an identifier object
- Used by I(state=['get'])
type: list
include:
description:
- '(Optional) List of comma separated resources to be side-loaded. The allowed
values are: tpes, expectations'
- Used by I(state=['get'])
type: str
includeMetaData:
description:
- 'MetaData to be included. The allowed values are: layerRate'
- Used by I(state=['get'])
type: str
included:
description:
- Resources related to a FRE, such as FreData, EndPointData, TpeData, EquipmentData,
EquipmentHolderData, FrePlannedData, FreExpectationData, FreDiscoveredData,
ResiliencyControllerData, EncapsulatedResiliencyData
- Used by I(state=['post'])
type: list
layerRate:
description:
- 'FRE layer rates in comma separated list The allowed values are: ETHERNET, OTU2,
OTU4, OTSi, OMS, OS, PHY, OTS, ODU2, ODU4, DSR, DSR_10GE, DSR_100GE, DSR_ETHERNET'
- Used by I(state=['get'])
type: str
limit:
description:
- The size of a returned page
- Used by I(state=['get'])
type: str
links:
description:
- Links related to the resource
- 'Validate attributes are:'
- ' - C(current) (str): The current page of data'
- ' - C(first) (str): The first page of data'
- ' - C(last) (str): The last page of data'
- ' - C(next) (str): The next page of data'
- ' - C(prev) (str): The previous page of data'
- ' - C(self) (str): A `self` member, whose value is a URL for the relationship
itself (a "relationship URL"). This URL allows the client to directly manipulate
the relationship. For example, it would allow a client to remove an `author`
from an `article` without deleting the people resource itself.'
- Used by I(state=['post'])
type: dict
managementName:
description:
- Management Name
- Used by I(state=['get'])
type: str
meta:
description:
- A metadata object that contains non-standard meta information
- 'Validate attributes are:'
- ' - C(absoluteTotal) (int): The unfiltered total number of entities in the data'
- ' - C(aggregations) (list): The aggregated data based on a requested aggregation
name and criteria'
- ' - C(filtered) (bool): Flags whether the current object is filtered using `fields`
query param or not'
- ' - C(missingReferenceIds) (list): The list of missing resource IDs'
- ' - C(missingReferences) (bool): boolean detailing if the GET FRE tree has any
missing references'
- ' - C(total) (int): The total number of entities in the data'
- Used by I(state=['post'])
type: dict
ncId:
description:
- (Deprecated) Network Construct identifier
- Used by I(state=['get'])
type: str
networkConstruct.id:
description:
- Network Construct identifier
- Used by I(state=['get'])
type: str
offset:
description:
- Offset for the second page
- Used by I(state=['get'])
type: str
roadmLineId:
description:
- (Optional) Find services configured over a roadmline based on the roadmline
FRE identifier.
- Used by I(state=['get'])
type: str
searchText:
description:
- (Optional) The searchable text
- Used by I(state=['get'])
type: str
signalContentType:
description:
- (Optional) The identifier indicating type of parent to be returned. If specified,
parent matching the criteria will be returned
- Used by I(state=['get'])
type: str
srlg:
description:
- (Optional) Find roadmlines by srlg values separated by comma. A roadmline is
a FRE between two SAM cards.
- Used by I(state=['get'])
type: str
state:
choices:
- get
- post
description: []
type: str
tpeId:
description:
- TPE identifier for endpoints
- Used by I(state=['get'])
type: str
type:
description:
- 'FRE types in comma separated list. The allowed values are: service, link, roadmline-ap,
roadmline'
- Used by I(state=['get'])
type: str
userLabel:
description:
- User label
- Used by I(state=['get'])
type: str
author: []
version_added: 1.0.0
requirements:
- python >= 3.6
"""
# Module parameters that are forwarded verbatim as URL query parameters
# when the request URL is built (consumed by gen_args() in
# module_utils.mcp).  Dotted names mirror the REST API's query keys.
IN_QUERY_PARAMETER = [
    "childFreId",
    "directionality",
    "endpoint.tpe.concrete",
    "exclude",
    "fields",
    "freExpectations.equipmentIntent.id",
    "freExpectations.serviceIntent.id",
    "freType",
    "group",
    "identifierKey",
    "identifierValue",
    "include",
    "includeMetaData",
    "layerRate",
    "limit",
    "managementName",
    "ncId",
    "networkConstruct.id",
    "offset",
    "roadmLineId",
    "searchText",
    "signalContentType",
    "srlg",
    "tpeId",
    "type",
    "userLabel",
]
from ansible.module_utils.basic import env_fallback
try:
from ansible_module.turbo.module import AnsibleTurboModule as AnsibleModule
except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ciena.mcp.plugins.module_utils.mcp import (
gen_args,
open_session,
update_changed_flag,
)
def prepare_argument_spec():
    """Build the Ansible argument spec for the fres module.

    Connection options fall back to MCP_* environment variables; all
    other entries mirror the REST API's GET query / POST body fields.
    """
    argument_spec = {
        "mcp_hostname": dict(
            type="str", required=False, fallback=(env_fallback, ["MCP_HOST"])
        ),
        "mcp_username": dict(
            type="str", required=False, fallback=(env_fallback, ["MCP_USER"])
        ),
        "mcp_password": dict(
            type="str",
            required=False,
            no_log=True,
            fallback=(env_fallback, ["MCP_PASSWORD"]),
        ),
    }
    # The bulk of the GET-only parameters share an identical string spec,
    # so generate those entries from a single name list.
    plain_get_strings = (
        "userLabel",
        "type",
        "tpeId",
        "srlg",
        "signalContentType",
        "searchText",
        "roadmLineId",
        "offset",
        "networkConstruct_id",
        "ncId",
        "managementName",
        "limit",
        "layerRate",
        "includeMetaData",
        "include",
        "freType",
        "freExpectations_serviceIntent_id",
        "freExpectations_equipmentIntent_id",
        "fields",
        "endpoint_tpe_concrete",
        "childFreId",
    )
    for param_name in plain_get_strings:
        argument_spec[param_name] = {"type": "str", "operationIds": ["get"]}
    # Remaining parameters need per-entry types, choices, or operations.
    argument_spec["state"] = {"type": "str", "choices": ["get", "post"]}
    argument_spec["meta"] = {"type": "dict", "operationIds": ["post"]}
    argument_spec["links"] = {"type": "dict", "operationIds": ["post"]}
    argument_spec["included"] = {"type": "list", "operationIds": ["post"]}
    argument_spec["identifierValue"] = {"type": "list", "operationIds": ["get"]}
    argument_spec["identifierKey"] = {"type": "list", "operationIds": ["get"]}
    argument_spec["group"] = {
        "type": "str",
        "choices": ["dwa", "infrastructure", "packet", "packet_infrastructure", "tdm"],
        "operationIds": ["get"],
    }
    argument_spec["exclude"] = {
        "type": "str",
        "choices": ["actual", "expectation"],
        "operationIds": ["get"],
    }
    argument_spec["directionality"] = {
        "type": "str",
        "choices": ["bidirectional", "unidirectional"],
        "operationIds": ["get"],
    }
    argument_spec["data"] = {"type": "dict", "operationIds": ["post"]}
    return argument_spec
async def main():
    """Module entry point: parse arguments, open an MCP session, run the
    selected operation, and report its result to Ansible."""
    module = AnsibleModule(
        argument_spec=prepare_argument_spec(), supports_check_mode=True
    )
    params = module.params
    session = await open_session(
        mcp_hostname=params["mcp_hostname"],
        mcp_username=params["mcp_username"],
        mcp_password=params["mcp_password"],
    )
    outcome = await entry_point(module, session)
    module.exit_json(**outcome)
def url(params):
    """Return the fres collection endpoint for the configured MCP host."""
    template = "https://{mcp_hostname}/nsi/api/v2_0/fres"
    return template.format(**params)
async def entry_point(module, session):
    """Dispatch to the handler (_get/_post) named by the `state` parameter."""
    handler_name = "_" + module.params["state"]
    handler = globals()[handler_name]
    return await handler(module.params, session)
async def _get(params, session):
    """Perform the GET request for fres resources.

    Builds the URL from the connection params plus the query parameters in
    IN_QUERY_PARAMETER, decodes a JSON body when the response declares a
    supported content type, and normalizes via update_changed_flag.
    """
    _url = "https://{mcp_hostname}/nsi/api/v2_0/fres".format(**params) + gen_args(
        params, IN_QUERY_PARAMETER
    )
    async with session.get(_url) as resp:
        content_types = [
            "application/json-patch+json",
            "application/vnd.api+json",
            "application/json",
        ]
        # Default to an empty payload up front: the original code left
        # _json unbound (UnboundLocalError) when the Content-Type header
        # was present but not one of the supported types.
        _json = {}
        try:
            if resp.headers["Content-Type"] in content_types:
                _json = await resp.json()
            else:
                print("response Content-Type not supported")
        except KeyError:
            # No Content-Type header at all: keep the empty payload.
            pass
        return await update_changed_flag(_json, resp.status, "get")
async def _post(params, session):
    """Perform the POST request that creates a fres resource.

    Only the JSON:API top-level members accepted by the endpoint (data,
    included, links, meta) are copied from the module parameters into the
    request body; the result is normalized via update_changed_flag.
    """
    accepted_fields = ["data", "included", "links", "meta"]
    spec = {}
    for field in accepted_fields:
        if params[field] is not None:
            spec[field] = params[field]
    _url = "https://{mcp_hostname}/nsi/api/v2_0/fres".format(**params) + gen_args(
        params, IN_QUERY_PARAMETER
    )
    async with session.post(_url, json=spec) as resp:
        content_types = [
            "application/json-patch+json",
            "application/vnd.api+json",
            "application/json",
        ]
        # Default to an empty payload up front: the original code left
        # _json unbound (UnboundLocalError) when the Content-Type header
        # was present but not one of the supported types.
        _json = {}
        try:
            if resp.headers["Content-Type"] in content_types:
                _json = await resp.json()
            else:
                print("response Content-Type not supported")
        except KeyError:
            # No Content-Type header at all: keep the empty payload.
            pass
        return await update_changed_flag(_json, resp.status, "post")
if __name__ == "__main__":
    import asyncio

    # asyncio.run() creates, runs, and closes its own event loop;
    # get_event_loop() outside a running loop is deprecated (Python 3.10+)
    # and also leaked the loop (it was never closed) in the original code.
    asyncio.run(main())
| 32.72619 | 99 | 0.621462 |
s are: tpes, expectations'
- Used by I(state=['get'])
type: str
includeMetaData:
description:
- 'MetaData to be included. The allowed values are: layerRate'
- Used by I(state=['get'])
type: str
included:
description:
- Resources related to a FRE, such as FreData, EndPointData, TpeData, EquipmentData,
EquipmentHolderData, FrePlannedData, FreExpectationData, FreDiscoveredData,
ResiliencyControllerData, EncapsulatedResiliencyData
- Used by I(state=['post'])
type: list
layerRate:
description:
- 'FRE layer rates in comma separated list The allowed values are: ETHERNET, OTU2,
OTU4, OTSi, OMS, OS, PHY, OTS, ODU2, ODU4, DSR, DSR_10GE, DSR_100GE, DSR_ETHERNET'
- Used by I(state=['get'])
type: str
limit:
description:
- The size of a returned page
- Used by I(state=['get'])
type: str
links:
description:
- Links related to the resource
- 'Validate attributes are:'
- ' - C(current) (str): The current page of data'
- ' - C(first) (str): The first page of data'
- ' - C(last) (str): The last page of data'
- ' - C(next) (str): The next page of data'
- ' - C(prev) (str): The previous page of data'
- ' - C(self) (str): A `self` member, whose value is a URL for the relationship
itself (a "relationship URL"). This URL allows the client to directly manipulate
the relationship. For example, it would allow a client to remove an `author`
from an `article` without deleting the people resource itself.'
- Used by I(state=['post'])
type: dict
managementName:
description:
- Management Name
- Used by I(state=['get'])
type: str
meta:
description:
- A metadata object that contains non-standard meta information
- 'Validate attributes are:'
- ' - C(absoluteTotal) (int): The unfiltered total number of entities in the data'
- ' - C(aggregations) (list): The aggregated data based on a requested aggregation
name and criteria'
- ' - C(filtered) (bool): Flags whether the current object is filtered using `fields`
query param or not'
- ' - C(missingReferenceIds) (list): The list of missing resource IDs'
- ' - C(missingReferences) (bool): boolean detailing if the GET FRE tree has any
missing references'
- ' - C(total) (int): The total number of entities in the data'
- Used by I(state=['post'])
type: dict
ncId:
description:
- (Deprecated) Network Construct identifier
- Used by I(state=['get'])
type: str
networkConstruct.id:
description:
- Network Construct identifier
- Used by I(state=['get'])
type: str
offset:
description:
- Offset for the second page
- Used by I(state=['get'])
type: str
roadmLineId:
description:
- (Optional) Find services configured over a roadmline based on the roadmline
FRE identifier.
- Used by I(state=['get'])
type: str
searchText:
description:
- (Optional) The searchable text
- Used by I(state=['get'])
type: str
signalContentType:
description:
- (Optional) The identifier indicating type of parent to be returned. If specified,
parent matching the criteria will be returned
- Used by I(state=['get'])
type: str
srlg:
description:
- (Optional) Find roadmlines by srlg values separated by comma. A roadmline is
a FRE between two SAM cards.
- Used by I(state=['get'])
type: str
state:
choices:
- get
- post
description: []
type: str
tpeId:
description:
- TPE identifier for endpoints
- Used by I(state=['get'])
type: str
type:
description:
- 'FRE types in comma separated list. The allowed values are: service, link, roadmline-ap,
roadmline'
- Used by I(state=['get'])
type: str
userLabel:
description:
- User label
- Used by I(state=['get'])
type: str
author: []
version_added: 1.0.0
requirements:
- python >= 3.6
"""
# Module parameters that are forwarded verbatim as URL query parameters
# when the request URL is built (consumed by gen_args() in
# module_utils.mcp).  Dotted names mirror the REST API's query keys.
IN_QUERY_PARAMETER = [
    "childFreId",
    "directionality",
    "endpoint.tpe.concrete",
    "exclude",
    "fields",
    "freExpectations.equipmentIntent.id",
    "freExpectations.serviceIntent.id",
    "freType",
    "group",
    "identifierKey",
    "identifierValue",
    "include",
    "includeMetaData",
    "layerRate",
    "limit",
    "managementName",
    "ncId",
    "networkConstruct.id",
    "offset",
    "roadmLineId",
    "searchText",
    "signalContentType",
    "srlg",
    "tpeId",
    "type",
    "userLabel",
]
from ansible.module_utils.basic import env_fallback
try:
from ansible_module.turbo.module import AnsibleTurboModule as AnsibleModule
except ImportError:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ciena.mcp.plugins.module_utils.mcp import (
gen_args,
open_session,
update_changed_flag,
)
def prepare_argument_spec():
    """Build the Ansible argument spec for the fres module.

    Connection options fall back to MCP_* environment variables; all
    other entries mirror the REST API's GET query / POST body fields.
    """
    argument_spec = {
        "mcp_hostname": dict(
            type="str", required=False, fallback=(env_fallback, ["MCP_HOST"])
        ),
        "mcp_username": dict(
            type="str", required=False, fallback=(env_fallback, ["MCP_USER"])
        ),
        "mcp_password": dict(
            type="str",
            required=False,
            no_log=True,
            fallback=(env_fallback, ["MCP_PASSWORD"]),
        ),
    }
    # The bulk of the GET-only parameters share an identical string spec,
    # so generate those entries from a single name list.
    plain_get_strings = (
        "userLabel",
        "type",
        "tpeId",
        "srlg",
        "signalContentType",
        "searchText",
        "roadmLineId",
        "offset",
        "networkConstruct_id",
        "ncId",
        "managementName",
        "limit",
        "layerRate",
        "includeMetaData",
        "include",
        "freType",
        "freExpectations_serviceIntent_id",
        "freExpectations_equipmentIntent_id",
        "fields",
        "endpoint_tpe_concrete",
        "childFreId",
    )
    for param_name in plain_get_strings:
        argument_spec[param_name] = {"type": "str", "operationIds": ["get"]}
    # Remaining parameters need per-entry types, choices, or operations.
    argument_spec["state"] = {"type": "str", "choices": ["get", "post"]}
    argument_spec["meta"] = {"type": "dict", "operationIds": ["post"]}
    argument_spec["links"] = {"type": "dict", "operationIds": ["post"]}
    argument_spec["included"] = {"type": "list", "operationIds": ["post"]}
    argument_spec["identifierValue"] = {"type": "list", "operationIds": ["get"]}
    argument_spec["identifierKey"] = {"type": "list", "operationIds": ["get"]}
    argument_spec["group"] = {
        "type": "str",
        "choices": ["dwa", "infrastructure", "packet", "packet_infrastructure", "tdm"],
        "operationIds": ["get"],
    }
    argument_spec["exclude"] = {
        "type": "str",
        "choices": ["actual", "expectation"],
        "operationIds": ["get"],
    }
    argument_spec["directionality"] = {
        "type": "str",
        "choices": ["bidirectional", "unidirectional"],
        "operationIds": ["get"],
    }
    argument_spec["data"] = {"type": "dict", "operationIds": ["post"]}
    return argument_spec
async def main():
    """Module entry point: parse arguments, open an MCP session, run the
    selected operation, and report its result to Ansible."""
    module = AnsibleModule(
        argument_spec=prepare_argument_spec(), supports_check_mode=True
    )
    params = module.params
    session = await open_session(
        mcp_hostname=params["mcp_hostname"],
        mcp_username=params["mcp_username"],
        mcp_password=params["mcp_password"],
    )
    outcome = await entry_point(module, session)
    module.exit_json(**outcome)
def url(params):
    """Return the fres collection endpoint for the configured MCP host."""
    template = "https://{mcp_hostname}/nsi/api/v2_0/fres"
    return template.format(**params)
async def entry_point(module, session):
    """Dispatch to the handler (_get/_post) named by the `state` parameter."""
    handler_name = "_" + module.params["state"]
    handler = globals()[handler_name]
    return await handler(module.params, session)
async def _get(params, session):
    """Perform the GET request for fres resources.

    Builds the URL from the connection params plus the query parameters in
    IN_QUERY_PARAMETER, decodes a JSON body when the response declares a
    supported content type, and normalizes via update_changed_flag.
    """
    _url = "https://{mcp_hostname}/nsi/api/v2_0/fres".format(**params) + gen_args(
        params, IN_QUERY_PARAMETER
    )
    async with session.get(_url) as resp:
        content_types = [
            "application/json-patch+json",
            "application/vnd.api+json",
            "application/json",
        ]
        # Default to an empty payload up front: the original code left
        # _json unbound (UnboundLocalError) when the Content-Type header
        # was present but not one of the supported types.
        _json = {}
        try:
            if resp.headers["Content-Type"] in content_types:
                _json = await resp.json()
            else:
                print("response Content-Type not supported")
        except KeyError:
            # No Content-Type header at all: keep the empty payload.
            pass
        return await update_changed_flag(_json, resp.status, "get")
async def _post(params, session):
    """Perform the POST request that creates a fres resource.

    Only the JSON:API top-level members accepted by the endpoint (data,
    included, links, meta) are copied from the module parameters into the
    request body; the result is normalized via update_changed_flag.
    """
    accepted_fields = ["data", "included", "links", "meta"]
    spec = {}
    for field in accepted_fields:
        if params[field] is not None:
            spec[field] = params[field]
    _url = "https://{mcp_hostname}/nsi/api/v2_0/fres".format(**params) + gen_args(
        params, IN_QUERY_PARAMETER
    )
    async with session.post(_url, json=spec) as resp:
        content_types = [
            "application/json-patch+json",
            "application/vnd.api+json",
            "application/json",
        ]
        # Default to an empty payload up front: the original code left
        # _json unbound (UnboundLocalError) when the Content-Type header
        # was present but not one of the supported types.
        _json = {}
        try:
            if resp.headers["Content-Type"] in content_types:
                _json = await resp.json()
            else:
                print("response Content-Type not supported")
        except KeyError:
            # No Content-Type header at all: keep the empty payload.
            pass
        return await update_changed_flag(_json, resp.status, "post")
if __name__ == "__main__":
    import asyncio

    # asyncio.run() creates, runs, and closes its own event loop;
    # get_event_loop() outside a running loop is deprecated (Python 3.10+)
    # and also leaked the loop (it was never closed) in the original code.
    asyncio.run(main())
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.