prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "```python\n# Copyright (c) 2012 NetApp, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/l... | [
{
"content": "<|memory_start|>```python\n# Copyright (c) 2012 NetApp, Inc.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://... | ```python
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
    """Build a mocked Configuration object suitable for driver construction."""
    cfg_mock = mox.MockObject(conf.Configuration)
    cfg_mock.append_config_values(mox.IgnoreArg())
    # Defaults every NFS driver under test expects to find on its config.
    cfg_mock.nfs_mount_point_base = '/mnt/test'
    cfg_mock.nfs_mount_options = None
    return cfg_mock
class FakeVolume(object):
    """Minimal volume stand-in that supports both attribute and item access."""

    def __init__(self, size=0):
        self.size = size
        # Derive a unique-enough id from the instance identity.
        self.id = hash(self)
        self.name = None

    def __getitem__(self, key):
        # Item access is a thin view over the instance dict (KeyError on miss).
        return vars(self)[key]

    def __setitem__(self, key, val):
        vars(self)[key] = val
class FakeSnapshot(object):
    """Minimal snapshot stand-in with dict-style read access to its fields."""

    def __init__(self, volume_size=0):
        # All identifying fields default to None; only size is configurable.
        self.volume_name = None
        self.name = None
        self.volume_id = None
        self.volume_size = volume_size
        self.user_id = None
        self.status = None

    def __getitem__(self, key):
        # Read-only item access backed by the instance dict (KeyError on miss).
        return vars(self)[key]
class FakeResponse(object):
    """Mimic a NetApp API response carrying a Status and optional Reason."""

    def __init__(self, status):
        """Initialize FakeResponse.

        :param status: Either 'failed' or 'passed'
        """
        self.Status = status
        # Only failed responses grow a Reason attribute, matching the real API.
        failed = status == 'failed'
        if failed:
            self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
    """Test direct NetApp C Mode driver.

    Fixes applied during review:
    - removed the undefined ``_()`` translation call in the image-cache
      failure path (``_`` is never imported in this module);
    - restored the missing comma in the bad-URL list of
      ``test_is_cloneable_share_badformats`` (implicit string concatenation
      was silently merging two intended test cases into one);
    - normalized ``drv. clone_image`` spacing.
    """
    def setUp(self):
        super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
        self._custom_setup()

    def test_create_snapshot(self):
        """Test snapshot can be created and deleted."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(drv, '_clone_volume')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        mox.ReplayAll()
        drv.create_snapshot(FakeSnapshot())
        mox.VerifyAll()

    def test_create_volume_from_snapshot(self):
        """Tests volume creation from snapshot."""
        drv = self._driver
        mox = self.mox
        volume = FakeVolume(1)
        snapshot = FakeSnapshot(1)
        location = '127.0.0.1:/nfs'
        expected_result = {'provider_location': location}
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_get_volume_location')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
        drv._get_volume_location(IgnoreArg()).AndReturn(location)
        drv.local_path(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all(IgnoreArg())
        mox.ReplayAll()
        loc = drv.create_volume_from_snapshot(volume, snapshot)
        self.assertEqual(loc, expected_result)
        mox.VerifyAll()

    def _prepare_delete_snapshot_mock(self, snapshot_exists):
        """Record the delete-snapshot expectations for (non)existent files."""
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_get_provider_location')
        mox.StubOutWithMock(drv, '_volume_not_present')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
        if snapshot_exists:
            mox.StubOutWithMock(drv, '_execute')
            mox.StubOutWithMock(drv, '_get_volume_path')
        drv._get_provider_location(IgnoreArg())
        drv._get_provider_location(IgnoreArg())
        drv._volume_not_present(IgnoreArg(), IgnoreArg())\
            .AndReturn(not snapshot_exists)
        if snapshot_exists:
            # The file is only removed when it is actually present.
            drv._get_volume_path(IgnoreArg(), IgnoreArg())
            drv._execute('rm', None, run_as_root=True)
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        mox.ReplayAll()
        return mox

    def test_delete_existing_snapshot(self):
        """Deleting an existing snapshot removes its backing file."""
        drv = self._driver
        mox = self._prepare_delete_snapshot_mock(True)
        drv.delete_snapshot(FakeSnapshot())
        mox.VerifyAll()

    def test_delete_missing_snapshot(self):
        """Deleting a missing snapshot is a no-op on the share."""
        drv = self._driver
        mox = self._prepare_delete_snapshot_mock(False)
        drv.delete_snapshot(FakeSnapshot())
        mox.VerifyAll()

    def _custom_setup(self):
        """Instantiate the C-mode driver under test in proxy mode."""
        kwargs = {}
        kwargs['netapp_mode'] = 'proxy'
        kwargs['configuration'] = create_configuration()
        self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)

    def test_check_for_setup_error(self):
        """Setup check fails on unset flags and passes once they are set."""
        mox = self.mox
        drv = self._driver
        required_flags = [
            'netapp_transport_type',
            'netapp_login',
            'netapp_password',
            'netapp_server_hostname',
            'netapp_server_port']
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, None)
        # check exception raises when flags are not set
        self.assertRaises(exception.CinderException,
                          drv.check_for_setup_error)
        # set required flags
        for flag in required_flags:
            setattr(drv.configuration, flag, 'val')
        setattr(drv, 'ssc_enabled', False)
        mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
        netapp_nfs.NetAppDirectNfsDriver._check_flags()
        mox.ReplayAll()
        drv.check_for_setup_error()
        mox.VerifyAll()
        # restore initial FLAGS
        for flag in required_flags:
            delattr(drv.configuration, flag)

    def test_do_setup(self):
        """do_setup delegates to the base class and the custom setup hooks."""
        mox = self.mox
        drv = self._driver
        mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
        mox.StubOutWithMock(drv, '_get_client')
        mox.StubOutWithMock(drv, '_do_custom_setup')
        netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
        drv._get_client()
        drv._do_custom_setup(IgnoreArg())
        mox.ReplayAll()
        drv.do_setup(IsA(context.RequestContext))
        mox.VerifyAll()

    def _prepare_clone_mock(self, status):
        """Record the expected call sequence for a file-level clone."""
        drv = self._driver
        mox = self.mox
        volume = FakeVolume()
        setattr(volume, 'provider_location', '127.0.0.1:/nfs')
        mox.StubOutWithMock(drv, '_get_host_ip')
        mox.StubOutWithMock(drv, '_get_export_path')
        mox.StubOutWithMock(drv, '_get_if_info_by_ip')
        mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
        mox.StubOutWithMock(drv, '_clone_file')
        mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
        drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
        drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
        drv._get_if_info_by_ip('127.0.0.1').AndReturn(
            self._prepare_info_by_ip_response())
        drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
        drv._clone_file('nfsvol', 'volume_name', 'clone_name',
                        'openstack')
        drv._post_prov_deprov_in_ssc(IgnoreArg())
        return mox

    def _prepare_info_by_ip_response(self):
        """Return a canned net-interface-info API response element list."""
        res = """<attributes-list>
                    <net-interface-info>
                    <address>127.0.0.1</address>
                    <administrative-status>up</administrative-status>
                    <current-node>fas3170rre-cmode-01</current-node>
                    <current-port>e1b-1165</current-port>
                    <data-protocols>
                      <data-protocol>nfs</data-protocol>
                    </data-protocols>
                    <dns-domain-name>none</dns-domain-name>
                    <failover-group/>
                    <failover-policy>disabled</failover-policy>
                    <firewall-policy>data</firewall-policy>
                    <home-node>fas3170rre-cmode-01</home-node>
                    <home-port>e1b-1165</home-port>
                    <interface-name>nfs_data1</interface-name>
                    <is-auto-revert>false</is-auto-revert>
                    <is-home>true</is-home>
                    <netmask>255.255.255.0</netmask>
                    <netmask-length>24</netmask-length>
                    <operational-status>up</operational-status>
                    <role>data</role>
                    <routing-group-name>c10.63.165.0/24</routing-group-name>
                    <use-failover-group>disabled</use-failover-group>
                    <vserver>openstack</vserver>
                  </net-interface-info></attributes-list>"""
        response_el = etree.XML(res)
        return api.NaElement(response_el).get_children()

    def test_clone_volume(self):
        """Cloning a volume drives the recorded clone call sequence."""
        drv = self._driver
        mox = self._prepare_clone_mock('pass')
        mox.ReplayAll()
        volume_name = 'volume_name'
        clone_name = 'clone_name'
        volume_id = volume_name + str(hash(volume_name))
        share = 'ip:/share'
        drv._clone_volume(volume_name, clone_name, volume_id, share)
        mox.VerifyAll()

    def test_register_img_in_cache_noshare(self):
        """Image registration clones into the volume's own share."""
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')
        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()

    def test_register_img_in_cache_with_share(self):
        """Image registration uses the share from provider_location."""
        volume = {'id': '1', 'name': 'testvol'}
        volume['provider_location'] = '10.61.170.1:/share/path'
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
                                    '10.61.170.1:/share/path',
                                    'img-cache-12345')
        mox.ReplayAll()
        drv._register_image_in_cache(volume, '12345')
        mox.VerifyAll()

    def test_find_image_in_cache_no_shares(self):
        """Cache lookup returns nothing when no shares are mounted."""
        drv = self._driver
        drv._mounted_shares = []
        result = drv._find_image_in_cache('image_id')
        if not result:
            pass
        else:
            self.fail('Return result is unexpected')

    def test_find_image_in_cache_shares(self):
        """Cache lookup returns (share, file) pairs for cached images."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(os.path, 'exists')
        drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
        os.path.exists('/mnt/img-cache-id').AndReturn(True)
        mox.ReplayAll()
        result = drv._find_image_in_cache('id')
        (share, file_name) = result[0]
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if (share == 'testshare' and file_name == 'img-cache-id'):
            pass
        else:
            # NOTE(review): dropped the undefined _() translation wrapper
            # here; it is never imported in this module.
            LOG.warn("Share %(share)s and file name %(file_name)s"
                     % {'share': share, 'file_name': file_name})
            self.fail('Return result is unexpected')

    def test_find_old_cache_files_notexists(self):
        """find returns no expired cache files when none match."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        setattr(drv.configuration, 'expiry_thres_minutes', 720)
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        if len(res) == 0:
            pass
        else:
            self.fail('No files expected but got return values.')

    def test_find_old_cache_files_exists(self):
        """Expired cache files found by find are shortlisted for deletion."""
        drv = self._driver
        mox = self.mox
        cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
               'img-cache*', '-amin', '+720']
        setattr(drv.configuration, 'expiry_thres_minutes', '720')
        files = '/mnt/img-id1\n/mnt/img-id2\n'
        r_files = ['img-id1', 'img-id2']
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_execute')
        mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
        drv._get_mount_point_for_share('share').AndReturn('/mnt')
        drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
        drv._shortlist_del_eligible_files(
            IgnoreArg(), r_files).AndReturn(r_files)
        mox.ReplayAll()
        res = drv._find_old_cache_files('share')
        mox.VerifyAll()
        if len(res) == len(r_files):
            for f in res:
                r_files.remove(f)
        else:
            self.fail('Returned files not same as expected.')

    def test_delete_files_till_bytes_free_success(self):
        """Largest cache files are deleted first until enough space frees."""
        drv = self._driver
        mox = self.mox
        files = [('img-cache-1', 230), ('img-cache-2', 380)]
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._delete_file('/mnt/img-cache-2').AndReturn(True)
        drv._delete_file('/mnt/img-cache-1').AndReturn(True)
        mox.ReplayAll()
        drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
        mox.VerifyAll()

    def test_clean_image_cache_exec(self):
        """Cache cleaning triggers when free space is below the threshold."""
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_find_old_cache_files')
        mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
        mox.StubOutWithMock(drv, '_get_capacity_info')
        drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
        drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
        drv._delete_files_till_bytes_free(
            ['f1', 'f2'], 'testshare', bytes_to_free=31)
        mox.ReplayAll()
        drv._clean_image_cache()
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clean_image_cache_noexec(self):
        """Cache cleaning is skipped when free space is above the threshold."""
        drv = self._driver
        mox = self.mox
        drv.configuration.thres_avl_size_perc_start = 20
        drv.configuration.thres_avl_size_perc_stop = 50
        drv._mounted_shares = ['testshare']
        mox.StubOutWithMock(drv, '_get_capacity_info')
        drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
        mox.ReplayAll()
        drv._clean_image_cache()
        mox.VerifyAll()
        drv._mounted_shares.remove('testshare')
        if not drv.cleaning:
            pass
        else:
            self.fail('Clean image cache failed.')

    def test_clone_image_fromcache(self):
        """clone_image clones from the image cache when a hit exists."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
        mox.StubOutWithMock(drv, '_post_clone_image')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn(
            [('share', 'file_name')])
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
        drv._post_clone_image(volume)
        mox.ReplayAll()
        drv.clone_image(volume, ('image_location', None), 'image_id', {})
        mox.VerifyAll()

    def get_img_info(self, format):
        """Return a minimal qemu-img info stand-in with the given format."""
        class img_info(object):
            def __init__(self, fmt):
                self.file_format = fmt
        return img_info(format)

    def test_clone_image_cloneableshare_nospace(self):
        """clone_image reports not-cloned when the share lacks space."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
        mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        if not cloned and not prop['provider_location']:
            pass
        else:
            self.fail('Expected not cloned, got cloned.')

    def test_clone_image_cloneableshare_raw(self):
        """A raw image on a cloneable share is file-cloned directly."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('raw'))
        drv._clone_volume(
            'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
        drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv.clone_image(
            volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_cloneableshare_notraw(self):
        """A non-raw image is converted to raw and registered in the cache."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file({'name': 'vol'}, IgnoreArg())
        mox.ReplayAll()
        drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()

    def test_clone_image_file_not_discovered(self):
        """Clone cleanup path: undiscovered file is deleted and not cloned."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_clone_image_resizefails(self):
        """Clone cleanup path: resize failure deletes the cloned file."""
        drv = self._driver
        mox = self.mox
        volume = {'name': 'vol', 'size': '20'}
        mox.StubOutWithMock(drv, '_find_image_in_cache')
        mox.StubOutWithMock(drv, '_is_cloneable_share')
        mox.StubOutWithMock(drv, '_get_mount_point_for_share')
        mox.StubOutWithMock(image_utils, 'qemu_img_info')
        mox.StubOutWithMock(drv, '_clone_volume')
        mox.StubOutWithMock(drv, '_discover_file_till_timeout')
        mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
        mox.StubOutWithMock(drv, '_resize_image_file')
        mox.StubOutWithMock(image_utils, 'convert_image')
        mox.StubOutWithMock(drv, '_register_image_in_cache')
        mox.StubOutWithMock(drv, '_is_share_vol_compatible')
        mox.StubOutWithMock(drv, 'local_path')
        mox.StubOutWithMock(os.path, 'exists')
        mox.StubOutWithMock(drv, '_delete_file')
        drv._find_image_in_cache(IgnoreArg()).AndReturn([])
        drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
            '127.0.0.1:/share')
        drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
        drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
        image_utils.qemu_img_info('/mnt/img-id').AndReturn(
            self.get_img_info('notraw'))
        image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
        image_utils.qemu_img_info('/mnt/vol').AndReturn(
            self.get_img_info('raw'))
        drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
        drv._set_rw_permissions_for_all('/mnt/vol')
        drv._resize_image_file(
            IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
        drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
        os.path.exists('/mnt/vol').AndReturn(True)
        drv._delete_file('/mnt/vol')
        mox.ReplayAll()
        vol_dict, result = drv.clone_image(
            volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
        mox.VerifyAll()
        self.assertFalse(result)
        self.assertFalse(vol_dict['bootable'])
        self.assertIsNone(vol_dict['provider_location'])

    def test_is_cloneable_share_badformats(self):
        """Malformed image URLs must never be treated as cloneable shares."""
        drv = self._driver
        # NOTE(review): a missing comma previously merged the :8080//share/img
        # and //img cases into one string via implicit concatenation.
        strgs = ['10.61.666.22:/share/img',
                 'nfs://10.61.666.22:/share/img',
                 'nfs://10.61.666.22//share/img',
                 'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com//share/img',
                 'com.netapp.com://share/im\g',
                 'http://com.netapp.com://share/img',
                 'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
                 'nfs://com.netapp.com//img',
                 'nfs://[ae::sr::ty::po]/img']
        for strg in strgs:
            res = drv._is_cloneable_share(strg)
            if res:
                msg = 'Invalid format matched for url %s.' % strg
                self.fail(msg)

    def test_is_cloneable_share_goodformat1(self):
        """nfs://host/share/img is recognized as cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat2(self):
        """nfs://host:port/share/img is recognized as cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://10.61.222.333:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat3(self):
        """nfs://hostname:port/share/img is recognized as cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://com.netapp:8080/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat4(self):
        """nfs://hostname/share/img is recognized as cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/share/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_is_cloneable_share_goodformat5(self):
        """nfs://hostname/img (no share dir) is recognized as cloneable."""
        drv = self._driver
        mox = self.mox
        strg = 'nfs://netapp.com/img'
        mox.StubOutWithMock(drv, '_check_share_in_use')
        drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
        mox.ReplayAll()
        drv._is_cloneable_share(strg)
        mox.VerifyAll()

    def test_check_share_in_use_no_conn(self):
        """No connection string means no share can be matched."""
        drv = self._driver
        share = drv._check_share_in_use(None, '/dir')
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_invalid_conn(self):
        """A connection string without a host matches no share."""
        drv = self._driver
        share = drv._check_share_in_use(':8989', '/dir')
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_incorrect_host(self):
        """Hostname resolution failure is swallowed and matches no share."""
        drv = self._driver
        mox = self.mox
        mox.StubOutWithMock(utils, 'resolve_hostname')
        utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
        mox.ReplayAll()
        share = drv._check_share_in_use('incorrect:8989', '/dir')
        mox.VerifyAll()
        if share:
            self.fail('Unexpected share detected.')

    def test_check_share_in_use_success(self):
        """A resolvable host with a mounted share is matched."""
        drv = self._driver
        mox = self.mox
        drv._mounted_shares = ['127.0.0.1:/dir/share']
        mox.StubOutWithMock(utils, 'resolve_hostname')
        mox.StubOutWithMock(drv, '_share_match_for_ip')
        utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
        drv._share_match_for_ip(
            '10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
        mox.ReplayAll()
        share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
        mox.VerifyAll()
        if not share:
            self.fail('Expected share not detected')

    def test_construct_image_url_loc(self):
        """NFS URL is derived from glance location metadata."""
        drv = self._driver
        img_loc = (None,
                   [{'metadata':
                     {'share_location': 'nfs://host/path',
                      'mount_point': '/opt/stack/data/glance',
                      'type': 'nfs'},
                     'url': 'file:///opt/stack/data/glance/image-id'}])
        location = drv._construct_image_nfs_url(img_loc)
        if location != "nfs://host/path/image-id":
            self.fail("Unexpected direct url.")

    def test_construct_image_url_direct(self):
        """A direct NFS URL is passed through unchanged."""
        drv = self._driver
        img_loc = ("nfs://host/path/image-id", None)
        location = drv._construct_image_nfs_url(img_loc)
        if location != "nfs://host/path/image-id":
            self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
volume_info = self._driver.create_volume(FakeVolume(1))
self.assertEqual(volume_info.get('provider_location'),
fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
# Raise error as if the copyoffload file can not be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the orignal error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
def _custom_setup(self):
self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
configuration=create_configuration())
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
# check exception raises when version not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv._client.set_api_version(1, 8)
# check exception raises when not supported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
drv._client.set_api_version(1, 9)
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
# check exception raises when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
mox.StubOutWithMock(drv, '_get_actual_path_for_export')
mox.StubOutWithMock(drv, '_start_clone')
mox.StubOutWithMock(drv, '_wait_for_clone_finish')
if status == 'fail':
mox.StubOutWithMock(drv, '_clear_clone')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
if status == 'fail':
drv._wait_for_clone_finish('1', '2').AndRaise(
api.NaApiError('error', 'error'))
drv._clear_clone('1')
else:
drv._wait_for_clone_finish('1', '2')
return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()
``` |
[
{
"content": "Here is the source code:\n```python\n#!/usr/bin/env python\n# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport decorators\nimport logging\nimport unittest\n\nfrom trace_test imp... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport decorators\nimport logging\nimport unittest\n\nfro... | ```python
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import decorators
import logging
import unittest
from trace_test import TraceTest
#from .trace_test import TraceTest
def generator():
yield 1
yield 2
class DecoratorTests(unittest.TestCase):
def test_tracing_object_fails(self):
self.assertRaises(Exception, lambda: decorators.trace(1))
self.assertRaises(Exception, lambda: decorators.trace(""))
self.assertRaises(Exception, lambda: decorators.trace([]))
def test_tracing_generators_fail(self):
self.assertRaises(Exception, lambda: decorators.trace(generator))
class ClassToTest(object):
@decorators.traced
def method1(self):
return 1
@decorators.traced
def method2(self):
return 1
@decorators.traced
def traced_func():
return 1
class DecoratorTests(TraceTest):
def _get_decorated_method_name(self, f):
res = self.go(f)
events = res.findEventsOnThread(res.findThreadIds()[0])
# Sanity checks.
self.assertEquals(2, len(events))
self.assertEquals(events[0]["name"], events[1]["name"])
return events[1]["name"]
def test_func_names_work(self):
expected_method_name = __name__ + '.traced_func'
self.assertEquals(expected_method_name,
self._get_decorated_method_name(traced_func))
def test_method_names_work(self):
ctt = ClassToTest()
self.assertEquals('ClassToTest.method1',
self._get_decorated_method_name(ctt.method1))
self.assertEquals('ClassToTest.method2',
self._get_decorated_method_name(ctt.method2))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
``` |
[
{
"content": "Write the code verbatim:\n```python\nimport fcntl\nimport os\n\n# FROM: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/\nclass PidFile(object):\n \"\"\"Context manager that locks a pid file. Implemented as class\n not generator because daemon.py is calling ... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nimport fcntl\nimport os\n\n# FROM: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/\nclass PidFile(object):\n \"\"\"Context manager that locks a pid file. Implemented as class\n not generator because daemo... | ```python
import fcntl
import os
# FROM: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
class PidFile(object):
"""Context manager that locks a pid file. Implemented as class
not generator because daemon.py is calling .__exit__() with no parameters
instead of the None, None, None specified by PEP-343."""
# pylint: disable=R0903
def __init__(self, path):
self.path = path
self.pidfile = None
def __enter__(self):
self.pidfile = open(self.path, "a+")
try:
fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
raise SystemExit("Already running according to " + self.path)
self.pidfile.seek(0)
self.pidfile.truncate()
self.pidfile.write(str(os.getpid()))
self.pidfile.flush()
self.pidfile.seek(0)
return self.pidfile
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
try:
self.pidfile.close()
except IOError as err:
# ok if file was just closed elsewhere
if err.errno != 9:
raise
os.remove(self.path)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n\"\"\"This code does not work as for it will raise connection error\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom time import sleep\n\ndef is_video(td):\n\t\"\"\"it is a video if two prerequisites\"\"\"\n\tpricelabels =... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n\"\"\"This code does not work as for it will raise connection error\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom time import sleep\n\ndef is_video(td):\n\t\"\"\"it is a video if two prerequisites\"\"\"\... | ```python
"""This code does not work as for it will raise connection error"""
import requests
from bs4 import BeautifulSoup
import re
from time import sleep
def is_video(td):
"""it is a video if two prerequisites"""
pricelabels = td('span', 'pricelabel')
return(len(pricelabels) == 1 and
pricelabels[0].text.strip().startwith("Video"))
def book_info(td):
"""given a BeautifulSoup <td> Tag representing a book,
extract the book's details and return a dict"""
title = td.find("div", "thumbheader").a.text
by_author = td.find('div', 'AuthorName').text
authors = [x.strip() for x in re.sub("^By ", "", by_author).split(",")]
isbn_link = td.find("div", "thumbheader").a.get("href")
isbn = re.match("/product/(.*)\.do", isbn_link).groups()[0]
date = td.find("span", "directorydate").text.strip()
return {
"title" : title,
"authors" : authors,
"isbn" : isbn,
"date" : date
}
base_url = "http://shop.oreilly.com/category/browse-subjects/data.do?sortby=publicationDate&page="
books = []
NUM_PAGES = 31 # vary from time to time
for page_num in range(1, NUM_PAGES + 1):
print "souping page", page_num, ",", len(books), "found so far"
url = base_url + str(page_num)
soup = BeautifulSoup(requests.get(url).text, 'html5lib')
for td in soup('td', 'thumbtext'):
if not is_video(td):
books.append(book_info(td))
sleep(30) # respect to robots.txt
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# coding:utf-8\n# Contributor:\n# fffonion <fffonion@gmail.com>\n\nimport importlib\nfrom ..const import *\nfrom . import en_us as lng_fallback\n\ntry:\n _locale = LOCALE.lower() if LOCALE else 'en_us'\n if _locale in ('zh_cn', 'zh_sg'):\n ... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# coding:utf-8\n# Contributor:\n# fffonion <fffonion@gmail.com>\n\nimport importlib\nfrom ..const import *\nfrom . import en_us as lng_fallback\n\ntry:\n _locale = LOCALE.lower() if LOCALE else 'en_us'\n if _locale in ('zh_cn', 'z... | ```python
#!/usr/bin/env python
# coding:utf-8
# Contributor:
# fffonion <fffonion@gmail.com>
import importlib
from ..const import *
from . import en_us as lng_fallback
try:
_locale = LOCALE.lower() if LOCALE else 'en_us'
if _locale in ('zh_cn', 'zh_sg'):
_locale = 'zh_hans'
elif _locale in ('zh_tw', 'zh_hk', 'zh_mo'):
_locale = 'zh_hant'
lng = importlib.import_module("%s.i18n.%s" % (SCRIPT_NAME, _locale))
except (ImportError, ValueError):
lng = lng_fallback
class _(object):
def c(cls, code):
_ = code not in lng.err_msg and \
(code not in lng_fallback.err_msg and \
(cls.ERR_NOMSG % code) or \
lng_fallback.err_msg[code] ) or \
lng.err_msg[code]
return _ if PY3K else (
_ if isinstance(_, unicode) else _.decode('utf-8')) # cls.ERR_NOMSG % code is unicode
def __getattr__(cls, idx):
_ = not hasattr(lng, idx) and \
getattr(lng_fallback, idx) or \
getattr(lng, idx)
return _ if PY3K else _.decode('utf-8')
i18n = _()
``` |
[
{
"content": "Recreate the original code text:\n```python\n#!/usr/bin/python\n\n\"\"\"\n Read mutmat.snp.tsv (tab delimited file with snps)\n and create a table for hotspot analysis\n\"\"\"\n\nimport sys\n\nclass Interval:\n\tdef __init__(self, start, stop, gene):\n\t\tself.start = start\n\t\tself.stop = s... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n#!/usr/bin/python\n\n\"\"\"\n Read mutmat.snp.tsv (tab delimited file with snps)\n and create a table for hotspot analysis\n\"\"\"\n\nimport sys\n\nclass Interval:\n\tdef __init__(self, start, stop, gene):\n\t\tself.start = start\n\t... | ```python
#!/usr/bin/python
"""
Read mutmat.snp.tsv (tab delimited file with snps)
and create a table for hotspot analysis
"""
import sys
class Interval:
def __init__(self, start, stop, gene):
self.start = start
self.stop = stop
self.gene = gene
def findGene(intervals, pos, sindex, eindex):
if eindex < sindex:
print "Interval error"
return "NA"
if pos > intervals[sindex].start and pos < intervals[sindex].stop:
return intervals[sindex].gene
if pos > intervals[eindex].start and pos < intervals[eindex].stop:
return intervals[eindex].gene
if eindex - sindex < 2:
return "NA"
mindex = sindex + ((eindex - sindex) / 2)
if intervals[sindex].start < pos and intervals[mindex].stop > pos:
return findGene(intervals, pos, sindex, mindex)
if intervals[mindex].start < pos and intervals[eindex].stop > pos:
return findGene(intervals, pos, mindex, eindex)
if pos > intervals[eindex].stop or pos < intervals[sindex].start:
return "NA"
return "bumpi"
geneann = dict()
for i in open(sys.argv[2]):
fields = i.rstrip().split()
gene = fields[0]
for intervals in fields[1:]:
(chrx, start, stop) = intervals.split(":")
if chrx not in geneann:
geneann[chrx] = list()
iv = Interval(int(start), int(stop), gene)
geneann[chrx].append(iv)
for chrx in geneann:
geneann[chrx] = sorted(geneann[chrx], key = lambda x: x.start)
mutmat = open(sys.argv[1])
header = mutmat.readline().rstrip().split()
# select only tumour samples
selectedcolumns = list()
for i in range(len(header)):
if "T" in header[i]:
selectedcolumns.append(i+1)
res = dict()
for i in mutmat:
fields = i.rstrip().split()
# count = sum([int(x) for x in fields[1:]])
count = sum([int(fields[x]) for x in selectedcolumns])
(chrx, pos) = fields[0].split("_")[0:2]
pos = int(pos)
if chrx not in res:
res[chrx] = dict()
if pos not in res[chrx]:
res[chrx][pos] = 0
res[chrx][pos] += count
for chrx in res:
allpos = sorted(res[chrx].keys())
for pos in allpos:
if chrx not in geneann:
gene = "NA"
else:
gene = findGene(geneann[chrx], pos, 0, len(geneann[chrx]) - 1)
print "%s\t%d\t%d\t%s" % (chrx, pos, res[chrx][pos], gene)
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#!/usr/bin/python\n#\n# Copyright 2008 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtai... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#!/usr/bin/python\n#\n# Copyright 2008 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\... | ```python
#!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: mwu@google.com (Mingyu Wu)
"""Unittest for baserunner module."""
__author__ = 'mwu@google.com (Mingyu Wu)'
import os
import shutil
import sys
import tempfile
import time
import unittest
from lib import baserunner
from lib import filesystemhandlerextend
from lib import mock_emailmessage
from lib import mock_reporter
from lib import mock_scanscripts
from lib import pyreringconfig
from lib import pyreringutil
global_settings = pyreringconfig.GlobalPyreRingConfig.settings
class BaseRunnerTest(unittest.TestCase):
"""Unit test cases for BaseRunner class."""
def setUp(self):
# I should config global_settings here instead of read it from file system.
self.tempdir = tempfile.mkdtemp()
root_dir = os.path.abspath(os.path.join(os.path.split(sys.argv[0])[0],
'../'))
global_settings.update(
{'report_dir': os.path.join(self.tempdir, 'report'),
'email_recipients': os.getenv('LOGNAME'),
'host_name': 'test.host',
'log_file': 'pyrering.log',
'file_errors': False,
'project_name': 'pyrering_unittest',
'root_dir': root_dir,
'sendmail': False,
'runner': 'baserunner',
'source_dir': os.path.join(root_dir, 'test'),
'tester': os.getenv('LOGNAME'),
'FATAL_STRING': 'Fatal:',
'header_file': 'header_info.txt',
'time': time.strftime('%Y%m%d%H%M'),
'skip_setup': False,
})
# get a default config and mocks
self.one_config = pyreringutil.PRConfigParser().Default()
self.scanner = mock_scanscripts.MockScanScripts()
self.emailmessage = mock_emailmessage.MockEmailMessage()
self.reporter = mock_reporter.MockTxtReporter()
self.runner = baserunner.BaseRunner(
name='test',
scanner=self.scanner,
email_message=self.emailmessage,
filesystem=filesystemhandlerextend.FileSystemHandlerExtend(),
reporter=self.reporter)
self.runner.Prepare()
if not os.path.isdir(global_settings['report_dir']):
os.makedirs(global_settings['report_dir'])
# I don't want the unit test to mess with the original log file.
global_settings['log_file'] += '.unittest'
def tearDown(self):
self.runner.CleanUp()
self.runner = ''
pyreringconfig.Reset()
self.scanner.CleanConfig()
shutil.rmtree(self.tempdir)
def testFindHeaderInfoFile(self):
global_settings['header_file'] = os.path.join(self.tempdir, 'header.txt')
fh = open(global_settings['header_file'], 'w')
fh.write('test info')
fh.close()
self.one_config['TEST_SCRIPT'] = 'echo 1'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testFindHeaderInfoFile'], False)
self.assertEqual(self.reporter.header, 'test info')
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
# Positive Test Cases:
def testOneCommand(self):
"""A simple sleep command takes some time to finish."""
# prepare the test script here
self.one_config['TEST_SCRIPT'] = 'sleep 3'
# set the mock scanscript to return this thing.
self.scanner.SetConfig([self.one_config])
# now run the test and return should be expected.
result = self.runner.Run(['testOneCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testEchoCommand(self):
"""A simple command has output on stdout."""
self.one_config['TEST_SCRIPT'] = 'echo testEchoCommand'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testEchoCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has this hello line
def testEchoToSTDERRCommand(self):
"""A simple command has output redirect to stderr."""
self.one_config['TEST_SCRIPT'] = 'echo testEchoToSTDERRCommand >&2'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testEchoSTDERRCommand'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has this hello line
def testRunScript(self):
"""A real script to run."""
self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'],
'test/test1_echo.sh')
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testRunScript'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
#TODO(mwu): need to check the log file has the echo output
def testRunScripts(self):
"""2 scripts to be run."""
self.one_config['TEST_SCRIPT'] = 'echo testRunScripts1'
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'echo testRunScripts2'
self.scanner.SetConfig([self.one_config, config2])
result = self.runner.Run(['testRunScripts'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 2)
# TODO(mwu): verify both scripts run fine
def testEmailSend(self):
"""Test Email should be send."""
self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 1'
self.scanner.SetConfig([self.one_config])
try:
self.runner.Run(['testEmailSend'], True)
except self.emailmessage.EmailCalledError:
self.assertTrue(True)
else:
self.fail(msg='Send email was not called')
def testEmailNotSendIfTestPass(self):
"""Test email should not go if all tests pass."""
self.one_config['TEST_SCRIPT'] = 'echo send_email_test;exit 0'
self.scanner.SetConfig([self.one_config])
try:
self.runner.Run(['testEmailSend'], True)
except self.emailmessage.EmailCalledError:
self.fail()
# Negative Test Cases
def testTimeoutCommand(self):
"""A command times out."""
self.one_config['TEST_SCRIPT'] = 'echo timeouttest; sleep 8'
self.one_config['TIMEOUT'] = 2
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testTimeoutCommand'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.timeout, 1)
def testNonExistCommand(self):
"""Test a wrong system command."""
self.one_config['TEST_SCRIPT'] = 'nonexist_command'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testNonExistCommand'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testNonExistScript(self):
"""Test a nonexist script."""
self.one_config['TEST_SCRIPT'] = '/tmp/nonexist_script.sh'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testNonExistScript'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testPermissionDenied(self):
"""Test something without permission."""
self.one_config['TEST_SCRIPT'] = 'touch /pyrering.txt'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testPermissionDenied'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testCatchWarningMessage(self):
"""Test a command has warning output."""
self.one_config['TEST_SCRIPT'] = 'echo warn message'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testCatchWarningMessage'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testCatchFatalMessage(self):
"""Test a command has fatal error message even exit code still 0."""
self.one_config['TEST_SCRIPT'] = 'echo Fatal:;echo anotherline'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testCatchFatalMessage'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testOutputLargeMessage(self):
"""Test a test can have large screen output.
As default the stdout only has a 4k buffer limit, so the code should clean
up the buffer while running the test, otherwise the writing to buffer will
be blocked when the buffer is full.
"""
self.one_config['TEST_SCRIPT'] = os.path.join(global_settings['root_dir'],
'test/outputlargetxt.py')
self.one_config['TIMEOUT'] = 4
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testLargeOutput'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.passed, 1)
def testExitWithError(self):
"""Test a test have an error exit, which is not a failure."""
self.one_config['TEST_SCRIPT'] = 'exit 255'
self.scanner.SetConfig([self.one_config])
result = self.runner.Run(['testExitWithError'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 0)
self.assertEqual(self.runner.error, 1)
def testSetupTestPassed(self):
"""Test a setup test case passes."""
self.one_config['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([self.one_config])
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([config2], 'setup')
result = self.runner.Run(['testSetupTestFailed'], False)
self.assertEqual(result, 0)
self.assertEqual(self.runner.failed, 0)
def testSetupTestFailed(self):
"""Test a setup test case failed, the test should exit at once."""
self.one_config['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([self.one_config])
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'exit 1'
self.scanner.SetConfig([config2], 'setup')
result = self.runner.Run(['testSetupTestFailed'], False)
self.assertEqual(result, 1)
self.assertEqual(self.runner.failed, 1)
def testTearDownFailed(self):
"""Test a teardown test case failed, the test still reports."""
self.one_config['TEST_SCRIPT'] = 'exit 0'
self.scanner.SetConfig([self.one_config])
config2 = pyreringutil.PRConfigParser().Default()
config2['TEST_SCRIPT'] = 'exit 1'
self.scanner.SetConfig([config2], 'teardown')
result = self.runner.Run(['testTearDownTestFailed'], False)
self.assertEqual(result, 4)
self.assertEqual(self.runner.failed, 4)
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "```python\nimport numpy as np\nimport pandas as pd\nimport nltk\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport codecs\nfrom sklearn import feature_extraction\nfrom sklearn.clu... | [
{
"content": "<|memory_start|>```python\nimport numpy as np\nimport pandas as pd\nimport nltk\nfrom nltk.stem.snowball import SnowballStemmer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom bs4 import BeautifulSoup\nimport re\nimport os\nimport codecs\nfrom sklearn import feature_extraction\n... | ```python
import numpy as np
import pandas as pd
import nltk
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from bs4 import BeautifulSoup
import re
import os
import codecs
from sklearn import feature_extraction
from sklearn.cluster import KMeans
from pprint import pprint
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from textwrap import wrap
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from scipy.spatial import distance
from elasticsearch import Elasticsearch
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=['http://controcurator.org:80/ess'])
es = Elasticsearch(
['http://controcurator.org/ess/'],
port=80)
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
'''
query = {
"query": {
"bool": {
"must": [
{
"match_all": {}
}
]
}
},
"from": 0,
"size": 400
}
response = es.search(index="controcurator", doc_type="article", body=query)
'''
#article = es.get(index="controcurator", doc_type="article",id="58ed3daee4b0e0ec04effff7")
stopwords = nltk.corpus.stopwords.words('english')
stemmer = SnowballStemmer("english")
#response['hits']['hits'] = [hit for hit in response['hits']['hits'] if 'comments' in hit['_source']]
#response['hits']['hits'].sort(key=lambda d: len(d['_source']['comments']), reverse=True)
def tokenize_and_stem(text):
# first tokenize by sentence, then by word to ensure that punctuation is caught as it's own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
stems = [stemmer.stem(t) for t in filtered_tokens]
return stems
tfidf_vectorizer = TfidfVectorizer(max_df=0.9, max_features=200000,
min_df=0.1, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1,3))
fcluster, axcluster = plt.subplots(6, 8,figsize=(24, 16))
fsenti, axsenti = plt.subplots(6, 8,figsize=(24, 16))
ftype, axtype = plt.subplots(6, 8,figsize=(24, 16))
#fig, ax = plt.subplots(figsize=(20, 10)) # set size
col = 0
row = 0
cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a', 4: '#66a61e'}
type_colors = {'guardian': '#FF9900', 'twitter': '#000099'}
senti_colors = {'neg': '#CC0000', 'neu': '#CCCCCC', 'pos' : '#00CC00'}
def classifySentiment(score):
    """Map a numeric sentiment *score* onto 'neg', 'neu' or 'pos'.

    Scores below zero are 'neg', above zero 'pos', and exactly zero 'neu'.
    """
    if score == 0:
        return 'neu'
    return 'neg' if score < 0 else 'pos'
# Guardian article URLs to analyse.  NOTE: only the first five entries are
# processed by the loop below (articles[0:5]); the rest are kept for later runs.
articles = ['https://www.theguardian.com/commentisfree/2017/apr/11/working-class-public-spaces-musee-d-orsay',
    'https://www.theguardian.com/football/2017/apr/11/juventus-barcelona-champions-league-quarter-final-match-report',
    'https://www.theguardian.com/world/2017/apr/11/us-defense-syria-chemical-weapons-attacks-assad-regime',
    'https://www.theguardian.com/society/2017/apr/11/parents-fighting-to-keep-baby-charlie-gard-life-support-lose-high-court-battle',
    'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-explosion-team-bus',
    'https://www.theguardian.com/education/2017/apr/12/new-free-schools-despite-secondary-staff-cuts',
    'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-northern-ireland-former-deputy-first-minister-dies',
    'https://www.theguardian.com/politics/2017/apr/12/foreign-states-may-have-interfered-in-brexit-vote-report-says',
    'https://www.theguardian.com/us-news/2017/apr/11/homeland-security-searches-electronics-border',
    'https://www.theguardian.com/environment/2017/mar/22/princess-anne-backs-gm-crops-livestock-unlike-prince-charles',
    'https://www.theguardian.com/music/2017/apr/11/palestine-music-expo-pmx-musicians-shaking-up-the-occupied-territories',
    'https://www.theguardian.com/world/2017/apr/11/g7-rejects-uk-call-for-sanctions-against-russia-and-syria',
    'https://www.theguardian.com/commentisfree/2017/apr/11/frontline-brexit-culture-wars-ask-comedian-al-murray',
    'https://www.theguardian.com/news/2017/apr/11/painting-a-new-picture-of-the-little-ice-age-weatherwatch',
    'https://www.theguardian.com/us-news/2017/apr/11/detroit-michigan-500-dollar-house-rust-belt-america',
    'https://www.theguardian.com/global-development/2017/apr/11/worrying-trend-as-aid-money-stays-in-wealthiest-countries',
    'https://www.theguardian.com/society/2017/apr/11/recorded-childhood-cancers-rise-worldwide-world-health-organization',
    'https://www.theguardian.com/commentisfree/2016/dec/08/modern-day-hermits-share-experiences',
    'https://www.theguardian.com/football/2017/mar/22/ronnie-moran-liverpool-dies',
    'https://www.theguardian.com/lifeandstyle/2017/apr/11/vision-thing-how-babies-colour-in-the-world',
    'https://www.theguardian.com/world/2017/apr/11/nurses-grant-dying-man-final-wish-cigarette-glass-wine',
    'https://www.theguardian.com/business/2017/apr/11/labour-declare-war-late-payers-marks-spencer-jeremy-corbyn',
    'https://www.theguardian.com/science/2017/apr/12/scientists-unravel-mystery-of-the-loose-shoelace',
    'https://www.theguardian.com/us-news/2017/apr/11/united-airlines-shares-plummet-passenger-removal-controversy',
    'https://www.theguardian.com/business/2017/apr/11/judges-reject-us-bankers-claim-to-be-randy-work-genius-in-divorce-case',
    'https://www.theguardian.com/business/2017/apr/12/tesco-profits-1bn-growth-supermarket',
    'https://www.theguardian.com/money/2017/apr/11/probate-fees-plan-is-daft-as-well-as-devious',
    'https://www.theguardian.com/commentisfree/2017/apr/11/donald-trump-russia-rex-tillersons-visit-syria',
    'https://www.theguardian.com/environment/2017/apr/12/uk-butterflies-worst-hit-in-2016-with-70-of-species-in-decline-study-finds',
    'https://www.theguardian.com/business/2017/apr/11/developing-countries-demands-for-better-life-must-be-met-says-world-bank-head',
    'https://www.theguardian.com/politics/2017/apr/12/devon-and-cornwall-pcc-expenses-inquiry-prosecutors',
    'https://www.theguardian.com/politics/shortcuts/2017/apr/11/deep-england-brexit-britain',
    'https://www.theguardian.com/society/2017/apr/11/uk-supreme-court-denies-tobacco-firms-permission-for-plain-packaging-appeal',
    'https://www.theguardian.com/society/2017/mar/21/dawn-butler-stood-up-for-deaf-people-but-we-need-more-than-gestures',
    'https://www.theguardian.com/technology/2017/apr/11/gordon-ramsay-father-in-law-admits-hacking-company-computers',
    'https://www.theguardian.com/tv-and-radio/2017/mar/20/richard-hammond-injured-in-grand-tour-crash-in-mozambique',
    'https://www.theguardian.com/us-news/2017/apr/11/sean-spicer-hitler-chemical-weapons-holocaust-assad',
    'https://www.theguardian.com/science/2017/mar/22/face-medieval-cambridge-man-emerges-700-years-after-death',
    'https://www.theguardian.com/society/2017/mar/22/new-alzheimers-test-can-predict-age-when-disease-will-appear',
    'https://www.theguardian.com/world/2017/apr/11/national-archives-mi5-file-new-zealand-diplomat-paddy-costello-kgb-spy',
    'https://www.theguardian.com/australia-news/2017/mar/22/british-war-veteran-granted-permanent-residency-in-australia-ending-visa-drama',
    'https://www.theguardian.com/books/2017/apr/11/x-men-illustrator-alleged-anti-christian-messages-marvel-ardian-syaf',
    'https://www.theguardian.com/business/2017/apr/12/burger-king-ok-google-commercial',
    'https://www.theguardian.com/business/2017/apr/12/edf-customers-price-rise-electricity-gas-energy',
    'https://www.theguardian.com/business/2017/apr/12/ship-oil-rig-pioneer-spirit-shell-north-sea-decommissioning',
    'https://www.theguardian.com/business/2017/mar/22/asian-shares-drop-investors-fear-trump-wont-deliver-promises',
    'https://www.theguardian.com/football/2017/apr/11/tony-adams-vows-to-give-granada-players-a-kick-up-the-arse',
    'https://www.theguardian.com/football/2017/mar/22/football-transfer-rumours-jermain-defoe-back-to-west-ham',
    'https://www.theguardian.com/global-development/2017/apr/11/india-acts-to-help-acid-attack-victims',
    'https://www.theguardian.com/money/2017/apr/11/student-loan-interest-rate-rise-uk-inflation-brexit',
    'https://www.theguardian.com/uk-news/2017/mar/17/coroner-warns-of-dangers-after-man-electrocuted-in-bath-while-charging-phone',
    'https://www.theguardian.com/business/2017/mar/22/london-taxi-company-coventry-electric-cabs-jobs-brexit',
    'https://www.theguardian.com/commentisfree/2016/dec/14/experiences-accessing-mental-health-services-uk',
    'https://www.theguardian.com/commentisfree/2017/apr/11/france-left-europe-jean-luc-melenchon-presidential-election',
    'https://www.theguardian.com/commentisfree/2017/apr/11/sean-spicers-hitler-holocaust-speak-volumes',
    'https://www.theguardian.com/commentisfree/2017/apr/11/united-airlines-flying-while-asian-fear',
    'https://www.theguardian.com/environment/2017/mar/22/country-diary-long-mynd-shropshire-light-spout-waterfall',
    'https://www.theguardian.com/football/2017/apr/11/borussia-dortmund-shock-team-bus-explosions',
    'https://www.theguardian.com/football/2017/mar/17/stewart-downing-middlesbrough-karanka-row-agnew',
    'https://www.theguardian.com/football/2017/mar/22/which-football-manager-has-been-sacked-by-one-club-the-most-times',
    'https://www.theguardian.com/music/2017/mar/16/ed-sheeran-headline-sunday-night-glastonbury-2017',
    'https://www.theguardian.com/sport/2017/apr/11/pennsylvania-woman-jail-threats-youth-football-league-officials',
    'https://www.theguardian.com/sport/blog/2017/mar/22/talking-horses-best-wednesday-bets-for-warwick-and-newcastle',
    'https://www.theguardian.com/technology/2017/mar/17/youtube-and-google-search-for-answers',
    'https://www.theguardian.com/tv-and-radio/2017/mar/19/neighbours-tv-soap-could-disappear-from-british-screens',
    'https://www.theguardian.com/uk-news/2017/apr/11/boris-johnson-full-support-failure-secure-sanctions-syria-russia',
    'https://www.theguardian.com/world/2017/mar/22/brussels-unveil-terror-victims-memorial-one-year-after-attacks',
    'https://www.theguardian.com/world/2017/mar/22/north-korea-missile-test-failure',
    'https://www.theguardian.com/business/2017/mar/16/bank-of-england-uk-interest-rates-monetary-policy-committee',
    'https://www.theguardian.com/business/2017/mar/21/inflation-uk-wages-lag-behind-prices-mark-carney',
    'https://www.theguardian.com/business/2017/mar/22/nervous-markets-take-fright-at-prospect-of-trump-failing-to-deliver',
    'https://www.theguardian.com/commentisfree/2016/dec/21/i-lost-my-mum-seven-weeks-ago-our-readers-on-coping-with-grief-at-christmas',
    'https://www.theguardian.com/commentisfree/2017/jan/06/brexit-vote-have-you-applied-for-a-second-passport',
    'https://www.theguardian.com/fashion/2017/mar/22/fiorucci-why-the-disco-friendly-label-is-perfect-for-2017',
    'https://www.theguardian.com/film/2017/mar/17/from-the-corner-of-the-oval-obama-white-house-movie',
    'https://www.theguardian.com/film/2017/mar/22/film-franchises-terminator-sequel-arnold-schwarzenegger-die-hard-alien',
    'https://www.theguardian.com/law/2017/apr/12/judge-sacked-over-online-posts-calling-his-critics-donkeys',
    'https://www.theguardian.com/lifeandstyle/2017/mar/17/monopoly-board-game-new-tokens-vote',
    'https://www.theguardian.com/music/2017/mar/16/stormzy-condemns-nme-for-using-him-as-poster-boy-for-depression',
    'https://www.theguardian.com/music/2017/mar/21/los-angeles-police-mistake-wyclef-jean-suspect-assault-case',
    'https://www.theguardian.com/politics/2017/mar/22/uk-based-airlines-told-to-move-to-europe-after-brexit-or-lose-major-routes',
    'https://www.theguardian.com/society/2017/apr/11/national-social-care-service-centralised-nhs',
    'https://www.theguardian.com/sport/2017/mar/17/wales-france-six-nations-world-rankings',
    'https://www.theguardian.com/tv-and-radio/2017/mar/22/n-word-taboo-tv-carmichael-show-atlanta-insecure-language',
    'https://www.theguardian.com/uk-news/2017/mar/16/man-dies-explosion-former-petrol-station-highgate-north-london-swains-lane',
    'https://www.theguardian.com/us-news/2017/mar/17/national-weather-service-forecasting-temperatures-storms',
    'https://www.theguardian.com/us-news/2017/mar/22/fbi-muslim-employees-discrimination-religion-middle-east-travel',
    'https://www.theguardian.com/us-news/2017/mar/22/zapier-pay-employees-move-silicon-valley-startup',
    'https://www.theguardian.com/world/2017/mar/17/fleeing-from-dantes-hell-on-mount-etna',
    'https://www.theguardian.com/world/2017/mar/22/gay-clergyman-jeffrey-johns-turned-down-welsh-bishop-twice-before-claims',
    'https://www.theguardian.com/world/2017/mar/23/apple-paid-no-tax-in-new-zealand-for-at-least-a-decade-reports-say',
    'https://www.theguardian.com/books/2017/mar/22/comics-chavez-redline-transformers-v-gi-joe',
    'https://www.theguardian.com/business/2017/apr/11/uk-inflation-rate-stays-three-year-high',
    'https://www.theguardian.com/commentisfree/2017/apr/12/charlie-gard-legal-aid',
    'https://www.theguardian.com/commentisfree/2017/mar/22/rights-gig-economy-self-employed-worker',
    'https://www.theguardian.com/media/2017/mar/14/face-off-mps-and-social-media-giants-online-hate-speech-facebook-twitter',
    'https://www.theguardian.com/music/2017/apr/11/michael-buble-wife-says-son-noah-is-recovering-from-cancer',
    'https://www.theguardian.com/society/2017/apr/11/bullying-and-violence-grip-out-of-control-guys-marsh-jail-dorset',
    'https://www.theguardian.com/stage/2017/mar/22/trisha-brown-obituary',
    'https://www.theguardian.com/travel/2017/mar/22/10-best-clubs-in-amsterdam-chosen-by-dj-experts',
    'https://www.theguardian.com/us-news/2017/apr/11/us-universal-healthcare-single-payer-rallies',
    'https://www.theguardian.com/us-news/2017/mar/22/us-border-agent-sexually-assaults-teenage-sisters-texas',
    'https://www.theguardian.com/world/2017/apr/11/hundreds-of-refugees-missing-after-dunkirk-camp-fire',
    'https://www.theguardian.com/world/2017/mar/22/unicef-condemns-sale-cambodian-breast-milk-us-mothers-firm-ambrosia-labs',
    'https://www.theguardian.com/world/commentisfree/2017/mar/17/week-in-patriarchy-bbc-dad-jessica-valenti',
    'https://www.theguardian.com/business/2017/mar/15/us-federal-reserve-raises-interest-rates-to-1',
    'https://www.theguardian.com/business/2017/mar/21/london-cycle-courier-was-punished-for-refusing-work-after-eight-hours-in-cold',
    'https://www.theguardian.com/football/2017/mar/17/tottenham-harry-kane-return-injury',
    'https://www.theguardian.com/politics/2017/mar/15/browse-of-commons-explore-uk-parliament-with-first-virtual-tour',
    'https://www.theguardian.com/politics/2017/mar/21/martin-mcguinness-sinn-fein-members-carry-coffin-home-in-derry',
    'https://www.theguardian.com/sport/2017/mar/18/ireland-england-six-nations-dublin',
    'https://www.theguardian.com/us-news/2017/mar/20/ivanka-trump-west-wing-office-security-clearance',
    'https://www.theguardian.com/film/2017/mar/21/look-on-the-sweet-side-of-love-actually',
    'https://www.theguardian.com/media/2017/mar/20/jamie-oliver-new-show-deal-channel-4-tv',
    'https://www.theguardian.com/politics/2017/mar/16/theresa-may-vows-absolute-faith-in-hammond-after-u-turn',
    'https://www.theguardian.com/politics/2017/mar/21/nicola-sturgeon-accused-of-hypocrisy-as-independence-debate-begins',
    'https://www.theguardian.com/sport/2017/mar/17/jailed-transgender-fell-runner-thought-uk-athletics-was-trying-to-kill-her',
    'https://www.theguardian.com/uk-news/2017/mar/16/former-marine-cleared-alexander-blackman-freed-immediately-ex-soldier-jail',
    'https://www.theguardian.com/world/2017/mar/16/india-brexit-and-the-legacy-of-empire-in-africa',
    'https://www.theguardian.com/world/2017/mar/18/a-good-looking-bird-the-bush-stone-curlew-that-loves-its-own-reflection',
    'https://www.theguardian.com/world/2017/mar/21/electronics-ban-middle-east-flights-safety-hazards-airline-profit',
    'https://www.theguardian.com/business/2017/mar/14/us-federal-reserve-interest-rates-janet-yellen-donald-trump',
    'https://www.theguardian.com/business/2017/mar/16/rupert-murdoch-sky-bid-uk-ofcom',
    'https://www.theguardian.com/business/2017/mar/20/us-forbids-devices-larger-cell-phones-flights-13-countries',
    'https://www.theguardian.com/business/2017/mar/22/uk-ceos-national-living-wage-equality-trust-pay-gap',
    'https://www.theguardian.com/football/2017/mar/17/arsene-wenger-granit-xhaka-referees',
    'https://www.theguardian.com/lifeandstyle/2017/mar/17/chorizo-chicken-lemon-yoghurt-cavolo-nero-recipe-anna-hansen',
    'https://www.theguardian.com/politics/2017/mar/17/george-osborne-london-evening-standard-editor-appointment-evgeny-lebedev',
    'https://www.theguardian.com/uk-news/2017/mar/16/scotland-cannot-afford-to-ignore-its-deficit',
    'https://www.theguardian.com/uk-news/2017/mar/17/prince-william-visits-paris-for-the-first-time-since-mother-dianas-death',
    'https://www.theguardian.com/us-news/2017/mar/16/oc-actor-mischa-barton-speaks-out-sex-tapes-scandal',
    'https://www.theguardian.com/world/2017/mar/15/uk-government-child-slavery-products-sold-britain-innovation-fund',
    'https://www.theguardian.com/commentisfree/2017/mar/17/the-guardian-view-on-brexit-and-publishing-a-hardcore-problem',
    'https://www.theguardian.com/politics/2017/mar/21/osborne-becomes-the-remainers-great-hope',
    'https://www.theguardian.com/society/2017/mar/16/scotlands-exam-body-to-ensure-invigilators-get-living-wage',
    'https://www.theguardian.com/society/2017/mar/18/rural-deprivation-and-ill-health-in-england-in-danger-of-being-overlooked',
    'https://www.theguardian.com/sport/2017/mar/16/michael-oleary-team-not-ruling-out-return-mullins-yard-cheltenham-festival-horse-racing',
    'https://www.theguardian.com/sport/2017/mar/17/ireland-v-england-lions-six-nations-rugby-union',
    'https://www.theguardian.com/sport/2017/mar/18/this-is-your-night-conlans-dream-debut-wipes-out-nightmares-of-the-past',
    'https://www.theguardian.com/sport/2017/mar/21/bha-dope-tests-horses-racecourse',
    'https://www.theguardian.com/sport/2017/mar/21/donald-trump-colin-kaepernick-free-agent-anthem-protest',
    'https://www.theguardian.com/uk-news/2017/mar/16/protect-survive-nuclear-war-republished-pamphlet',
    'https://www.theguardian.com/uk-news/2017/mar/21/sisters-al-najjar-sue-cumberland-hotel-london-brutal-hammer-attack',
    'https://www.theguardian.com/uk-news/2017/mar/22/what-support-does-your-employer-give-to-fathers',
    'https://www.theguardian.com/artanddesign/2017/mar/21/winged-bull-and-giant-dollop-of-cream-to-adorn-trafalgar-squares-fourth-plinth',
    'https://www.theguardian.com/books/2017/mar/17/the-bone-readers-jacob-ross-caribbean-thriller-jhalak-prize',
    'https://www.theguardian.com/business/2017/mar/11/democrats-question-trump-conflict-of-interest-deutsche-bank-investigation-money-laundering',
    'https://www.theguardian.com/business/2017/mar/17/barclays-bob-diamond-panmure-gordon',
    'https://www.theguardian.com/commentisfree/2017/mar/15/brexit-was-an-english-vote-for-independence-you-cant-begrudge-the-scots-the-same',
    'https://www.theguardian.com/environment/2017/mar/21/the-snow-buntings-drift-takes-them-much-further-than-somerset',
    'https://www.theguardian.com/fashion/2017/mar/21/art-colour-victoria-beckham-van-gogh-fashion',
    'https://www.theguardian.com/lifeandstyle/2017/mar/17/i-am-26-and-find-it-hard-to-meet-people-on-the-same-wavelength-as-me',
    'https://www.theguardian.com/lifeandstyle/shortcuts/2017/mar/21/open-a-window-and-have-a-cold-shower-could-being-chilly-improve-your-health',
    'https://www.theguardian.com/society/2017/mar/22/four-supersized-prisons-to-be-built-england-and-wales-elizabeth-truss-plan',
    'https://www.theguardian.com/sport/2017/mar/17/ben-youngs-england-ireland-grand-slam-six-nations',
    'https://www.theguardian.com/technology/2017/mar/17/google-ads-bike-helmets-adverts',
    'https://www.theguardian.com/us-news/2017/mar/20/fbi-director-comey-confirms-investigation-trump-russia',
    'https://www.theguardian.com/world/2017/mar/17/time-for-a-declaration-of-war-on-happiness']
# go through each file
# For each of the first five article URLs: fetch the document, cluster its
# comments with k-means on TF-IDF vectors, embed pairwise distances in 2-D
# with MDS, and scatter-plot the comments coloured by cluster / source type /
# sentiment into the shared subplot grids.
for file in articles[0:5]:
    # Exact-match the article by URL (constant_score avoids relevance scoring).
    query = {
        "query": {
            "constant_score": {
                "filter": {
                    "term": {
                        "url": file
                    }
                }
            }
        },
        "from": 0,
        "size": 1
    }
    response = es.search(index="controcurator", doc_type="article", body=query)
    article = response['hits']['hits'][0]
    print article['_source']['url']
    print article['_id']
    #for article in response['hits']['hits']:
    # Skip articles whose comment count is unusable for clustering.
    if 'comments' not in article['_source']:
        print "-- NO COMMENTS --"
        continue
    print len(article['_source']['comments'])
    if len(article['_source']['comments']) > 500:
        print "-- TOO MANY COMMENTS --"
        continue
    if len(article['_source']['comments']) < 50:
        print "-- NOT ENOUGH COMMENTS --"
        continue
    # vectorization
    tfidf_matrix = tfidf_vectorizer.fit_transform([c['text'] for c in article['_source']['comments']])
    # clustering
    num_clusters = 5
    km = KMeans(n_clusters=num_clusters)
    km.fit(tfidf_matrix)
    centers = km.cluster_centers_
    clusters = km.labels_.tolist()
    # distances
    # 2-D embedding of the pairwise cosine distances, for plotting only.
    similarity_distance = 1 - cosine_similarity(tfidf_matrix)
    mds = MDS(n_components=2, dissimilarity="precomputed", random_state=1)
    pos = mds.fit_transform(similarity_distance)
    # save results to comments
    for i, cluster in enumerate(clusters):
        article['_source']['comments'][i]['cluster'] = cluster
        article['_source']['comments'][i]['cluster_x'] = pos[i][0]
        article['_source']['comments'][i]['cluster_y'] = pos[i][1]
    #for comment in article['_source']['comments']:
    #	print comment['cluster'],',',comment['cluster_x'],',',comment['cluster_y'],',',comment['text'].encode('UTF-8')
    # Comments without an explicit source are Guardian comments (not tweets).
    for c in article['_source']['comments']:
        if 'type' not in c:
            c['type'] = 'guardian'
    data = [{'x':c['cluster_x'], 'y':c['cluster_y'], 'label':c['cluster'], 'sentiment': classifySentiment(c['sentiment']['sentiment']), 'type':c['type'], 'title':c['text'].replace('\r', '').replace('\n', '')} for c in article['_source']['comments']]
    #create data frame that has the result of the MDS plus the cluster numbers and titles
    clustergroups = pd.DataFrame().from_dict(data).groupby('label')
    typegroups = pd.DataFrame().from_dict(data).groupby('type')
    sentigroups = pd.DataFrame().from_dict(data).groupby('sentiment')
    #fig, ax = plt.subplots(figsize=(20, 10)) # set size
    # ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
    #iterate through groups to layer the plot
    #note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
    # ms: marker size
    for name, group in clustergroups:
        axcluster[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=cluster_colors[name],
                mec='none')
        axcluster[row, col].set_aspect('auto')
        axcluster[row, col].tick_params(\
            axis= 'x',          # changes apply to the x-axis
            which='both',      # both major and minor ticks are affected
            bottom='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelbottom='off')
        axcluster[row, col].tick_params(\
            axis= 'y',         # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            left='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelleft='off')
        axcluster[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
    for name, group in typegroups:
        axtype[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=type_colors[name],
                mec='none')
        axtype[row, col].set_aspect('auto')
        axtype[row, col].tick_params(\
            axis= 'x',          # changes apply to the x-axis
            which='both',      # both major and minor ticks are affected
            bottom='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelbottom='off')
        axtype[row, col].tick_params(\
            axis= 'y',         # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            left='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelleft='off')
        axtype[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
    #title.set_y(1.05)
    for name, group in sentigroups:
        axsenti[row, col].plot(group.x, group.y, marker='o', linestyle='', ms=5, color=senti_colors[name],
                mec='none')
        axsenti[row, col].set_aspect('auto')
        axsenti[row, col].tick_params(\
            axis= 'x',          # changes apply to the x-axis
            which='both',      # both major and minor ticks are affected
            bottom='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelbottom='off')
        axsenti[row, col].tick_params(\
            axis= 'y',         # changes apply to the y-axis
            which='both',      # both major and minor ticks are affected
            left='off',      # ticks along the bottom edge are off
            top='off',         # ticks along the top edge are off
            labelleft='off')
        axsenti[row, col].set_title("\n".join(wrap(article['_source']['document']['title'], 30)),fontsize=8)
    #ax.legend(numpoints=1) #show legend with only 1 point
    #add label in x,y position with the label as the film title
    # for i in range(len(df)):
    #     ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)
    # Advance the (row, col) subplot cursor; stop once the 6x8 grid is full.
    col += 1
    if col > 7:
        col = 0
        row += 1
    if row > 5:
        break
    print article['_source']['document']['title'].encode('UTF-8')
    # Per sentiment group: distance of every comment to the group centroid.
    # NOTE(review): the lambda parameter `row` shadows the subplot row counter
    # above — confirm this is intended (it works, but is fragile).
    for name, group in sentigroups:
        avgx = group.x.mean()
        avgy = group.y.mean()
        group['dist'] = group.apply(lambda row: float(distance.pdist([(row['x'], row['y']), (avgx, avgy)])), axis=1)
        print group
    print "==="
# Fine-tune figure; hide x ticks for top plots and y ticks for right plots
#plt.setp([a.get_xticklabels() for a in axarr[:,-1]], visible=False)
#plt.setp([a.get_yticklabels() for a in axarr[0,:]], visible=False)
#plt.show() #show the plot
#fcluster.savefig('img/clusters.png', dpi=200)
#ftype.savefig('img/type.png', dpi=200)
#fsenti.savefig('img/sentiment.png', dpi=200)
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the L... | ```python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import manager_link_status
# Register this module's message types with the proto-plus runtime.
__protobuf__ = proto.module(
    package="google.ads.googleads.v6.resources",
    marshal="google.ads.googleads.v6",
    manifest={"CustomerClientLink",},
)


class CustomerClientLink(proto.Message):
    r"""Represents customer client link relationship.

    Attributes:
        resource_name (str):
            Immutable. Name of the resource. CustomerClientLink resource
            names have the form:
            ``customers/{customer_id}/customerClientLinks/{client_customer_id}~{manager_link_id}``
        client_customer (str):
            Immutable. The client customer linked to this
            customer.
        manager_link_id (int):
            Output only. This is uniquely identifies a
            customer client link. Read only.
        status (google.ads.googleads.v6.enums.types.ManagerLinkStatusEnum.ManagerLinkStatus):
            This is the status of the link between client
            and manager.
        hidden (bool):
            The visibility of the link. Users can choose
            whether or not to see hidden links in the Google
            Ads UI. Default value is false
    """

    # Field numbers mirror the CustomerClientLink message in the Google Ads
    # API proto definition and must not be changed.
    resource_name = proto.Field(proto.STRING, number=1)
    client_customer = proto.Field(proto.STRING, number=7, optional=True)
    manager_link_id = proto.Field(proto.INT64, number=8, optional=True)
    status = proto.Field(
        proto.ENUM,
        number=5,
        enum=manager_link_status.ManagerLinkStatusEnum.ManagerLinkStatus,
    )
    hidden = proto.Field(proto.BOOL, number=9, optional=True)


__all__ = tuple(sorted(__protobuf__.manifest))
``` |
[
{
"content": "```python\n# TASK\n# This should be a user plug-in\n\nfrom __future__ import print_function\nimport os\n\nclass Task:\n\n def __init__(self, logger, output, script, parallelism, number, params):\n self.logger = logger\n self.process = None\n self.fd = None\n self.out... | [
{
"content": "<|memory_start|>```python\n# TASK\n# This should be a user plug-in\n\nfrom __future__ import print_function\nimport os\n\nclass Task:\n\n def __init__(self, logger, output, script, parallelism, number, params):\n self.logger = logger\n self.process = None\n self.fd = None\n... | ```python
# TASK
# This should be a user plug-in
from __future__ import print_function
import os
class Task:
    """Launch one worker script as a subprocess with its own output file.

    The subprocess is started asynchronously by go(); its stdout/stderr are
    redirected to out-NNNN.txt inside the task's output directory.
    """

    def __init__(self, logger, output, script, parallelism, number, params):
        self.logger = logger
        # Popen handle, set by go(); file handle, set by open_output().
        self.process = None
        self.fd = None
        self.output = output            # output directory path
        self.script = script            # executable/script to run
        self.parallelism = parallelism
        self.number = number            # task index, used in file/arg names
        self.params = params            # JSON string; must contain "learning_rate"

    def go(self):
        """Start the subprocess.  Returns True on success, False on failure."""
        import json, subprocess
        J = json.loads(self.params)
        learning_rate = J["learning_rate"]
        self.open_output()
        try:
            # Argument order is the worker script's expected CLI contract:
            # output dir, 4-digit task number, parallelism, optimizer, lr.
            args = [ self.script, self.output, "%04i"%self.number,
                     str(self.parallelism),
                     "adam", str(learning_rate) ]
            self.logger.debug("task: " + " ".join(args))
            self.process = subprocess.Popen(args=args,
                                            stdin=None,
                                            stdout=self.fd,
                                            stderr=subprocess.STDOUT)
            print("started: ", self.process.pid)
        except Exception as e:
            # Best-effort reporting: log the traceback and signal failure.
            import traceback
            traceback.print_exc()
            print("")
            print("error while attempting to run: " + " ".join(args))
            print(e)
            return False
        return True

    def open_output(self):
        """Open the per-task output file that captures subprocess output."""
        try:
            output_file = self.output + ("/out-%04i.txt" % self.number)
            self.fd = open(output_file, "w")
        except Exception as e:
            print("")
            from utils import fail
            fail("Could not open task output file: " +
                 output_file + "\n" + str(e))

    def __del__(self):
        # Close the output file when the Task is garbage-collected.
        if self.fd is not None:
            print("closing: " + str(self.number))
            self.fd.close()
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n\"\"\"\nRESTx: Sane, simple and effective data publishing and integration. \n\nCopyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com\n\nThis program is free software: you can redistribute it and/or modify \nit under the ... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n\"\"\"\nRESTx: Sane, simple and effective data publishing and integration. \n\nCopyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com\n\nThis program is free software: you can redistribute it and/or modify... | ```python
"""
RESTx: Sane, simple and effective data publishing and integration.
Copyright (C) 2010 MuleSoft Inc. http://www.mulesoft.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
This module provides renderers for data into
different output formats.
You can import these classes straight from module level:
* BaseRenderer
"""
# Export classes on module level, so that users don't need
# to specify the individual file names in their imports.
from restx.render.htmlrenderer import HtmlRenderer
from restx.render.jsonrenderer import JsonRenderer
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nfrom collections import namedtuple\n \nentry = namedtuple('entry', 'qname flag rname pos mapq cigar rnext pnext tlen seq qual')\n\nVALID_HD_TAGS = ['VN', 'SO']\nVALID_SQ_TAGS = ['SN', 'LN', 'AS', 'M5', 'SP', 'UR']\n\nREQUIRED_HD_TA... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nfrom collections import namedtuple\n \nentry = namedtuple('entry', 'qname flag rname pos mapq cigar rnext pnext tlen seq qual')\n\nVALID_HD_TAGS = ['VN', 'SO']\nVALID_SQ_TAGS = ['SN', 'LN', 'AS', 'M5', 'SP', 'UR']\n... | ```python
from collections import namedtuple

# Column layout of one SAM alignment line (the 11 mandatory fields).
entry = namedtuple('entry', 'qname flag rname pos mapq cigar rnext pnext tlen seq qual')

# Tags permitted on @HD / @SQ header lines.
VALID_HD_TAGS = ['VN', 'SO']
VALID_SQ_TAGS = ['SN', 'LN', 'AS', 'M5', 'SP', 'UR']

# Tags that must be present on those lines.
REQUIRED_HD_TAGS = ['VN']
REQUIRED_SQ_TAGS = ['SN', 'LN']
class SamHeader( object ):
    """Parsed representation of the '@'-prefixed header section of a SAM file.

    @HD (version / sort order) and @SQ (reference sequence) lines are fully
    parsed; @RG, @PG and @CO lines are recognised but currently ignored.
    Raises TypeError for unrecognised lines and ValueError for duplicate
    @HD lines or duplicate sequence names.
    """

    def __init__(self, lines):
        self._version = None
        self._sort_order = None
        self._references = {}
        self._read_groups = []
        self._programs = []
        self._comments = []
        self._parse_input_lines( lines )

    def _parse_input_lines(self, lines):
        # Dispatch each header line on its two-letter record-type code.
        for line in lines:
            if line.startswith('@HD'):
                self._parse_header_line( line )
            elif line.startswith('@SQ'):
                self._parse_sequence_line( line )
            elif line.startswith('@RG'):
                self._parse_read_group_line( line )
            elif line.startswith('@PG'):
                self._parse_program_line( line )
            elif line.startswith('@CO'):
                self._parse_comment_line( line )
            else:
                msg = "Not a recognized header line: {0}".format( line )
                raise TypeError( msg )

    def _parse_header_line(self, line):
        """Parse the single @HD line (format version, optional sort order)."""
        if self._version:
            msg = "Only 1 header line allowed, but 2 detected"
            raise ValueError( msg )
        # Parse and validate the tags
        tags = tags_to_dictionary( line.strip().split()[1:] )
        validate_tags( tags, VALID_HD_TAGS, REQUIRED_HD_TAGS )
        # Set the appropriate variables
        self._version = tags['VN']
        if 'SO' in tags:
            self._sort_order = tags['SO']

    def _parse_sequence_line(self, line):
        """Parse one @SQ line and file its tags under the sequence name."""
        tags = tags_to_dictionary( line.strip().split()[1:] )
        validate_tags( tags, VALID_SQ_TAGS, REQUIRED_SQ_TAGS )
        if tags['SN'] in self._references:
            msg = 'Sequence name "{0}" is duplicated!'.format(tags['SN'])
            raise ValueError( msg )
        tags['LN'] = int(tags['LN'])
        self._references[tags['SN']] = tags

    def _parse_read_group_line(self, line):
        # TODO: @RG lines are currently ignored.
        pass

    def _parse_program_line(self, line):
        # TODO: @PG lines are currently ignored.
        pass

    def _parse_comment_line(self, line):
        # TODO: @CO lines are currently ignored.
        pass

    @property
    def version(self):
        return self._version

    @property
    def sort_order(self):
        return self._sort_order

    @property
    def references(self):
        return self._references

    @property
    def read_groups(self):
        return self._read_groups

    @property
    def program(self):
        # BUG FIX: previously returned self._program, which is never set
        # (__init__ creates self._programs), so accessing this property
        # always raised AttributeError.
        return self._programs

    @property
    def comments(self):
        return self._comments
class SamEntry( object ):
    """One SAM alignment record: wraps the 11 mandatory columns.

    All fields are exposed as read-only properties; POS and TLEN are
    converted to int on construction, everything else stays a string.
    """

    def __init__(self, line):
        fields = line.strip().split()[:11]
        self.entry = entry._make(fields)
        # Pre-convert the two numeric fields callers use for arithmetic.
        self._pos = int(self.entry.pos)
        self._tlen = int(self.entry.tlen)

    @property
    def qname(self):
        return self.entry.qname

    @property
    def flag(self):
        return self.entry.flag

    @property
    def rname(self):
        return self.entry.rname

    @property
    def pos(self):
        return self._pos

    @property
    def mapq(self):
        return self.entry.mapq

    @property
    def cigar(self):
        return self.entry.cigar

    @property
    def rnext(self):
        return self.entry.rnext

    @property
    def pnext(self):
        return self.entry.pnext

    @property
    def tlen(self):
        return self._tlen

    @property
    def seq(self):
        return self.entry.seq

    @property
    def qual(self):
        return self.entry.qual

    @property
    def aend(self):
        # NOTE(review): assumes POS + TLEN gives the alignment end — this
        # ignores CIGAR clipping; confirm against how callers use it.
        return self.pos + self.tlen
class SamReader( object ):
def __init__(self, f):
self._file = open(f, "r")
self._header = self.parse_header()
self._file = open(f, "r") # Reset the file position
def parse_header(self):
header_lines = []
line_start = 0
for line in self._file:
if line.startswith('@'):
header_lines.append( line )
else:
break
return SamHeader( header_lines )
@property
def header(self):
return self._header
@property
def version(self):
return self.header.version
@property
def sort_order(self):
return self.header.sort_order
@property
def references(self):
return self.header.references
def close(self):
self._file.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __iter__(self):
for line in self._file:
if line.startswith('@'):
continue
yield SamEntry(line)
#
# Utilities
#
def tags_to_dictionary( tags ):
data_tags = {}
for tag in tags:
if tag[2] != ':':
msg = 'Not a valid tag: "{0}"'.format(tag)
raise TypeError( msg )
tag_id, tag_value = tag[:2], tag[3:]
data_tags[tag_id] = tag_value
return data_tags
def validate_tags( tags, valid_tags, required_tags ):
for tag in tags: # Check that all present tags are valid
if tag not in valid_tags:
msg = 'Invalid tag "{0}" present'.format(tag)
raise TypeError( msg )
for tag in required_tags: # Check that all required tags are present
if tag not in tags:
msg = 'Required tag "{0}" not present'.format(tag)
raise TypeError( msg )
``` |
[
{
"content": "```python\n#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# test_data_root.py\nimport os\nimport sys\nimport unittest\nimport time\nimport yaml\n\nroot_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + \"..\" ) \ndata_path = os.path.join(root_folder, 'aikif', 'data', 'co... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# test_data_root.py\nimport os\nimport sys\nimport unittest\nimport time\nimport yaml\n\nroot_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + \"..\" ) \ndata_path = os.path.join(root_folder, 'aik... | ```python
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# test_data_root.py
import os
import sys
import unittest
import time
import yaml
root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." )
data_path = os.path.join(root_folder, 'aikif', 'data', 'core')
yaml_file = os.path.join(data_path, 'root.yaml')
#yaml_file = os.path.join(data_path, 'projects.yaml')
sys.path.append(data_path) # dont need this
print('yaml_file = ', yaml_file)
class TestDataRoot(unittest.TestCase):
def test_01_read_file(self):
"""
read the yaml file - example for use is in comments below
print(self.yaml_data)
for core in self.yaml_data:
print(core)
for item in self.yaml_data[core]:
print(' - ' + item)
"""
with open(yaml_file, 'r') as stream:
self.yaml_data = yaml.load(stream)
self.assertEqual(len(self.yaml_data), 4)
self.assertEqual(len(self.yaml_data['Interface']), 6)
self.assertTrue('Toolbox' in self.yaml_data['Interface'])
self.assertEqual(len(self.yaml_data['Environment_types']), 14)
self.assertTrue('Grid 2D' in self.yaml_data['Environment_types'])
self.assertEqual(len(self.yaml_data['Agent_types']), 5)
self.assertTrue('Explorer' in self.yaml_data['Agent_types'])
self.assertEqual(len(self.yaml_data['Toolbox_imports']), 2)
self.assertTrue('aikif.toolbox' in self.yaml_data['Toolbox_imports'])
#print(self.yaml_data)
#for core in self.yaml_data:
# print(core)
# for item in self.yaml_data[core]:
# print(' - ' + item)
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "Write the code verbatim:\n```python\n#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 1997-2015 California Institute of Technology.\n# License: 3-clause BSD. The full license text is available at:\n# - http://trac.mystic.cacr.caltech.edu/pro... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n#!/usr/bin/env python\n#\n# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)\n# Copyright (c) 1997-2015 California Institute of Technology.\n# License: 3-clause BSD. The full license text is available at:\n# - http://trac.mystic.cacr... | ```python
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
"""
Testing the polynomial fitting problem of [1] using scipy's Nelder-Mead algorithm.
Reference:
[1] Storn, R. and Price, K. Differential Evolution - A Simple and Efficient
Heuristic for Global Optimization over Continuous Spaces. Journal of Global
Optimization 11: 341-359, 1997.
"""
from test_ffit import Chebyshev8, plot_solution, print_solution
from TEST_ffitPP_b import ChebyshevCost
if __name__ == '__main__':
import random
from mystic.solvers import fmin
#from mystic._scipyoptimize import fmin
from mystic.tools import random_seed
random_seed(123)
import pp
import sys
if len(sys.argv) > 1:
tunnelport = sys.argv[1]
ppservers = ("localhost:%s" % tunnelport,)
else:
ppservers = ()
myserver = pp.Server(ppservers=ppservers)
trials = []
for trial in range(8):
x = tuple([random.uniform(-100,100) + Chebyshev8[i] for i in range(9)])
trials.append(x)
results = [myserver.submit(fmin,(ChebyshevCost,x),(),()) for x in trials]
for solution in results:
print_solution(solution())
#plot_solution(solution)
# end of file
``` |
[
{
"content": "Here is the source code:\n```python\nimport scrapy\nfrom scrapy.spiders import Spider\nfrom scrapy import Request\nimport re\nfrom scrapy.selector import Selector\nfrom xiaomi_appstore_crawler.items import XiaomiAppstoreCrawlerItem\n\nclass XiaomiSpider(Spider):\n name = \"xiaomi\"\n allowed... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport scrapy\nfrom scrapy.spiders import Spider\nfrom scrapy import Request\nimport re\nfrom scrapy.selector import Selector\nfrom xiaomi_appstore_crawler.items import XiaomiAppstoreCrawlerItem\n\nclass XiaomiSpider(Spider):\n name = \"xiaom... | ```python
import scrapy
from scrapy.spiders import Spider
from scrapy import Request
import re
from scrapy.selector import Selector
from xiaomi_appstore_crawler.items import XiaomiAppstoreCrawlerItem
class XiaomiSpider(Spider):
name = "xiaomi"
allowed_domains = ["app.mi.com"]
start_urls = [
"http://app.mi.com/topList?page=1"
]
def parse(self, response):
#import pudb; pu.db
page = Selector(response)
page_nexts = page.xpath('//div[@class="pages"]/a')
page_max = int(page_nexts[-2].xpath('text()').extract_first())
for page_id in xrange(1, 2): #xrange(1, page_max + 1):
url = '{0}{1}'.format('http://app.mi.com/topList?page=', str(page_id))
yield scrapy.Request(url, callback=self.parse_page)
def parse_page(self, response):
page = Selector(response)
lis = page.xpath('//ul[@class="applist"]/li')
if lis == None:
return
url_common = 'http://app.mi.com'
for li in lis:
item = XiaomiAppstoreCrawlerItem()
item['title'] = li.xpath('./h5/a/text()').extract_first().encode('utf-8')
url = li.xpath('./h5/a/@href').extract_first()
appid = re.match(r'/detail/(.*)', url).group(1)
item['appid'] = appid
# import pudb; pu.db
req = scrapy.Request(url_common + url, callback=self.parse_details)
req.meta["item"] = item
yield req
def parse_details(self, response):
item = response.meta["item"]
page = Selector(response)
lis = page.xpath('//div[@class="second-imgbox"]/ul/li')
recommended = []
for li in lis:
url = li.xpath('./a/@href').extract_first()
appid = re.match(r'/detail/(.*)', url).group(1)
recommended.append(appid)
item['recommended'] = recommended
#import pudb; pu.db
yield item
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport re\nimport uuid\n\n_INVISIBLE_CHARS = (\n '\\u200b',\n '\\u200c',\n '\\u200d',\n '\\ufeff',\n)\n_INVISIBLE_MAP = dict(zip(\n '0123456789abcdef',\n (''.join((i, j)) for i in _INVISIBLE_CHARS for j ... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport re\nimport uuid\n\n_INVISIBLE_CHARS = (\n '\\u200b',\n '\\u200c',\n '\\u200d',\n '\\ufeff',\n)\n_INVISIBLE_MAP = dict(zip(\n '0123456789abcdef',\n (''.join((i, j)) for i in _INVISI... | ```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import uuid
_INVISIBLE_CHARS = (
'\u200b',
'\u200c',
'\u200d',
'\ufeff',
)
_INVISIBLE_MAP = dict(zip(
'0123456789abcdef',
(''.join((i, j)) for i in _INVISIBLE_CHARS for j in _INVISIBLE_CHARS),
))
_INVISIBLE_REVERSE_MAP = {v: k for k, v in _INVISIBLE_MAP.iteritems()}
def uuid_to_watermark(watermark_uuid):
"Returns the watermark unicode string for a given uuid"
return ''.join(_INVISIBLE_MAP[c] for c in watermark_uuid.get_hex())
_WATERMARK_LENGTH = len(uuid_to_watermark(uuid.uuid4()))
_WATERMARK_RE = re.compile(r'[{}]{{{}}}'.format(
''.join(_INVISIBLE_CHARS),
_WATERMARK_LENGTH,
))
def watermark_to_uuid(watermark):
"Returns the uuid for a given watermark string"
if len(watermark) != _WATERMARK_LENGTH:
raise ValueError('Watermark must be {} characters'.format(
_WATERMARK_LENGTH))
try:
watermark_hex = ''.join(
_INVISIBLE_REVERSE_MAP[k]
for k in map(''.join, zip(*[iter(watermark)] * 2))
)
except KeyError:
raise ValueError('Watermark contains invalid characters')
return uuid.UUID(hex=watermark_hex)
def find_all_watermark_uuids(encoded_text):
return map(watermark_to_uuid, _WATERMARK_RE.findall(encoded_text))
def encode_watermark(text, watermark_uuid=None, prepend=False):
"""Encodes the given text with a watermark string generated from the given
uuid. Optionally appends or prepends the watermark string.
Returns a 2-tuple (encoded_text, watermark_uuid)
"""
if not isinstance(text, unicode):
raise ValueError('text must be a unicode string')
watermark_uuid = watermark_uuid or uuid.uuid4()
watermark = uuid_to_watermark(watermark_uuid)
if prepend:
encoded_text = ''.join((watermark, text))
else:
encoded_text = ''.join((text, watermark))
return encoded_text, watermark_uuid
def decode_watermark(encoded_text):
"""Decodes the given text, separating out the original text and the
watermark uuid.
Returns a 2-tuple (text, watermark_uuid). If no watermark is detected, text
is the original text and watermark_uuid is None.
"""
if not isinstance(encoded_text, unicode):
raise ValueError('encoded_text must be a unicode string')
if len(encoded_text) < _WATERMARK_LENGTH:
return encoded_text, None
# appended watermark
watermark = encoded_text[-_WATERMARK_LENGTH:]
text = encoded_text[:-_WATERMARK_LENGTH]
try:
watermark_uuid = watermark_to_uuid(watermark)
except ValueError:
pass
else:
return text, watermark_uuid
# prepended watermark
watermark = encoded_text[:_WATERMARK_LENGTH]
text = encoded_text[_WATERMARK_LENGTH:]
try:
watermark_uuid = watermark_to_uuid(watermark)
except ValueError:
pass
else:
return text, watermark_uuid
return encoded_text, None
``` |
[
{
"content": "```python\n# Foremast - Pipeline Tooling\n#\n# Copyright 2018 Gogo, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org... | [
{
"content": "<|memory_start|>```python\n# Foremast - Pipeline Tooling\n#\n# Copyright 2018 Gogo, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http:... | ```python
# Foremast - Pipeline Tooling
#
# Copyright 2018 Gogo, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for foremast supporting utilities."""
from .apps import *
from .asg import *
from .banners import *
from .pipelines import *
from .deep_chain_map import DeepChainMap
from .elb import *
from .encoding import *
from .generate_filename import *
from .dns import *
from .credentials import *
from .properties import *
from .security_group import *
from .subnets import *
from .vpc import *
from .lookups import *
from .slack import *
from .tasks import *
from .templates import *
from .warn_user import *
from .get_cloudwatch_event_rule import get_cloudwatch_event_rule
from .awslambda import *
from .get_sns_subscriptions import get_sns_subscriptions
from .get_sns_topic_arn import get_sns_topic_arn
from .dynamodb_stream import get_dynamodb_stream_arn
from .roles import *
``` |
[
{
"content": "Here is a code file:\n```python\n# Copyright 2016-2017, Fabien Boucher\n# Copyright 2016-2017, Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# Copyright 2016-2017, Fabien Boucher\n# Copyright 2016-2017, Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the Lice... | ```python
# Copyright 2016-2017, Fabien Boucher
# Copyright 2016-2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pecan import conf
from pecan import abort
from pecan import expose
from pecan import request
from pecan import response
from pecan.rest import RestController
from repoxplorer import index
from repoxplorer.exceptions import UnauthorizedException
from repoxplorer.index import users
from repoxplorer.controllers import utils
if conf.get('users_endpoint', False) and conf.get('oidc', False):
from repoxplorer.auth import OpenIDConnectEngine as AuthEngine
else:
from repoxplorer.auth import CAuthEngine as AuthEngine
AUTH_ENGINE = AuthEngine()
xorkey = conf.get('xorkey') or 'default'
class UsersController(RestController):
auth = AUTH_ENGINE
def abort_if_not_active(self):
if not self.auth.is_configured():
abort(403)
def _authorize(self, uid=None):
self.abort_if_not_active()
# Shortcircuit the authorization for testing purpose
# return
try:
self.auth.authorize(request, uid)
except UnauthorizedException as e:
abort(401, str(e))
except Exception as e:
abort(500, "Unexpected error: %s" % e)
self.auth.provision_user(request)
def _validate(self, data):
mandatory_keys = (
'uid', 'name', 'default-email', 'emails')
email_keys = (
('email', True),
('groups', False))
group_keys = (
('group', True),
('begin-date', False),
('end-date', False))
# All keys must be provided
if set(data.keys()) != set(mandatory_keys):
# Mandatory keys are missing
return False
if not isinstance(data['emails'], list):
# Wrong data type for email
return False
if len(data['name']) >= 100:
return False
mekeys = set([mk[0] for mk in email_keys if mk[1]])
mgkeys = set([mk[0] for mk in group_keys if mk[1]])
if data['emails']:
for email in data['emails']:
if not mekeys.issubset(set(email.keys())):
# Mandatory keys are missing
return False
if not set(email.keys()).issubset(
set([k[0] for k in email_keys])):
# Found extra keys
return False
if 'groups' in email.keys():
for group in email['groups']:
if not mgkeys.issubset(set(group.keys())):
# Mandatory keys are missing
return False
if not set(group.keys()).issubset(
set([k[0] for k in group_keys])):
# Found extra keys
return False
return True
def _modify_protected_fields(self, prev, new):
if new['uid'] != prev['uid']:
return True
if new['default-email'] != prev['default-email']:
return True
# Adding or removing emails is forbidden
prev_emails = set([e['email'] for e in prev['emails']])
new_emails = set([e['email'] for e in new['emails']])
if (not new_emails.issubset(prev_emails) or
not prev_emails.issubset(new_emails)):
return True
return False
# curl -H 'Remote-User: admin' -H 'Admin-Token: abc' \
# "http://localhost:51000/api/v1/users/fabien"
@expose('json')
def get(self, uid):
self._authorize(uid)
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if not u:
abort(404)
u['cid'] = utils.encrypt(xorkey, u['default-email'])
return u
@expose('json')
def delete(self, uid):
self._authorize(uid)
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if not u:
abort(404)
_users.delete(uid)
# curl -X PUT -H 'Remote-User: admin' -H 'Admin-Token: abc' \
# -H "Content-Type: application/json" --data \
# '{"uid":"fabien","name":"Fabien Boucher","default-email": \
# "fboucher@redhat.com","emails": [{"email": "fboucher@redhat.com"}]}' \
# "http://localhost:51000/api/v1/users/fabien"
@expose('json')
def put(self, uid):
# We don't pass uid to authorize, then only admin logged with
# admin token will be authorized
self._authorize()
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if u:
abort(409)
infos = request.json if request.content_length else {}
if not self._validate(infos):
abort(400)
# Need to check infos content
infos['uid'] = uid
_users.create(infos)
response.status = 201
# curl -X POST -H 'Remote-User: admin' -H 'Admin-Token: abc' \
# -H "Content-Type: application/json" --data \
# '{"uid":"fabien","name":"Fabien Boucher","default-email": \
# "fboucher@redhat.com","emails": [{"email": "fboucher@redhat.com"}, \
# {"email": "fabien.boucher@enovance.com"}]}' \
# "http://localhost:51000/api/v1/users/fabien"
@expose('json')
def post(self, uid):
requester = self._authorize(uid)
_users = users.Users(
index.Connector(index_suffix='users'))
u = _users.get(uid)
if not u:
abort(404)
infos = request.json if request.content_length else {}
infos['uid'] = uid
# Can be provided by mistake, just remove it
if 'cid' in infos:
del infos['cid']
if not self._validate(infos):
abort(400)
if requester != 'admin':
# User is not allowed to modify some raw_fields
# like adding or removing emails ...
if self._modify_protected_fields(u, infos):
abort(403)
_users.update(infos)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# --- BEGIN_HEADER ---\n#\n# webserver - [insert a few words of module description on this line]\n# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter\n#\n# This file is part of... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# --- BEGIN_HEADER ---\n#\n# webserver - [insert a few words of module description on this line]\n# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter\n#\n# This... | ```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# webserver - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Simple test CGI server"""
import sys
import CGIHTTPServer
import BaseHTTPServer
import SocketServer
class Handler(CGIHTTPServer.CGIHTTPRequestHandler):
cgi_directories = ['/cgi-bin']
class ThreadingServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
pass
class ForkingServer(SocketServer.ForkingMixIn,
BaseHTTPServer.HTTPServer):
pass
# Listen address
IP = '127.0.0.1'
PORT = 8080
print 'Serving at %s port %d' % (IP, PORT)
print 'before attr override: have fork: %s' % Handler.have_fork
Handler.have_fork = False
print 'after attr override: have fork: %s' % Handler.have_fork
# server = BaseHTTPServer.HTTPServer((IP, PORT), Handler)
# server.serve_forever()
# server = ThreadingServer((IP,PORT), Handler)
server = ForkingServer((IP, PORT), Handler)
print 'server attr: have fork: %s'\
% server.RequestHandlerClass.have_fork
try:
while True:
sys.stdout.flush()
server.handle_request()
except KeyboardInterrupt:
print 'Server killed'
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\nimport time\nimport threading\nimport threadactive\n\nclass BackWorker(threadactive.Agent):\n def tick(self):\n threadactive.Agent.tick(self)\n print(\"[%s][%d] front\" % (threading.current_thread().getName(), t... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\nimport time\nimport threading\nimport threadactive\n\nclass BackWorker(threadactive.Agent):\n def tick(self):\n threadactive.Agent.tick(self)\n print(\"[%s][%d] front\" % (threading.current_threa... | ```python
import time
import threading
import threadactive
class BackWorker(threadactive.Agent):
def tick(self):
threadactive.Agent.tick(self)
print("[%s][%d] front" % (threading.current_thread().getName(), time.clock()) )
self.print_in_front2()
self.print_in_back()
time.sleep(1)
@threadactive.backend
def print_in_back(self, *args, **kwargs):
print("[%s][%d] back" % (threading.current_thread().getName(), time.clock()) )
self.print_in_back2()
if time.clock() > 3:
self.back_to_front()
@threadactive.frontend
def back_to_front(self, *args, **kwargs):
print("[%s][%d] back to front" % (threading.current_thread().getName(), time.clock()) )
@threadactive.frontend
def print_in_front2(self, *args, **kwargs):
print("[%s][%d] front2" % (threading.current_thread().getName(), time.clock()) )
@threadactive.backend
def print_in_back2(self, *args, **kwargs):
print("[%s][%d] back2" % (threading.current_thread().getName(), time.clock()) )
def main():
i = 0
bw = BackWorker()
while True:
bw.tick()
# restart backend thread
i += 1
if i > 5:
bw.stop_backend()
bw.start_backend()
i = 0
if __name__ == '__main__':
main()
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\n#!/usr/bin/python\nfrom BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer\nfrom os import curdir, sep\nimport sys, getopt\nimport cgi\n\nclass ServerHandler(BaseHTTPRequestHandler):\n\n\t# Handler for the GET requests\n\tdef do_GET(self):\n\t\... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\n#!/usr/bin/python\nfrom BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer\nfrom os import curdir, sep\nimport sys, getopt\nimport cgi\n\nclass ServerHandler(BaseHTTPRequestHandler):\n\n\t# Handler for the GET requests\n\tdef do... | ```python
#!/usr/bin/python
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from os import curdir, sep
import sys, getopt
import cgi
class ServerHandler(BaseHTTPRequestHandler):
# Handler for the GET requests
def do_GET(self):
if self.path=="/":
self.path="/app/views/index.html"
try:
# Check the file extension required and set the right mime type
sendReply = False
if self.path.endswith(".html"):
mimetype='text/html'
sendReply = True
if self.path.endswith(".json"):
mimetype='text/json'
sendReply = True
if self.path.endswith(".xml"):
mimetype='text/xml'
sendReply = True
if self.path.endswith(".png"):
mimetype='image/png'
sendReply = True
if self.path.endswith(".jpg"):
mimetype='image/jpg'
sendReply = True
if self.path.endswith(".gif"):
mimetype='image/gif'
sendReply = True
if self.path.endswith(".css"):
mimetype='text/css'
sendReply = True
if self.path.endswith(".js"):
mimetype='application/javascript'
sendReply = True
if sendReply == True:
# Open the static file requested and send it
f = open(curdir + sep + self.path)
self.send_response(200)
self.send_header('Content-type',mimetype)
self.end_headers()
self.wfile.write(f.read())
f.close()
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
# Handler for the POST requests
def do_POST(self):
if self.path=="submit" or self.path=="send":
form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
environ={'REQUEST_METHOD':'POST', 'CONTENT_TYPE':self.headers['Content-Type'],}
)
# print "Your name is: %s" % form["your_name"].value
self.send_response(200)
self.end_headers()
# self.wfile.write("Thanks %s !" % form["your_name"].value)
return
def main(argv=None):
address = '127.0.0.1'
port = 8000
if argv is not None:
try:
opts, args = getopt.getopt(argv,"hp:a:",["port=","address="])
except getopt.GetoptError:
print 'simple_cgi_python_server.py -p <port> -a <address>'
for opt, arg in opts:
if opt == '-h':
print 'simple_cgi_python_server.py -p <port> -a <address>'
elif opt in ("-p", "--port"):
try:
port = int(arg)
except ValueError:
print "This port [", arg, "] is incorrect, try a valid integer for port..."
sys.exit(3)
elif opt in ("-a", "--address"):
address = arg
try:
# Create a web server and define the handler to manage the incoming request
server = HTTPServer((address, port), ServerHandler)
socket_info = server.socket.getsockname()
print "Serving HTTP on", socket_info[0], "port", socket_info[1], "..."
# Wait forever for incoming htto requests
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
if __name__ == '__main__':
main(sys.argv[1:])
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigrat... | ```python
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Pagina.image'
db.add_column(u'manga_pagina', 'image',
self.gf('django.db.models.fields.files.ImageField')(default=None, max_length=100),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Pagina.image'
db.delete_column(u'manga_pagina', 'image')
models = {
u'manga.capitulo': {
'Meta': {'ordering': "('num', 'titulo')", 'object_name': 'Capitulo'},
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'num': ('django.db.models.fields.IntegerField', [], {}),
'serie': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['manga.Serie']"}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '90'})
},
u'manga.pagina': {
'Meta': {'ordering': "('num',)", 'object_name': 'Pagina'},
'capitulo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['manga.Capitulo']"}),
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'num': ('django.db.models.fields.IntegerField', [], {})
},
u'manga.serie': {
'Meta': {'ordering': "('nome',)", 'object_name': 'Serie'},
'capitulos': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django_pg.models.fields.uuid.UUIDField', [], {u'auto_add': "u'uuid:uuid4'", 'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '90'})
}
}
complete_apps = ['manga']
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Deleting field 'WorkFlowStep.depth'\... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Deleting field 'Work... | ```python
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Replace WorkFlowStep's treebeard fields (depth/path/numchild) with a
    plain 'title' CharField."""

    def forwards(self, orm):
        # Deleting field 'WorkFlowStep.depth'
        db.delete_column(u'calc_workflowstep', 'depth')

        # Deleting field 'WorkFlowStep.path'
        db.delete_column(u'calc_workflowstep', 'path')

        # Deleting field 'WorkFlowStep.numchild'
        db.delete_column(u'calc_workflowstep', 'numchild')

        # Adding field 'WorkFlowStep.title'
        # NOTE(review): default=1 on a CharField is the one-off value entered
        # at schemamigration time; it only backfills pre-existing rows.
        db.add_column(u'calc_workflowstep', 'title',
                      self.gf('django.db.models.fields.CharField')(default=1, max_length=255),
                      keep_default=False)

    def backwards(self, orm):
        # Adding field 'WorkFlowStep.depth'
        db.add_column(u'calc_workflowstep', 'depth',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=1),
                      keep_default=False)

        # Adding field 'WorkFlowStep.path'
        db.add_column(u'calc_workflowstep', 'path',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=255, unique=True),
                      keep_default=False)

        # Adding field 'WorkFlowStep.numchild'
        db.add_column(u'calc_workflowstep', 'numchild',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
                      keep_default=False)

        # Deleting field 'WorkFlowStep.title'
        db.delete_column(u'calc_workflowstep', 'title')

    # Frozen ORM snapshot used by South when running this migration.
    # Generated data -- must not be edited by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'calc.experimentindexpage': {
            'Meta': {'object_name': 'ExperimentIndexPage', '_ormbases': [u'wagtailcore.Page']},
            'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
            u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'calc.experimentpage': {
            'Meta': {'object_name': 'ExperimentPage', '_ormbases': [u'wagtailcore.Page']},
            'body': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
            'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
            u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'calc.experimentrelatedlink': {
            'Meta': {'ordering': "['sort_order']", 'object_name': 'ExperimentRelatedLink'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link_document': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
            'link_page': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtailcore.Page']"}),
            'page': ('modelcluster.fields.ParentalKey', [], {'related_name': "'related_links'", 'to': u"orm['calc.ExperimentPage']"}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'calc.workflowindexpage': {
            'Meta': {'object_name': 'WorkflowIndexPage', '_ormbases': [u'wagtailcore.Page']},
            'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
            u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'calc.workflowpage': {
            'Meta': {'object_name': 'WorkflowPage', '_ormbases': [u'wagtailcore.Page']},
            'example_file': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
            'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
            u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['wagtailcore.Page']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'calc.workflowstep': {
            'Meta': {'ordering': "['sort_order']", 'object_name': 'WorkFlowStep'},
            'example_input': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['wagtaildocs.Document']"}),
            'fields_list': ('dbarray.fields.CharArrayField', [], {'max_length': '10', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intro': ('wagtail.wagtailcore.fields.RichTextField', [], {'blank': 'True'}),
            'page': ('modelcluster.fields.ParentalKey', [], {'blank': 'True', 'related_name': "'workflow_steps'", 'null': 'True', 'to': u"orm['calc.WorkflowPage']"}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'wagtailcore.page': {
            'Meta': {'object_name': 'Page'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': u"orm['contenttypes.ContentType']"}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'expire_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'go_live_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'has_unpublished_changes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_pages'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'search_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'seo_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'show_in_menus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'wagtaildocs.document': {
            'Meta': {'object_name': 'Document'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uploaded_by_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['calc']
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# storecmds.py \n# Store the requested commands and arguments \n# until given the go-ahead to run them. \n\n# The TECO commands we want are - \n# Files - Read, write, save, close. \n# Lines - Move between lines, within lines. G... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# storecmds.py \n# Store the requested commands and arguments \n# until given the go-ahead to run them. \n\n# The TECO commands we want are - \n# Files - Read, write, save, close. \n# Lines - Move between lines,... | ```python
# storecmds.py
# Store the requested commands and arguments
# until given the go-ahead to run them.
# The TECO commands we want are -
# Files - Read, write, save, close.
# Lines - Move between lines, within lines. Go to a line.
# Move "x" lines up or down. Move "x" bytes back or forward.
# Editing - Insert, delete, type.
# Looping - repeat a command "x" times.
# Variables - define and use.
# Macros - define and run.
# BUG FIX: the original 'argstack = cmdstack = []' aliased both names to a
# single shared list, and the function definitions below then shadowed the
# lists entirely, so argstack.push()/cmdstack.push() were attribute errors
# on function objects. Use two distinct backing lists instead.
arg_items = []
cmd_items = []
cmddict = {}

# A stack for arguments.
def argstack(args):
    """Push every argument in *args* onto the argument stack."""
    for arg in args:
        # BUG FIX: Python lists have no .push(); use .append().
        arg_items.append(arg)

# A stack for commands.
def cmdstack(cmd):
    """Push a single command onto the command stack."""
    cmd_items.append(cmd)

# A dict to map TECO command abbreviations to their Python equivalents.
def fillcmddict():
    """Populate cmddict with the TECO abbreviation -> action mapping."""
    cmddict.update({"T": "print",
                    "D": "del",
                    "L": "move",
                    "I": "insert",
                    "S": "search"})

# Print the command dict
fillcmddict()
for x in cmddict.items():
    # BUG FIX: Python 2 'print x' statement converted to print() call.
    print(x)
``` |
[
{
"content": "Recreate the original code text:\n```python\n# Code for line numbers\n# https://stackoverflow.com/questions/40386194/create-text-area-textedit-with-line-number-in-pyqt-5\n\nfrom PyQt4.Qt import Qt\nfrom PyQt4.QtCore import QRect\nfrom PyQt4.QtCore import QSize\nfrom PyQt4.QtCore import SIGNAL\nfro... | [
{
"content": "Recreate the original code text:\n<|memory_start|>```python\n# Code for line numbers\n# https://stackoverflow.com/questions/40386194/create-text-area-textedit-with-line-number-in-pyqt-5\n\nfrom PyQt4.Qt import Qt\nfrom PyQt4.QtCore import QRect\nfrom PyQt4.QtCore import QSize\nfrom PyQt4.QtCore im... | ```python
# Code for line numbers
# https://stackoverflow.com/questions/40386194/create-text-area-textedit-with-line-number-in-pyqt-5
from PyQt4.Qt import Qt
from PyQt4.QtCore import QRect
from PyQt4.QtCore import QSize
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QColor
from PyQt4.QtGui import QPainter
from PyQt4.QtGui import QPlainTextEdit
from PyQt4.QtGui import QTextEdit
from PyQt4.QtGui import QTextFormat
from PyQt4.QtGui import QWidget
class LineNumberArea(QWidget):
    """Gutter widget that displays line numbers for a CodeEditor.

    The actual width calculation and painting are delegated to the editor.
    """

    def __init__(self, editor):
        super(LineNumberArea, self).__init__(editor)
        self.myeditor = editor

    def sizeHint(self):
        """Preferred size: just wide enough for the line numbers."""
        # BUG FIX: was 'self.editor', an attribute that is never set;
        # __init__ stores the editor as 'self.myeditor' (AttributeError).
        return QSize(self.myeditor.line_number_area_width(), 0)

    def paintEvent(self, event):
        """Delegate painting of the gutter to the owning editor."""
        self.myeditor.line_number_area_paint_event(event)
class CodeEditor(QPlainTextEdit):
    """Plain-text editor with a line-number gutter, current-line highlight,
    and key-event reporting through a pair of queues.

    Based on https://stackoverflow.com/questions/40386194
    """
    def __init__(self, Q, outq):
        super(CodeEditor, self).__init__()
        self.lineNumberArea = LineNumberArea(self)
        # Cursor (line, column) recorded just before the latest key press
        self.previous_loc = (0, 0)
        # Old-style PyQt4 signal connections keep the gutter and the
        # current-line highlight in sync with the document
        self.connect(self, SIGNAL('blockCountChanged(int)'), self.update_line_number_area_width)
        self.connect(self, SIGNAL('updateRequest(QRect,int)'), self.update_line_number_area)
        self.connect(self, SIGNAL('cursorPositionChanged()'), self.highlight_current_line)
        self.update_line_number_area_width(0)
        # NOTE(review): Q looks like an input queue and Q_out the queue on
        # which key events are published -- confirm against the caller.
        self.Q = Q
        self.Q_out = outq
    def line_number_area_width(self):
        """Return the pixel width needed for the widest line number."""
        digits = 1
        count = max(1, self.blockCount())
        # Count decimal digits of the highest block number.
        # NOTE(review): under Python 3 'count /= 10' is float division; the
        # loop still terminates with the same digit count.
        while count >= 10:
            count /= 10
            digits += 1
        space = 3 + self.fontMetrics().width('30') * digits
        return space
    def update_line_number_area_width(self, _):
        """Reserve viewport space on the left for the gutter."""
        self.setViewportMargins(self.line_number_area_width(), 0, 0, 0)
    def update_line_number_area(self, rect, dy):
        """Scroll or repaint the gutter so it tracks the viewport."""
        if dy:
            self.lineNumberArea.scroll(0, dy)
        else:
            self.lineNumberArea.update(0, rect.y(), self.lineNumberArea.width(),
                                       rect.height())
        if rect.contains(self.viewport().rect()):
            self.update_line_number_area_width(0)
    def resizeEvent(self, event):
        """Keep the gutter geometry glued to the editor's left edge."""
        super(CodeEditor, self).resizeEvent(event)
        cr = self.contentsRect()
        self.lineNumberArea.setGeometry(QRect(cr.left(), cr.top(),
                                              self.line_number_area_width(), cr.height()))
    def line_number_area_paint_event(self, event):
        """Paint the visible block numbers into the gutter widget."""
        mypainter = QPainter(self.lineNumberArea)
        mypainter.fillRect(event.rect(), Qt.lightGray)
        block = self.firstVisibleBlock()
        block_number = block.blockNumber()
        top = self.blockBoundingGeometry(block).translated(self.contentOffset()).top()
        bottom = top + self.blockBoundingRect(block).height()
        # Just to make sure I use the right font
        height = self.fontMetrics().height()
        # Walk visible blocks top to bottom, drawing 1-based line numbers
        while block.isValid() and (top <= event.rect().bottom()):
            if block.isVisible() and (bottom >= event.rect().top()):
                number = str(block_number + 1)
                mypainter.setPen(Qt.black)
                mypainter.drawText(0, top, self.lineNumberArea.width(), height,
                                   Qt.AlignCenter, number)
            block = block.next()
            top = bottom
            bottom = top + self.blockBoundingRect(block).height()
            block_number += 1
    def highlight_current_line(self):
        """Highlight the cursor's line (skipped when read-only)."""
        extra_selections = []
        if not self.isReadOnly():
            selection = QTextEdit.ExtraSelection()
            line_color = QColor(Qt.yellow).lighter(160)
            selection.format.setBackground(line_color)
            selection.format.setProperty(QTextFormat.FullWidthSelection, True)
            selection.cursor = self.textCursor()
            selection.cursor.clearSelection()
            extra_selections.append(selection)
        self.setExtraSelections(extra_selections)
    def keyPressEvent(self, QKeyEvent):
        """Record the cursor position before default key handling runs."""
        self.previous_loc = (self.textCursor().blockNumber() + 1, self.textCursor().columnNumber())
        return super(CodeEditor, self).keyPressEvent(QKeyEvent)
    def keyReleaseEvent(self, QKeyEvent):
        """Publish the released key plus the pre-press location on Q_out."""
        l = QKeyEvent.text()
        if QKeyEvent.key() == Qt.Key_Backspace:
            print QKeyEvent.key()
            l = 'backspace'
        # Message format: "k*<key>*<line>*<col>"
        self.Q_out.put("%s*%s*%d*%d" % ("k", l, self.previous_loc[0], self.previous_loc[1]), timeout=1)
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n## begin license ##\n# \n# \"Meresco Examples\" is a project demonstrating some of the\n# features of various components of the \"Meresco Suite\".\n# Also see http://meresco.org. \n# \n# Copyright (C) 2007-2008 S... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n## begin license ##\n# \n# \"Meresco Examples\" is a project demonstrating some of the\n# features of various components of the \"Meresco Suite\".\n# Also see http://meresco.org. \n# \n# Copyright... | ```python
## begin license ##
#
# "Meresco Examples" is a project demonstrating some of the
# features of various components of the "Meresco Suite".
# Also see http://meresco.org.
#
# Copyright (C) 2007-2008 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2009 Delft University of Technology http://www.tudelft.nl
# Copyright (C) 2009 Tilburg University http://www.uvt.nl
#
# This file is part of "Meresco Examples"
#
# "Meresco Examples" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Examples" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Examples"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from pkgutil import extend_path
# Declare this directory a namespace package: merge in any other directories
# on sys.path that provide the same package name.
__path__ = extend_path(__path__, __name__)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\n\n# Andy Sayler\n# 2015\n# pytutamen Package\n# Tutamen Client Library\n\n\n### Imports ###\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ ... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n\n# Andy Sayler\n# 2015\n# pytutamen Package\n# Tutamen Client Library\n\n\n### Imports ###\n\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n... | ```python
# -*- coding: utf-8 -*-
# Andy Sayler
# 2015
# pytutamen Package
# Tutamen Client Library
### Imports ###
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import os
import os.path
import requests
### Constants ###
_API_BASE = 'api'  # URL path segment prefixing every API endpoint
_API_VERSION = 'v1'  # API version segment appended after _API_BASE
_TOKENS_DELIMINATOR = ':'  # separator joining multiple tokens in one header
_TOKENS_HEADER = 'tutamen-tokens'  # HTTP header carrying authorization tokens
### Exceptions ###
class ServerConnectionException(Exception):
    """Raised when a server connection cannot be configured or used."""


class ClientException(Exception):
    """Raised for client-side failures."""
### Objects ###
class ServerConnection(object):
    """HTTPS connection to a Tutamen server.

    Wraps a requests.Session configured with the server CA certificate and
    (optionally) the account/client certificate pair, and exposes
    token-authenticated HTTP verbs against the versioned API root.
    """

    def __init__(self, server_url=None, server_name=None, server_ca_crt_path=None,
                 account_uid=None, client_uid=None, no_client_crt=False,
                 conf=None, conf_path=None):
        """Store connection parameters and locate client credentials.

        Raises ServerConnectionException when required arguments or
        credential files are missing.
        """

        # Check Args
        if not server_url:
            raise ServerConnectionException("server_url required")
        if not server_name:
            raise ServerConnectionException("server_name required")

        # Call Parent
        super().__init__()

        # Setup Properties
        self._url_server = server_url
        self._server_name = server_name
        self._path_ca = server_ca_crt_path
        self._session = None

        # Setup Conf
        if not conf:
            # NOTE(review): 'config' is not imported in this module --
            # confirm the intended import (presumably the package's client
            # config module).
            conf = config.ClientConfig(conf_path=conf_path)
        self._conf = conf

        # Get UIDs
        if not account_uid:
            account_uid = conf.defaults_get_account_uid()
            if not account_uid:
                # BUG FIX: previously raised the undefined name
                # 'ACServerConnectionException', which was a NameError.
                raise ServerConnectionException("Missing Default Account UID")
        self._account_uid = account_uid
        if not client_uid:
            client_uid = conf.defaults_get_client_uid()
            if not client_uid:
                # BUG FIX: previously raised the undefined name
                # 'ACServerConnectionException', which was a NameError.
                raise ServerConnectionException("Missing Default Client UID")
        self._client_uid = client_uid

        # Get Certs
        if not no_client_crt:
            client_key_path = conf.path_client_key(account_uid, client_uid)
            if not os.path.isfile(client_key_path):
                raise ServerConnectionException("Missing Client Key")
            self._client_key_path = client_key_path
            client_crt_path = conf.path_client_crt(account_uid, client_uid, server_name)
            if not os.path.isfile(client_crt_path):
                raise ServerConnectionException("Missing Client Cert")
            self._client_crt_path = client_crt_path
        else:
            self._client_key_path = None
            self._client_crt_path = None

    def open(self):
        """Create the underlying requests.Session (idempotent)."""
        if not self._session:
            ses = requests.Session()
            if self._path_ca:
                ses.verify = self._path_ca
            else:
                ses.verify = True
            if self._client_crt_path and self._client_key_path:
                ses.cert = (self._client_crt_path, self._client_key_path)
            self._session = ses

    def close(self):
        """Close and discard the session (idempotent)."""
        if self._session:
            self._session.close()
            self._session = None

    def __enter__(self):
        """Context-manager entry: open the session."""
        self.open()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Context-manager exit: close the session, never suppress errors."""
        self.close()
        return False

    @property
    def is_open(self):
        """True while a session is active."""
        return bool(self._session)

    @property
    def server_name(self):
        """Name of the server (selects the client certificate)."""
        return self._server_name

    @property
    def url_srv(self):
        """Base URL of the server."""
        return self._url_server

    @property
    def url_api(self):
        """Versioned API root, e.g. '<server>/api/v1'."""
        return "{}/{}/{}".format(self.url_srv, _API_BASE, _API_VERSION)

    def _tokens_to_header(self, tokens=None):
        """Collapse a list of tokens into the tutamen-tokens header dict."""
        if tokens is None:
            tokens = []
        tokens_str = ""
        for token in tokens:
            tokens_str += token.strip()
            tokens_str += _TOKENS_DELIMINATOR
        tokens_str = tokens_str.strip(_TOKENS_DELIMINATOR)
        header = {_TOKENS_HEADER: tokens_str}
        return header

    def http_post(self, endpoint, json=None, tokens=None, auth=None):
        """POST 'json' to '<api>/<endpoint>/'; return the decoded response.

        Raises requests.HTTPError on a non-2xx status.
        """
        url = "{:s}/{:s}/".format(self.url_api, endpoint)
        header = self._tokens_to_header(tokens)
        res = self._session.post(url, json=json, headers=header, auth=auth)
        res.raise_for_status()
        return res.json()

    def http_put(self, endpoint, json=None, tokens=None, auth=None):
        """PUT 'json' to '<api>/<endpoint>/'; return the decoded response.

        Raises requests.HTTPError on a non-2xx status.
        """
        url = "{:s}/{:s}/".format(self.url_api, endpoint)
        header = self._tokens_to_header(tokens)
        res = self._session.put(url, json=json, headers=header, auth=auth)
        res.raise_for_status()
        return res.json()

    def http_get(self, endpoint=None, tokens=None, auth=None):
        """GET '<api>/<endpoint>/'; return the decoded response.

        Raises requests.HTTPError on a non-2xx status.
        """
        url = "{:s}/{:s}/".format(self.url_api, endpoint)
        header = self._tokens_to_header(tokens)
        res = self._session.get(url, headers=header, auth=auth)
        res.raise_for_status()
        return res.json()

    def http_delete(self, endpoint=None, tokens=None, auth=None):
        """DELETE '<api>/<endpoint>/'; return the decoded response.

        Raises requests.HTTPError on a non-2xx status.
        """
        url = "{:s}/{:s}/".format(self.url_api, endpoint)
        header = self._tokens_to_header(tokens)
        res = self._session.delete(url, headers=header, auth=auth)
        res.raise_for_status()
        return res.json()
class ObjectClient(object):
    """Base class for API clients bound to a ServerConnection."""

    def __init__(self, connection):
        """Bind this client to an open-able ServerConnection."""
        # Reject anything that is not a ServerConnection up front
        if not isinstance(connection, ServerConnection):
            msg = "'connection' must of an instance of {}".format(ServerConnection)
            raise TypeError(msg)
        super().__init__()
        self._connection = connection

    @property
    def connection(self):
        """The ServerConnection this client talks through."""
        return self._connection
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python3\n\nimport imp\nimport os\nimport unittest\nfrom pprint import pprint\nimp.load_source('ufw_forward', os.path.join(os.path.dirname(__file__), os.path.pardir, 'library', 'ufw_forward.py'))\nfrom ufw_forward import UFWForwards\n\n\n\n\ncla... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python3\n\nimport imp\nimport os\nimport unittest\nfrom pprint import pprint\nimp.load_source('ufw_forward', os.path.join(os.path.dirname(__file__), os.path.pardir, 'library', 'ufw_forward.py'))\nfrom ufw_forward import UFWForwa... | ```python
#!/usr/bin/env python3
import imp
import os
import unittest
from pprint import pprint
imp.load_source('ufw_forward', os.path.join(os.path.dirname(__file__), os.path.pardir, 'library', 'ufw_forward.py'))
from ufw_forward import UFWForwards
class TestBase(unittest.TestCase):
    """Exercises UFWForwards rule generation against a fixed config."""

    def test_do_magic(self):
        """Generate NAT/filter rules for two port-forward entries and
        print them for manual inspection."""
        # NOTE(review): 'incomming' spellings are the keys the library
        # expects -- do not "fix" them here.
        test = {"incomming_dev": "eth0",
                "outgoing_dev": "lxdbr0",
                "outgoing_network": "10.20.10.0/24",
                "masquerading": True,
                "conntrack_state": "RELATED,ESTABLISHED",
                "reroute": [],
                "forwards": [
                    {
                        "container": "mumble.baviaan.eggie.zone",
                        "destination_ip": "10.20.10.11",
                        "destination_port": [
                            64738
                        ],
                        "incomming_ip": "88.99.152.112",
                        "incomming_port": [
                            64738
                        ],
                        "protocol": [
                            "tcp",
                            "udp"
                        ]
                    },
                    {
                        "container": "brandon-minecraft.baviaan.eggie.zone",
                        "destination_ip": "10.20.10.12",
                        "destination_port": [
                            25565
                        ],
                        "incomming_ip": "88.99.152.112",
                        "incomming_port": [
                            25565
                        ],
                        "protocol": [
                            "tcp"
                        ]
                    }
                ]}
        response = {
            'nat_rules': [],
            'filter_rules': []
        }
        ufw_forwards = UFWForwards(test, False)
        ufw_forwards.nat_rules = response['nat_rules']
        ufw_forwards.filter_rules = response['filter_rules']
        ufw_forwards.generate()
        for rule in response['nat_rules']:
            print(" ".join(rule))
        # BUG FIX: original line had an unbalanced extra closing paren
        # ("pprint(response['filter_rules']))"), a SyntaxError.
        pprint(response['filter_rules'])
        for rule in response['filter_rules']:
            print(" ".join(rule))
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "```python\n#\n#\n\n# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions ... | [
{
"content": "<|memory_start|>```python\n#\n#\n\n# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. ... | ```python
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the job queue handling.
"""
import logging
import errno
import time
import weakref
import threading
import itertools
import operator
import os
try:
# pylint: disable=E0611
from pyinotify import pyinotify
except ImportError:
import pyinotify
from ganeti import asyncnotifier
from ganeti import constants
from ganeti import serializer
from ganeti import locking
from ganeti import luxi
from ganeti import opcodes
from ganeti import opcodes_base
from ganeti import errors
from ganeti import mcpu
from ganeti import utils
from ganeti import jstore
import ganeti.rpc.node as rpc
from ganeti import runtime
from ganeti import netutils
from ganeti import compat
from ganeti import ht
from ganeti import query
from ganeti import qlang
from ganeti import pathutils
from ganeti import vcluster
from ganeti.cmdlib import cluster
#: Callable extracting an object's "id" attribute (used as a key function)
_GetIdAttr = operator.attrgetter("id")
class CancelJob(Exception):
  """Internal exception used to abort a job being processed.

  """
def TimeStampNow():
  """Return the current wall-clock time as a split timestamp.

  @rtype: tuple
  @return: the current time in the (seconds, microseconds) format

  """
  now = time.time()
  return utils.SplitTime(now)
def _CallJqUpdate(runner, names, file_name, content):
  """Updates job queue file after virtualizing filename.

  """
  virtual_path = vcluster.MakeVirtualPath(file_name)
  return runner.call_jobqueue_update(names, virtual_path, content)
class _QueuedOpCode(object):
  """Wrapper tracking the life cycle of a single opcode inside a job.

  @ivar input: the encapsulated L{opcodes.OpCode}
  @ivar status: the current execution status
  @ivar result: the result of the LU execution
  @ivar log: execution log, a list of C{(log_serial, timestamp, level,
    message)} tuples
  @ivar priority: current priority (may change during the opcode's lifetime)
  @ivar start_timestamp: when execution of this opcode started
  @ivar exec_timestamp: when the LU's Exec() was actually invoked
  @ivar end_timestamp: when execution of this opcode finished

  """
  __slots__ = ["input", "status", "result", "log", "priority",
               "start_timestamp", "exec_timestamp", "end_timestamp",
               "__weakref__"]

  def __init__(self, op):
    """Initializes instances of this class.

    @type op: L{opcodes.OpCode}
    @param op: the opcode we encapsulate

    """
    self.input = op
    self.status = constants.OP_STATUS_QUEUED
    self.result = None
    self.log = []
    # No timing information yet; filled in as execution progresses
    self.start_timestamp = self.exec_timestamp = self.end_timestamp = None
    # Initial priority; it may be adjusted later (e.g. while waiting for locks)
    self.priority = getattr(op, "priority", constants.OP_PRIO_DEFAULT)

  @classmethod
  def Restore(cls, state):
    """Restore the _QueuedOpCode from the serialized form.

    @type state: dict
    @param state: the serialized state
    @rtype: _QueuedOpCode
    @return: a new _QueuedOpCode instance

    """
    obj = _QueuedOpCode.__new__(cls)
    obj.input = opcodes.OpCode.LoadOpCode(state["input"])
    obj.status = state["status"]
    obj.result = state["result"]
    obj.log = state["log"]
    # Timestamps and priority may be absent in older serialized states
    for attr in ("start_timestamp", "exec_timestamp", "end_timestamp"):
      setattr(obj, attr, state.get(attr, None))
    obj.priority = state.get("priority", constants.OP_PRIO_DEFAULT)
    return obj

  def Serialize(self):
    """Serializes this _QueuedOpCode.

    @rtype: dict
    @return: the dictionary holding the serialized state

    """
    state = {
      "input": self.input.__getstate__(),
      "status": self.status,
      "result": self.result,
      "log": self.log,
      "start_timestamp": self.start_timestamp,
      "exec_timestamp": self.exec_timestamp,
      "end_timestamp": self.end_timestamp,
      "priority": self.priority,
      }
    return state
class _QueuedJob(object):
"""In-memory job representation.
This is what we use to track the user-submitted jobs. Locking must
be taken care of by users of this class.
@type queue: L{JobQueue}
@ivar queue: the parent queue
@ivar id: the job ID
@type ops: list
@ivar ops: the list of _QueuedOpCode that constitute the job
@type log_serial: int
@ivar log_serial: holds the index for the next log entry
@ivar received_timestamp: the timestamp for when the job was received
@ivar start_timestmap: the timestamp for start of execution
@ivar end_timestamp: the timestamp for end of execution
@ivar writable: Whether the job is allowed to be modified
"""
# pylint: disable=W0212
__slots__ = ["queue", "id", "ops", "log_serial", "ops_iter", "cur_opctx",
"received_timestamp", "start_timestamp", "end_timestamp",
"writable", "archived",
"livelock", "process_id",
"__weakref__"]
def AddReasons(self, pickup=False):
"""Extend the reason trail
Add the reason for all the opcodes of this job to be executed.
"""
count = 0
for queued_op in self.ops:
op = queued_op.input
if pickup:
reason_src_prefix = constants.OPCODE_REASON_SRC_PICKUP
else:
reason_src_prefix = constants.OPCODE_REASON_SRC_OPCODE
reason_src = opcodes_base.NameToReasonSrc(op.__class__.__name__,
reason_src_prefix)
reason_text = "job=%d;index=%d" % (self.id, count)
reason = getattr(op, "reason", [])
reason.append((reason_src, reason_text, utils.EpochNano()))
op.reason = reason
count = count + 1
def __init__(self, queue, job_id, ops, writable):
"""Constructor for the _QueuedJob.
@type queue: L{JobQueue}
@param queue: our parent queue
@type job_id: job_id
@param job_id: our job id
@type ops: list
@param ops: the list of opcodes we hold, which will be encapsulated
in _QueuedOpCodes
@type writable: bool
@param writable: Whether job can be modified
"""
if not ops:
raise errors.GenericError("A job needs at least one opcode")
self.queue = queue
self.id = int(job_id)
self.ops = [_QueuedOpCode(op) for op in ops]
self.AddReasons()
self.log_serial = 0
self.received_timestamp = TimeStampNow()
self.start_timestamp = None
self.end_timestamp = None
self.archived = False
self.livelock = None
self.process_id = None
self.writable = None
self._InitInMemory(self, writable)
assert not self.archived, "New jobs can not be marked as archived"
@staticmethod
def _InitInMemory(obj, writable):
"""Initializes in-memory variables.
"""
obj.writable = writable
obj.ops_iter = None
obj.cur_opctx = None
def __repr__(self):
status = ["%s.%s" % (self.__class__.__module__, self.__class__.__name__),
"id=%s" % self.id,
"ops=%s" % ",".join([op.input.Summary() for op in self.ops])]
return "<%s at %#x>" % (" ".join(status), id(self))
@classmethod
def Restore(cls, queue, state, writable, archived):
  """Restore a _QueuedJob from serialized state:

  @type queue: L{JobQueue}
  @param queue: to which queue the restored job belongs
  @type state: dict
  @param state: the serialized state
  @type writable: bool
  @param writable: Whether job can be modified
  @type archived: bool
  @param archived: Whether job was already archived
  @rtype: _QueuedJob
  @return: the restored L{_QueuedJob} instance

  """
  # Bypass __init__; every attribute is filled in from the serialized
  # state instead
  obj = _QueuedJob.__new__(cls)
  obj.queue = queue
  obj.id = int(state["id"])
  obj.received_timestamp = state.get("received_timestamp", None)
  obj.start_timestamp = state.get("start_timestamp", None)
  obj.end_timestamp = state.get("end_timestamp", None)
  obj.archived = archived
  obj.livelock = state.get("livelock", None)
  obj.process_id = state.get("process_id", None)
  if obj.process_id is not None:
    obj.process_id = int(obj.process_id)

  obj.ops = []
  obj.log_serial = 0
  for op_state in state["ops"]:
    op = _QueuedOpCode.Restore(op_state)
    # Keep log_serial at the highest serial seen across all opcode logs
    for log_entry in op.log:
      obj.log_serial = max(obj.log_serial, log_entry[0])
    obj.ops.append(op)

  cls._InitInMemory(obj, writable)

  return obj
def Serialize(self):
  """Serialize the _QueuedJob instance.

  @rtype: dict
  @return: the serialized state

  """
  state = {
    "id": self.id,
    "start_timestamp": self.start_timestamp,
    "end_timestamp": self.end_timestamp,
    "received_timestamp": self.received_timestamp,
    "livelock": self.livelock,
    "process_id": self.process_id,
    }
  # Each opcode knows how to serialize itself
  state["ops"] = [op.Serialize() for op in self.ops]
  return state
def CalcStatus(self):
  """Compute the status of this job.

  This function iterates over all the _QueuedOpCodes in the job and
  based on their status, computes the job status.

  The algorithm is:
    - if we find a cancelled, or finished with error, the job
      status will be the same
    - otherwise, the last opcode with the status one of:
        - waitlock
        - canceling
        - running
      will determine the job status

    - otherwise, it means either all opcodes are queued, or success,
      and the job status will be the same

  @return: the job status

  """
  status = constants.JOB_STATUS_QUEUED

  all_success = True
  for op in self.ops:
    if op.status == constants.OP_STATUS_SUCCESS:
      continue

    all_success = False

    if op.status == constants.OP_STATUS_QUEUED:
      pass
    elif op.status == constants.OP_STATUS_WAITING:
      status = constants.JOB_STATUS_WAITING
    elif op.status == constants.OP_STATUS_RUNNING:
      status = constants.JOB_STATUS_RUNNING
    elif op.status == constants.OP_STATUS_CANCELING:
      status = constants.JOB_STATUS_CANCELING
      break
    elif op.status == constants.OP_STATUS_ERROR:
      status = constants.JOB_STATUS_ERROR
      # The whole job fails if one opcode failed
      break
    elif op.status == constants.OP_STATUS_CANCELED:
      # FIX: this must be the *job*-level constant; the previous code
      # assigned constants.OP_STATUS_CANCELED (an opcode status), unlike
      # every other branch in this chain which maps to a JOB_STATUS_*
      # value as the docstring promises
      status = constants.JOB_STATUS_CANCELED
      break

  if all_success:
    status = constants.JOB_STATUS_SUCCESS

  return status
def CalcPriority(self):
  """Gets the current priority for this job.

  Only unfinished opcodes are considered. When all are done, the default
  priority is used.

  @rtype: int

  """
  pending = [op.priority for op in self.ops
             if op.status not in constants.OPS_FINALIZED]

  if pending:
    return min(pending)

  # All opcodes are done, assume default priority
  return constants.OP_PRIO_DEFAULT
def GetLogEntries(self, newer_than):
  """Selectively returns the log entries.

  @type newer_than: None or int
  @param newer_than: if this is None, return all log entries,
      otherwise return only the log entries with serial higher
      than this value
  @rtype: list
  @return: the list of the log entries selected

  """
  # None means "return everything"; serials are positive, so -1 matches all
  serial = -1 if newer_than is None else newer_than

  entries = []
  for op in self.ops:
    entries.extend([entry for entry in op.log if entry[0] > serial])

  return entries
def MarkUnfinishedOps(self, status, result):
  """Mark unfinished opcodes with a given status and result.

  This is an utility function for marking all running or waiting to
  be run opcodes with a given status. Opcodes which are already
  finalised are not changed.

  @param status: a given opcode status
  @param result: the opcode result

  """
  seen_unfinished = False
  for op in self.ops:
    if op.status in constants.OPS_FINALIZED:
      # Finalized opcodes may only appear before any unfinished ones
      assert not seen_unfinished, \
        "Finalized opcodes found after non-finalized ones"
      continue
    op.status = status
    op.result = result
    seen_unfinished = True
def Finalize(self):
  """Marks the job as finalized.

  Only the end timestamp is recorded here; the job status itself is
  always derived from the opcode statuses via L{CalcStatus}.

  """
  self.end_timestamp = TimeStampNow()
def Cancel(self):
  """Marks job as canceled/-ing if possible.

  @rtype: tuple; (bool, string)
  @return: Boolean describing whether job was successfully canceled or marked
    as canceling and a text message

  """
  status = self.CalcStatus()

  if status == constants.JOB_STATUS_QUEUED:
    # Nothing has started yet; cancel outright and finalize immediately
    self.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
                           "Job canceled by request")
    self.Finalize()
    return (True, "Job %s canceled" % self.id)

  if status == constants.JOB_STATUS_WAITING:
    # The worker will notice the new status and cancel the job
    self.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
    return (True, "Job %s will be canceled" % self.id)

  logging.debug("Job %s is no longer waiting in the queue", self.id)
  return (False, "Job %s is no longer waiting in the queue" % self.id)
def ChangePriority(self, priority):
  """Changes the job priority.

  @type priority: int
  @param priority: New priority
  @rtype: tuple; (bool, string)
  @return: Boolean describing whether job's priority was successfully changed
    and a text message

  """
  status = self.CalcStatus()

  if status in constants.JOBS_FINALIZED:
    return (False, "Job %s is finished" % self.id)
  elif status == constants.JOB_STATUS_CANCELING:
    return (False, "Job %s is cancelling" % self.id)
  else:
    assert status in (constants.JOB_STATUS_QUEUED,
                      constants.JOB_STATUS_WAITING,
                      constants.JOB_STATUS_RUNNING)

    changed = False
    for op in self.ops:
      if (op.status == constants.OP_STATUS_RUNNING or
          op.status in constants.OPS_FINALIZED):
        # Running/finished opcodes keep their priority; they must all
        # come before any opcode that is still pending
        assert not changed, \
          ("Found opcode for which priority should not be changed after"
           " priority has been changed for previous opcodes")
        continue

      assert op.status in (constants.OP_STATUS_QUEUED,
                           constants.OP_STATUS_WAITING)

      changed = True

      # Set new priority (doesn't modify opcode input)
      op.priority = priority

    if changed:
      return (True, ("Priorities of pending opcodes for job %s have been"
                     " changed to %s" % (self.id, priority)))
    else:
      return (False, "Job %s had no pending opcodes" % self.id)
def SetPid(self, pid):
  """Sets the job's process ID

  @type pid: int
  @param pid: the process ID

  """
  status = self.CalcStatus()

  # Only jobs that have not started executing may have their pid set
  if status not in (constants.JOB_STATUS_QUEUED,
                    constants.JOB_STATUS_WAITING):
    logging.warning("Can set pid only for queued/waiting jobs")
    return

  if self.process_id is not None:
    logging.warning("Replacing the process id %s of job %s with %s",
                    self.process_id, self.id, pid)

  self.process_id = pid
class _OpExecCallbacks(mcpu.OpExecCbBase):
  """Processor callbacks for a single opcode of a queued job.

  Used by the opcode processor (mcpu) to report state transitions and
  log messages back to the job queue, which persists them.

  """
  def __init__(self, queue, job, op):
    """Initializes this class.

    @type queue: L{JobQueue}
    @param queue: Job queue
    @type job: L{_QueuedJob}
    @param job: Job object
    @type op: L{_QueuedOpCode}
    @param op: OpCode

    """
    super(_OpExecCallbacks, self).__init__()

    assert queue, "Queue is missing"
    assert job, "Job is missing"
    assert op, "Opcode is missing"

    self._queue = queue
    self._job = job
    self._op = op

  def _CheckCancel(self):
    """Raises an exception to cancel the job if asked to.

    @raise CancelJob: if the opcode was marked for cancellation

    """
    # Cancel here if we were asked to
    if self._op.status == constants.OP_STATUS_CANCELING:
      logging.debug("Canceling opcode")
      raise CancelJob()

  def NotifyStart(self):
    """Mark the opcode as running, not lock-waiting.

    This is called from the mcpu code as a notifier function, when the LU is
    finally about to start the Exec() method. Of course, to have end-user
    visible results, the opcode must be initially (before calling into
    Processor.ExecOpCode) set to OP_STATUS_WAITING.

    """
    assert self._op in self._job.ops
    assert self._op.status in (constants.OP_STATUS_WAITING,
                               constants.OP_STATUS_CANCELING)

    # Cancel here if we were asked to
    self._CheckCancel()

    logging.debug("Opcode is now running")

    self._op.status = constants.OP_STATUS_RUNNING
    self._op.exec_timestamp = TimeStampNow()

    # And finally replicate the job status
    self._queue.UpdateJobUnlocked(self._job)

  def NotifyRetry(self):
    """Mark opcode again as lock-waiting.

    This is called from the mcpu code just after calling PrepareRetry.
    The opcode will now again acquire locks (more, hopefully).

    """
    self._op.status = constants.OP_STATUS_WAITING
    logging.debug("Opcode will be retried. Back to waiting.")

  def _AppendFeedback(self, timestamp, log_type, log_msgs):
    """Internal feedback append function, with locks

    @type timestamp: tuple (int, int)
    @param timestamp: timestamp of the log message
    @type log_type: string
    @param log_type: log type (one of Types.ELogType)
    @type log_msgs: any
    @param log_msgs: log data to append

    """
    # This should be removed once Feedback() has a clean interface.

    # Feedback can be called with anything, we interpret ELogMessageList as
    # messages that have to be individually added to the log list, but pushed
    # in a single update. Other msgtypes are only transparently passed forward.
    if log_type == constants.ELOG_MESSAGE_LIST:
      log_type = constants.ELOG_MESSAGE
    else:
      log_msgs = [log_msgs]

    for msg in log_msgs:
      # Serials are monotonically increasing per job, across opcodes
      self._job.log_serial += 1
      self._op.log.append((self._job.log_serial, timestamp, log_type, msg))
    # Log entries are not replicated immediately; they get flushed with
    # the next replicated job update
    self._queue.UpdateJobUnlocked(self._job, replicate=False)

  # TODO: Cleanup calling conventions, make them explicit
  def Feedback(self, *args):
    """Append a log entry.

    Calling conventions:

    arg[0]: (optional) string, message type (Types.ELogType)
    arg[1]: data to be interpreted as a message

    """
    assert len(args) < 3

    # TODO: Use separate keyword arguments for a single string vs. a list.
    if len(args) == 1:
      log_type = constants.ELOG_MESSAGE
      log_msg = args[0]
    else:
      (log_type, log_msg) = args

    # The time is split to make serialization easier and not lose
    # precision.
    timestamp = utils.SplitTime(time.time())

    self._AppendFeedback(timestamp, log_type, log_msg)

  def CurrentPriority(self):
    """Returns current priority for opcode.

    """
    assert self._op.status in (constants.OP_STATUS_WAITING,
                               constants.OP_STATUS_CANCELING)

    # Cancel here if we were asked to
    self._CheckCancel()

    return self._op.priority

  def SubmitManyJobs(self, jobs):
    """Submits jobs for processing.

    See L{JobQueue.SubmitManyJobs}.

    """
    # Locking is done in job queue
    return self._queue.SubmitManyJobs(jobs)
def _EncodeOpError(err):
  """Encodes an error which occurred while processing an opcode.

  Ganeti-specific errors are encoded as-is; any other exception is
  wrapped into an L{errors.OpExecError} first.

  """
  wrapped = (err if isinstance(err, errors.GenericError)
             else errors.OpExecError(str(err)))

  return errors.EncodeException(wrapped)
class _TimeoutStrategyWrapper:
  """Caches the next value produced by a timeout-strategy callable.

  L{Peek} can be called any number of times without consuming the cached
  value; L{Next} consumes it, so the following call fetches a fresh
  timeout from the wrapped function.

  """
  def __init__(self, fn):
    """Initializes this class.

    """
    self._fn = fn
    self._next = None

  def _Advance(self):
    """Fetches a fresh timeout from the factory if none is cached.

    """
    if self._next is None:
      self._next = self._fn()

  def Peek(self):
    """Returns the next timeout without consuming it.

    """
    self._Advance()
    return self._next

  def Next(self):
    """Returns the next timeout and clears the cache.

    """
    value = self.Peek()
    self._next = None
    return value
class _OpExecContext:
  """Per-opcode execution context, kept across lock-retry rounds.

  """
  def __init__(self, op, index, log_prefix, timeout_strategy_factory):
    """Initializes this class.

    """
    self.op = op
    self.index = index
    self.log_prefix = log_prefix
    self.summary = op.input.Summary()

    # Create local copy to modify
    if getattr(op.input, opcodes_base.DEPEND_ATTR, None):
      self.jobdeps = op.input.depends[:]
    else:
      self.jobdeps = None

    self._timeout_strategy_factory = timeout_strategy_factory
    self._ResetTimeoutStrategy()

  def _ResetTimeoutStrategy(self):
    """Creates a new timeout strategy.

    """
    self._timeout_strategy = \
      _TimeoutStrategyWrapper(self._timeout_strategy_factory().NextAttempt)

  def CheckPriorityIncrease(self):
    """Checks whether priority can and should be increased.

    Called when locks couldn't be acquired.

    @rtype: bool
    @return: whether the priority was actually changed

    """
    op = self.op

    # Exhausted all retries and next round should not use blocking acquire
    # for locks?
    if (self._timeout_strategy.Peek() is None and
        op.priority > constants.OP_PRIO_HIGHEST):
      logging.debug("Increasing priority")
      # Lower numbers mean higher priority, hence the decrement
      op.priority -= 1
      self._ResetTimeoutStrategy()
      return True

    return False

  def GetNextLockTimeout(self):
    """Returns the next lock acquire timeout.

    """
    return self._timeout_strategy.Next()
class _JobProcessor(object):
  """Drives the execution of a single job, one opcode per invocation.

  """
  # Return values of __call__: job should be re-queued (DEFER), the
  # dependency manager will re-schedule it (WAITDEP), or it's done
  (DEFER,
   WAITDEP,
   FINISHED) = range(1, 4)

  def __init__(self, queue, opexec_fn, job,
               _timeout_strategy_factory=mcpu.LockAttemptTimeoutStrategy):
    """Initializes this class.

    """
    self.queue = queue
    self.opexec_fn = opexec_fn
    self.job = job
    self._timeout_strategy_factory = _timeout_strategy_factory

  @staticmethod
  def _FindNextOpcode(job, timeout_strategy_factory):
    """Locates the next opcode to run.

    @type job: L{_QueuedJob}
    @param job: Job object
    @param timeout_strategy_factory: Callable to create new timeout strategy

    """
    # Create some sort of a cache to speed up locating next opcode for future
    # lookups
    # TODO: Consider splitting _QueuedJob.ops into two separate lists, one for
    # pending and one for processed ops.
    if job.ops_iter is None:
      job.ops_iter = enumerate(job.ops)

    # Find next opcode to run
    while True:
      try:
        (idx, op) = job.ops_iter.next()
      except StopIteration:
        raise errors.ProgrammerError("Called for a finished job")

      if op.status == constants.OP_STATUS_RUNNING:
        # Found an opcode already marked as running
        raise errors.ProgrammerError("Called for job marked as running")

      opctx = _OpExecContext(op, idx, "Op %s/%s" % (idx + 1, len(job.ops)),
                             timeout_strategy_factory)

      if op.status not in constants.OPS_FINALIZED:
        return opctx

      # This is a job that was partially completed before master daemon
      # shutdown, so it can be expected that some opcodes are already
      # completed successfully (if any did error out, then the whole job
      # should have been aborted and not resubmitted for processing).
      logging.info("%s: opcode %s already processed, skipping",
                   opctx.log_prefix, opctx.summary)

  @staticmethod
  def _MarkWaitlock(job, op):
    """Marks an opcode as waiting for locks.

    The job's start timestamp is also set if necessary.

    @type job: L{_QueuedJob}
    @param job: Job object
    @type op: L{_QueuedOpCode}
    @param op: Opcode object
    @rtype: bool
    @return: whether anything changed and the on-disk copy should be
        updated

    """
    assert op in job.ops
    assert op.status in (constants.OP_STATUS_QUEUED,
                         constants.OP_STATUS_WAITING)

    update = False

    op.result = None

    if op.status == constants.OP_STATUS_QUEUED:
      op.status = constants.OP_STATUS_WAITING
      update = True

    if op.start_timestamp is None:
      op.start_timestamp = TimeStampNow()
      update = True

    if job.start_timestamp is None:
      job.start_timestamp = op.start_timestamp
      update = True

    assert op.status == constants.OP_STATUS_WAITING

    return update

  @staticmethod
  def _CheckDependencies(queue, job, opctx):
    """Checks if an opcode has dependencies and if so, processes them.

    @type queue: L{JobQueue}
    @param queue: Queue object
    @type job: L{_QueuedJob}
    @param job: Job object
    @type opctx: L{_OpExecContext}
    @param opctx: Opcode execution context
    @rtype: bool
    @return: Whether opcode will be re-scheduled by dependency tracker

    """
    op = opctx.op

    result = False

    # Dependencies are consumed front-to-front; satisfied ones are popped
    while opctx.jobdeps:
      (dep_job_id, dep_status) = opctx.jobdeps[0]

      (depresult, depmsg) = queue.depmgr.CheckAndRegister(job, dep_job_id,
                                                          dep_status)
      assert ht.TNonEmptyString(depmsg), "No dependency message"

      logging.info("%s: %s", opctx.log_prefix, depmsg)

      if depresult == _JobDependencyManager.CONTINUE:
        # Remove dependency and continue
        opctx.jobdeps.pop(0)
      elif depresult == _JobDependencyManager.WAIT:
        # Need to wait for notification, dependency tracker will re-add job
        # to workerpool
        result = True
        break
      elif depresult == _JobDependencyManager.CANCEL:
        # Job was cancelled, cancel this job as well
        job.Cancel()
        assert op.status == constants.OP_STATUS_CANCELING
        break
      elif depresult in (_JobDependencyManager.WRONGSTATUS,
                         _JobDependencyManager.ERROR):
        # Job failed or there was an error, this job must fail
        op.status = constants.OP_STATUS_ERROR
        op.result = _EncodeOpError(errors.OpExecError(depmsg))
        break
      else:
        raise errors.ProgrammerError("Unknown dependency result '%s'" %
                                     depresult)

    return result

  def _ExecOpCodeUnlocked(self, opctx):
    """Processes one opcode and returns the result.

    @rtype: tuple
    @return: (new opcode status, result or None)

    """
    op = opctx.op

    assert op.status in (constants.OP_STATUS_WAITING,
                         constants.OP_STATUS_CANCELING)

    # The very last check if the job was cancelled before trying to execute
    if op.status == constants.OP_STATUS_CANCELING:
      return (constants.OP_STATUS_CANCELING, None)

    timeout = opctx.GetNextLockTimeout()

    try:
      # Make sure not to hold queue lock while calling ExecOpCode
      result = self.opexec_fn(op.input,
                              _OpExecCallbacks(self.queue, self.job, op),
                              timeout=timeout)
    except mcpu.LockAcquireTimeout:
      assert timeout is not None, "Received timeout for blocking acquire"
      logging.debug("Couldn't acquire locks in %0.6fs", timeout)

      assert op.status in (constants.OP_STATUS_WAITING,
                           constants.OP_STATUS_CANCELING)

      # Was job cancelled while we were waiting for the lock?
      if op.status == constants.OP_STATUS_CANCELING:
        return (constants.OP_STATUS_CANCELING, None)

      # Stay in waitlock while trying to re-acquire lock
      return (constants.OP_STATUS_WAITING, None)
    except CancelJob:
      logging.exception("%s: Canceling job", opctx.log_prefix)
      assert op.status == constants.OP_STATUS_CANCELING
      return (constants.OP_STATUS_CANCELING, None)
    except Exception, err: # pylint: disable=W0703
      logging.exception("%s: Caught exception in %s",
                        opctx.log_prefix, opctx.summary)
      return (constants.OP_STATUS_ERROR, _EncodeOpError(err))
    else:
      logging.debug("%s: %s successful",
                    opctx.log_prefix, opctx.summary)
      return (constants.OP_STATUS_SUCCESS, result)

  def __call__(self, _nextop_fn=None):
    """Continues execution of a job.

    @param _nextop_fn: Callback function for tests
    @return: C{FINISHED} if job is fully processed, C{DEFER} if the job should
      be deferred and C{WAITDEP} if the dependency manager
      (L{_JobDependencyManager}) will re-schedule the job when appropriate

    """
    queue = self.queue
    job = self.job

    logging.debug("Processing job %s", job.id)

    try:
      opcount = len(job.ops)

      assert job.writable, "Expected writable job"

      # Don't do anything for finalized jobs
      if job.CalcStatus() in constants.JOBS_FINALIZED:
        return self.FINISHED

      # Is a previous opcode still pending?
      if job.cur_opctx:
        opctx = job.cur_opctx
        job.cur_opctx = None
      else:
        if __debug__ and _nextop_fn:
          _nextop_fn()
        opctx = self._FindNextOpcode(job, self._timeout_strategy_factory)

      op = opctx.op

      # Consistency check
      assert compat.all(i.status in (constants.OP_STATUS_QUEUED,
                                     constants.OP_STATUS_CANCELING)
                        for i in job.ops[opctx.index + 1:])

      assert op.status in (constants.OP_STATUS_QUEUED,
                           constants.OP_STATUS_WAITING,
                           constants.OP_STATUS_CANCELING)

      assert (op.priority <= constants.OP_PRIO_LOWEST and
              op.priority >= constants.OP_PRIO_HIGHEST)

      waitjob = None

      if op.status != constants.OP_STATUS_CANCELING:
        assert op.status in (constants.OP_STATUS_QUEUED,
                             constants.OP_STATUS_WAITING)

        # Prepare to start opcode
        if self._MarkWaitlock(job, op):
          # Write to disk
          queue.UpdateJobUnlocked(job)

        assert op.status == constants.OP_STATUS_WAITING
        assert job.CalcStatus() == constants.JOB_STATUS_WAITING
        assert job.start_timestamp and op.start_timestamp
        assert waitjob is None

        # Check if waiting for a job is necessary
        waitjob = self._CheckDependencies(queue, job, opctx)

        assert op.status in (constants.OP_STATUS_WAITING,
                             constants.OP_STATUS_CANCELING,
                             constants.OP_STATUS_ERROR)

        if not (waitjob or op.status in (constants.OP_STATUS_CANCELING,
                                         constants.OP_STATUS_ERROR)):
          logging.info("%s: opcode %s waiting for locks",
                       opctx.log_prefix, opctx.summary)

          assert not opctx.jobdeps, "Not all dependencies were removed"

          (op_status, op_result) = self._ExecOpCodeUnlocked(opctx)

          op.status = op_status
          op.result = op_result

          assert not waitjob

      if op.status in (constants.OP_STATUS_WAITING,
                       constants.OP_STATUS_QUEUED):
        # waiting: Couldn't get locks in time
        # queued: Queue is shutting down
        assert not op.end_timestamp
      else:
        # Finalize opcode
        op.end_timestamp = TimeStampNow()

        if op.status == constants.OP_STATUS_CANCELING:
          assert not compat.any(i.status != constants.OP_STATUS_CANCELING
                                for i in job.ops[opctx.index:])
        else:
          assert op.status in constants.OPS_FINALIZED

      if op.status == constants.OP_STATUS_QUEUED:
        # Queue is shutting down
        assert not waitjob

        finalize = False

        # Reset context
        job.cur_opctx = None

        # In no case must the status be finalized here
        assert job.CalcStatus() == constants.JOB_STATUS_QUEUED

      elif op.status == constants.OP_STATUS_WAITING or waitjob:
        finalize = False

        if not waitjob and opctx.CheckPriorityIncrease():
          # Priority was changed, need to update on-disk file
          queue.UpdateJobUnlocked(job)

        # Keep around for another round
        job.cur_opctx = opctx

        assert (op.priority <= constants.OP_PRIO_LOWEST and
                op.priority >= constants.OP_PRIO_HIGHEST)

        # In no case must the status be finalized here
        assert job.CalcStatus() == constants.JOB_STATUS_WAITING

      else:
        # Ensure all opcodes so far have been successful
        assert (opctx.index == 0 or
                compat.all(i.status == constants.OP_STATUS_SUCCESS
                           for i in job.ops[:opctx.index]))

        # Reset context
        job.cur_opctx = None

        if op.status == constants.OP_STATUS_SUCCESS:
          finalize = False

        elif op.status == constants.OP_STATUS_ERROR:
          # If we get here, we cannot afford to check for any consistency
          # any more, we just want to clean up.
          # TODO: Actually, it wouldn't be a bad idea to start a timer
          # here to kill the whole process.
          to_encode = errors.OpExecError("Preceding opcode failed")
          job.MarkUnfinishedOps(constants.OP_STATUS_ERROR,
                                _EncodeOpError(to_encode))
          finalize = True

        elif op.status == constants.OP_STATUS_CANCELING:
          job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED,
                                "Job canceled by request")
          finalize = True

        else:
          raise errors.ProgrammerError("Unknown status '%s'" % op.status)

        if opctx.index == (opcount - 1):
          # Finalize on last opcode
          finalize = True

        if finalize:
          # All opcodes have been run, finalize job
          job.Finalize()

        # Write to disk. If the job status is final, this is the final write
        # allowed. Once the file has been written, it can be archived anytime.
        queue.UpdateJobUnlocked(job)

        assert not waitjob

        if finalize:
          logging.info("Finished job %s, status = %s", job.id, job.CalcStatus())
          return self.FINISHED

      assert not waitjob or queue.depmgr.JobWaiting(job)

      if waitjob:
        return self.WAITDEP
      else:
        return self.DEFER
    finally:
      assert job.writable, "Job became read-only while being processed"
class _JobDependencyManager:
  """Keeps track of job dependencies.

  """
  # Possible results of L{CheckAndRegister}
  (WAIT,
   ERROR,
   CANCEL,
   CONTINUE,
   WRONGSTATUS) = range(1, 6)

  def __init__(self, getstatus_fn):
    """Initializes this class.

    """
    self._getstatus_fn = getstatus_fn

    # Maps a dependency job ID to the set of jobs waiting on it
    self._waiters = {}

  def JobWaiting(self, job):
    """Checks if a job is waiting.

    """
    return compat.any(job in jobs
                      for jobs in self._waiters.values())

  def CheckAndRegister(self, job, dep_job_id, dep_status):
    """Checks if a dependency job has the requested status.

    If the other job is not yet in a finalized status, the calling job will be
    notified (re-added to the workerpool) at a later point.

    @type job: L{_QueuedJob}
    @param job: Job object
    @type dep_job_id: int
    @param dep_job_id: ID of dependency job
    @type dep_status: list
    @param dep_status: Required status
    @rtype: tuple
    @return: (result constant, human-readable message)

    """
    assert ht.TJobId(job.id)
    assert ht.TJobId(dep_job_id)
    assert ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))(dep_status)

    if job.id == dep_job_id:
      return (self.ERROR, "Job can't depend on itself")

    # Get status of dependency job
    try:
      status = self._getstatus_fn(dep_job_id)
    except errors.JobLost, err:
      return (self.ERROR, "Dependency error: %s" % err)

    assert status in constants.JOB_STATUS_ALL

    job_id_waiters = self._waiters.setdefault(dep_job_id, set())

    if status not in constants.JOBS_FINALIZED:
      # Register for notification and wait for job to finish
      job_id_waiters.add(job)
      return (self.WAIT,
              "Need to wait for job %s, wanted status '%s'" %
              (dep_job_id, dep_status))

    # Remove from waiters list
    if job in job_id_waiters:
      job_id_waiters.remove(job)

    # Cancellation propagates unless the caller explicitly accepts it
    if (status == constants.JOB_STATUS_CANCELED and
        constants.JOB_STATUS_CANCELED not in dep_status):
      return (self.CANCEL, "Dependency job %s was cancelled" % dep_job_id)
    elif not dep_status or status in dep_status:
      return (self.CONTINUE,
              "Dependency job %s finished with status '%s'" %
              (dep_job_id, status))
    else:
      return (self.WRONGSTATUS,
              "Dependency job %s finished with status '%s',"
              " not one of '%s' as required" %
              (dep_job_id, status, utils.CommaJoin(dep_status)))

  def _RemoveEmptyWaitersUnlocked(self):
    """Remove all jobs without actual waiters.

    """
    for job_id in [job_id for (job_id, waiters) in self._waiters.items()
                   if not waiters]:
      del self._waiters[job_id]
class JobQueue(object):
"""Queue used to manage the jobs.
"""
def __init__(self, context, cfg):
  """Constructor for JobQueue.

  The constructor will initialize the job queue object and then
  start loading the current jobs from disk, either for starting them
  (if they were queue) or for aborting them (if they were already
  running).

  @type context: GanetiContext
  @param context: the context object for access to the configuration
      data and other ganeti objects

  """
  self.context = context
  # Jobs are cached weakly: entries disappear once nothing else
  # references the job object
  self._memcache = weakref.WeakValueDictionary()
  self._my_hostname = netutils.Hostname.GetSysName()

  # Get initial list of nodes; only master candidates receive queue
  # replication
  self._nodes = dict((n.name, n.primary_ip)
                     for n in cfg.GetAllNodesInfo().values()
                     if n.master_candidate)

  # Remove master node
  self._nodes.pop(self._my_hostname, None)

  # Job dependencies
  self.depmgr = _JobDependencyManager(self._GetJobStatusForDependencies)
def _GetRpc(self, address_list):
  """Gets RPC runner with context.

  @param address_list: node addresses passed through to the RPC runner

  """
  return rpc.JobQueueRunner(self.context, address_list)
@staticmethod
def _CheckRpcResult(result, nodes, failmsg):
  """Verifies the status of an RPC call.

  Since we aim to keep consistency should this node (the current
  master) fail, we will log errors if our rpc fail, and especially
  log the case when more than half of the nodes fails.

  @param result: the data as returned from the rpc call
  @type nodes: list
  @param nodes: the list of nodes we made the call to
  @type failmsg: str
  @param failmsg: the identifier to be used for logging

  """
  failed = []
  success = []

  for node in nodes:
    msg = result[node].fail_msg
    if not msg:
      success.append(node)
      continue
    failed.append(node)
    logging.error("RPC call %s (%s) failed on node %s: %s",
                  result[node].call, failmsg, node, msg)

  # +1 for the master node
  if (len(success) + 1) < len(failed):
    # TODO: Handle failing nodes
    logging.error("More than half of the nodes failed")
def _GetNodeIp(self):
  """Helper for returning the node name/ip list.

  @rtype: (list, list)
  @return: a tuple of two lists, the first one with the node
      names and the second one with the node addresses

  """
  # TODO: Change to "tuple(map(list, zip(*self._nodes.items())))"?
  names = self._nodes.keys()
  addrs = []
  for name in names:
    addrs.append(self._nodes[name])
  return names, addrs
def _UpdateJobQueueFile(self, file_name, data, replicate):
  """Writes a file locally and then replicates it to all nodes.

  This function will replace the contents of a file on the local
  node and then replicate it to all the other nodes we have.

  @type file_name: str
  @param file_name: the path of the file to be replicated
  @type data: str
  @param data: the new contents of the file
  @type replicate: boolean
  @param replicate: whether to spread the changes to the remote nodes

  """
  getents = runtime.GetEnts()
  # Queue files are written with the masterd uid and daemons gid
  utils.WriteFile(file_name, data=data, uid=getents.masterd_uid,
                  gid=getents.daemons_gid,
                  mode=constants.JOB_QUEUE_FILES_PERMS)

  if replicate:
    names, addrs = self._GetNodeIp()
    result = _CallJqUpdate(self._GetRpc(addrs), names, file_name, data)
    self._CheckRpcResult(result, self._nodes, "Updating %s" % file_name)
def _RenameFilesUnlocked(self, rename):
  """Renames a file locally and then replicate the change.

  This function will rename a file in the local queue directory
  and then replicate this rename to all the other nodes we have.

  @type rename: list of (old, new)
  @param rename: List containing tuples mapping old to new names

  """
  # Rename them locally
  for old_name, new_name in rename:
    utils.RenameFile(old_name, new_name, mkdir=True)

  # ... and on all nodes
  names, addrs = self._GetNodeIp()
  rpc_result = self._GetRpc(addrs).call_jobqueue_rename(names, rename)
  self._CheckRpcResult(rpc_result, self._nodes,
                       "Renaming files (%r)" % rename)
@staticmethod
def _GetJobPath(job_id):
  """Returns the job file for a given job id.

  @type job_id: str
  @param job_id: the job identifier
  @rtype: str
  @return: the path to the job file

  """
  # Live (non-archived) jobs are stored flat in the queue directory
  return utils.PathJoin(pathutils.QUEUE_DIR, "job-%s" % job_id)
@staticmethod
def _GetArchivedJobPath(job_id):
  """Returns the archived job file for a give job id.

  @type job_id: str
  @param job_id: the job identifier
  @rtype: str
  @return: the path to the archived job file

  """
  # Archived jobs are placed in a subdirectory chosen by jstore
  return utils.PathJoin(pathutils.JOB_QUEUE_ARCHIVE_DIR,
                        jstore.GetArchiveDirectory(job_id),
                        "job-%s" % job_id)
@staticmethod
def _DetermineJobDirectories(archived):
  """Build list of directories containing job files.

  @type archived: bool
  @param archived: Whether to include directories for archived jobs
  @rtype: list

  """
  result = [pathutils.QUEUE_DIR]

  if archived:
    archive_path = pathutils.JOB_QUEUE_ARCHIVE_DIR
    # Every visible entry below the archive directory is scanned as well
    result.extend(map(compat.partial(utils.PathJoin, archive_path),
                      utils.ListVisibleFiles(archive_path)))

  return result
@classmethod
def _GetJobIDsUnlocked(cls, sort=True, archived=False):
  """Return all known job IDs.

  The method only looks at disk because it's a requirement that all
  jobs are present on disk (so in the _memcache we don't have any
  extra IDs).

  @type sort: boolean
  @param sort: perform sorting on the returned job ids
  @rtype: list
  @return: the list of job IDs

  """
  # A job ID is the integer part of any filename matching the job file
  # pattern, in any of the relevant directories
  job_ids = [int(m.group(1))
             for path in cls._DetermineJobDirectories(archived)
             for m in map(constants.JOB_FILE_RE.match,
                          utils.ListVisibleFiles(path))
             if m]

  if sort:
    job_ids.sort()

  return job_ids
def _LoadJobUnlocked(self, job_id):
  """Loads a job from the disk or memory.

  Given a job id, this will return the cached job object if
  existing, or try to load the job from the disk. If loading from
  disk, it will also add the job to the cache.

  @type job_id: int
  @param job_id: the job id
  @rtype: L{_QueuedJob} or None
  @return: either None or the job object

  """
  assert isinstance(job_id, int), "Job queue: Supplied job id is not an int!"

  job = self._memcache.get(job_id, None)
  if job:
    logging.debug("Found job %s in memcache", job_id)
    assert job.writable, "Found read-only job in memcache"
    return job

  try:
    job = JobQueue._LoadJobFromDisk(self, job_id, False)
    if job is None:
      return job
  except errors.JobFileCorrupted:
    # A corrupted job file is moved to the archive location so it does
    # not break future queue scans
    old_path = self._GetJobPath(job_id)
    new_path = self._GetArchivedJobPath(job_id)
    if old_path == new_path:
      # job already archived (future case)
      logging.exception("Can't parse job %s", job_id)
    else:
      # non-archived case
      logging.exception("Can't parse job %s, will archive.", job_id)
      self._RenameFilesUnlocked([(old_path, new_path)])
    return None

  assert job.writable, "Job just loaded is not writable"

  self._memcache[job_id] = job
  logging.debug("Added job %s to the cache", job_id)
  return job
@staticmethod
def _LoadJobFromDisk(queue, job_id, try_archived, writable=None):
  """Load the given job file from disk.

  Given a job file, read, load and restore it in a _QueuedJob format.

  @type job_id: int
  @param job_id: job identifier
  @type try_archived: bool
  @param try_archived: Whether to try loading an archived job
  @type writable: bool or None
  @param writable: whether the restored job may be modified; if None it
      defaults to True for live jobs and False for archived ones
  @rtype: L{_QueuedJob} or None
  @return: either None or the job object
  @raise errors.JobFileCorrupted: if the job file exists but cannot be
      parsed or restored

  """
  # The live queue directory is tried first, then (optionally) the
  # archive location
  path_functions = [(JobQueue._GetJobPath, False)]

  if try_archived:
    path_functions.append((JobQueue._GetArchivedJobPath, True))

  raw_data = None
  archived = None

  for (fn, archived) in path_functions:
    filepath = fn(job_id)
    logging.debug("Loading job from %s", filepath)
    try:
      raw_data = utils.ReadFile(filepath)
    except EnvironmentError, err:
      # A missing file just means "try the next location"
      if err.errno != errno.ENOENT:
        raise
    else:
      break

  if not raw_data:
    logging.debug("No data available for job %s", job_id)
    return None

  if writable is None:
    writable = not archived

  try:
    data = serializer.LoadJson(raw_data)
    job = _QueuedJob.Restore(queue, data, writable, archived)
  except Exception, err: # pylint: disable=W0703
    raise errors.JobFileCorrupted(err)

  return job
@staticmethod
def SafeLoadJobFromDisk(queue, job_id, try_archived, writable=None):
  """Load the given job file from disk.

  Given a job file, read, load and restore it in a _QueuedJob format.
  In case of error reading the job, it gets returned as None, and the
  exception is logged.

  @type job_id: int
  @param job_id: job identifier
  @type try_archived: bool
  @param try_archived: Whether to try loading an archived job
  @rtype: L{_QueuedJob} or None
  @return: either None or the job object

  """
  try:
    return JobQueue._LoadJobFromDisk(queue, job_id, try_archived,
                                     writable=writable)
  except (errors.JobFileCorrupted, EnvironmentError):
    # Best-effort variant of _LoadJobFromDisk: corrupted or unreadable
    # jobs are reported as missing instead of raising
    logging.exception("Can't load/parse job %s", job_id)
    return None
@classmethod
def SubmitManyJobs(cls, jobs):
    """Create and store multiple jobs.

    Forwards the request over the luxi query socket; the master daemon
    performs the actual job creation.

    @type jobs: list
    @param jobs: job definitions to submit
    @return: whatever the luxi ``SubmitManyJobs`` call returns
      (presumably per-job results; TODO confirm against the luxi client)

    """
    return luxi.Client(address=pathutils.QUERY_SOCKET).SubmitManyJobs(jobs)
@staticmethod
def _ResolveJobDependencies(resolve_fn, deps):
    """Resolves relative job IDs in dependencies.

    @type resolve_fn: callable
    @param resolve_fn: Function to resolve a relative job ID
    @type deps: list
    @param deps: Dependencies
    @rtype: tuple; (boolean, string or list)
    @return: If successful (first tuple item), the returned list contains
      resolved job IDs along with the requested status; if not successful,
      the second element is an error message

    """
    result = []

    for (dep_job_id, dep_status) in deps:
        # Absolute job IDs pass through untouched.
        if not ht.TRelativeJobId(dep_job_id):
            result.append((dep_job_id, dep_status))
            continue

        assert ht.TInt(dep_job_id) and dep_job_id < 0
        try:
            resolved_id = resolve_fn(dep_job_id)
        except IndexError:
            # Abort
            return (False, "Unable to resolve relative job ID %s" % dep_job_id)
        result.append((resolved_id, dep_status))

    return (True, result)
def _GetJobStatusForDependencies(self, job_id):
    """Gets the status of a job for dependencies.

    @type job_id: int
    @param job_id: Job ID
    @return: the job's calculated status
    @raise errors.JobLost: If job can't be found

    """
    # Not using in-memory cache as doing so would require an exclusive lock

    # Try to load from disk (archived jobs included, read-only)
    job = JobQueue.SafeLoadJobFromDisk(self, job_id, True, writable=False)

    # Fix: the original checked ``if job:`` twice in a row; the assert and
    # the status calculation belong in the same branch.
    if job:
        assert not job.writable, "Got writable job"  # pylint: disable=E1101
        return job.CalcStatus()

    raise errors.JobLost("Job %s not found" % job_id)
def UpdateJobUnlocked(self, job, replicate=True):
    """Update a job's on disk storage.

    After a job has been modified, this function needs to be called in
    order to write the changes to disk and replicate them to the other
    nodes.

    @type job: L{_QueuedJob}
    @param job: the changed job
    @type replicate: boolean
    @param replicate: whether to replicate the change to remote nodes

    """
    if __debug__:
        # A job carries an end timestamp exactly when it is finalized
        finalized = job.CalcStatus() in constants.JOBS_FINALIZED
        assert (finalized ^ (job.end_timestamp is None))
        assert job.writable, "Can't update read-only job"
        assert not job.archived, "Can't update archived job"

    filename = self._GetJobPath(job.id)
    data = serializer.DumpJson(job.Serialize())
    logging.debug("Writing job %s to %s", job.id, filename)
    self._UpdateJobQueueFile(filename, data, replicate)
def HasJobBeenFinalized(self, job_id):
    """Checks if a job has been finalized.

    @type job_id: int
    @param job_id: Job identifier
    @rtype: boolean
    @return: True if the job has been finalized,
        False if the timeout has been reached,
        None if the job doesn't exist

    """
    # Archived jobs count too; load read-only.
    job = JobQueue.SafeLoadJobFromDisk(self, job_id, True, writable=False)
    if job is not None:
        return job.CalcStatus() in constants.JOBS_FINALIZED
    elif cluster.LUClusterDestroy.clusterHasBeenDestroyed:
        # FIXME: The above variable is a temporary workaround until the Python job
        # queue is completely removed. When removing the job queue, also remove
        # the variable from LUClusterDestroy.
        return True
    else:
        return None
def CancelJob(self, job_id):
    """Cancels a job.

    This will only succeed if the job has not started yet.

    @type job_id: int
    @param job_id: job ID of job to be cancelled.

    """
    logging.info("Cancelling job %s", job_id)

    def cancel_fn(job):
        # Delegate the state transition to the job object itself
        return job.Cancel()

    return self._ModifyJobUnlocked(job_id, cancel_fn)
def ChangeJobPriority(self, job_id, priority):
    """Changes a job's priority.

    @type job_id: int
    @param job_id: ID of the job whose priority should be changed
    @type priority: int
    @param priority: New priority
    @raise errors.GenericError: if the priority is not a valid submit value

    """
    logging.info("Changing priority of job %s to %s", job_id, priority)

    # Validate before touching the job at all
    if priority not in constants.OP_PRIO_SUBMIT_VALID:
        allowed = utils.CommaJoin(constants.OP_PRIO_SUBMIT_VALID)
        raise errors.GenericError("Invalid priority %s, allowed are %s" %
                                  (priority, allowed))

    return self._ModifyJobUnlocked(
        job_id, lambda job: job.ChangePriority(priority))
def _ModifyJobUnlocked(self, job_id, mod_fn):
    """Modifies a job.

    @type job_id: int
    @param job_id: Job ID
    @type mod_fn: callable
    @param mod_fn: Modifying function, receiving job object as parameter,
      returning tuple of (status boolean, message string)
    @rtype: tuple of (bool, string)
    @return: whatever mod_fn returned, or (False, message) when the job
      could not be loaded

    """
    job = self._LoadJobUnlocked(job_id)
    if not job:
        logging.debug("Job %s not found", job_id)
        return (False, "Job %s not found" % job_id)

    assert job.writable, "Can't modify read-only job"
    assert not job.archived, "Can't modify archived job"

    (success, msg) = mod_fn(job)

    if success:
        # If the job was finalized (e.g. cancelled), this is the final write
        # allowed. The job can be archived anytime.
        self.UpdateJobUnlocked(job)

    return (success, msg)
``` |
[
{
"content": "Here is the snippet:\n```python\n\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport functools\nimport distutils.core\nimport distutils.filelist\nimport re\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import convert_path\nfro... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n\"\"\"Extensions to the 'distutils' for large or complex distributions\"\"\"\n\nimport os\nimport functools\nimport distutils.core\nimport distutils.filelist\nimport re\nfrom distutils.errors import DistutilsOptionError\nfrom distutils.util import c... | ```python
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__metaclass__ = type
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
]
if PY3:
__all__.append('find_namespace_packages')
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder:
    """
    Discover every Python package located beneath a directory tree.
    """

    @classmethod
    def find(cls, where='.', exclude=(), include=('*',)):
        """Return a list all Python packages found within directory 'where'

        'where' is the root directory which will be searched for packages. It
        should be supplied as a "cross-platform" (i.e. URL-style) path; it will
        be converted to the appropriate local path syntax.

        'exclude' is a sequence of package names to exclude; '*' can be used
        as a wildcard in the names, such that 'foo.*' will exclude all
        subpackages of 'foo' (but not 'foo' itself).

        'include' is a sequence of package names to include. If it's
        specified, only the named packages will be included. If it's not
        specified, all found packages will be included. 'include' can contain
        shell style wildcard patterns just like 'exclude'.
        """
        # 'ez_setup' and pycache directories are always excluded.
        excluded = cls._build_filter('ez_setup', '*__pycache__', *exclude)
        included = cls._build_filter(*include)
        return list(
            cls._find_packages_iter(convert_path(where), excluded, included))

    @classmethod
    def _find_packages_iter(cls, where, exclude, include):
        """
        Yield each package under 'where' accepted by the 'include' filter
        and rejected by the 'exclude' filter.
        """
        for root, dirs, files in os.walk(where, followlinks=True):
            # Take a snapshot of the subdirectories, then rebuild 'dirs'
            # with only the ones worth descending into.
            candidates, dirs[:] = dirs[:], []
            for name in candidates:
                full_path = os.path.join(root, name)
                rel_path = os.path.relpath(full_path, where)
                package = rel_path.replace(os.path.sep, '.')

                # Skip directory trees that are not valid packages
                if '.' in name or not cls._looks_like_package(full_path):
                    continue

                # Should this package be included?
                if include(package) and not exclude(package):
                    yield package

                # Keep searching subdirectories, as there may be more packages
                # down there, even if the parent was excluded.
                dirs.append(name)

    @staticmethod
    def _looks_like_package(path):
        """Does a directory look like a package?"""
        marker = os.path.join(path, '__init__.py')
        return os.path.isfile(marker)

    @staticmethod
    def _build_filter(*patterns):
        """
        Given a list of patterns, return a callable that will be true only if
        the input matches at least one of the patterns.
        """
        def matcher(name):
            return any(fnmatchcase(name, pat=pat) for pat in patterns)
        return matcher
class PEP420PackageFinder(PackageFinder):
    """Package finder that also accepts PEP 420 namespace packages.

    Unlike the base finder, a directory does not need an ``__init__.py``
    file to be treated as a package.
    """
    @staticmethod
    def _looks_like_package(path):
        # Every directory is a potential (namespace) package
        return True
# Public entry point: the classic finder requires an ``__init__.py``
# in every package directory.
find_packages = PackageFinder.find

if PY3:
    # PEP 420 namespace packages are only supported on Python 3
    find_namespace_packages = PEP420PackageFinder.find
def _install_setup_requires(attrs):
    """Fetch eggs named in ``setup_requires`` before running the real setup.

    Only ``dependency_links`` and ``setup_requires`` from *attrs* are
    consulted here; all other keys are handled by the actual distribution.
    """
    # Note: do not use `setuptools.Distribution` directly, as
    # our PEP 517 backend patch `distutils.core.Distribution`.
    dist = distutils.core.Distribution(dict(
        (k, v) for k, v in attrs.items()
        if k in ('dependency_links', 'setup_requires')
    ))
    # Honor setup.cfg's options.
    dist.parse_config_files(ignore_option_errors=True)
    if dist.setup_requires:
        dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
    # Make sure we have any requirements needed to interpret 'attrs'.
    _install_setup_requires(attrs)
    return distutils.core.setup(**attrs)


# Re-use the distutils docstring on the wrapper
setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
    # Re-use the (unpatched) distutils Command docstring verbatim
    __doc__ = _Command.__doc__

    # When True, the command accepts positional arguments on the command line
    command_consumes_arguments = False

    def __init__(self, dist, **kw):
        """
        Construct the command for dist, updating
        vars(self) with any keyword parameters.
        """
        _Command.__init__(self, dist)
        vars(self).update(kw)

    def _ensure_stringlike(self, option, what, default=None):
        # Like the distutils helper of the same name, but also writes the
        # default back onto the instance when the option is unset.
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        elif not isinstance(val, string_types):
            raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
                                       % (option, what, val))
        return val

    def ensure_string_list(self, option):
        r"""Ensure that 'option' is a list of strings.  If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
        ["foo", "bar", "baz"].
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, string_types):
            setattr(self, option, re.split(r',\s*|\s+', val))
        else:
            # Already a sequence: every element must be a string
            if isinstance(val, list):
                ok = all(isinstance(v, string_types) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)"
                    % (option, val))

    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        # Same as the distutils implementation, but any extra keyword
        # arguments are copied onto the reinitialized command object.
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        vars(cmd).update(kw)
        return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
    """
    Find all files under 'dir' and return the list of full filenames.
    Unless dir is '.', return full filenames with dir prepended.
    """
    if dir == os.curdir:
        # Report paths relative to the current directory
        return [os.path.relpath(name, start=dir)
                for name in _find_all_simple(dir)]
    return list(_find_all_simple(dir))
# Apply monkey patches
monkey.patch_all()
``` |
[
{
"content": "Here is the code content:\n```python\nimport os\nimport logging\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\n\nimport vqt.cli as vq_cli\nimport vqt.main as vq_main\nimport vqt.saveable as vq_save\nimport vqt.hotkeys as vq_hotkeys\nimport vqt.menubuilder as vq_menu\nfrom vqt.saveable... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nimport os\nimport logging\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\n\nimport vqt.cli as vq_cli\nimport vqt.main as vq_main\nimport vqt.saveable as vq_save\nimport vqt.hotkeys as vq_hotkeys\nimport vqt.menubuilder as vq_menu\nf... | ```python
import os
import logging
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
import vqt.cli as vq_cli
import vqt.main as vq_main
import vqt.saveable as vq_save
import vqt.hotkeys as vq_hotkeys
import vqt.menubuilder as vq_menu
from vqt.saveable import compat_isNone
logger = logging.getLogger(__name__)
class VQDockWidget(vq_hotkeys.HotKeyMixin, QDockWidget):
    """Dock-widget wrapper adding hotkeys and save/restore delegation.

    Wraps a child widget so the containing main window can persist its
    state; binds ctrl+enter to toggle undock-and-maximize.
    """

    def __init__(self, parent):
        QDockWidget.__init__(self, parent)
        vq_hotkeys.HotKeyMixin.__init__(self)
        self.addHotKey('ctrl+enter', 'mem:undockmaximize')
        self.addHotKeyTarget('mem:undockmaximize', self._hotkey_undock_maximize)
        self.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)

    def vqSaveState(self, settings, name, stub=''):
        # Delegate persistence to the wrapped widget, if it supports it
        wid = self.widget()
        if isinstance(wid, vq_save.SaveableWidget):
            return wid.vqSaveState(settings, name, stub)

    def vqRestoreState(self, settings, name, stub=''):
        # Delegate restoration to the wrapped widget, if it supports it
        wid = self.widget()
        if isinstance(wid, vq_save.SaveableWidget):
            return wid.vqRestoreState(settings, name, stub)

    def setWidget(self, widget):
        # If he sets his window title, we want to...
        # (mirror the child's current title and intercept future changes)
        self.setWindowTitle(widget.windowTitle())
        widget.setWindowTitle = self.setWindowTitle
        QDockWidget.setWidget(self, widget)

    def closeEvent(self, event):
        # Detach and close the child, then unregister from the main window
        self.hide()
        w = self.widget()
        w.setParent(None)
        w.close()
        self.parent().vqRemoveDockWidget(self)
        event.accept()

    def _hotkey_undock_maximize(self):
        # if docked, undock
        if not self.isFloating():
            self.setFloating(1)
            # if not maximized, maximize
            if not self.isMaximized():
                self.showMaximized()
            else:
                # else dock
                self.showNormal()
                self.setFloating(False)
        else:
            # else dock
            self.showNormal()
            self.setFloating(False)

        self.show()
        self.raise_()
class VQMainCmdWindow(vq_hotkeys.HotKeyMixin, QMainWindow):
    '''
    A base class for application window's to inherit from.
    '''

    # CLI widget class instantiated as the central widget; subclasses override
    __cli_widget_class__ = vq_cli.VQCli

    def __init__(self, appname, cmd, **kwargs):
        # NOTE(review): super(QMainWindow, self) starts the MRO search
        # *after* QMainWindow, so QMainWindow.__init__ itself is apparently
        # skipped here -- confirm this is intentional before changing.
        super(QMainWindow, self).__init__(**kwargs)
        vq_hotkeys.HotKeyMixin.__init__(self)
        self._vq_appname = appname
        self._vq_dockwidgets = []

        # Persistent settings plus a CLI history file under the home dir
        self._vq_settings = QtCore.QSettings('invisigoth', application=appname, parent=self)
        self._vq_histfile = os.path.join(os.path.expanduser('~'), '.%s_history' % appname)

        self._dock_classes = {}

        self.vqInitDockWidgetClasses()

        self._vq_mbar = vq_menu.VQMenuBar()
        self.setMenuBar(self._vq_mbar)

        # AnimatedDocks, AllowNestedDocks, AllowTabbedDocks, ForceTabbedDocks, VerticalTabs
        self.setDockOptions(self.AnimatedDocks | self.AllowTabbedDocks)

        self._vq_cli = self.__cli_widget_class__(cmd)
        self._vq_cli.input.loadHistory(self._vq_histfile)
        self._vq_cli.sigCliQuit.connect( self.close )

        self.setCentralWidget(self._vq_cli)

        self.vqRestoreGuiSettings(self._vq_settings)

    def vqAddMenuField(self, fname, callback, args=()):
        self._vq_mbar.addField(fname, callback, args=args)

    def vqAddDynMenu(self, fname, callback):
        self._vq_mbar.addDynMenu(fname, callback)

    def vqInitDockWidgetClasses(self):
        # apps can over-ride
        pass

    def vqAddDockWidgetClass(self, cls, args=()):
        # Register a dock widget class for later vqBuildDockWidget() calls
        self._dock_classes[cls.__name__] = (cls, args)

    def vqBuildDockWidget(self, clsname, floating=False, area=QtCore.Qt.TopDockWidgetArea):
        """Instantiate a registered dock class and dock it.

        Returns a (dockwidget, widget) tuple, or None for unknown names.
        """
        res = self._dock_classes.get(clsname)
        if res is None:
            logger.error('vqBuildDockWidget Failed For: %s', clsname)
            return
        cls, args = res
        obj = cls(*args)
        return self.vqDockWidget(obj, area, floating=floating), obj

    def vqRestoreGuiSettings(self, settings, stub=''):
        """Rebuild previously-saved dock widgets, then restore state/geometry."""
        dwcls = settings.value('DockClasses')
        if not compat_isNone(dwcls):
            for i, clsname in enumerate(dwcls):
                name = 'VQDockWidget%d' % i
                try:
                    tup = self.vqBuildDockWidget(str(clsname), floating=False)
                    if tup is not None:
                        d, obj = tup
                        d.setObjectName(name)
                        d.vqRestoreState(settings, name, stub)
                        d.show()
                except Exception as e:
                    logger.error('Error Building: %s: %s', clsname, e)

        # Once dock widgets are loaded, we can restoreState
        state = settings.value('DockState')
        if not compat_isNone(state):
            self.restoreState(state)

        geom = settings.value('DockGeometry')
        if not compat_isNone(geom):
            self.restoreGeometry(geom)

        # Just get all the resize activities done...
        vq_main.eatevents()
        for w in self.vqGetDockWidgets():
            w.show()
        return True

    def vqSaveGuiSettings(self, settings, stub=''):
        """Persist dock classes, per-dock state, geometry and window state."""
        dock_classes = []

        # Enumerate the current dock windows and set
        # their names by their list order...
        for i, w in enumerate(self.vqGetDockWidgets()):
            widget = w.widget()
            dock_classes.append(widget.__class__.__name__)
            name = 'VQDockWidget%d' % i
            w.setObjectName(name)
            w.vqSaveState(settings,name,stub)

        settings.setValue('DockClasses', dock_classes)
        settings.setValue('DockGeometry', self.saveGeometry())
        settings.setValue('DockState', self.saveState())

    def closeEvent(self, event):
        # Save GUI settings and CLI history on the way out
        self.vqSaveGuiSettings(self._vq_settings)
        self._vq_cli.input.saveHistory(self._vq_histfile)
        QMainWindow.closeEvent(self, event)

    def vqGetDockWidgets(self):
        # Return a copy so callers may mutate while iterating
        return list(self._vq_dockwidgets)

    def vqClearDockWidgets(self):
        for wid in self.vqGetDockWidgets():
            wid.close()

    def vqRemoveDockWidget(self, widget):
        self._vq_dockwidgets.remove(widget)
        self.removeDockWidget(widget)

    def vqDockWidget(self, widget, area=QtCore.Qt.TopDockWidgetArea, floating=False):
        """Wrap *widget* in a VQDockWidget, dock it, and return the dock."""
        d = VQDockWidget(self)
        d.setWidget(widget)
        d.setFloating(floating)
        self.addDockWidget(area, d)
        self._vq_dockwidgets.append(d)
        self.restoreDockWidget(d)
        d.show()
        return d
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LI... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache... | ```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handlers for placement API.
Individual handlers are associated with URL paths in the
ROUTE_DECLARATIONS dictionary. At the top level each key is a Routes
compliant path. The value of that key is a dictionary mapping
individual HTTP request methods to a Python function representing a
simple WSGI application for satisfying that request.
The ``make_map`` method processes ROUTE_DECLARATIONS to create a
Routes.Mapper, including automatic handlers to respond with a
405 when a request is made against a valid URL with an invalid
method.
"""
import routes
import webob
from oslo_log import log as logging
from nova.api.openstack.placement import exception
from nova.api.openstack.placement.handlers import aggregate
from nova.api.openstack.placement.handlers import allocation
from nova.api.openstack.placement.handlers import allocation_candidate
from nova.api.openstack.placement.handlers import inventory
from nova.api.openstack.placement.handlers import resource_class
from nova.api.openstack.placement.handlers import resource_provider
from nova.api.openstack.placement.handlers import root
from nova.api.openstack.placement.handlers import trait
from nova.api.openstack.placement.handlers import usage
from nova.api.openstack.placement import policy
from nova.api.openstack.placement import util
from nova.i18n import _
LOG = logging.getLogger(__name__)
# URLs and Handlers
# NOTE(cdent): When adding URLs here, do not use regex patterns in
# the path parameters (e.g. {uuid:[0-9a-zA-Z-]+}) as that will lead
# to 404s that are controlled outside of the individual resources
# and thus do not include specific information on the why of the 404.
ROUTE_DECLARATIONS = {
'/': {
'GET': root.home,
},
# NOTE(cdent): This allows '/placement/' and '/placement' to
# both work as the root of the service, which we probably want
# for those situations where the service is mounted under a
# prefix (as it is in devstack). While weird, an empty string is
# a legit key in a dictionary and matches as desired in Routes.
'': {
'GET': root.home,
},
'/resource_classes': {
'GET': resource_class.list_resource_classes,
'POST': resource_class.create_resource_class
},
'/resource_classes/{name}': {
'GET': resource_class.get_resource_class,
'PUT': resource_class.update_resource_class,
'DELETE': resource_class.delete_resource_class,
},
'/resource_providers': {
'GET': resource_provider.list_resource_providers,
'POST': resource_provider.create_resource_provider
},
'/resource_providers/{uuid}': {
'GET': resource_provider.get_resource_provider,
'DELETE': resource_provider.delete_resource_provider,
'PUT': resource_provider.update_resource_provider
},
'/resource_providers/{uuid}/inventories': {
'GET': inventory.get_inventories,
'POST': inventory.create_inventory,
'PUT': inventory.set_inventories,
'DELETE': inventory.delete_inventories
},
'/resource_providers/{uuid}/inventories/{resource_class}': {
'GET': inventory.get_inventory,
'PUT': inventory.update_inventory,
'DELETE': inventory.delete_inventory
},
'/resource_providers/{uuid}/usages': {
'GET': usage.list_usages
},
'/resource_providers/{uuid}/aggregates': {
'GET': aggregate.get_aggregates,
'PUT': aggregate.set_aggregates
},
'/resource_providers/{uuid}/allocations': {
'GET': allocation.list_for_resource_provider,
},
'/allocations': {
'POST': allocation.set_allocations,
},
'/allocations/{consumer_uuid}': {
'GET': allocation.list_for_consumer,
'PUT': allocation.set_allocations_for_consumer,
'DELETE': allocation.delete_allocations,
},
'/allocation_candidates': {
'GET': allocation_candidate.list_allocation_candidates,
},
'/traits': {
'GET': trait.list_traits,
},
'/traits/{name}': {
'GET': trait.get_trait,
'PUT': trait.put_trait,
'DELETE': trait.delete_trait,
},
'/resource_providers/{uuid}/traits': {
'GET': trait.list_traits_for_resource_provider,
'PUT': trait.update_traits_for_resource_provider,
'DELETE': trait.delete_traits_for_resource_provider
},
'/usages': {
'GET': usage.get_total_usages,
},
}
def dispatch(environ, start_response, mapper):
    """Find a matching route for the current request.

    If no match is found, raise a 404 response.
    If there is a matching route, but no matching handler
    for the given method, raise a 405.
    """
    match = mapper.match(environ=environ)
    if match is None:
        raise webob.exc.HTTPNotFound(
            json_formatter=util.json_error_formatter)
    # A successful match always carries an 'action' entry.
    handler = match.pop('action')
    environ['wsgiorg.routing_args'] = ((), match)
    return handler(environ, start_response)
def handle_405(environ, start_response):
    """Return a 405 response when method is not allowed.

    If _methods are in routing_args, send an allow header listing
    the methods that are possible on the provided URL.

    Used as the fallback action connected by make_map for every route.
    """
    _methods = util.wsgi_path_item(environ, '_methods')
    headers = {}
    if _methods:
        # Ensure allow header is a python 2 or 3 native string (thus
        # not unicode in python 2 but stay a string in python 3)
        # In the process done by Routes to save the allowed methods
        # to its routing table they become unicode in py2.
        headers['allow'] = str(_methods)
    # Use Exception class as WSGI Application. We don't want to raise here.
    response = webob.exc.HTTPMethodNotAllowed(
        _('The method specified is not allowed for this resource.'),
        headers=headers, json_formatter=util.json_error_formatter)
    return response(environ, start_response)
def make_map(declarations):
    """Process route declarations to create a Route Mapper."""
    mapper = routes.Mapper()
    for route, targets in declarations.items():
        methods = []
        # One connection per allowed method on this route
        for method, target in targets.items():
            mapper.connect(route, action=target,
                           conditions=dict(method=[method]))
            methods.append(method)
        # Fallback: any other method on this route gets a 405 that
        # advertises the allowed methods.
        mapper.connect(route, action=handle_405, _methods=', '.join(methods))
    return mapper
class PlacementHandler(object):
    """Serve Placement API.

    Dispatch to handlers defined in ROUTE_DECLARATIONS.
    """

    def __init__(self, **local_config):
        # NOTE(cdent): Local config currently unused.
        self._map = make_map(ROUTE_DECLARATIONS)

    def __call__(self, environ, start_response):
        # All requests but '/' require admin.
        if environ['PATH_INFO'] != '/':
            context = environ['placement.context']
            # TODO(cdent): Using is_admin everywhere (except /) is
            # insufficiently flexible for future use case but is
            # convenient for initial exploration.
            if not policy.placement_authorize(context, 'placement'):
                raise webob.exc.HTTPForbidden(
                    _('admin required'),
                    json_formatter=util.json_error_formatter)
        # Check that an incoming request with a content-length header
        # that is an integer > 0 and not empty, also has a content-type
        # header that is not empty. If not raise a 400.
        clen = environ.get('CONTENT_LENGTH')
        try:
            if clen and (int(clen) > 0) and not environ.get('CONTENT_TYPE'):
                raise webob.exc.HTTPBadRequest(
                    _('content-type header required when content-length > 0'),
                    json_formatter=util.json_error_formatter)
        except ValueError as exc:
            # int(clen) failed: header present but not numeric.  (`exc` is
            # intentionally unused; the cause is not chained into the 400.)
            raise webob.exc.HTTPBadRequest(
                _('content-length header must be an integer'),
                json_formatter=util.json_error_formatter)
        try:
            return dispatch(environ, start_response, self._map)
        # Trap the NotFound exceptions raised by the objects used
        # with the API and transform them into webob.exc.HTTPNotFound.
        except exception.NotFound as exc:
            raise webob.exc.HTTPNotFound(
                exc, json_formatter=util.json_error_formatter)
        # Remaining uncaught exceptions will rise first to the Microversion
        # middleware, where any WebOb generated exceptions will be caught and
        # transformed into legit HTTP error responses (with microversion
        # headers added), and then to the FaultWrapper middleware which will
        # catch anything else and transform them into 500 responses.
        # NOTE(cdent): There should be very few uncaught exceptions which are
        # not WebOb exceptions at this stage as the handlers are contained by
        # the wsgify decorator which will transform those exceptions to
        # responses itself.
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n\"\"\"\nTask performed by user in django view\n\"\"\"\nimport functools\n\nfrom django.core.urlresolvers import reverse\nfrom django.conf.urls import url\nfrom django.shortcuts import get_object_or_404\n\nfrom ..activation import Activ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n\"\"\"\nTask performed by user in django view\n\"\"\"\nimport functools\n\nfrom django.core.urlresolvers import reverse\nfrom django.conf.urls import url\nfrom django.shortcuts import get_object_or_404\n\nfrom ..activat... | ```python
"""
Task performed by user in django view
"""
import functools
from django.core.urlresolvers import reverse
from django.conf.urls import url
from django.shortcuts import get_object_or_404
from ..activation import Activation, ViewActivation, STATUS
from ..exceptions import FlowRuntimeError
from . import base
def flow_view(**lock_args):
    """
    Decorator that locks and runs the flow view in transaction.

    Expects view with the signature `(request, activation, **kwargs)`
    or CBV view that implements TaskActivation, in this case, dispatch
    with would be called with `(request, **kwargs)`

    Returns `(request, flow_task, process_pk, task_pk, **kwargs)`
    """
    class flow_view_decorator(object):
        def __init__(self, func, activation=None):
            self.func = func
            self.activation = activation
            functools.update_wrapper(self, func)

        def __call__(self, request, flow_cls, flow_task, process_pk, task_pk, **kwargs):
            # Serialize access to the process while the view executes
            lock = flow_task.flow_cls.lock_impl(flow_task.flow_cls.instance, **lock_args)
            with lock(flow_task.flow_cls, process_pk):
                task = get_object_or_404(flow_task.flow_cls.task_cls._default_manager, pk=task_pk)

                if self.activation:
                    # Class-based view that implements the TaskActivation
                    # interface: it manages its own activation, so none is
                    # passed in.
                    self.activation.initialize(flow_task, task)
                    return self.func(request, **kwargs)
                else:
                    # Function-based view or CBV without TaskActivation
                    # interface implementation: create the activation and
                    # pass it explicitly.
                    activation = flow_task.activation_cls()
                    activation.initialize(flow_task, task)
                    return self.func(request, activation, **kwargs)

        def __get__(self, instance, instancetype):
            """
            If we decorate method on CBV that implements StartActivation interface,
            no custom activation is required.
            """
            if instance is None:
                return self

            # Fix: bind against the actual owner class (`instancetype`)
            # instead of the builtin `type` that was passed previously.
            # Harmless for plain functions, but wrong for any descriptor
            # that inspects the owner argument.
            func = self.func.__get__(instance, instancetype)
            activation = instance if isinstance(instance, ViewActivation) else None

            return self.__class__(func, activation=activation)

    return flow_view_decorator
class ManagedViewActivation(ViewActivation):
    """
    Tracks task statistics in activation form
    """
    # Optional per-instance override of the flow-level management form class
    management_form_cls = None

    def __init__(self, **kwargs):
        # NOTE(review): kwargs is forwarded to the base class *before*
        # 'management_form_cls' is popped, so the base __init__ presumably
        # tolerates the extra keyword -- confirm before reordering.
        super(ManagedViewActivation, self).__init__(**kwargs)
        self.management_form = None
        self.management_form_cls = kwargs.pop('management_form_cls', None)

    def get_management_form_cls(self):
        # Instance/class override wins; otherwise fall back to the flow class
        if self.management_form_cls:
            return self.management_form_cls
        else:
            return self.flow_cls.management_form_cls

    @Activation.status.super()
    def prepare(self, data=None, user=None):
        """Prepare the task for execution and bind the management form.

        @raise FlowRuntimeError: if submitted management-form data is invalid
        """
        super(ManagedViewActivation, self).prepare.original()
        if user:
            self.task.owner = user

        management_form_cls = self.get_management_form_cls()
        self.management_form = management_form_cls(data=data, instance=self.task)

        if data:
            if not self.management_form.is_valid():
                raise FlowRuntimeError('Activation metadata is broken {}'.format(self.management_form.errors))
            # Adopt the form-updated (unsaved) task instance
            self.task = self.management_form.save(commit=False)

    def has_perm(self, user):
        # Permission check is delegated to the flow task definition
        return self.flow_task.can_execute(user, self.task)

    @classmethod
    def create_task(cls, flow_task, prev_activation, token):
        """Create a task instance, pre-assigning permission and owner."""
        task = ViewActivation.create_task(flow_task, prev_activation, token)

        # Try to assign permission
        owner_permission = flow_task.calc_owner_permission(task)
        if owner_permission:
            task.owner_permission = owner_permission

        # Try to assign owner
        owner = flow_task.calc_owner(task)
        if owner:
            task.owner = owner
            task.status = STATUS.ASSIGNED

        return task
class BaseView(base.TaskDescriptionViewMixin,
               base.NextNodeMixin,
               base.Task,
               base.ViewArgsMixin):
    """
    Base class for ViewTasks
    """
    task_type = 'HUMAN'
    activation_cls = ManagedViewActivation

    def __init__(self, view_or_cls, **kwargs):
        """
        Accepts view callable or CBV View class with view kwargs,
        if CBV view implements ViewActivation, it used as activation_cls
        """
        self._view, self._view_cls, self._view_args = None, None, None

        if isinstance(view_or_cls, type):
            self._view_cls = view_or_cls

            if issubclass(view_or_cls, ViewActivation):
                kwargs.setdefault('activation_cls', view_or_cls)
        else:
            self._view = view_or_cls

        super(BaseView, self).__init__(view_or_cls=view_or_cls, **kwargs)

    @property
    def view(self):
        # Lazily instantiate the CBV; assumes ViewArgsMixin populated
        # self._view_args when a class was supplied -- TODO confirm
        if not self._view:
            self._view = self._view_cls.as_view(**self._view_args)
        return self._view

    def urls(self):
        # One URL per task instance: /<process_pk>/<name>/<task_pk>/
        return [url(r'^(?P<process_pk>\d+)/{}/(?P<task_pk>\d+)/$'.format(self.name),
                    self.view, {'flow_task': self}, name=self.name)]
class View(base.PermissionMixin,
base.UndoViewMixin,
base.CancelViewMixin,
base.DetailsViewMixin,
base.ActivateNextMixin,
BaseView):
"""
View task
Example::
task = flow.View(some_view) \\
.Permission('my_app.can_do_task') \\
.Next(this.next_task)
In case of function based view::
task = flow.Task(task)
@flow_start_view()
def task(request, activation):
if not activation.flow_task.has_perm(request.user):
raise PermissionDenied
activation.prepare(request.POST or None)
form = SomeForm(request.POST or None)
if form.is_valid():
form.save()
activation.done()
return redirect('/')
return render(request, {'activation': activation, 'form': form})
Ensure to include `{{ activation.management_form }}` inside template, to proper
track when task was started and other task performance statistics::
<form method="POST">
{{ form }}
{{ activation.management_form }}
<button type="submit"/>
</form>
"""
def __init__(self, *args, **kwargs):
self._assign_view = kwargs.pop('assign_view', None)
super(View, self).__init__(*args, **kwargs)
def Assign(self, owner=None, **owner_kwargs):
"""
Assign task to the User immediately on activation,
accepts user lookup kwargs or callable :: Process -> User::
.Assign(username='employee')
.Assign(lambda process: process.created_by)
"""
if owner:
self._owner = owner
else:
self._owner = owner_kwargs
return self
@property
def assign_view(self):
from viewflow.views import AssignView
return self._assign_view if self._assign_view else AssignView.as_view()
def urls(self):
urls = super(View, self).urls()
urls.append(url(r'^(?P<process_pk>\d+)/{}/(?P<task_pk>\d+)/assign/$'.format(self.name),
self.assign_view, {'flow_task': self}, name="{}__assign".format(self.name)))
return urls
def get_task_url(self, task, url_type, **kwargs):
user = kwargs.get('user', None)
# assign
if url_type in ['assign', 'guess']:
if task.status == STATUS.NEW and self.can_assign(user, task):
url_name = '{}:{}__assign'.format(self.flow_cls.instance.namespace, self.name)
return reverse(url_name, kwargs={'process_pk': task.process_id, 'task_pk': task.pk})
# execute
if url_type in ['execute', 'guess']:
if task.status == STATUS.ASSIGNED and self.can_execute(user, task):
url_name = '{}:{}'.format(self.flow_cls.instance.namespace, self.name)
return reverse(url_name, kwargs={'process_pk': task.process_id, 'task_pk': task.pk})
return super(View, self).get_task_url(task, url_type, **kwargs)
def calc_owner(self, task):
from django.contrib.auth import get_user_model
owner = self._owner
if callable(owner):
owner = owner(task.process)
elif isinstance(owner, dict):
owner = get_user_model() ._default_manager.get(**owner)
return owner
def calc_owner_permission(self, task):
owner_permission = self._owner_permission
if callable(owner_permission):
owner_permission = owner_permission(task.process)
return owner_permission
def can_assign(self, user, task):
if task.owner_id:
return False
if user.is_anonymous():
return False
if not task.owner_permission:
"""
Available for everyone
"""
return True
obj = None
if self._owner_permission_obj:
if callable(self._owner_permission_obj):
obj = self._owner_permission_obj(task.process)
else:
obj = self._owner_permission_obj
return user.has_perm(task.owner_permission, obj=obj)
def can_execute(self, user, task):
if task.owner_permission is None and task.owner is None:
return True
return task.owner == user
``` |
[
{
"content": "Here is the code content:\n```python\n#####################################################################\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\tFile: qbo.py\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\tDeveloper: Justin Leto\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\tqbo cl... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n#####################################################################\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\tFile: qbo.py\t\t\t\t\t\t\t\t\t\t\t\t\t#\r\n#\tDeveloper: Justin Leto\t\t\t\t\t\t\t\t\t\t\t#\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t... | ```python
#####################################################################
# #
# File: qbo.py #
# Developer: Justin Leto #
# #
# qbo class provides an interface from main csv iterator method #
# to handle qbo formatting, validations, and writing to file. #
# #
# Usage: python csvtoqbo.py <options> <csvfiles> #
# #
#####################################################################
import sys, traceback
import os
from datetime import datetime
import logging
import qboconst
class qbo:
# Holds a list of valid transactions via the addTransaction() method
__transactions = list()
# The full QBO document build from constants and transactions
__document = None
# Flag indicating whether the QBO document is valid
__isValid = None
# constructor
def __init__(self):
# Reads in constant values from file, set to private (const) variables
self.__HEADER = qboconst.HEADER
self.__FOOTER = qboconst.FOOTER
self.__DATE_START = qboconst.DATE_START
self.__DATE_END = qboconst.DATE_END
self.__BANKTRANLIST_START = qboconst.BANKTRANLIST_START
self.__BANKTRANLIST_END = qboconst.BANKTRANLIST_END
self.__TRANSACTION_START = qboconst.TRANSACTION_START
self.__TRANSACTION_END = qboconst.TRANSACTION_END
# Set document to valid
self.__isValid = True
# PUBLIC GET METHODS for constant values - used in unit testing.
#
#
def getHEADER(self):
return self.__HEADER
def getFOOTER(self):
return self.__FOOTER
def getDATE_START(self):
return self.__DATE_START
def getDATE_END(self):
return self.__DATE_END
def getBANKTRANLIST_START(self):
return self.__BANKTRANLIST_START
def getBANKTRANLIST_END(self):
return self.__BANKTRANLIST_END
def getTRANSACTION_START(self):
return self.__TRANSACTION_START
def getTRANSACTION_END(self):
return self.__TRANSACTION_END
# method to validate paramters used to submit transactions
def validateTransaction(self, status, date_posted, txn_type, to_from_flag, txn_amount, txn_exrate, name):
# if str.lower(status) != 'completed':
# #log status failure
# logging.info("Transaction status [" + status + "] invalid.")
# raise Exception("Transaction status [" + status + "] invalid.")
#
#if type(datetime.strptime(str(date_posted), '%m/%d/%Y')) is not datetime:
# logging.info("Transaction posted date [" + date_posted + "] invalid.")
# raise Exception("Transaction posted date [" + date_posted + "] invalid.")
# if str.lower(txn_type) not in ('payment','refund','withdrawal', 'withdraw funds', 'send', 'receive'):
# logging.info("Transaction type [" + str(txn_type) + "] not 'Payment', 'Refund', 'Withdraw Funds', or 'Withdrawal'.")
# raise Exception("Transaction type [" + str(txn_type) + "] not 'Payment', 'Refund', 'Withdraw Funds', or 'Withdrawal'.")
#
# if str.lower(to_from_flag) not in ('to', 'from'):
# logging.info("Transaction 'To/From' field [" + to_from_flag + "] invalid.")
# raise Exception("Transaction 'To/From' field [" + to_from_flag + "] invalid.")
#
# #logical test of txn_type and to_from_flag
# if ((str.lower(txn_type) == 'refund' and str.lower(to_from_flag) != 'to') or (str.lower(txn_type) == 'payment' and str.lower(to_from_flag) != 'from')):
# logging.info("Transaction type inconsistent with 'To/From' field.")
# raise Exception("Transaction type inconsistent with 'To/From' field.")
#
if len(name) == 0 or not name:
logging.info("Transaction name empty or null.")
raise Exception("Transaction name empty or null.")
return True
# Add transaction takes in param values uses the required formatting QBO transactions
# and pushes to list
def addTransaction(self, denom, date_posted, txn_memo, txn_id, txn_amount, txn_curamt, txn_category, name):
# try:
# # Validating param values prior to committing transaction
# self.validateTransaction(status, date_posted, txn_type, txn_id, txn_amount, name)
# except:
# raise Exception
# Construct QBO formatted transaction
transaction = ""
day = ""
month = ""
date_array = date_posted.split('-')
day = date_array[2]
month = date_array[1]
year = date_array[0]
if len(day) == 1:
day = "0"+day
if len(month) ==1:
month = "0"+month
rec_date = datetime.strptime(year+"/"+month+"/"+day, '%Y/%m/%d')
rec_date = rec_date.strftime('%Y%m%d%H%M%S') + '.000'
dtposted = ' <DTPOSTED>' + rec_date
if float(txn_amount) > 0:
trtype = ' <TRNTYPE>CREDIT'
else:
trtype = ' <TRNTYPE>DEBIT'
#
# if str.lower(txn_type) == 'receive':
# trtype = '<TRNTYPE>CREDIT'
# elif str.lower(txn_type) == 'send':
# trtype = '<TRNTYPE>DEBIT'
# if str.lower(txn_type) in ('refund', 'withdrawal', 'withdraw funds'):
# tramt = '<TRNAMT>-' + str(txn_amount).replace('$','')
# else:
# tramt = '<TRNAMT>' + str(txn_amount).replace('$','')
tramtbits = float(txn_amount) * denom
tramt = ' <TRNAMT>' + str(tramtbits)
if name:
trname = ' <NAME>' + str(name) + "\n"
else:
trname = ''
exrate = float(txn_curamt) / (tramtbits)
curamt = "{0:0.2f}".format(abs(float(txn_curamt)))
fmtexrate = "{0:0.6f}".format(float(exrate))
rawmemo = 'Rate=' + fmtexrate + " USD=" + curamt + " category=\"" + str(txn_category) + "\" memo=\"" + str(txn_memo)
memo = ' <MEMO>' + rawmemo[:253] + "\"\n"
fitid = ' <FITID>' + str(txn_id)
exrate = ' <CURRATE>' + fmtexrate
transaction = ("" + self.__TRANSACTION_START + "\n"
"" + trtype + "\n"
"" + dtposted + "\n"
"" + tramt + "\n"
"" + fitid + "\n"
"" + trname +
"" + memo +
"" + " <CURRENCY>" + "\n"
"" + exrate + "\n"
"" + " <CURSYM>USD" + "\n"
"" + " </CURRENCY>" + "\n"
"" + self.__TRANSACTION_END + "\n")
# Commit transaction to the document by adding to private member list object
self.__transactions.append(transaction)
logging.info("Transaction [" + str(self.getCount()) + "] Accepted.")
return True
# get the current number of valid committed transactions
def getCount(self):
return len(self.__transactions)
# get the valid status of the document
def isValid(self):
# If number of valid transactions are 0 document is invalid
if self.getCount() == 0:
self.__isValid = False
return self.__isValid
# get the text of the document
def getDocument(self):
self.Build()
return self.__document
# Construct the document, add the transactions
# save str into private member variable __document
def Build(self):
if not self.isValid():
logging.info("Error: QBO document is not valid.")
raise Exception("Error: QBO document is not valid.")
self.__document = ("" + self.__HEADER + "\n"
"" + self.__BANKTRANLIST_START + "\n"
"" + self.__DATE_START + "\n"
"" + self.__DATE_END + "\n")
for txn in self.__transactions:
self.__document = self.__document + str(txn)
self.__document = self.__document + ("" + self.__BANKTRANLIST_END + "\n"
"" + self.__FOOTER + "")
# Write QBO document to file
def Write(self, filename):
try:
with open(filename, 'w') as f:
# getDocument method will build document
# test for validity and return string for write
f.write(self.getDocument())
return True
except:
#log io error return False
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
print(''.join('!! ' + line for line in lines))
logging.info('qbo.Write() method: '.join('!! ' + line for line in lines))
return False
``` |
[
{
"content": "```python\nimport numpy as np\ntry:\n import netCDF4 as netCDF\nexcept:\n import netCDF3 as netCDF\nimport pyroms\n\n\ndef remap(src_array, remap_file, src_grad1=None, src_grad2=None, \\\n src_grad3=None, spval=1e37, verbose=False):\n '''\n remap based on addresses and weig... | [
{
"content": "<|memory_start|>```python\nimport numpy as np\ntry:\n import netCDF4 as netCDF\nexcept:\n import netCDF3 as netCDF\nimport pyroms\n\n\ndef remap(src_array, remap_file, src_grad1=None, src_grad2=None, \\\n src_grad3=None, spval=1e37, verbose=False):\n '''\n remap based on ad... | ```python
import numpy as np
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
def remap(src_array, remap_file, src_grad1=None, src_grad2=None, \
src_grad3=None, spval=1e37, verbose=False):
'''
remap based on addresses and weights computed in a setup phase
'''
# get info from remap_file
data = netCDF.Dataset(remap_file, 'r')
title = data.title
map_method = data.map_method
normalization = data.normalization
src_grid_name = data.source_grid
dst_grid_name = data.dest_grid
src_grid_size = len(data.dimensions['src_grid_size'])
dst_grid_size = len(data.dimensions['dst_grid_size'])
num_links = len(data.dimensions['num_links'])
src_grid_dims = data.variables['src_grid_dims']
dst_grid_dims = data.variables['dst_grid_dims']
# get weights and addresses from remap_file
map_wts = data.variables['remap_matrix'][:]
dst_add = data.variables['dst_address'][:]
src_add = data.variables['src_address'][:]
# get destination mask
dst_mask = data.variables['dst_grid_imask'][:]
# remap from src grid to dst grid
if src_grad1 is not None:
iorder = 2
else:
iorder = 1
if verbose is True:
print 'Reading remapping: ', title
print 'From file: ', remap_file
print ' '
print 'Remapping between:'
print src_grid_name
print 'and'
print dst_grid_name
print 'Remapping method: ', map_method
ndim = len(src_array.squeeze().shape)
if (ndim == 2):
tmp_dst_array = np.zeros((dst_grid_size))
tmp_src_array = src_array.flatten()
if iorder == 1:
# first order remapping
# insure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0] = map_wts[:,0].copy()
map_wts = tmp_map_wts
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array)
if iorder == 2:
# second order remapping
if map_method == 'conservative':
# insure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0:2] = map_wts[:,0:2].copy()
map_wts = tmp_map_wts
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2)
elif map_method == 'bicubic':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
tmp_src_grad3 = src_grad3.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2, \
tmp_src_grad3)
else:
raise ValueError, 'Unknow method'
# mask dst_array
idx = np.where(dst_mask == 0)
tmp_dst_array[idx] = spval
tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
# reshape
dst_array = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
dst_grid_dims[0]))
elif (ndim == 3):
nlev = src_array.shape[0]
dst_array = np.zeros((nlev, dst_grid_dims[1], dst_grid_dims[0]))
# loop over vertical level
for k in range(nlev):
tmp_src_array = src_array[k,:,:].flatten()
tmp_dst_array = np.zeros((dst_grid_size))
if iorder == 1:
# first order remapping
# insure that map_wts is a (num_links,4) array
tmp_map_wts = np.zeros((num_links,4))
tmp_map_wts[:,0] = map_wts[:,0].copy()
map_wts = tmp_map_wts
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array)
if iorder == 2:
# second order remapping
if map_method == 'conservative':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2)
elif map_method == 'bicubic':
tmp_src_grad1 = src_grad1.flatten()
tmp_src_grad2 = src_grad2.flatten()
tmp_src_grad3 = src_grad3.flatten()
pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
dst_add, src_add, tmp_src_array, \
tmp_src_grad1, tmp_src_grad2, \
tmp_src_grad3)
else:
raise ValueError, 'Unknow method'
# mask dst_array
idx = np.where(dst_mask == 0)
tmp_dst_array[idx] = spval
tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
# reshape
dst_array[k,:,:] = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
dst_grid_dims[0]))
else:
raise ValueError, 'src_array must have two or three dimensions'
# close data file
data.close()
return dst_array
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy o... | ```python
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pose representation training with TFRecord inputs."""
from absl import app
from absl import flags
import tensorflow as tf
from poem.core import common
from poem.core import input_generator
from poem.core import keypoint_profiles
from poem.core import tfe_input_layer
from poem.cv_mim import train_base
FLAGS = flags.FLAGS
flags.adopt_module_key_flags(train_base)
def main(_):
train_base.run(
input_dataset_class=tf.data.TFRecordDataset,
common_module=common,
keypoint_profiles_module=keypoint_profiles,
input_example_parser_creator=tfe_input_layer.create_tfe_parser,
keypoint_preprocessor_3d=input_generator.preprocess_keypoints_3d)
if __name__ == '__main__':
app.run(main)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n#!/usr/bin/python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#!/usr/bin/python3\n# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the... | ```python
#!/usr/bin/python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert the Natural Questions dataset into SQuAD JSON format.
To use this utility, first follow the directions at the URL below to download
the complete training dataset.
https://ai.google.com/research/NaturalQuestions/download
Next, run this program, specifying the data you wish to convert. For instance,
the invocation:
python nq_to_squad.py\
--data_pattern=/usr/local/data/tnq/v1.0/train/*.gz\
--output_file=/usr/local/data/tnq/v1.0/train.json
will process all training data and write the results into `train.json`. This
file can, in turn, be provided to squad_eval.py using the --squad argument.
"""
import argparse
import glob
import gzip
import json
import logging
import os
import re
def clean_text(start_token, end_token, doc_tokens, doc_bytes,
ignore_final_whitespace=True):
"""Remove HTML tags from a text span and reconstruct proper spacing."""
text = ""
for index in range(start_token, end_token):
token = doc_tokens[index]
if token["html_token"]:
continue
text += token["token"]
# Add a single space between two tokens iff there is at least one
# whitespace character between them (outside of an HTML tag). For example:
#
# token1 token2 ==> Add space.
# token1</B> <B>token2 ==> Add space.
# token1</A>token2 ==> No space.
# token1<A href="..." title="...">token2 ==> No space.
# token1<SUP>2</SUP>token2 ==> No space.
next_token = token
last_index = end_token if ignore_final_whitespace else end_token + 1
for next_token in doc_tokens[index + 1:last_index]:
if not next_token["html_token"]:
break
chars = (doc_bytes[token["end_byte"]:next_token["start_byte"]]
.decode("utf-8"))
# Since some HTML tags are missing from the token list, we count '<' and
# '>' to detect if we're inside a tag.
unclosed_brackets = 0
for char in chars:
if char == "<":
unclosed_brackets += 1
elif char == ">":
unclosed_brackets -= 1
elif unclosed_brackets == 0 and re.match(r"\s", char):
# Add a single space after this token.
text += " "
break
return text
def nq_to_squad(record):
"""Convert a Natural Questions record to SQuAD format."""
doc_bytes = record["document_html"].encode("utf-8")
doc_tokens = record["document_tokens"]
# NQ training data has one annotation per JSON record.
annotation = record["annotations"][0]
short_answers = annotation["short_answers"]
# Skip examples that don't have exactly one short answer.
# Note: Consider including multi-span short answers.
if len(short_answers) != 1:
return
short_answer = short_answers[0]
long_answer = annotation["long_answer"]
# Skip examples where annotator found no long answer.
if long_answer["start_token"] == -1:
return
# Skip examples corresponding to HTML blocks other than <P>.
long_answer_html_tag = doc_tokens[long_answer["start_token"]]["token"]
if long_answer_html_tag != "<P>":
return
paragraph = clean_text(
long_answer["start_token"], long_answer["end_token"], doc_tokens,
doc_bytes)
answer = clean_text(
short_answer["start_token"], short_answer["end_token"], doc_tokens,
doc_bytes)
before_answer = clean_text(
long_answer["start_token"], short_answer["start_token"], doc_tokens,
doc_bytes, ignore_final_whitespace=False)
return {"title": record["document_title"],
"paragraphs":
[{"context": paragraph,
"qas": [{"answers": [{"answer_start": len(before_answer),
"text": answer}],
"id": record["example_id"],
"question": record["question_text"]}]}]}
def main():
parser = argparse.ArgumentParser(
description="Convert the Natural Questions to SQuAD JSON format.")
parser.add_argument("--data_pattern", dest="data_pattern",
help=("A file pattern to match the Natural Questions "
"dataset."),
metavar="PATTERN", required=True)
parser.add_argument("--version", dest="version",
help="The version label in the output file.",
metavar="LABEL", default="nq-train")
parser.add_argument("--output_file", dest="output_file",
help="The name of the SQuAD JSON formatted output file.",
metavar="FILE", default="nq_as_squad.json")
args = parser.parse_args()
root = logging.getLogger()
root.setLevel(logging.DEBUG)
records = 0
nq_as_squad = {"version": args.version, "data": []}
for file in sorted(glob.iglob(args.data_pattern)):
logging.info("opening %s", file)
with gzip.GzipFile(file, "r") as f:
for line in f:
records += 1
nq_record = json.loads(line)
squad_record = nq_to_squad(nq_record)
if squad_record:
nq_as_squad["data"].append(squad_record)
if records % 1000 == 0:
logging.info("processed %s records", records)
print("Converted %s NQ records into %s SQuAD records." %
(records, len(nq_as_squad["data"])))
with open(args.output_file, "w") as f:
json.dump(nq_as_squad, f)
if __name__ == "__main__":
main()
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n# coding: utf-8\n# This file is part of Thomas Aquinas.\n#\n# Thomas Aquinas is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the ... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n# coding: utf-8\n# This file is part of Thomas Aquinas.\n#\n# Thomas Aquinas is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as publ... | ```python
# coding: utf-8
# This file is part of Thomas Aquinas.
#
# Thomas Aquinas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Thomas Aquinas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Thomas Aquinas. If not, see <http://www.gnu.org/licenses/>.
#
# veni, Sancte Spiritus.
from summa import summanode
from nose.tools import ok_, eq_, raises
def test_summanode_add():
summadad = summanode.SummaNode()
summachild = summanode.SummaNode()
summadad.add(summachild, name="summachild")
ok_(summadad.get_children())
def test_summanode_get():
summadad = summanode.SummaNode()
summachild = summanode.SummaNode()
summadad.add(summachild, name="summachild")
summaget = summadad.get("summachild")
ok_(isinstance(summaget, summanode.SummaNode), ("El objeto retornado no es "
"instancia de SummaNode"))
def test_summanode_do():
from summa.actions.interval_actions import Jump
summachild = summanode.SummaNode()
saltar = Jump(50, 200, 5, 10)
summachild.do(saltar)
def test_summanode_remove():
summadad = summanode.SummaNode()
summachilda = summanode.SummaNode()
summachildb = summanode.SummaNode()
summadad.add(summachilda, name="summachilda")
summadad.add(summachildb)
# remove by name
summadad.remove(summachilda)
# remove by object
summadad.remove(summachildb)
def test_summanode_stop():
from summa.actions.interval_actions import Jump
summadad = summanode.SummaNode()
saltar = Jump(50, 200, 5, 10)
summadad.do(saltar)
summadad.stop()
``` |
[
{
"content": "```python\n\"\"\"\n\n\"\"\"\nfrom io import BytesIO\nfrom itertools import count\nimport tarfile\nfrom time import time, sleep\n\nfrom click import progressbar\nfrom logbook import Logger\nimport pandas as pd\nimport requests\nfrom six.moves.urllib.parse import urlencode\nfrom boto import connect_... | [
{
"content": "<|memory_start|>```python\n\"\"\"\n\n\"\"\"\nfrom io import BytesIO\nfrom itertools import count\nimport tarfile\nfrom time import time, sleep\n\nfrom click import progressbar\nfrom logbook import Logger\nimport pandas as pd\nimport requests\nfrom six.moves.urllib.parse import urlencode\nfrom boto... | ```python
"""
"""
from io import BytesIO
from itertools import count
import tarfile
from time import time, sleep
from click import progressbar
from logbook import Logger
import pandas as pd
import requests
from six.moves.urllib.parse import urlencode
from boto import connect_s3
import tarfile
from . import core as bundles
from zipline.utils.cli import maybe_show_progress
log = Logger(__name__)
seconds_per_call = (pd.Timedelta('10 minutes') / 2000).total_seconds()
# Invalid symbols that quandl has had in its metadata:
excluded_symbols = frozenset({'TEST123456789'})
ACCESS_KEY = 'AKIAJHRHBH4FEBEGWPWA'
SECRET_KEY = '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'
BUCKET_NAME = 'zipline-store'
@bundles.register('quandl-xjpx', create_writers=False)
def quandl_xjpx_bundle(environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir):
conn = connect_s3(ACCESS_KEY, SECRET_KEY)
bucket = conn.get_bucket(BUCKET_NAME)
file_key = bucket.get_key('quandl-xjpx.tar')
bs = BytesIO()
file_key.get_contents_to_file(bs)
bs.seek(0)
with tarfile.open('r', fileobj=bs) as tar:
tar.extractall(output_dir)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n#!/usr/bin/env python\n\nimport requests\nimport json\nimport os\nfrom fabric.colors import red, green, blue, cyan, magenta\nfrom time import sleep\n\napi_key = 'be25c8a17fdcd031f54dd3417ee4d083'\napi_base_url = 'http:... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n#!/usr/bin/env python\n\nimport requests\nimport json\nimport os\nfrom fabric.colors import red, green, blue, cyan, magenta\nfrom time import sleep\n\napi_key = 'be25c8a17fdcd031f54dd3417ee4d083'\napi_b... | ```python
#!/usr/bin/env python
import requests
import json
import os
from fabric.colors import red, green, blue, cyan, magenta
from time import sleep
api_key = 'be25c8a17fdcd031f54dd3417ee4d083'
api_base_url = 'http://api.themoviedb.org/3'
def pretty(data):
print ""
print json.dumps(data, sort_keys=True, indent=2)
print ""
def keyval(key, val):
print(cyan(key + ': ') + green(val))
def pretty_keys(source, keys):
for key in keys:
keyval(key, source.get(key))
def pretty_flat_keys(source, keys):
ret = ""
for key in keys:
ret = ret + ""
def getJSON(endpoint):
sleep(0.25)
return requests.get(api_base_url + endpoint + '?api_key=' + api_key).json()
def header(text):
print ""
print blue(text.upper().center(140, '='))
def subheader(text):
print ""
print magenta(text.upper().center(140, '-'))
def accent(text):
print red(text.upper().center(40))
# Util Methods
def readJSONFile(path):
print blue(" > Reading JSON File: {0}".format(path))
with open(path) as data_file:
return json.load(data_file)
def writeJSONFile(path, data):
print blue(" > Writing JSON File: {0}".format(path))
with open(path, 'w+') as outfile:
json.dump(data, outfile, sort_keys=True, indent=2)
# Cached API Methods
def getObjectByID(type, id, endpoint):
type_map = {
'episode': 'episodes',
'person': 'people',
'season': 'seasons',
'show': 'shows'
}
cache_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/{0}/{1}.json'.format(type_map.get(type), str(id))))
# print "OBJECT FILE PATH: {0}".format(cache_path)
if os.path.exists(cache_path):
return readJSONFile(cache_path)
else:
object_data = getJSON(endpoint)
writeJSONFile(cache_path, object_data)
return object_data
def getShow(id):
return getObjectByID('show', id, '/tv/{0}'.format(id))
def getSeason(show_id, season_number):
return getObjectByID('season', '{0}-{1}'.format(show_id, season_number), '/tv/{0}/season/{1}'.format(show_id, season_number))
def getEpisode(show_id, season_number, episode_number):
return getObjectByID('episode', '{0}-{1}-{2}'.format(show_id, season_number, episode_number), '/tv/{0}/season/{1}/episode/{2}'.format(show_id, season_number, episode_number))
def getPerson(id):
return getObjectByID('person', id, '/person/{0}'.format(id))
# show = getJSON('/tv/95')
# show = getObjectByID('show', 95)
# CURRENT_SHOW_ID = 95 # BUFFY
CURRENT_SHOW_ID = 2426 # ANGEL
show = getShow(CURRENT_SHOW_ID)
pretty_keys(show, ['name', 'first_air_date', 'number_of_seasons', 'number_of_episodes', 'overview'])
seasons = show.get('seasons', {})
for season in seasons:
season_id = season.get('id')
season_number = season.get('season_number')
header('season ' + str(season_number))
season_data = getSeason(CURRENT_SHOW_ID, season_number)
episodes = season_data.get('episodes', [])
keyval('Episode Count', len(episodes))
for episode in episodes:
subheader('episode ' + str(episode.get('episode_number')))
pretty_keys(episode, ['id', 'name'])
episode_number = episode.get('episode_number')
episode_data = getEpisode(CURRENT_SHOW_ID, season_number, episode_number)
for person in episode.get('crew', []):
getPerson(person.get('id'))
for person in episode.get('guest_stars', []):
getPerson(person.get('id'))
# crew = episode.get('crew', [])
# cast = episode.get('guest_stars', [])
# accent('crew')
# for crew_member in crew:
# keyval(crew_member.get('job'), crew_member.get('name'))
# accent('cast')
# for cast_member in cast:
# pretty_keys(cast_member, ['name'])
# pretty(season_data)
# break
# pretty(raw)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nTransfer function with derivatives\n\n:Example:\n >>> import numpy as np\n >>> f = TanSig()\n >>> x = np.linspace(-5,5,100)\n >>> y = f(x)\n >>> df_on_dy = f.deriv(x, y)... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nTransfer function with derivatives\n\n:Example:\n >>> import numpy as np\n >>> f = TanSig()\n >>> x = np.linspace(-5,5,100)\n >>> y = f(x)\n >>> df_on_dy... | ```python
# -*- coding: utf-8 -*-
"""
Transfer function with derivatives
:Example:
>>> import numpy as np
>>> f = TanSig()
>>> x = np.linspace(-5,5,100)
>>> y = f(x)
>>> df_on_dy = f.deriv(x, y) # calc derivative
>>> f.out_minmax # list output range [min, max]
[-1, 1]
>>> f.inp_active # list input active range [min, max]
[-2, 2]
"""
import numpy as np
class TanSig:
"""
Hyperbolic tangent sigmoid transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
The corresponding hyperbolic tangent values.
:Example:
>>> f = TanSig()
>>> f([-np.Inf, 0.0, np.Inf])
array([-1., 0., 1.])
"""
# output range
out_minmax = [-1, 1]
# input active range
inp_active = [-2, 2]
def __call__(self, x):
return np.tanh(x)
def deriv(self, x, y):
"""
Derivative of transfer function TanSig
"""
return 1.0 - np.square(y)
class PureLin:
"""
Linear transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
copy of x
:Example:
>>> import numpy as np
>>> f = PureLin()
>>> x = np.array([-100., 50., 10., 40.])
>>> f(x).tolist()
[-100.0, 50.0, 10.0, 40.0]
"""
out_minmax = [-np.Inf, np.Inf]
inp_active = [-np.Inf, np.Inf]
def __call__(self, x):
return x.copy()
def deriv(self, x, y):
"""
Derivative of transfer function PureLin
"""
return np.ones_like(x)
class LogSig:
"""
Logarithmic sigmoid transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
The corresponding logarithmic sigmoid values.
:Example:
>>> f = LogSig()
>>> x = np.array([-np.Inf, 0.0, np.Inf])
>>> f(x).tolist()
[0.0, 0.5, 1.0]
"""
out_minmax = [0, 1]
inp_active = [-4, 4]
def __call__(self, x):
return 1 / (1 + np.exp(-x))
def deriv(self, x, y):
"""
Derivative of transfer function LogSig
"""
return y * (1 - y)
class HardLim:
"""
Hard limit transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
may take the following values: 0, 1
:Example:
>>> f = HardLim()
>>> x = np.array([-5, -0.1, 0, 0.1, 100])
>>> f(x)
array([ 0., 0., 0., 1., 1.])
"""
out_minmax = [0, 1]
inp_active = [0, 0]
def __call__(self, x):
return (x > 0) * 1.0
def deriv(self, x, y):
"""
Derivative of transfer function HardLim
"""
return np.zeros_like(x)
class HardLims:
"""
Symmetric hard limit transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
may take the following values: -1, 1
:Example:
>>> f = HardLims()
>>> x = np.array([-5, -0.1, 0, 0.1, 100])
>>> f(x)
array([-1., -1., -1., 1., 1.])
"""
out_minmax = [-1, 1]
inp_active = [0, 0]
def __call__(self, x):
return (x > 0) * 2.0 - 1.0
def deriv(self, x, y):
"""
Derivative of transfer function HardLims
"""
return np.zeros_like(x)
class Competitive:
"""
Competitive transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
may take the following values: 0, 1
1 if is a minimal element of x, else 0
:Example:
>>> f = Competitive()
>>> f([-5, -0.1, 0, 0.1, 100])
array([ 1., 0., 0., 0., 0.])
>>> f([-5, -0.1, 0, -6, 100])
array([ 0., 0., 0., 1., 0.])
"""
out_minmax = [0, 1]
inp_active = [-np.Inf, np.Inf]
def __call__(self, dist):
r = np.zeros_like(dist)
min = np.argmin(dist)
r[min] = 1.0
return r
class SoftMax:
"""
Soft max transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
range values [0, 1]
:Example:
>>> from numpy import floor
>>> f = SoftMax()
>>> floor(f([0, 1, 0.5, -0.5]) * 10)
array([ 1., 4., 2., 1.])
"""
out_minmax = [0, 1]
inp_active = [-np.Inf, np.Inf]
def __call__(self, dist):
exp = np.exp(dist)
return exp / exp.sum()
def deriv(self, x, y):
return y * (1 - y)
class SatLins:
"""
Symmetric saturating linear transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
-1 if x < -1; x if -1 <= x <= 1; 1 if x >1
:Example:
>>> f = SatLins()
>>> x = np.array([-5, -1, 0, 0.1, 100])
>>> f(x)
array([-1. , -1. , 0. , 0.1, 1. ])
"""
out_minmax = [-1, 1]
inp_active = [-1, 1]
def __call__(self, x):
y = x.copy()
y[y < -1] = -1
y[y > 1] = 1
return y
def deriv(self, x, y):
"""
Derivative of transfer function SatLins
"""
d = np.zeros_like(x)
d[(x > -1) & (x < 1)] = 1
return d
class SatLin:
"""
Saturating linear transfer function
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
0 if x < 0; x if 0 <= x <= 1; 1 if x >1
:Example:
>>> f = SatLin()
>>> x = np.array([-5, -0.1, 0, 0.1, 100])
>>> f(x)
array([ 0. , 0. , 0. , 0.1, 1. ])
"""
out_minmax = [0, 1]
inp_active = [0, 1]
def __call__(self, x):
y = x.copy()
y[y < 0] = 0
y[y > 1] = 1
return y
def deriv(self, x, y):
"""
Derivative of transfer function SatLin
"""
d = np.zeros_like(x)
d[(x > 0) & (x < 1)] = 1
return d
class SatLinPrm:
"""
Linear transfer function with parametric output
May use instead Satlin and Satlins
:Init Parameters:
k: float default 1
output scaling
out_min: float default 0
minimum output
out_max: float default 1
maximum output
:Parameters:
x: ndarray
Input array
:Returns:
y : ndarray
with default values
0 if x < 0; x if 0 <= x <= 1; 1 if x >1
:Example:
>>> f = SatLinPrm()
>>> x = np.array([-5, -0.1, 0, 0.1, 100])
>>> f(x)
array([ 0. , 0. , 0. , 0.1, 1. ])
>>> f = SatLinPrm(1, -1, 1)
>>> f(x)
array([-1. , -0.1, 0. , 0.1, 1. ])
"""
def __init__(self, k=1, out_min=0, out_max=1):
"""
Linear transfer function with parametric output
:Init Parameters:
k: float default 1
output scaling
out_min: float default 0
minimum output
out_max: float default 1
maximum output
"""
self.k = k
self.out_min = out_min
self.out_max = out_max
self.out_minmax = [out_min, out_max]
self.inp_active = [out_min, out_max]
def __call__(self, x):
y = x.copy()
y[y < self.out_min] = self.out_min
y[y > self.out_max] = self.out_max
y[(y >= self.out_min) & (y <= self.out_max)] *= self.k
return y
def deriv(self, x, y):
"""
Derivative of transfer function SatLin
"""
d = np.zeros_like(x)
d[(x > self.out_min) & (x < self.out_max)] = 1
return d
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# Sublime Suricate, Copyright (C) 2013 N. Subiron\n#\n# This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you\n# are welcome to redistribute it and/or modify it under the terms of the GNU\n# General Public License as pu... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# Sublime Suricate, Copyright (C) 2013 N. Subiron\n#\n# This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you\n# are welcome to redistribute it and/or modify it under the terms of the GNU\n# General Publ... | ```python
# Sublime Suricate, Copyright (C) 2013 N. Subiron
#
# This program comes with ABSOLUTELY NO WARRANTY. This is free software, and you
# are welcome to redistribute it and/or modify it under the terms of the GNU
# General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
import imp
import os
import sublime
from . import _variables
_THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
class _SuricateAPI(object):
api_is_ready = False
debug_log = False
is_packaged = _THIS_FOLDER.endswith('.sublime-package')
if is_packaged:
package_path = _THIS_FOLDER
else:
package_path = os.path.abspath(os.path.join(_THIS_FOLDER, '..'))
package_name = os.path.splitext(os.path.basename(package_path))[0]
library_module_name = '.'.join([package_name, 'lib'])
settings_file_base_name = 'Suricate.sublime-settings'
profile_extension = '.suricate-profile'
generated_files_path = None
variables = {}
@staticmethod
def set_ready():
if _SuricateAPI.api_is_ready:
raise RuntimeError('suricate API already initialized')
packages_path = sublime.packages_path()
folder_path = os.path.join(packages_path, _SuricateAPI.package_name)
_SuricateAPI.generated_files_path = os.path.abspath(folder_path)
_SuricateAPI._init_variables()
_SuricateAPI.api_is_ready = True
@staticmethod
def unload():
_SuricateAPI.api_is_ready = False
_SuricateAPI.variables.clear()
@staticmethod
def set_debug_log(active=None):
if active is None:
_SuricateAPI.debug_log = not _SuricateAPI.debug_log
else:
_SuricateAPI.debug_log = bool(active)
@staticmethod
def _init_variables():
is_valid = lambda k, v: not k.startswith('_') and isinstance(v, str)
prefix = 'suricate_'
api_vars = vars(_SuricateAPI)
variables = dict((prefix + k, v)
for k, v in api_vars.items() if is_valid(k, v))
_SuricateAPI.variables.update(variables)
def api_is_ready():
return _SuricateAPI.api_is_ready
def is_packaged():
return _SuricateAPI.is_packaged
def log(message, *args):
print('Suricate: ' + str(message) % args)
def debuglog(message, *args):
if _SuricateAPI.debug_log:
log(message, *args)
def set_debuglog(active=None):
_SuricateAPI.set_debug_log(active)
def reload_module(module):
if get_setting('dev_mode', False):
debuglog('reloading module %r', module.__name__)
return imp.reload(module)
return module
def extract_variables(window=None, append_suricate_variables=True):
if window is None:
window = sublime.active_window()
variables = _variables.extract_window_variables(window)
if append_suricate_variables:
variables.update(_SuricateAPI.variables)
return variables
def expand_variables(value, variables=None, window=None):
if variables is None:
variables = extract_variables(window)
return _variables.expand_variables(value, variables)
def get_variable(key, default=None):
return extract_variables().get(key, default)
def load_settings():
return sublime.load_settings(_SuricateAPI.settings_file_base_name)
def get_setting(key, default=None):
return load_settings().get(key, default)
``` |
[
{
"content": "Return the code unaltered:\n```python\n# Copyright 2021 Google LLC. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apa... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# Copyright 2021 Google LLC. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# ... | ```python
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.network_services import (
endpoint_config_selector_pb2,
)
from google3.cloud.graphite.mmv2.services.google.network_services import (
endpoint_config_selector_pb2_grpc,
)
from typing import List
class EndpointConfigSelector(object):
def __init__(
self,
name: str = None,
create_time: str = None,
update_time: str = None,
labels: dict = None,
type: str = None,
authorization_policy: str = None,
http_filters: dict = None,
endpoint_matcher: dict = None,
traffic_port_selector: dict = None,
description: str = None,
server_tls_policy: str = None,
client_tls_policy: str = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.labels = labels
self.type = type
self.authorization_policy = authorization_policy
self.http_filters = http_filters
self.endpoint_matcher = endpoint_matcher
self.traffic_port_selector = traffic_port_selector
self.description = description
self.server_tls_policy = server_tls_policy
self.client_tls_policy = client_tls_policy
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
channel.Channel()
)
request = (
endpoint_config_selector_pb2.ApplyNetworkservicesBetaEndpointConfigSelectorRequest()
)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if EndpointConfigSelectorTypeEnum.to_proto(self.type):
request.resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
if Primitive.to_proto(self.authorization_policy):
request.resource.authorization_policy = Primitive.to_proto(
self.authorization_policy
)
if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
request.resource.http_filters.CopyFrom(
EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
)
else:
request.resource.ClearField("http_filters")
if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
request.resource.endpoint_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
)
else:
request.resource.ClearField("endpoint_matcher")
if EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
):
request.resource.traffic_port_selector.CopyFrom(
EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
)
)
else:
request.resource.ClearField("traffic_port_selector")
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.server_tls_policy):
request.resource.server_tls_policy = Primitive.to_proto(
self.server_tls_policy
)
if Primitive.to_proto(self.client_tls_policy):
request.resource.client_tls_policy = Primitive.to_proto(
self.client_tls_policy
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyNetworkservicesBetaEndpointConfigSelector(request)
self.name = Primitive.from_proto(response.name)
self.create_time = Primitive.from_proto(response.create_time)
self.update_time = Primitive.from_proto(response.update_time)
self.labels = Primitive.from_proto(response.labels)
self.type = EndpointConfigSelectorTypeEnum.from_proto(response.type)
self.authorization_policy = Primitive.from_proto(response.authorization_policy)
self.http_filters = EndpointConfigSelectorHttpFilters.from_proto(
response.http_filters
)
self.endpoint_matcher = EndpointConfigSelectorEndpointMatcher.from_proto(
response.endpoint_matcher
)
self.traffic_port_selector = EndpointConfigSelectorTrafficPortSelector.from_proto(
response.traffic_port_selector
)
self.description = Primitive.from_proto(response.description)
self.server_tls_policy = Primitive.from_proto(response.server_tls_policy)
self.client_tls_policy = Primitive.from_proto(response.client_tls_policy)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
channel.Channel()
)
request = (
endpoint_config_selector_pb2.DeleteNetworkservicesBetaEndpointConfigSelectorRequest()
)
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if EndpointConfigSelectorTypeEnum.to_proto(self.type):
request.resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
if Primitive.to_proto(self.authorization_policy):
request.resource.authorization_policy = Primitive.to_proto(
self.authorization_policy
)
if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
request.resource.http_filters.CopyFrom(
EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
)
else:
request.resource.ClearField("http_filters")
if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
request.resource.endpoint_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
)
else:
request.resource.ClearField("endpoint_matcher")
if EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
):
request.resource.traffic_port_selector.CopyFrom(
EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
)
)
else:
request.resource.ClearField("traffic_port_selector")
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.server_tls_policy):
request.resource.server_tls_policy = Primitive.to_proto(
self.server_tls_policy
)
if Primitive.to_proto(self.client_tls_policy):
request.resource.client_tls_policy = Primitive.to_proto(
self.client_tls_policy
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteNetworkservicesBetaEndpointConfigSelector(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = endpoint_config_selector_pb2_grpc.NetworkservicesBetaEndpointConfigSelectorServiceStub(
channel.Channel()
)
request = (
endpoint_config_selector_pb2.ListNetworkservicesBetaEndpointConfigSelectorRequest()
)
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListNetworkservicesBetaEndpointConfigSelector(request).items
def to_proto(self):
resource = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelector()
)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if EndpointConfigSelectorTypeEnum.to_proto(self.type):
resource.type = EndpointConfigSelectorTypeEnum.to_proto(self.type)
if Primitive.to_proto(self.authorization_policy):
resource.authorization_policy = Primitive.to_proto(
self.authorization_policy
)
if EndpointConfigSelectorHttpFilters.to_proto(self.http_filters):
resource.http_filters.CopyFrom(
EndpointConfigSelectorHttpFilters.to_proto(self.http_filters)
)
else:
resource.ClearField("http_filters")
if EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher):
resource.endpoint_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcher.to_proto(self.endpoint_matcher)
)
else:
resource.ClearField("endpoint_matcher")
if EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
):
resource.traffic_port_selector.CopyFrom(
EndpointConfigSelectorTrafficPortSelector.to_proto(
self.traffic_port_selector
)
)
else:
resource.ClearField("traffic_port_selector")
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.server_tls_policy):
resource.server_tls_policy = Primitive.to_proto(self.server_tls_policy)
if Primitive.to_proto(self.client_tls_policy):
resource.client_tls_policy = Primitive.to_proto(self.client_tls_policy)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
class EndpointConfigSelectorHttpFilters(object):
def __init__(self, http_filters: list = None):
self.http_filters = http_filters
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorHttpFilters()
)
if Primitive.to_proto(resource.http_filters):
res.http_filters.extend(Primitive.to_proto(resource.http_filters))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorHttpFilters(
http_filters=Primitive.from_proto(resource.http_filters),
)
class EndpointConfigSelectorHttpFiltersArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EndpointConfigSelectorHttpFilters.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EndpointConfigSelectorHttpFilters.from_proto(i) for i in resources]
class EndpointConfigSelectorEndpointMatcher(object):
def __init__(self, metadata_label_matcher: dict = None):
self.metadata_label_matcher = metadata_label_matcher
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcher()
)
if EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(
resource.metadata_label_matcher
):
res.metadata_label_matcher.CopyFrom(
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(
resource.metadata_label_matcher
)
)
else:
res.ClearField("metadata_label_matcher")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorEndpointMatcher(
metadata_label_matcher=EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.from_proto(
resource.metadata_label_matcher
),
)
class EndpointConfigSelectorEndpointMatcherArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [EndpointConfigSelectorEndpointMatcher.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [EndpointConfigSelectorEndpointMatcher.from_proto(i) for i in resources]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(object):
def __init__(
self, metadata_label_match_criteria: str = None, metadata_labels: list = None
):
self.metadata_label_match_criteria = metadata_label_match_criteria
self.metadata_labels = metadata_labels
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcher()
)
if EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.to_proto(
resource.metadata_label_match_criteria
):
res.metadata_label_match_criteria = EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.to_proto(
resource.metadata_label_match_criteria
)
if EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.to_proto(
resource.metadata_labels
):
res.metadata_labels.extend(
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.to_proto(
resource.metadata_labels
)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher(
metadata_label_match_criteria=EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.from_proto(
resource.metadata_label_match_criteria
),
metadata_labels=EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray.from_proto(
resource.metadata_labels
),
)
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcher.from_proto(i)
for i in resources
]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(object):
def __init__(self, label_name: str = None, label_value: str = None):
self.label_name = label_name
self.label_value = label_value
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels()
)
if Primitive.to_proto(resource.label_name):
res.label_name = Primitive.to_proto(resource.label_name)
if Primitive.to_proto(resource.label_value):
res.label_value = Primitive.to_proto(resource.label_value)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels(
label_name=Primitive.from_proto(resource.label_name),
label_value=Primitive.from_proto(resource.label_value),
)
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelsArray(
object
):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabels.from_proto(
i
)
for i in resources
]
class EndpointConfigSelectorTrafficPortSelector(object):
def __init__(self, ports: list = None):
self.ports = ports
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTrafficPortSelector()
)
if Primitive.to_proto(resource.ports):
res.ports.extend(Primitive.to_proto(resource.ports))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return EndpointConfigSelectorTrafficPortSelector(
ports=Primitive.from_proto(resource.ports),
)
class EndpointConfigSelectorTrafficPortSelectorArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
EndpointConfigSelectorTrafficPortSelector.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
EndpointConfigSelectorTrafficPortSelector.from_proto(i) for i in resources
]
class EndpointConfigSelectorTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTypeEnum.Value(
"NetworkservicesBetaEndpointConfigSelectorTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorTypeEnum.Name(
resource
)[
len("NetworkservicesBetaEndpointConfigSelectorTypeEnum") :
]
class EndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum(
object
):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.Value(
"NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return endpoint_config_selector_pb2.NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum.Name(
resource
)[
len(
"NetworkservicesBetaEndpointConfigSelectorEndpointMatcherMetadataLabelMatcherMetadataLabelMatchCriteriaEnum"
) :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\nimport re, glob, string\n# as swap1.py except that we here use a regex with comments and compile it:\narg = r'[^,]+'\ncall = re.compile(r\"\"\"\n (?P<start> # preserve start of function string\n superLibFunc # n... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\nimport re, glob, string\n# as swap1.py except that we here use a regex with comments and compile it:\narg = r'[^,]+'\ncall = re.compile(r\"\"\"\n (?P<start> # preserve start of function string\n sup... | ```python
#!/usr/bin/env python
import re, glob, string
# as swap1.py except that we here use a regex with comments and compile it:
arg = r'[^,]+'
call = re.compile(r"""
(?P<start> # preserve start of function string
superLibFunc # name of function to match
\s* # optional whitespace
\( # parenthesis before argument list
\s* # optional whitespace
) # end of <start>
(?P<arg1>.+?) # a C variable name, non-greedy
(?P<middle> # preserve middle of function string
\s*,\s* # comma with optional surrounding whitespace
) # end of <middle>
(?P<arg2>.+?) # a C variable name, non-greedy
(?P<end> # preserve end of function string
\s* # optional whitespace
\) # closing parenthesis
) # End of <end>
""" , re.VERBOSE | re.DOTALL)
cfiles = ['.test1.c']
for cfile in cfiles:
print 'Treating',cfile
file = open(cfile, 'r')
filestr = file.read() # load all lines into a string
file.close()
filestr = call.sub(r'superLibFunc(\g<arg2>\g<middle>\g<arg1>\g<end>)',
filestr)
file = open(cfile + '.tmp', 'w')
file.write(filestr) # print everything into cfile.tmp
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\nimport xbmc,xbmcgui,xbmcaddon,xbmcplugin\nimport urllib\nimport thesportsdb\nimport datetime\nimport os\nimport re\nimport threading\nfrom random import randint\nfrom centerutils.common_variables import *\nfrom centerutils.datem... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\nimport xbmc,xbmcgui,xbmcaddon,xbmcplugin\nimport urllib\nimport thesportsdb\nimport datetime\nimport os\nimport re\nimport threading\nfrom random import randint\nfrom centerutils.common_variables import *\nfrom c... | ```python
import xbmc,xbmcgui,xbmcaddon,xbmcplugin
import urllib
import thesportsdb
import datetime
import os
import re
import threading
from random import randint
from centerutils.common_variables import *
from centerutils.datemanipulation import *
import competlist as competlist
import teamview as teamview
import contextmenubuilder
import tweetbuild
def start(data_list):
window = dialog_calendar('DialogCalendar.xml',addonpath,'Default',str(data_list))
window.doModal()
class dialog_calendar(xbmcgui.WindowXML):
def __init__( self, *args, **kwargs ):
xbmcgui.WindowXML.__init__(self)
self.date_string = eval(args[3])
def onInit(self):
self.getControl(911).setImage(addon_fanart)
self.getControl(333).setLabel('Calendar View')
self.ignored_leagues = os.listdir(ignoredleaguesfolder)
self.rmleaguescalendar = os.listdir(ignoreleaguecalendar)
#Change background if custom is defined
if settings.getSetting('calendar-background-type') == '1' and settings.getSetting('calendar-background-custom') != '':
self.getControl(912).setImage(settings.getSetting('calendar-background-custom'))
#Populate week days
menu = []
#grab datetime now and transform into a timezone object based on user timezone
date_now = datetime.datetime.now()
date_now_mytz = pytz.timezone(str(pytz.timezone(str(my_location)))).localize(date_now)
#convert datetime timezone object to the timezone of the database
date_now_tsdb = date_now_mytz.astimezone(my_location)
menu.append(('Today, %s' % (date_now_mytz.day),'%s-%s-%s' % (str(date_now_tsdb.year),str(date_now_tsdb.month),str(date_now_tsdb.day))))
for i in range(7):
date_now_mytz += datetime.timedelta(days=1)
date_now_tsdb += datetime.timedelta(days=1)
if i == 0: day_string ='%s, %s' % ('Tomorrow',date_now_mytz.day)
else:
day_string = '%s, %s' % (get_weekday(date_now_mytz.weekday()),date_now_mytz.day)
date_string = '%s-%s-%s' % (str(date_now_tsdb.year),str(date_now_tsdb.month),str(date_now_tsdb.day))
menu.append((day_string,date_string))
self.getControl(983).reset()
for data_string,date in menu:
menu_entry = xbmcgui.ListItem(data_string)
menu_entry.setProperty('menu_entry', data_string)
menu_entry.setProperty('entry_date', date)
self.getControl(983).addItem(menu_entry)
#use this to direct navigation to a given date! -TODO
threading.Thread(name='watcher', target=self.watcher).start()
if not self.date_string:
self.setFocusId(983)
self.getControl(983).selectItem(0)
self.date_string = menu[0][1]
self.fill_calendar(self.date_string)
# Maps a TheSportsDB sport name to the addon setting that enables it in the
# calendar; sports missing from this map are never filtered out.
SPORT_SETTINGS = {
    'Soccer': 'enable-football',
    'Basketball': 'enable-basketball',
    'Ice Hockey': 'enable-icehockey',
    'Baseball': 'enable-baseball',
    'Motorsport': 'enable-motorsport',
    'Rugby': 'enable-rugby',
    'Golf': 'enable-golf',
    'American Football': 'enable-amfootball',
}

def _sport_blocked(self, event_sport):
    """True when the user disabled this sport and asked to hide disabled sports."""
    if settings.getSetting('calendar-disabledsports') != 'true':
        return False
    setting_id = self.SPORT_SETTINGS.get(event_sport)
    return setting_id is not None and settings.getSetting(setting_id) == 'false'

def _league_blocked(self, league_id):
    """True when the event's league is ignored or removed from the calendar."""
    league_file = league_id + '.txt'
    if league_file in self.rmleaguescalendar:
        return True
    return (league_file in self.ignored_leagues and
            settings.getSetting('calendar-disabledleagues') == 'true')

def _build_event_item(self, event, event_sport, event_id):
    """Build the ListItem for one event.

    Returns None when the event is hidden by the 'hide passed events'
    setting; otherwise returns the populated xbmcgui.ListItem.
    """
    events_api = thesportsdb.Events()
    event_fullname = events_api.get_eventtitle(event)
    event_race = events_api.get_racelocation(event)
    event_league = events_api.get_league(event)
    # Logo file is loadingsports/<sport>.png with spaces URL-escaped,
    # e.g. 'ice%20hockey.png' — matches the shipped art file names.
    sport_logo = os.path.join(addonpath, art, 'loadingsports',
                              event_sport.lower().replace(' ', '%20') + '.png')
    fmt = "%y-%m-%d"
    fmt_time = "%H:%M"
    # Convert the database timestamp (tsdbtimezone) to the user's timezone.
    event_datetime = events_api.get_datetime_object(event)
    if event_datetime:
        db_time = pytz.timezone(str(pytz.timezone(tsdbtimezone))).localize(event_datetime)
        event_datetime = db_time.astimezone(my_location)
        event_strtime = events_api.get_time(event)
        if event_strtime and event_strtime != 'null' and event_strtime != 'None':
            event_time = event_datetime.strftime(fmt_time)
            # Sort key HHMM. (The original concatenated str(minute)+'0' for
            # single-digit minutes, padding on the wrong side so 9:05 sorted
            # exactly like 9:50.)
            event_order = event_datetime.hour * 100 + event_datetime.minute
        else:
            event_time = 'N/A'
            event_order = 30000  # unknown start times sort last
    else:
        event_time = 'N/A'
        event_order = 30000
    if event_race:
        home_team_logo = os.path.join(addonpath, art, 'raceflag.png')
        event_name = events_api.get_eventtitle(event)
        event_round = ''
    else:
        home_team_id = events_api.get_hometeamid(event)
        home_team_dict = thesportsdb.Lookups(tsdbkey).lookupteam(home_team_id)["teams"][0]
        if settings.getSetting('team-naming') == '0':
            home_team_name = thesportsdb.Teams().get_name(home_team_dict)
        else:
            home_team_name = thesportsdb.Teams().get_alternativefirst(home_team_dict)
        home_team_logo = thesportsdb.Teams().get_badge(home_team_dict)
        stadium_fanart = thesportsdb.Teams().get_stadium_thumb(home_team_dict)
        away_team_id = events_api.get_awayteamid(event)
        away_team_dict = thesportsdb.Lookups(tsdbkey).lookupteam(away_team_id)["teams"][0]
        if settings.getSetting('team-naming') == '0':
            away_team_name = thesportsdb.Teams().get_name(away_team_dict)
        else:
            away_team_name = thesportsdb.Teams().get_alternativefirst(away_team_dict)
        away_team_logo = thesportsdb.Teams().get_badge(away_team_dict)
        event_round = events_api.get_round(event)
    if event_round and event_round != '0':
        event_league = event_league + ' - Round ' + str(event_round)
    game = xbmcgui.ListItem(event_fullname)
    game.setProperty('HomeTeamLogo', home_team_logo)
    game.setProperty('league', event_league)
    game.setProperty('sport_logo', sport_logo)
    game.setProperty('sport', event_sport)
    game.setProperty('event_time', event_time)
    game.setProperty('event_order', str(event_order))
    game.setProperty('event_id', event_id)
    if event_race:
        game.setProperty('EventName', event_name)
    else:
        # Long multi-word names go into the *Long property so the skin can
        # render them smaller; everything else uses *Short.
        if ' ' in home_team_name and len(home_team_name) > 12:
            game.setProperty('HomeTeamLong', home_team_name)
        else:
            game.setProperty('HomeTeamShort', home_team_name)
        game.setProperty('AwayTeamLogo', away_team_logo)
        if ' ' in away_team_name and len(away_team_name) > 12:
            game.setProperty('AwayTeamLong', away_team_name)
        else:
            game.setProperty('AwayTeamShort', away_team_name)
        game.setProperty('StadiumThumb', stadium_fanart)
        game.setProperty('vs', 'VS')
    try:
        game.setProperty('date', event_datetime.strftime(fmt))
    except Exception:
        pass  # no parseable datetime: leave the 'date' property unset
    # Optionally hide events that started more than the configured number of
    # hours ago. (Fixed: this used settings.getsetting(), whose AttributeError
    # sent every event through the except branch, so the filter never applied.)
    try:
        now_local = pytz.timezone(str(pytz.timezone(str(my_location)))).localize(datetime.datetime.now())
        if event_datetime > now_local:
            hour_diff = (event_datetime - now_local).seconds / 3600
        else:
            hour_diff = ((now_local - event_datetime).seconds / 3600) * (-1)
        # NOTE(review): this sign convention gives passed events a negative
        # hour_diff, so only far-future events can exceed the delay; logic
        # preserved as found pending confirmation of the intended behaviour.
        if (settings.getSetting('calendar-disabledpassed') == 'true' and
                hour_diff > int(settings.getSetting('calendar-disabledpassed-delay'))):
            return None
    except Exception:
        pass  # on any error, keep the event visible (original behaviour)
    return game

def fill_calendar(self, datestring):
    """Fill the event list (control 987) with all events of one day.

    datestring is 'YYYY-M-D' in the database timezone, as produced by the
    day-selector menu. Disabled sports and ignored/removed leagues are
    filtered out, remaining events are sorted by start time, and the
    window is switched to the NextMatchView state.
    """
    self.getControl(93).setVisible(False)
    items_to_add = []
    self.getControl(94).setPercent(0)
    self.getControl(92).setImage(os.path.join(addonpath, art, 'busy.png'))
    xbmc.executebuiltin("SetProperty(loading,1,home)")
    self.getControl(987).reset()
    # All events scheduled for the requested day, every sport and league.
    event_next_list = thesportsdb.Schedules(tsdbkey).eventsday(datestring, None, None)["events"]
    if event_next_list:
        total_events = len(event_next_list)
        for j, event in enumerate(event_next_list, start=1):
            event_sport = thesportsdb.Events().get_sport(event)
            event_id = thesportsdb.Events().get_eventid(event)
            league_id = thesportsdb.Events().get_leagueid(event)
            if not self._sport_blocked(event_sport) and not self._league_blocked(league_id):
                game = self._build_event_item(event, event_sport, event_id)
                if game is not None:
                    items_to_add.append(game)
            # The progress bar advances for every event, shown or skipped.
            self.getControl(94).setPercent(int(float(j) / total_events * 100))
        # Stable sort by start time. (Replaces the original O(n^2) loop that
        # removed items from items_to_add while iterating over it.)
        items_to_add.sort(key=lambda item: int(item.getProperty('event_order')))
        if items_to_add:
            self.getControl(987).addItems(items_to_add)
        else:
            self.getControl(93).setVisible(True)
            self.getControl(93).setLabel('No events available!')
    else:
        self.getControl(93).setVisible(True)
        self.getControl(93).setLabel('No events available!')
    # Clear every other view flag and activate the next-match view.
    xbmc.executebuiltin("ClearProperty(loading,Home)")
    xbmc.executebuiltin("ClearProperty(lastmatchview,Home)")
    xbmc.executebuiltin("ClearProperty(plotview,Home)")
    xbmc.executebuiltin("ClearProperty(bannerview,Home)")
    xbmc.executebuiltin("ClearProperty(nextview,Home)")
    xbmc.executebuiltin("ClearProperty(videosview,Home)")
    xbmc.executebuiltin("ClearProperty(jerseyview,Home)")
    xbmc.executebuiltin("ClearProperty(badgeview,Home)")
    xbmc.executebuiltin("ClearProperty(newsview,Home)")
    xbmc.executebuiltin("SetProperty(nextmatchview,1,home)")
    settings.setSetting("view_type_league", 'nextmatchview')
    self.getControl(2).setLabel("League: NextMatchView")
def watcher(self,):
    """Poll the ignored-leagues folder and refresh the calendar on changes.

    Runs on a background thread (started from onInit); exits when Kodi
    signals shutdown via xbmc.abortRequested.
    """
    while True:
        if xbmc.abortRequested:
            break
        current = os.listdir(ignoreleaguecalendar)
        if current != self.rmleaguescalendar:
            # The set of removed leagues changed: remember it and rebuild.
            self.rmleaguescalendar = current
            self.fill_calendar(self.date_string)
        xbmc.sleep(200)
def onAction(self, action):
    """Handle remote/keyboard actions: close on back/exit, context menu on 'c'."""
    if action.getId() == 92 or action.getId() == 10:
        self.close()
    elif action.getId() == 117:  # contextmenu
        # Only the event list (control 987) offers a context menu. Without
        # this guard, 'container' was unbound when another control had focus
        # and the next line raised a NameError.
        if xbmc.getCondVisibility("Control.HasFocus(987)"):
            container = 987
            self.specific_id = self.getControl(container).getSelectedItem().getProperty('event_id')
            contextmenubuilder.start(['calendaritem', self.specific_id])
def onClick(self, controlId):
    """Dispatch clicks on the window's controls."""
    if controlId == 983:
        # Day selector: remember the chosen date and rebuild the event list.
        chosen = self.getControl(controlId).getSelectedItem().getProperty('entry_date')
        self.date_string = chosen
        self.fill_calendar(chosen)
    elif controlId == 980 or controlId == 984 or controlId == 985 or controlId == 981:
        # Any of the team lists: open the team view for the selected team.
        self.team = self.getControl(controlId).getSelectedItem().getProperty('team_id')
        teamview.start([self.team, self.sport, '', 'plotview'])
    elif controlId == 2:
        # Clicking the view-type label cycles to the next league view.
        transitions = {
            "League: PlotView": self.setvideosview,
            "League: VideosView": self.setbannerview,
            "League: BannerView": self.setbadgeview,
            "League: BadgeView": self.setjerseyview,
            "League: JerseyView": self.setnewsview,
            "League: NewsView": self.setnextmatchview,
            "League: NextMatchView": self.setlastmatchview,
            "League: LastMatchView": self.setplotview,
        }
        handler = transitions.get(self.getControl(controlId).getLabel())
        if handler:
            handler()
    elif controlId == 989:
        # Video list: hand the selected clip to the YouTube plugin.
        youtube_id = self.getControl(989).getSelectedItem().getProperty('video_id')
        xbmc.executebuiltin('PlayMedia(plugin://plugin.video.youtube/play/?video_id=' + youtube_id + ')')
    elif controlId == 986:
        # News list: show the selected article's image, body and title.
        selected = self.getControl(986).getSelectedItem()
        self.getControl(939).setImage(selected.getProperty('news_img'))
        self.getControl(937).setText(selected.getProperty('content'))
        self.getControl(938).setLabel(selected.getProperty('title'))
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# SpeakerTest.py \n# Group 7 ECE 4900\n# Edward Reehorst w/ help from\n# http://minhdo.ece.illinois.edu/teaching/speaker_recognition/\n# Text independent speaker recognition system based on mel frequency coeffiecient\n# features and vector quan... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# SpeakerTest.py \n# Group 7 ECE 4900\n# Edward Reehorst w/ help from\n# http://minhdo.ece.illinois.edu/teaching/speaker_recognition/\n# Text independent speaker recognition system based on mel frequency coeffiecient\n# features... | ```python
# SpeakerTest.py
# Group 7 ECE 4900
# Edward Reehorst w/ help from
# http://minhdo.ece.illinois.edu/teaching/speaker_recognition/
# Text independent speaker recognition system based on mel frequency coeffiecient
# features and vector quantization
import numpy as np
import scipy.fftpack as fft
import scipy.io.wavfile as wav
import math
def disteu(x, y):
    """DISTEU: pairwise Euclidean distances between columns of two matrices.

    Input:
        x, y : matrices whose columns are data vectors; x is (M, N) and
               y is (M, P) — both must have the same number of rows M.
    Output:
        d : (N, P) array where d[i, j] is the Euclidean distance between
            column vectors x[:, i] and y[:, j]:  D = sum((x-y)**2)**0.5
    Raises:
        ValueError: if x and y do not have the same number of rows.
        (The original printed a message and returned -1, which only crashed
        callers later with a confusing unrelated error.)
    """
    M, N = x.shape
    M2, P = y.shape
    if M != M2:
        raise ValueError('Matrix dimensions do not match.')
    # Broadcast (M, N, 1) against (M, 1, P) and reduce over the vector
    # dimension: one vectorized pass instead of the original N*P Python loop.
    # Costs a temporary of M*N*P floats, fine for the frame counts used here.
    diff = x[:, :, np.newaxis] - y[:, np.newaxis, :]
    return np.sqrt(np.sum(diff ** 2, axis=0))
# VQLBG Vector quantization using the Linde-Buzo-Gray algorithm
#
# Inputs:
#       d contains training data vectors (one per column)
#       k is number of centroids required
#
# Outputs:
#       c contains the result VQ codebook (k columns, one for each centroids)
def vqlbg(d, k):
    """Build a k-entry VQ codebook from the column vectors of d (LBG algorithm)."""
    # Constants
    split = 0.1;    # relative perturbation applied when splitting each centroid
    sigma = 0.001;  # stop refining when relative distortion improvement < sigma
    #Initial Codebook of one entry contains single centroid
    c = np.mean(d, axis=1);
    c = c[:,np.newaxis]
    m = 1;
    # Continue doubling the codebook until it holds at least k centroids.
    # NOTE(review): if k is not a power of two this overshoots k entries —
    # confirm callers always request powers of two (train() uses k=8).
    while m < k :
        # (Randomly) Split into two codebooks
        c = np.concatenate((np.multiply(c,(1 + split)), np.multiply(c,(1 - split))),axis=1)
        m = 2*m
        Dpast = float("inf")
        D = 10000
        # Lloyd iteration: refine centroids until distortion stops improving.
        while (Dpast - D)/D > sigma:
            Dpast = D;
            # Nearest Neighbor Search
            z = disteu(d, c);
            dist = np.amin(z, axis=1);   # distance of each vector to its nearest centroid
            ind = np.argmin(z, axis=1);  # index of that nearest centroid
            D = np.mean(dist);           # average distortion of the current codebook
            # Update Centroids
            # NOTE(review): a cluster with no assigned vectors makes np.mean
            # return NaN for that centroid — confirm training data avoids this.
            for j in range(0,m):
                c[:, j] = np.mean(d[:, ind==j], axis=1);
    return c;
# FROM https://github.com/jameslyons/python_speech_features/blob/master/features/base.py
def get_filterbanks(nfilt=20,nfft=512,samplerate=16000,lowfreq=0,highfreq=None):
    """Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
    to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)

    :param nfilt: the number of filters in the filterbank, default 20.
    :param nfft: the FFT size. Default is 512.
    :param samplerate: the samplerate of the signal we are working with. Affects mel spacing.
    :param lowfreq: lowest band edge of mel filters, default 0 Hz
    :param highfreq: highest band edge of mel filters, default samplerate/2
    :returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
    """
    highfreq = highfreq or samplerate / 2
    assert highfreq <= samplerate / 2, "highfreq is greater than samplerate/2"

    # compute points evenly spaced in mels
    lowmel = hz2mel(lowfreq)
    highmel = hz2mel(highfreq)
    melpoints = np.linspace(lowmel, highmel, nfilt + 2)
    # our points are in Hz, but we use fft bins, so we have to convert
    # from Hz to fft bin number
    bin = np.floor((nfft + 1) * mel2hz(melpoints) / samplerate)

    # // (floor division) so the shape is an int on Python 3 as well; the
    # original nfft/2+1 became a float under true division and broke np.zeros.
    fbank = np.zeros([nfilt, nfft // 2 + 1])
    for j in range(0, nfilt):  # range instead of Python-2-only xrange
        # Rising slope of triangular filter j ...
        for i in range(int(bin[j]), int(bin[j + 1])):
            fbank[j, i] = (i - bin[j]) / (bin[j + 1] - bin[j])
        # ... and its falling slope.
        for i in range(int(bin[j + 1]), int(bin[j + 2])):
            fbank[j, i] = (bin[j + 2] - i) / (bin[j + 2] - bin[j + 1])
    return fbank
# FROM https://github.com/jameslyons/python_speech_features/blob/master/features/base.py
def hz2mel(hz):
    """Convert a value in Hertz to Mels

    :param hz: a value in Hz. This can also be a numpy array, conversion proceeds element-wise.
    :returns: a value in Mels. If an array was passed in, an identical sized array is returned.
    """
    ratio = hz / 700.0
    return 2595 * np.log10(ratio + 1)
# FROM https://github.com/jameslyons/python_speech_features/blob/master/features/base.py
def mel2hz(mel):
    """Convert a value in Mels to Hertz

    :param mel: a value in Mels. This can also be a numpy array, conversion proceeds element-wise.
    :returns: a value in Hertz. If an array was passed in, an identical sized array is returned.
    """
    scaled = mel / 2595.0
    return 700 * (10 ** scaled - 1)
def toMono(sig):
    """Return channel 0 of a multi-channel signal; pass mono signals through."""
    return sig[:, 0] if sig.ndim > 1 else sig
# MFCC Calculate the mel frequencey cepstrum coefficients (MFCC) of a signal
def mfcc(s, fs):
    """Return the MFCC matrix of a speech signal.

    Inputs:
        s  : speech signal (1-D numpy array)
        fs : sample rate in Hz
    Outputs:
        c  : MFCC output, each column contains the MFCC's for one speech frame
             (0th-order cepstral coefficient excluded).
    """
    # Constants: frame length, hop size, number of mel filters.
    N = 256
    M = 100
    P = 30
    # Number of N-sample frames at hop M (float division so the ceil is real
    # on Python 2 as well; integer division silently floored it).
    l = int(math.ceil((s.size - N + 1) / float(M)))
    c = np.zeros((P, l))
    # Window and filterbank are the same for every frame: build them once.
    w = np.hamming(N)
    m = get_filterbanks(P, N, fs)
    n2 = N // 2  # integer so it is a valid slice bound (math.floor gave a float)
    # Fixed off-by-one: the original looped range(0, l-1), leaving the last
    # column of c permanently zero and polluting the trained codebook.
    for x in range(0, l):
        # Frame
        start = x * M
        frame = s[start:start + N]
        # Window
        windFrame = frame * w
        # FFT
        frameFFT = np.fft.fft(windFrame)
        # Mel-Frequency Wrapping (power spectrum through the filterbank)
        ms = np.dot(m, abs(np.power(frameFFT[0:n2 + 1], 2)))
        # Last step, compute mel-frequency cepstrum coefficients
        c[:, x] = fft.dct(np.log(ms.clip(min=0.00001)))
    # np.delete returns a new array — the original discarded the result, so
    # the 0th order cepstral coefficient was never actually excluded.
    c = np.delete(c, 0, 0)
    return c
def train(traindir, n):
    """Speaker Recognition: Training Stage.

    Input:
        traindir : string name of directory contains all train sound files
        n        : number of train files in traindir
    Output:
        code : trained VQ codebooks, code[i] for i-th speaker
    Note:
        Sound files in traindir are expected to be named s1.wav ... sn.wav.
    Example:
        code = train('C:/data/train/', 8)
    """
    k = 8  # number of centroids required
    code = []
    for i in range(1, n + 1):  # train a VQ codebook for each speaker
        # Renamed from 'file', which shadowed the Python 2 builtin.
        path = "{0}s{1}.wav".format(traindir, i)
        print(path)  # function-call form works on Python 2 and 3
        [fs, s] = wav.read(path)
        s = toMono(s)
        v = mfcc(s, fs)  # Compute MFCC's
        code.append(vqlbg(v, k))  # Train VQ codebook
    return code
def test(testdir, n, code):
    """Speaker Recognition: Testing Stage.

    Input:
        testdir : string name of directory contains all test sound files
        n       : number of test files in testdir
        code    : codebooks of all trained speakers
    Note:
        Sound files in testdir are expected to be named s1.wav ... sn.wav.
    Example:
        test('C:/data/test/', 8, code)
    """
    for k in range(1, n + 1):  # read test sound file of each speaker
        path = '{0}s{1}.wav'.format(testdir, k)
        [fs, s] = wav.read(path)
        s = toMono(s)
        v = mfcc(s, fs)  # Compute MFCC's
        # Print the average distortion of this file against every codebook;
        # the smallest value identifies the most likely speaker. (The unused
        # distmin/k1 locals that were meant for that selection are removed.)
        for l in range(0, len(code)):
            d = disteu(v, code[l])
            dist = sum(np.amin(d, axis=1)) / d.shape[0]
            print("{0}\t{1}\t{2}".format(k, l + 1, dist))
        print("\n")
if __name__ == "__main__":
    # Train codebooks for 12 speakers, then report distortions for 16 test
    # files. Guarded so importing this module does not kick off training.
    c = train("data/train/", 12)
    test("data/test/", 16, c)
``` |
[
{
"content": "Repeat the following code:\n```python\nimport urllib\nfrom django.db.models import Avg, Sum, Count\nfrom django import forms\nfrom django.forms.widgets import Input\nfrom django.shortcuts import render\nfrom colourlens.models import Artwork, Colour\n\nPROM_ATTRS = {'min': '0', 'max': '100', 'step'... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\nimport urllib\nfrom django.db.models import Avg, Sum, Count\nfrom django import forms\nfrom django.forms.widgets import Input\nfrom django.shortcuts import render\nfrom colourlens.models import Artwork, Colour\n\nPROM_ATTRS = {'min': '0', 'max... | ```python
import urllib
from django.db.models import Avg, Sum, Count
from django import forms
from django.forms.widgets import Input
from django.shortcuts import render
from colourlens.models import Artwork, Colour
PROM_ATTRS = {'min': '0', 'max': '100', 'step': '5'}
DIST_ATTRS = {'min': '0', 'max': '50', 'step': '1'}
class RangeInput(Input):
    # Django ships no HTML5 slider widget; this renders <input type="range">.
    # Min/max/step come from the attrs dict passed at construction
    # (see PROM_ATTRS / DIST_ATTRS above).
    input_type = "range"
class ColourForm(forms.Form):
    """Colour-search form: one checkbox per colour plus two range sliders."""

    def __init__(self, *args, **kwargs):
        """
        Add classes to denote type of input
        """
        super(ColourForm, self).__init__(*args, **kwargs)
        # Tag each widget with a CSS class so the template can style
        # checkboxes ('colourbox') and sliders ('slider') differently.
        # NOTE(review): dict.iteritems() is Python 2 only.
        for k, v in self.fields.iteritems():
            if v.widget.__class__ == forms.CheckboxInput:
                v.widget.attrs['class'] = 'colourbox'
            elif v.widget.__class__ == RangeInput:
                v.widget.attrs['class'] = 'slider'

    # One optional checkbox per recognised colour name.
    black = forms.BooleanField(required=False)
    grey = forms.BooleanField(required=False)
    silver = forms.BooleanField(required=False)
    white = forms.BooleanField(required=False)
    red = forms.BooleanField(required=False)
    maroon = forms.BooleanField(required=False)
    brown = forms.BooleanField(required=False)
    orange = forms.BooleanField(required=False)
    yellow = forms.BooleanField(required=False)
    lime = forms.BooleanField(required=False)
    green = forms.BooleanField(required=False)
    olive = forms.BooleanField(required=False)
    cyan = forms.BooleanField(required=False)
    teal = forms.BooleanField(required=False)
    blue = forms.BooleanField(required=False)
    navy = forms.BooleanField(required=False)
    magenta = forms.BooleanField(required=False)
    purple = forms.BooleanField(required=False)

    # HTML5 range sliders; bounds and step come from the module constants.
    prominence = forms.IntegerField(label="Increase colour area",
        widget=RangeInput(attrs=PROM_ATTRS))
    distance = forms.IntegerField(label="Broaden palette",
        widget=RangeInput(attrs=DIST_ATTRS))
    # Hidden marker so the view can tell a submitted form from a fresh one.
    submitted = forms.CharField(widget=forms.HiddenInput())
class ColourChoice(object):
    """A Colour paired with the current selection of hex values.

    Exposes the query string that would toggle this colour in or out of
    the selection, for use as a link target in the template.
    """

    def __init__(self, selected_colours, colour):
        self.selected_colours = selected_colours
        self.colour = colour

    @property
    def artwork_count(self):
        """Number of artworks annotated onto this colour by the view."""
        return self.colour.artwork__count

    @property
    def hex_value(self):
        return self.colour.hex_value

    @property
    def query_string(self):
        """Querystring for the current selection with this colour toggled.

        Returns None when toggling would leave no colours selected.
        """
        mine = set([self.colour.hex_value])
        if self.colour.hex_value in self.selected_colours:
            toggled = self.selected_colours.difference(mine)
        else:
            toggled = self.selected_colours.union(mine)
        if toggled:
            pairs = [('colour', value) for value in toggled]
            return urllib.urlencode(pairs)
def index(request, institution=False):
    """
    Search and browse colours.

    GET parameters: repeated 'colour' (hex values to match), optional
    'startyear'/'endyear' bounds. 'institution' restricts both the artwork
    and colour querysets when given.
    """
    DISTANCE = 20  # maximum colour distance for an artwork/colour match
    artworks = Artwork.objects.select_related().all()
    colours = Colour.objects.all()
    req_colours = request.GET.getlist('colour', [])
    startyear = request.GET.get('startyear', None)
    endyear = request.GET.get('endyear', None)
    # Filters applied to the Colour queryset, mirroring the artwork filters.
    colour_filters = {}
    if startyear:
        artworks = artworks.filter(year__gte=startyear)
        colour_filters['artwork__year__gte'] = startyear
    if endyear:
        artworks = artworks.filter(year__lte=endyear)
        colour_filters['artwork__year__lte'] = endyear
    # Each requested colour narrows the artwork set (AND semantics).
    for hex_value in req_colours:
        artworks = artworks.filter(
            colours__hex_value=hex_value,
            colourdistance__distance__lte=DISTANCE,
        )
    if institution:
        artworks = artworks.filter(institution=institution)
        colour_filters['artwork__institution'] = institution
    artworks = artworks.annotate(
        ave_distance=Avg('colourdistance__distance'),
        ave_presence=Avg('colourdistance__presence'),
        tot_presence=Sum('colourdistance__presence'),
        tot_prominence=Sum('colourdistance__prominence'),
        ave_prominence=Avg('colourdistance__prominence'),
    )
    artworks = artworks.order_by('-tot_presence').distinct()
    artworks_count = artworks.count()
    if req_colours:
        # Cap the IN-list to keep the SQL statement within backend limits.
        colour_filters['artwork__id__in'] = [a.id for a in artworks[:990]]
        colour_filters['colourdistance__distance__lte'] = DISTANCE
    colours = colours.filter(**colour_filters)
    colours = colours.annotate(Count('artwork', distinct=True)).order_by('hue')
    colour_count = colours.count()
    if colour_count > 0:
        colour_width = 99.4 / colour_count
        total_palette = sum(c.artwork__count for c in colours)
    else:
        # Guard: the original computed 99.4 / colour_count before checking
        # the count, raising ZeroDivisionError whenever no colour matched.
        colour_width = 0
        total_palette = 0
    institutions = Artwork.objects.all().values('institution').distinct()
    return render(request, 'colour.html', {
        'artworks': artworks[:40],
        'colours': [ColourChoice(set(req_colours), colour) for colour in colours],
        'colour_count': colour_count,
        'colour_width': colour_width,
        'total_palette': total_palette,
        'found': artworks_count,
        'institution': institution,
        'institutions': institutions,
        'req_colours': req_colours,
    })
``` |
[
{
"content": "```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) Au... | [
{
"content": "<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by ... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import DeploymentScriptsClientConfiguration
from .operations import DeploymentScriptsOperations
from .. import models
class DeploymentScriptsClient(object):
    """The APIs listed in this specification can be used to manage Deployment Scripts resource through the Azure Resource Manager.

    :ivar deployment_scripts: DeploymentScriptsOperations operations
    :vartype deployment_scripts: azure.mgmt.resource.deploymentscripts.v2020_10_01.aio.operations.DeploymentScriptsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Subscription Id which forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public-cloud ARM endpoint when no base URL is given.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = DeploymentScriptsClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Model-class-name -> class map consumed by the (de)serializers below.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        # Request validation is performed server-side; skip the client checks.
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        self.deployment_scripts = DeploymentScriptsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the subscription id into the request's URL template
        # before handing the request to the pipeline.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        # Release the underlying transport's connections.
        await self._client.close()

    async def __aenter__(self) -> "DeploymentScriptsClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.