body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
35b0c681c1e36beaceb56497c861284ca9b3e4e0b5f086d52f070762fa9091f5
|
async def test_config_tag_override(self) -> None:
'Image tag in the config dict overrides tag file.'
(await self.start_manager())
config = {'config': {'image_tag': 'custom_tag'}}
product = (await run_clocked(self, 100, self.manager.create_product('foo', config)))
(await self.manager.product_active(product))
task = list(self.singularity_server.tasks.values())[0]
self.assertEqual(task.deploy.config['containerInfo']['docker']['image'], 'registry.invalid:5000/katsdpcontroller:custom_tag')
arguments = task.arguments()
self.assertIn('--image-tag=custom_tag', arguments)
|
Image tag in the config dict overrides tag file.
|
katsdpcontroller/test/test_master_controller.py
|
test_config_tag_override
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_config_tag_override(self) -> None:
(await self.start_manager())
config = {'config': {'image_tag': 'custom_tag'}}
product = (await run_clocked(self, 100, self.manager.create_product('foo', config)))
(await self.manager.product_active(product))
task = list(self.singularity_server.tasks.values())[0]
self.assertEqual(task.deploy.config['containerInfo']['docker']['image'], 'registry.invalid:5000/katsdpcontroller:custom_tag')
arguments = task.arguments()
self.assertIn('--image-tag=custom_tag', arguments)
|
async def test_config_tag_override(self) -> None:
(await self.start_manager())
config = {'config': {'image_tag': 'custom_tag'}}
product = (await run_clocked(self, 100, self.manager.create_product('foo', config)))
(await self.manager.product_active(product))
task = list(self.singularity_server.tasks.values())[0]
self.assertEqual(task.deploy.config['containerInfo']['docker']['image'], 'registry.invalid:5000/katsdpcontroller:custom_tag')
arguments = task.arguments()
self.assertIn('--image-tag=custom_tag', arguments)<|docstring|>Image tag in the config dict overrides tag file.<|endoftext|>
|
fefd3d7246441d8a12d5f15a8d1cb3023d5cd02d46d4a99fbe9fc8595b83f6c3
|
async def test_config_image_override(self) -> None:
'Image override in the config dict overrides the product controller image.'
(await self.start_manager())
config = {'config': {'image_overrides': {'katsdpcontroller': 'katsdpcontroller:custom_tag'}}}
product = (await run_clocked(self, 100, self.manager.create_product('foo', config)))
(await self.manager.product_active(product))
task = list(self.singularity_server.tasks.values())[0]
self.assertEqual(task.deploy.config['containerInfo']['docker']['image'], 'registry.invalid:5000/katsdpcontroller:custom_tag')
|
Image override in the config dict overrides the product controller image.
|
katsdpcontroller/test/test_master_controller.py
|
test_config_image_override
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_config_image_override(self) -> None:
(await self.start_manager())
config = {'config': {'image_overrides': {'katsdpcontroller': 'katsdpcontroller:custom_tag'}}}
product = (await run_clocked(self, 100, self.manager.create_product('foo', config)))
(await self.manager.product_active(product))
task = list(self.singularity_server.tasks.values())[0]
self.assertEqual(task.deploy.config['containerInfo']['docker']['image'], 'registry.invalid:5000/katsdpcontroller:custom_tag')
|
async def test_config_image_override(self) -> None:
(await self.start_manager())
config = {'config': {'image_overrides': {'katsdpcontroller': 'katsdpcontroller:custom_tag'}}}
product = (await run_clocked(self, 100, self.manager.create_product('foo', config)))
(await self.manager.product_active(product))
task = list(self.singularity_server.tasks.values())[0]
self.assertEqual(task.deploy.config['containerInfo']['docker']['image'], 'registry.invalid:5000/katsdpcontroller:custom_tag')<|docstring|>Image override in the config dict overrides the product controller image.<|endoftext|>
|
6892fec3a5e0b8236174308c0eb92ccadc8763f71b274f817ef2f76ee4231ffe
|
async def test_create_product_dies_fast(self) -> None:
'Task dies before we observe it running'
(await self.start_manager())
self.singularity_server.lifecycles.append(quick_death_lifecycle)
with self.assertRaises(ProductFailed):
(await run_clocked(self, 100, self.manager.create_product('foo', {})))
self.assertEqual(self.manager.products, {})
|
Task dies before we observe it running
|
katsdpcontroller/test/test_master_controller.py
|
test_create_product_dies_fast
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_create_product_dies_fast(self) -> None:
(await self.start_manager())
self.singularity_server.lifecycles.append(quick_death_lifecycle)
with self.assertRaises(ProductFailed):
(await run_clocked(self, 100, self.manager.create_product('foo', {})))
self.assertEqual(self.manager.products, {})
|
async def test_create_product_dies_fast(self) -> None:
(await self.start_manager())
self.singularity_server.lifecycles.append(quick_death_lifecycle)
with self.assertRaises(ProductFailed):
(await run_clocked(self, 100, self.manager.create_product('foo', {})))
self.assertEqual(self.manager.products, {})<|docstring|>Task dies before we observe it running<|endoftext|>
|
a65545c64fd85e5c8c316938baef8c0725bdd5bfc9b7a108e8667eee851c3b12
|
async def test_create_product_parallel(self) -> None:
'Can configure two subarray products at the same time'
(await self.start_manager())
with Background(self.manager.create_product('product1', {})) as cm1, Background(self.manager.create_product('product2', {})) as cm2:
(await self.advance(100))
product1 = cm1.result
product2 = cm2.result
self.assertEqual(product1.name, 'product1')
self.assertEqual(product2.name, 'product2')
self.assertEqual(product1.task_state, Product.TaskState.STARTING)
self.assertEqual(product2.task_state, Product.TaskState.STARTING)
|
Can configure two subarray products at the same time
|
katsdpcontroller/test/test_master_controller.py
|
test_create_product_parallel
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_create_product_parallel(self) -> None:
(await self.start_manager())
with Background(self.manager.create_product('product1', {})) as cm1, Background(self.manager.create_product('product2', {})) as cm2:
(await self.advance(100))
product1 = cm1.result
product2 = cm2.result
self.assertEqual(product1.name, 'product1')
self.assertEqual(product2.name, 'product2')
self.assertEqual(product1.task_state, Product.TaskState.STARTING)
self.assertEqual(product2.task_state, Product.TaskState.STARTING)
|
async def test_create_product_parallel(self) -> None:
(await self.start_manager())
with Background(self.manager.create_product('product1', {})) as cm1, Background(self.manager.create_product('product2', {})) as cm2:
(await self.advance(100))
product1 = cm1.result
product2 = cm2.result
self.assertEqual(product1.name, 'product1')
self.assertEqual(product2.name, 'product2')
self.assertEqual(product1.task_state, Product.TaskState.STARTING)
self.assertEqual(product2.task_state, Product.TaskState.STARTING)<|docstring|>Can configure two subarray products at the same time<|endoftext|>
|
c21b926dc5156a409fe87aac25873388e444b72efaf2634687811ebee2a94afe
|
async def _test_create_product_dies_after_task_id(self, init_wait: float) -> None:
'Task dies immediately after we learn its task ID\n\n This test is parametrised so that we can control whether the task ID is\n learnt during polling for the new task or during task reconciliation.\n '
(await self.start_manager())
self.singularity_server.lifecycles.append(functools.partial(death_after_task_id_lifecycle, init_wait))
with self.assertRaises(ProductFailed):
(await run_clocked(self, 100, self.manager.create_product('foo', {})))
self.assertEqual(self.manager.products, {})
|
Task dies immediately after we learn its task ID
This test is parametrised so that we can control whether the task ID is
learnt during polling for the new task or during task reconciliation.
|
katsdpcontroller/test/test_master_controller.py
|
_test_create_product_dies_after_task_id
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def _test_create_product_dies_after_task_id(self, init_wait: float) -> None:
'Task dies immediately after we learn its task ID\n\n This test is parametrised so that we can control whether the task ID is\n learnt during polling for the new task or during task reconciliation.\n '
(await self.start_manager())
self.singularity_server.lifecycles.append(functools.partial(death_after_task_id_lifecycle, init_wait))
with self.assertRaises(ProductFailed):
(await run_clocked(self, 100, self.manager.create_product('foo', {})))
self.assertEqual(self.manager.products, {})
|
async def _test_create_product_dies_after_task_id(self, init_wait: float) -> None:
'Task dies immediately after we learn its task ID\n\n This test is parametrised so that we can control whether the task ID is\n learnt during polling for the new task or during task reconciliation.\n '
(await self.start_manager())
self.singularity_server.lifecycles.append(functools.partial(death_after_task_id_lifecycle, init_wait))
with self.assertRaises(ProductFailed):
(await run_clocked(self, 100, self.manager.create_product('foo', {})))
self.assertEqual(self.manager.products, {})<|docstring|>Task dies immediately after we learn its task ID
This test is parametrised so that we can control whether the task ID is
learnt during polling for the new task or during task reconciliation.<|endoftext|>
|
c28594cc45d8e87eba6ccf10a58fcf5ce48556c634ac463fab71fe332ba71b3e
|
async def test_create_product_dies_after_task_id_reconciliation(self) -> None:
'Task dies immediately after we learn its task ID during reconciliation'
(await self._test_create_product_dies_after_task_id(self.manager.reconciliation_interval))
|
Task dies immediately after we learn its task ID during reconciliation
|
katsdpcontroller/test/test_master_controller.py
|
test_create_product_dies_after_task_id_reconciliation
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_create_product_dies_after_task_id_reconciliation(self) -> None:
(await self._test_create_product_dies_after_task_id(self.manager.reconciliation_interval))
|
async def test_create_product_dies_after_task_id_reconciliation(self) -> None:
(await self._test_create_product_dies_after_task_id(self.manager.reconciliation_interval))<|docstring|>Task dies immediately after we learn its task ID during reconciliation<|endoftext|>
|
f533c4a28cf6572f20554739c76246f65dfb98a3d48ce8d03793a4c034dfb0ef
|
async def test_create_product_dies_after_task_id_poll(self) -> None:
'Task dies immediately after we learn its task ID during polling'
(await self._test_create_product_dies_after_task_id(self.manager.new_task_poll_interval))
|
Task dies immediately after we learn its task ID during polling
|
katsdpcontroller/test/test_master_controller.py
|
test_create_product_dies_after_task_id_poll
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_create_product_dies_after_task_id_poll(self) -> None:
(await self._test_create_product_dies_after_task_id(self.manager.new_task_poll_interval))
|
async def test_create_product_dies_after_task_id_poll(self) -> None:
(await self._test_create_product_dies_after_task_id(self.manager.new_task_poll_interval))<|docstring|>Task dies immediately after we learn its task ID during polling<|endoftext|>
|
1865147dc0d2a340db9e564031f2109915c45b7ac9d05b0b1b9dd049f18d779e
|
async def reset_manager(self) -> None:
'Throw away the manager and create a new one'
zk = self.manager._zk
(await self.manager.stop())
(await zk.delete('/running'))
with mock.patch('aiozk.ZKClient', return_value=zk):
self.manager = SingularityProductManager(self.args, self.server, self.image_resolver_factory)
(await self.manager.start())
|
Throw away the manager and create a new one
|
katsdpcontroller/test/test_master_controller.py
|
reset_manager
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def reset_manager(self) -> None:
zk = self.manager._zk
(await self.manager.stop())
(await zk.delete('/running'))
with mock.patch('aiozk.ZKClient', return_value=zk):
self.manager = SingularityProductManager(self.args, self.server, self.image_resolver_factory)
(await self.manager.start())
|
async def reset_manager(self) -> None:
zk = self.manager._zk
(await self.manager.stop())
(await zk.delete('/running'))
with mock.patch('aiozk.ZKClient', return_value=zk):
self.manager = SingularityProductManager(self.args, self.server, self.image_resolver_factory)
(await self.manager.start())<|docstring|>Throw away the manager and create a new one<|endoftext|>
|
b3429d5517810581053491291abf4e3f33f804be667c20e9f2c9f81f82820a58
|
async def test_spontaneous_death(self) -> None:
'Product must be cleaned up if it dies on its own'
(await self.start_manager())
product = (await self.start_product(lifecycle=spontaneous_death_lifecycle))
self.assertEqual((await self.get_zk_state())['products'], {'foo': mock.ANY})
(await self.advance(1000))
self.assertEqual(product.task_state, Product.TaskState.DEAD)
self.assertEqual(self.manager.products, {})
self.assertEqual((await self.get_zk_state())['products'], {})
|
Product must be cleaned up if it dies on its own
|
katsdpcontroller/test/test_master_controller.py
|
test_spontaneous_death
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_spontaneous_death(self) -> None:
(await self.start_manager())
product = (await self.start_product(lifecycle=spontaneous_death_lifecycle))
self.assertEqual((await self.get_zk_state())['products'], {'foo': mock.ANY})
(await self.advance(1000))
self.assertEqual(product.task_state, Product.TaskState.DEAD)
self.assertEqual(self.manager.products, {})
self.assertEqual((await self.get_zk_state())['products'], {})
|
async def test_spontaneous_death(self) -> None:
(await self.start_manager())
product = (await self.start_product(lifecycle=spontaneous_death_lifecycle))
self.assertEqual((await self.get_zk_state())['products'], {'foo': mock.ANY})
(await self.advance(1000))
self.assertEqual(product.task_state, Product.TaskState.DEAD)
self.assertEqual(self.manager.products, {})
self.assertEqual((await self.get_zk_state())['products'], {})<|docstring|>Product must be cleaned up if it dies on its own<|endoftext|>
|
1b7ff8b05e6b58a1b62c35dffbf676eb183c0a29b0b3bc28cb3a10f045a76390
|
async def test_stuck_pending(self) -> None:
'Task takes a long time to be launched.\n\n The configure gets cancelled before then, and reconciliation must\n clean up the task.\n '
(await self.start_manager())
self.singularity_server.lifecycles.append(long_pending_lifecycle)
task = self.loop.create_task(self.manager.create_product('foo', {}))
(await self.advance(500))
self.assertFalse(task.done())
task.cancel()
(await self.advance(1000))
self.assertTrue(task.done())
with self.assertRaises(asyncio.CancelledError):
(await task)
|
Task takes a long time to be launched.
The configure gets cancelled before then, and reconciliation must
clean up the task.
|
katsdpcontroller/test/test_master_controller.py
|
test_stuck_pending
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_stuck_pending(self) -> None:
'Task takes a long time to be launched.\n\n The configure gets cancelled before then, and reconciliation must\n clean up the task.\n '
(await self.start_manager())
self.singularity_server.lifecycles.append(long_pending_lifecycle)
task = self.loop.create_task(self.manager.create_product('foo', {}))
(await self.advance(500))
self.assertFalse(task.done())
task.cancel()
(await self.advance(1000))
self.assertTrue(task.done())
with self.assertRaises(asyncio.CancelledError):
(await task)
|
async def test_stuck_pending(self) -> None:
'Task takes a long time to be launched.\n\n The configure gets cancelled before then, and reconciliation must\n clean up the task.\n '
(await self.start_manager())
self.singularity_server.lifecycles.append(long_pending_lifecycle)
task = self.loop.create_task(self.manager.create_product('foo', {}))
(await self.advance(500))
self.assertFalse(task.done())
task.cancel()
(await self.advance(1000))
self.assertTrue(task.done())
with self.assertRaises(asyncio.CancelledError):
(await task)<|docstring|>Task takes a long time to be launched.
The configure gets cancelled before then, and reconciliation must
clean up the task.<|endoftext|>
|
4f3d97bee04c5ffd7baf04fedb1faa5d99344bd6bc051ea5e4a847fe71a19a8c
|
async def _test_bad_zk(self, payload: bytes) -> None:
'Existing state data in Zookeeper is not valid'
(await self.manager._zk.create('/state', payload))
with self.assertLogs(master_controller.logger, logging.WARNING) as cm:
(await self.start_manager())
self.assertRegex(cm.output[0], '.*:Could not load existing state')
|
Existing state data in Zookeeper is not valid
|
katsdpcontroller/test/test_master_controller.py
|
_test_bad_zk
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def _test_bad_zk(self, payload: bytes) -> None:
(await self.manager._zk.create('/state', payload))
with self.assertLogs(master_controller.logger, logging.WARNING) as cm:
(await self.start_manager())
self.assertRegex(cm.output[0], '.*:Could not load existing state')
|
async def _test_bad_zk(self, payload: bytes) -> None:
(await self.manager._zk.create('/state', payload))
with self.assertLogs(master_controller.logger, logging.WARNING) as cm:
(await self.start_manager())
self.assertRegex(cm.output[0], '.*:Could not load existing state')<|docstring|>Existing state data in Zookeeper is not valid<|endoftext|>
|
4ff3ec7e8bb9113a12d7c72072ad356d2f38a9d9da8f02ef2ac254f880d0e5d1
|
async def test_bad_zk_version(self) -> None:
'Wrong version in state stored in Zookeeper'
(await self._test_bad_zk(json.dumps({'version': 200000}).encode()))
|
Wrong version in state stored in Zookeeper
|
katsdpcontroller/test/test_master_controller.py
|
test_bad_zk_version
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_bad_zk_version(self) -> None:
(await self._test_bad_zk(json.dumps({'version': 200000}).encode()))
|
async def test_bad_zk_version(self) -> None:
(await self._test_bad_zk(json.dumps({'version': 200000}).encode()))<|docstring|>Wrong version in state stored in Zookeeper<|endoftext|>
|
3edc455a53debd14f59ec014745b2682843bab28a024b69f37c5d8f81425ad3a
|
async def test_bad_zk_json(self) -> None:
'Data in Zookeeper is not valid JSON'
(await self._test_bad_zk(b'I am not JSON'))
|
Data in Zookeeper is not valid JSON
|
katsdpcontroller/test/test_master_controller.py
|
test_bad_zk_json
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_bad_zk_json(self) -> None:
(await self._test_bad_zk(b'I am not JSON'))
|
async def test_bad_zk_json(self) -> None:
(await self._test_bad_zk(b'I am not JSON'))<|docstring|>Data in Zookeeper is not valid JSON<|endoftext|>
|
81fe471e1afeda4fe7d17efc9a32d5f4ba46b5f945e52417638e3ed62fcec3e3
|
async def test_bad_zk_utf8(self) -> None:
'Data in Zookeeper is not valid UTF-8'
(await self._test_bad_zk(b'\xff'))
|
Data in Zookeeper is not valid UTF-8
|
katsdpcontroller/test/test_master_controller.py
|
test_bad_zk_utf8
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_bad_zk_utf8(self) -> None:
(await self._test_bad_zk(b'\xff'))
|
async def test_bad_zk_utf8(self) -> None:
(await self._test_bad_zk(b'\xff'))<|docstring|>Data in Zookeeper is not valid UTF-8<|endoftext|>
|
01f563b749621477d128e7fe1f55f0ffa0381d8355f0d5e22dd3546d6f39504c
|
async def test_bad_zk_schema(self) -> None:
'Data in Zookeeper does not conform to schema'
(await self._test_bad_zk(json.dumps({'version': 1}).encode()))
|
Data in Zookeeper does not conform to schema
|
katsdpcontroller/test/test_master_controller.py
|
test_bad_zk_schema
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_bad_zk_schema(self) -> None:
(await self._test_bad_zk(json.dumps({'version': 1}).encode()))
|
async def test_bad_zk_schema(self) -> None:
(await self._test_bad_zk(json.dumps({'version': 1}).encode()))<|docstring|>Data in Zookeeper does not conform to schema<|endoftext|>
|
63fc1957b82e317f8a58a9a7d02c9afd32555f92c5d06968a45726230573c219
|
async def test_product_configure_generate_names(self) -> None:
'Name with trailing * must generate lowest-numbered name'
async def product_configure():
return (await self.client.request('product-configure', 'prefix_*', CONFIG))[0][0]
self.assertEqual(b'prefix_0', (await product_configure()))
self.assertEqual(b'prefix_1', (await product_configure()))
(await self.client.request('product-deconfigure', 'prefix_0'))
(await self.advance(1))
self.assertEqual(b'prefix_0', (await product_configure()))
|
Name with trailing * must generate lowest-numbered name
|
katsdpcontroller/test/test_master_controller.py
|
test_product_configure_generate_names
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_configure_generate_names(self) -> None:
async def product_configure():
return (await self.client.request('product-configure', 'prefix_*', CONFIG))[0][0]
self.assertEqual(b'prefix_0', (await product_configure()))
self.assertEqual(b'prefix_1', (await product_configure()))
(await self.client.request('product-deconfigure', 'prefix_0'))
(await self.advance(1))
self.assertEqual(b'prefix_0', (await product_configure()))
|
async def test_product_configure_generate_names(self) -> None:
async def product_configure():
return (await self.client.request('product-configure', 'prefix_*', CONFIG))[0][0]
self.assertEqual(b'prefix_0', (await product_configure()))
self.assertEqual(b'prefix_1', (await product_configure()))
(await self.client.request('product-deconfigure', 'prefix_0'))
(await self.advance(1))
self.assertEqual(b'prefix_0', (await product_configure()))<|docstring|>Name with trailing * must generate lowest-numbered name<|endoftext|>
|
ed43e261a83324a8e5ad5b84a090df6bf5a27d85875da3e8d0a52bee66af5603
|
async def test_product_deconfigure_while_configuring_force(self) -> None:
'Forced product-deconfigure must succeed while in product-configure'
async with self._product_configure_slow('product', cancelled=True):
(await self.client.request('product-deconfigure', 'product', True))
self.assertEqual({}, self.server._manager.products)
|
Forced product-deconfigure must succeed while in product-configure
|
katsdpcontroller/test/test_master_controller.py
|
test_product_deconfigure_while_configuring_force
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_deconfigure_while_configuring_force(self) -> None:
async with self._product_configure_slow('product', cancelled=True):
(await self.client.request('product-deconfigure', 'product', True))
self.assertEqual({}, self.server._manager.products)
|
async def test_product_deconfigure_while_configuring_force(self) -> None:
async with self._product_configure_slow('product', cancelled=True):
(await self.client.request('product-deconfigure', 'product', True))
self.assertEqual({}, self.server._manager.products)<|docstring|>Forced product-deconfigure must succeed while in product-configure<|endoftext|>
|
1d767695be7ebee2b6d410184e3c9195b5bce8630aec42d2dddde85dca5958c8
|
async def test_product_deconfigure_capturing_force(self) -> None:
'forced product-deconfigure must succeed while capturing'
(await self.client.request('product-configure', 'product', CONFIG))
(await self.client.request('capture-init', 'product'))
(await self.client.request('product-deconfigure', 'product', True))
|
forced product-deconfigure must succeed while capturing
|
katsdpcontroller/test/test_master_controller.py
|
test_product_deconfigure_capturing_force
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_deconfigure_capturing_force(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(await self.client.request('capture-init', 'product'))
(await self.client.request('product-deconfigure', 'product', True))
|
async def test_product_deconfigure_capturing_force(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(await self.client.request('capture-init', 'product'))
(await self.client.request('product-deconfigure', 'product', True))<|docstring|>forced product-deconfigure must succeed while capturing<|endoftext|>
|
3f2d8a5d6c9414307255b94626f31be730e54073cbfcc79a37b07df5a8d3a3c8
|
async def test_product_reconfigure_override(self) -> None:
'?product-reconfigure must pick up config overrides'
(await self.client.request('product-configure', 'product', CONFIG))
(await self.client.request('set-config-override', 'product', '{"config": {"develop": true}}'))
(await self.client.request('product-reconfigure', 'product'))
config = self.server._manager.products['product'].config
self.assertEqual(config['config'].get('develop'), True)
|
?product-reconfigure must pick up config overrides
|
katsdpcontroller/test/test_master_controller.py
|
test_product_reconfigure_override
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_reconfigure_override(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(await self.client.request('set-config-override', 'product', '{"config": {"develop": true}}'))
(await self.client.request('product-reconfigure', 'product'))
config = self.server._manager.products['product'].config
self.assertEqual(config['config'].get('develop'), True)
|
async def test_product_reconfigure_override(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(await self.client.request('set-config-override', 'product', '{"config": {"develop": true}}'))
(await self.client.request('product-reconfigure', 'product'))
config = self.server._manager.products['product'].config
self.assertEqual(config['config'].get('develop'), True)<|docstring|>?product-reconfigure must pick up config overrides<|endoftext|>
|
74dc471388c17eb2ee2803b9bdcf3532f8b6d3718582437f7e9bd9a0a0e779f4
|
async def test_product_reconfigure_configure_busy(self) -> None:
'Can run product-reconfigure concurrently with another product-configure'
(await self.client.request('product-configure', 'product1', CONFIG))
async with self._product_configure_slow('product2'):
(await self.client.request('product-reconfigure', 'product1'))
|
Can run product-reconfigure concurrently with another product-configure
|
katsdpcontroller/test/test_master_controller.py
|
test_product_reconfigure_configure_busy
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_reconfigure_configure_busy(self) -> None:
(await self.client.request('product-configure', 'product1', CONFIG))
async with self._product_configure_slow('product2'):
(await self.client.request('product-reconfigure', 'product1'))
|
async def test_product_reconfigure_configure_busy(self) -> None:
(await self.client.request('product-configure', 'product1', CONFIG))
async with self._product_configure_slow('product2'):
(await self.client.request('product-reconfigure', 'product1'))<|docstring|>Can run product-reconfigure concurrently with another product-configure<|endoftext|>
|
51627241b676bdf6336ee0ff0aaba0a94eae4ff99cbe3d168d5604fb5cdb28c4
|
async def test_product_reconfigure_configure_fails(self) -> None:
'Tests product-reconfigure when the new graph fails'
async def request(self, name: str, *args: Any) -> Tuple[(List[bytes], List[aiokatcp.Message])]:
if (name == 'product-configure'):
raise aiokatcp.FailReply('Fault injected into product-configure')
else:
return (await orig_request(self, name, *args))
(await self.client.request('product-configure', 'product', CONFIG))
orig_request = aiokatcp.Client.request
with mock.patch.object(aiokatcp.Client, 'request', new=request):
with self.assertRaises(aiokatcp.FailReply):
(await self.client.request('product-reconfigure', 'product'))
self.assertEqual({}, self.server._manager.products)
|
Tests product-reconfigure when the new graph fails
|
katsdpcontroller/test/test_master_controller.py
|
test_product_reconfigure_configure_fails
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_reconfigure_configure_fails(self) -> None:
async def request(self, name: str, *args: Any) -> Tuple[(List[bytes], List[aiokatcp.Message])]:
if (name == 'product-configure'):
raise aiokatcp.FailReply('Fault injected into product-configure')
else:
return (await orig_request(self, name, *args))
(await self.client.request('product-configure', 'product', CONFIG))
orig_request = aiokatcp.Client.request
with mock.patch.object(aiokatcp.Client, 'request', new=request):
with self.assertRaises(aiokatcp.FailReply):
(await self.client.request('product-reconfigure', 'product'))
self.assertEqual({}, self.server._manager.products)
|
async def test_product_reconfigure_configure_fails(self) -> None:
async def request(self, name: str, *args: Any) -> Tuple[(List[bytes], List[aiokatcp.Message])]:
if (name == 'product-configure'):
raise aiokatcp.FailReply('Fault injected into product-configure')
else:
return (await orig_request(self, name, *args))
(await self.client.request('product-configure', 'product', CONFIG))
orig_request = aiokatcp.Client.request
with mock.patch.object(aiokatcp.Client, 'request', new=request):
with self.assertRaises(aiokatcp.FailReply):
(await self.client.request('product-reconfigure', 'product'))
self.assertEqual({}, self.server._manager.products)<|docstring|>Tests product-reconfigure when the new graph fails<|endoftext|>
|
f72c0ac750005917a890eec3ff509273d7bf090672a1b72410de496f27afd89b
|
async def test_telstate_endpoint_all(self) -> None:
'Test telstate-endpoint without a subarray_product_id argument'
(await self.client.request('product-configure', 'product1', CONFIG))
(await self.client.request('product-configure', 'product2', CONFIG))
(reply, informs) = (await self.client.request('telstate-endpoint'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b''), (b'product2', b'')], inform_args)
|
Test telstate-endpoint without a subarray_product_id argument
|
katsdpcontroller/test/test_master_controller.py
|
test_telstate_endpoint_all
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_telstate_endpoint_all(self) -> None:
(await self.client.request('product-configure', 'product1', CONFIG))
(await self.client.request('product-configure', 'product2', CONFIG))
(reply, informs) = (await self.client.request('telstate-endpoint'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b), (b'product2', b)], inform_args)
|
async def test_telstate_endpoint_all(self) -> None:
(await self.client.request('product-configure', 'product1', CONFIG))
(await self.client.request('product-configure', 'product2', CONFIG))
(reply, informs) = (await self.client.request('telstate-endpoint'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b), (b'product2', b)], inform_args)<|docstring|>Test telstate-endpoint without a subarray_product_id argument<|endoftext|>
|
c5dc46d3e40134ee28409244529536306ae876505e395cc812afe14cbaffac28
|
async def test_telstate_endpoint_one(self) -> None:
'Test telstate-endpoint with a subarray_product_id argument'
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('telstate-endpoint', 'product'))
self.assertEqual(reply, [b''])
|
Test telstate-endpoint with a subarray_product_id argument
|
katsdpcontroller/test/test_master_controller.py
|
test_telstate_endpoint_one
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_telstate_endpoint_one(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('telstate-endpoint', 'product'))
self.assertEqual(reply, [b])
|
async def test_telstate_endpoint_one(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('telstate-endpoint', 'product'))
self.assertEqual(reply, [b])<|docstring|>Test telstate-endpoint with a subarray_product_id argument<|endoftext|>
|
d2fc40cff0998cd5e8f1bf4a5c02a9386a21a925b916d15103b30aff57c0c9a4
|
async def test_telstate_endpoint_not_found(self) -> None:
'Test telstate-endpoint with a subarray_product_id that does not exist'
(await assert_request_fails(self.client, 'telstate-endpoint', 'product'))
|
Test telstate-endpoint with a subarray_product_id that does not exist
|
katsdpcontroller/test/test_master_controller.py
|
test_telstate_endpoint_not_found
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_telstate_endpoint_not_found(self) -> None:
(await assert_request_fails(self.client, 'telstate-endpoint', 'product'))
|
async def test_telstate_endpoint_not_found(self) -> None:
(await assert_request_fails(self.client, 'telstate-endpoint', 'product'))<|docstring|>Test telstate-endpoint with a subarray_product_id that does not exist<|endoftext|>
|
02b660af2dff17979bd5684a46f3ac3cc6cde9605cf1f10a84d2b49daaed5914
|
async def test_capture_status_all(self) -> None:
'Test capture-status without a subarray_product_id argument'
(await self.client.request('product-configure', 'product1', CONFIG))
(await self.client.request('product-configure', 'product2', CONFIG))
(await self.client.request('capture-init', 'product2'))
(reply, informs) = (await self.client.request('capture-status'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b'idle'), (b'product2', b'capturing')], inform_args)
|
Test capture-status without a subarray_product_id argument
|
katsdpcontroller/test/test_master_controller.py
|
test_capture_status_all
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_capture_status_all(self) -> None:
(await self.client.request('product-configure', 'product1', CONFIG))
(await self.client.request('product-configure', 'product2', CONFIG))
(await self.client.request('capture-init', 'product2'))
(reply, informs) = (await self.client.request('capture-status'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b'idle'), (b'product2', b'capturing')], inform_args)
|
async def test_capture_status_all(self) -> None:
(await self.client.request('product-configure', 'product1', CONFIG))
(await self.client.request('product-configure', 'product2', CONFIG))
(await self.client.request('capture-init', 'product2'))
(reply, informs) = (await self.client.request('capture-status'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b'idle'), (b'product2', b'capturing')], inform_args)<|docstring|>Test capture-status without a subarray_product_id argument<|endoftext|>
|
56a7184d497d596c8ec7487442ff6e1296782f4ef2b5070b4685b13d93930011
|
async def test_capture_status_one(self) -> None:
'Test capture-status with a subarray_product_id argument'
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'idle'])
self.assertEqual([], informs)
(await self.client.request('capture-init', 'product'))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'capturing'])
(await self.client.request('capture-done', 'product'))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'idle'])
|
Test capture-status with a subarray_product_id argument
|
katsdpcontroller/test/test_master_controller.py
|
test_capture_status_one
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_capture_status_one(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'idle'])
self.assertEqual([], informs)
(await self.client.request('capture-init', 'product'))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'capturing'])
(await self.client.request('capture-done', 'product'))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'idle'])
|
async def test_capture_status_one(self) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'idle'])
self.assertEqual([], informs)
(await self.client.request('capture-init', 'product'))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'capturing'])
(await self.client.request('capture-done', 'product'))
(reply, informs) = (await self.client.request('capture-status', 'product'))
self.assertEqual(reply, [b'idle'])<|docstring|>Test capture-status with a subarray_product_id argument<|endoftext|>
|
dbfcea1489093a0a2849cec2c62dc30686e9485fa675d6c50542561c5fb8aaff
|
async def test_capture_status_not_found(self) -> None:
'Test capture-status with a subarray_product_id that does not exist'
(await assert_request_fails(self.client, 'capture-status', 'product'))
|
Test capture-status with a subarray_product_id that does not exist
|
katsdpcontroller/test/test_master_controller.py
|
test_capture_status_not_found
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_capture_status_not_found(self) -> None:
(await assert_request_fails(self.client, 'capture-status', 'product'))
|
async def test_capture_status_not_found(self) -> None:
(await assert_request_fails(self.client, 'capture-status', 'product'))<|docstring|>Test capture-status with a subarray_product_id that does not exist<|endoftext|>
|
b60a5732be8ee37ec713e7dc1c4f8e8fba9c4b2a4a296657a5ed60a751bd131d
|
@asynctest.patch('time.time')
async def test_product_list_all(self, time_mock) -> None:
'Test product-list without a subarray_product_id argument'
time_mock.return_value = 1122334455.123
(await self.client.request('product-configure', 'product1', CONFIG))
time_mock.return_value = 1234567890.987
(await self.client.request('product-configure', 'product2', CONFIG))
(reply, informs) = (await self.client.request('product-list'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b'idle, started at 2005-07-25T23:34:15Z'), (b'product2', b'idle, started at 2009-02-13T23:31:30Z')], inform_args)
|
Test product-list without a subarray_product_id argument
|
katsdpcontroller/test/test_master_controller.py
|
test_product_list_all
|
ska-sa/katsdpcontroller
| 0
|
python
|
@asynctest.patch('time.time')
async def test_product_list_all(self, time_mock) -> None:
time_mock.return_value = 1122334455.123
(await self.client.request('product-configure', 'product1', CONFIG))
time_mock.return_value = 1234567890.987
(await self.client.request('product-configure', 'product2', CONFIG))
(reply, informs) = (await self.client.request('product-list'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b'idle, started at 2005-07-25T23:34:15Z'), (b'product2', b'idle, started at 2009-02-13T23:31:30Z')], inform_args)
|
@asynctest.patch('time.time')
async def test_product_list_all(self, time_mock) -> None:
time_mock.return_value = 1122334455.123
(await self.client.request('product-configure', 'product1', CONFIG))
time_mock.return_value = 1234567890.987
(await self.client.request('product-configure', 'product2', CONFIG))
(reply, informs) = (await self.client.request('product-list'))
self.assertEqual(reply, [b'2'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product1', b'idle, started at 2005-07-25T23:34:15Z'), (b'product2', b'idle, started at 2009-02-13T23:31:30Z')], inform_args)<|docstring|>Test product-list without a subarray_product_id argument<|endoftext|>
|
69d4c794e87e5da726643be3265d52753e2cdb73c8367132e9444a47015324a2
|
@asynctest.patch('time.time', return_value=1122334455.123)
async def test_product_list_one(self, time_mock) -> None:
'Test product-list with a subarray_product_id argument'
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('product-list', 'product'))
self.assertEqual(reply, [b'1'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product', b'idle, started at 2005-07-25T23:34:15Z')], inform_args)
|
Test product-list with a subarray_product_id argument
|
katsdpcontroller/test/test_master_controller.py
|
test_product_list_one
|
ska-sa/katsdpcontroller
| 0
|
python
|
@asynctest.patch('time.time', return_value=1122334455.123)
async def test_product_list_one(self, time_mock) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('product-list', 'product'))
self.assertEqual(reply, [b'1'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product', b'idle, started at 2005-07-25T23:34:15Z')], inform_args)
|
@asynctest.patch('time.time', return_value=1122334455.123)
async def test_product_list_one(self, time_mock) -> None:
(await self.client.request('product-configure', 'product', CONFIG))
(reply, informs) = (await self.client.request('product-list', 'product'))
self.assertEqual(reply, [b'1'])
inform_args = [tuple(msg.arguments) for msg in informs]
self.assertEqual([(b'product', b'idle, started at 2005-07-25T23:34:15Z')], inform_args)<|docstring|>Test product-list with a subarray_product_id argument<|endoftext|>
|
c9719026f2961b1ed3aa58e90f681e0dd82fbef165ab06bf1b9792a1f9f69955
|
async def test_product_list_not_found(self) -> None:
'Test product-list with a subarray_product_id that does not exist'
(await assert_request_fails(self.client, 'product-list', 'product'))
|
Test product-list with a subarray_product_id that does not exist
|
katsdpcontroller/test/test_master_controller.py
|
test_product_list_not_found
|
ska-sa/katsdpcontroller
| 0
|
python
|
async def test_product_list_not_found(self) -> None:
(await assert_request_fails(self.client, 'product-list', 'product'))
|
async def test_product_list_not_found(self) -> None:
(await assert_request_fails(self.client, 'product-list', 'product'))<|docstring|>Test product-list with a subarray_product_id that does not exist<|endoftext|>
|
d9251554569cb8623936fc4df0a9401c4e1d331509e461abfbe86d65d256b971
|
def fit(self, train_set, val_set=None):
'Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n '
Recommender.fit(self, train_set, val_set)
if (train_set.item_image is None):
raise CornacException('item_image modality is required but None.')
train_features = train_set.item_image.features[:self.train_set.total_items]
train_features = train_features.astype(np.float32)
self._init(n_users=train_set.total_users, n_items=train_set.total_items, features=train_features)
if self.trainable:
self._fit_torch(train_features)
return self
|
Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object
|
cornac/models/amr/recom_amr.py
|
fit
|
xurong-liang/cornac
| 597
|
python
|
def fit(self, train_set, val_set=None):
'Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n '
Recommender.fit(self, train_set, val_set)
if (train_set.item_image is None):
raise CornacException('item_image modality is required but None.')
train_features = train_set.item_image.features[:self.train_set.total_items]
train_features = train_features.astype(np.float32)
self._init(n_users=train_set.total_users, n_items=train_set.total_items, features=train_features)
if self.trainable:
self._fit_torch(train_features)
return self
|
def fit(self, train_set, val_set=None):
'Fit the model to observations.\n\n Parameters\n ----------\n train_set: :obj:`cornac.data.Dataset`, required\n User-Item preference data as well as additional modalities.\n\n val_set: :obj:`cornac.data.Dataset`, optional, default: None\n User-Item preference data for model selection purposes (e.g., early stopping).\n\n Returns\n -------\n self : object\n '
Recommender.fit(self, train_set, val_set)
if (train_set.item_image is None):
raise CornacException('item_image modality is required but None.')
train_features = train_set.item_image.features[:self.train_set.total_items]
train_features = train_features.astype(np.float32)
self._init(n_users=train_set.total_users, n_items=train_set.total_items, features=train_features)
if self.trainable:
self._fit_torch(train_features)
return self<|docstring|>Fit the model to observations.
Parameters
----------
train_set: :obj:`cornac.data.Dataset`, required
User-Item preference data as well as additional modalities.
val_set: :obj:`cornac.data.Dataset`, optional, default: None
User-Item preference data for model selection purposes (e.g., early stopping).
Returns
-------
self : object<|endoftext|>
|
80fc16dd83d57d4cf062d6b5672f777ec772d7259ea3d95e8e2f7fe5f431e783
|
def score(self, user_idx, item_idx=None):
'Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for which to perform score prediction.\n If None, scores for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n\n '
if (item_idx is None):
known_item_scores = np.zeros(self.gamma_item.shape[0], dtype=np.float32)
fast_dot(self.gamma_user[user_idx], self.gamma_item, known_item_scores)
fast_dot(self.gamma_user[user_idx], self.theta_item, known_item_scores)
return known_item_scores
else:
item_score = np.dot(self.gamma_item[item_idx], self.gamma_user[user_idx])
item_score += np.dot(self.theta_item[item_idx], self.gamma_user[user_idx])
return item_score
|
Predict the scores/ratings of a user for an item.
Parameters
----------
user_idx: int, required
The index of the user for whom to perform score prediction.
item_idx: int, optional, default: None
The index of the item for which to perform score prediction.
If None, scores for all known items will be returned.
Returns
-------
res : A scalar or a Numpy array
Relative scores that the user gives to the item or to all known items
|
cornac/models/amr/recom_amr.py
|
score
|
xurong-liang/cornac
| 597
|
python
|
def score(self, user_idx, item_idx=None):
'Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for which to perform score prediction.\n If None, scores for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n\n '
if (item_idx is None):
known_item_scores = np.zeros(self.gamma_item.shape[0], dtype=np.float32)
fast_dot(self.gamma_user[user_idx], self.gamma_item, known_item_scores)
fast_dot(self.gamma_user[user_idx], self.theta_item, known_item_scores)
return known_item_scores
else:
item_score = np.dot(self.gamma_item[item_idx], self.gamma_user[user_idx])
item_score += np.dot(self.theta_item[item_idx], self.gamma_user[user_idx])
return item_score
|
def score(self, user_idx, item_idx=None):
'Predict the scores/ratings of a user for an item.\n\n Parameters\n ----------\n user_idx: int, required\n The index of the user for whom to perform score prediction.\n\n item_idx: int, optional, default: None\n The index of the item for which to perform score prediction.\n If None, scores for all known items will be returned.\n\n Returns\n -------\n res : A scalar or a Numpy array\n Relative scores that the user gives to the item or to all known items\n\n '
if (item_idx is None):
known_item_scores = np.zeros(self.gamma_item.shape[0], dtype=np.float32)
fast_dot(self.gamma_user[user_idx], self.gamma_item, known_item_scores)
fast_dot(self.gamma_user[user_idx], self.theta_item, known_item_scores)
return known_item_scores
else:
item_score = np.dot(self.gamma_item[item_idx], self.gamma_user[user_idx])
item_score += np.dot(self.theta_item[item_idx], self.gamma_user[user_idx])
return item_score<|docstring|>Predict the scores/ratings of a user for an item.
Parameters
----------
user_idx: int, required
The index of the user for whom to perform score prediction.
item_idx: int, optional, default: None
The index of the item for which to perform score prediction.
If None, scores for all known items will be returned.
Returns
-------
res : A scalar or a Numpy array
Relative scores that the user gives to the item or to all known items<|endoftext|>
|
ef4edda9db365ce4ad65490c346e7cfa07752e52ef33b68d05a9138248cbb095
|
def __init__(self, data):
'Initialize this node with the given data.'
self.data = data
self.next = None
|
Initialize this node with the given data.
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
__init__
|
KitsuneNoctus/makeschool
| 1
|
python
|
def __init__(self, data):
self.data = data
self.next = None
|
def __init__(self, data):
self.data = data
self.next = None<|docstring|>Initialize this node with the given data.<|endoftext|>
|
9fd8f9b02d11b82cf10ae5c1ba4b14f4bfa5e858bd8e1a351ad27b3cdfdc1b89
|
def __repr__(self):
'Return a string representation of this node.'
return f'Node({self.data})'
|
Return a string representation of this node.
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
__repr__
|
KitsuneNoctus/makeschool
| 1
|
python
|
def __repr__(self):
return f'Node({self.data})'
|
def __repr__(self):
return f'Node({self.data})'<|docstring|>Return a string representation of this node.<|endoftext|>
|
1adc0c9e68bca8f5024527ebd9b8fb8626a08d69b96bc53fb69bc7faca09debf
|
def __init__(self, items=None):
'Initialize this linked list and append the given items, if any.'
self.head = None
self.tail = None
if (items is not None):
for item in items:
self.append(item)
|
Initialize this linked list and append the given items, if any.
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
__init__
|
KitsuneNoctus/makeschool
| 1
|
python
|
def __init__(self, items=None):
self.head = None
self.tail = None
if (items is not None):
for item in items:
self.append(item)
|
def __init__(self, items=None):
self.head = None
self.tail = None
if (items is not None):
for item in items:
self.append(item)<|docstring|>Initialize this linked list and append the given items, if any.<|endoftext|>
|
0191820df5f6520b55fe7d23497faf7cf6cf2fd71c931641d37ad06827dffc0b
|
def __repr__(self):
'Return a string representation of this linked list.'
ll_str = ''
for item in self.items():
ll_str += f'({item}) -> '
return ll_str
|
Return a string representation of this linked list.
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
__repr__
|
KitsuneNoctus/makeschool
| 1
|
python
|
def __repr__(self):
ll_str =
for item in self.items():
ll_str += f'({item}) -> '
return ll_str
|
def __repr__(self):
ll_str =
for item in self.items():
ll_str += f'({item}) -> '
return ll_str<|docstring|>Return a string representation of this linked list.<|endoftext|>
|
e2515afbfc35c07a35eb27c9f1c8870aa14b06a908c82209bab23b5eb42547ab
|
def items(self):
'Return a list (dynamic array) of all items in this linked list.\n Best and worst case running time: O(n) for n items in the list (length)\n because we always need to loop through all n nodes to get each item.'
items = []
node = self.head
while (node is not None):
items.append(node.data)
node = node.next
return items
|
Return a list (dynamic array) of all items in this linked list.
Best and worst case running time: O(n) for n items in the list (length)
because we always need to loop through all n nodes to get each item.
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
items
|
KitsuneNoctus/makeschool
| 1
|
python
|
def items(self):
'Return a list (dynamic array) of all items in this linked list.\n Best and worst case running time: O(n) for n items in the list (length)\n because we always need to loop through all n nodes to get each item.'
items = []
node = self.head
while (node is not None):
items.append(node.data)
node = node.next
return items
|
def items(self):
'Return a list (dynamic array) of all items in this linked list.\n Best and worst case running time: O(n) for n items in the list (length)\n because we always need to loop through all n nodes to get each item.'
items = []
node = self.head
while (node is not None):
items.append(node.data)
node = node.next
return items<|docstring|>Return a list (dynamic array) of all items in this linked list.
Best and worst case running time: O(n) for n items in the list (length)
because we always need to loop through all n nodes to get each item.<|endoftext|>
|
c93367f9b246b8fc15127ecce90e97279aac3bc223b8f9f0d2a014967eccab98
|
def is_empty(self):
'Return a boolean indicating whether this linked list is empty.'
return (self.head is None)
|
Return a boolean indicating whether this linked list is empty.
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
is_empty
|
KitsuneNoctus/makeschool
| 1
|
python
|
def is_empty(self):
return (self.head is None)
|
def is_empty(self):
return (self.head is None)<|docstring|>Return a boolean indicating whether this linked list is empty.<|endoftext|>
|
309412bfdabadb52ee370f15bf2901e3dff6807031155e97d169f6e2e95fc0e6
|
def length(self):
'Return the length of this linked list by traversing its nodes.\n TODO: Running time: O(n) Why and under what conditions?'
|
Return the length of this linked list by traversing its nodes.
TODO: Running time: O(n) Why and under what conditions?
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
length
|
KitsuneNoctus/makeschool
| 1
|
python
|
def length(self):
'Return the length of this linked list by traversing its nodes.\n TODO: Running time: O(n) Why and under what conditions?'
|
def length(self):
'Return the length of this linked list by traversing its nodes.\n TODO: Running time: O(n) Why and under what conditions?'<|docstring|>Return the length of this linked list by traversing its nodes.
TODO: Running time: O(n) Why and under what conditions?<|endoftext|>
|
9438f8480c76cd5e465e3510404b1cf41d4f90155e8f29dd36aea47200e5d6f2
|
def append(self, item):
'Insert the given item at the tail of this linked list.\n TODO: Running time: O(???) Why and under what conditions?'
|
Insert the given item at the tail of this linked list.
TODO: Running time: O(???) Why and under what conditions?
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
append
|
KitsuneNoctus/makeschool
| 1
|
python
|
def append(self, item):
'Insert the given item at the tail of this linked list.\n TODO: Running time: O(???) Why and under what conditions?'
|
def append(self, item):
'Insert the given item at the tail of this linked list.\n TODO: Running time: O(???) Why and under what conditions?'<|docstring|>Insert the given item at the tail of this linked list.
TODO: Running time: O(???) Why and under what conditions?<|endoftext|>
|
2b3329fb30683ebd20425c621daf0d572629318025ebbdcec7ceed22ca5f4648
|
def prepend(self, item):
'Insert the given item at the head of this linked list.\n TODO: Running time: O(???) Why and under what conditions?'
|
Insert the given item at the head of this linked list.
TODO: Running time: O(???) Why and under what conditions?
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
prepend
|
KitsuneNoctus/makeschool
| 1
|
python
|
def prepend(self, item):
'Insert the given item at the head of this linked list.\n TODO: Running time: O(???) Why and under what conditions?'
|
def prepend(self, item):
'Insert the given item at the head of this linked list.\n TODO: Running time: O(???) Why and under what conditions?'<|docstring|>Insert the given item at the head of this linked list.
TODO: Running time: O(???) Why and under what conditions?<|endoftext|>
|
fb2c112785b8c0c157a8863dd3550e27b9eda727ba2da1c4df45082e0afdb7c0
|
def find(self, item):
'Return an item from this linked list if it is present.\n TODO: Best case running time: O(???) Why and under what conditions?\n TODO: Worst case running time: O(???) Why and under what conditions?'
|
Return an item from this linked list if it is present.
TODO: Best case running time: O(???) Why and under what conditions?
TODO: Worst case running time: O(???) Why and under what conditions?
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
find
|
KitsuneNoctus/makeschool
| 1
|
python
|
def find(self, item):
'Return an item from this linked list if it is present.\n TODO: Best case running time: O(???) Why and under what conditions?\n TODO: Worst case running time: O(???) Why and under what conditions?'
|
def find(self, item):
'Return an item from this linked list if it is present.\n TODO: Best case running time: O(???) Why and under what conditions?\n TODO: Worst case running time: O(???) Why and under what conditions?'<|docstring|>Return an item from this linked list if it is present.
TODO: Best case running time: O(???) Why and under what conditions?
TODO: Worst case running time: O(???) Why and under what conditions?<|endoftext|>
|
de857c0472281761055bf73ed6324ecea59c8f8d823d35566aaa22dc4a50cd50
|
def delete(self, item):
'Delete the given item from this linked list, or raise ValueError.\n TODO: Best case running time: O(???) Why and under what conditions?\n TODO: Worst case running time: O(???) Why and under what conditions?'
|
Delete the given item from this linked list, or raise ValueError.
TODO: Best case running time: O(???) Why and under what conditions?
TODO: Worst case running time: O(???) Why and under what conditions?
|
site/public/courses/CS-1.2/Code/linkedlist.py
|
delete
|
KitsuneNoctus/makeschool
| 1
|
python
|
def delete(self, item):
'Delete the given item from this linked list, or raise ValueError.\n TODO: Best case running time: O(???) Why and under what conditions?\n TODO: Worst case running time: O(???) Why and under what conditions?'
|
def delete(self, item):
'Delete the given item from this linked list, or raise ValueError.\n TODO: Best case running time: O(???) Why and under what conditions?\n TODO: Worst case running time: O(???) Why and under what conditions?'<|docstring|>Delete the given item from this linked list, or raise ValueError.
TODO: Best case running time: O(???) Why and under what conditions?
TODO: Worst case running time: O(???) Why and under what conditions?<|endoftext|>
|
b3bfc4b2bbd91a6662b4551fcbbc2636ce4ac4d65cc67d38bcc69e6ef4764521
|
def getFrontendContent(self, **params):
'\n Deliver frontend content of module printers\n\n :return: data of monitors\n '
pass
|
Deliver frontend content of module printers
:return: data of monitors
|
emonitor/modules/printers/content_frontend.py
|
getFrontendContent
|
Durburz/eMonitor
| 21
|
python
|
def getFrontendContent(self, **params):
'\n Deliver frontend content of module printers\n\n :return: data of monitors\n '
pass
|
def getFrontendContent(self, **params):
'\n Deliver frontend content of module printers\n\n :return: data of monitors\n '
pass<|docstring|>Deliver frontend content of module printers
:return: data of monitors<|endoftext|>
|
f89964c48ad54950984b35ec28c798bd169e52bbfc08c4c00dbd7d1ce86e69cd
|
def getFrontendData(self):
'\n Deliver frontend content of module printers (ajax)\n\n :return: rendered template as string or json dict\n '
return ''
|
Deliver frontend content of module printers (ajax)
:return: rendered template as string or json dict
|
emonitor/modules/printers/content_frontend.py
|
getFrontendData
|
Durburz/eMonitor
| 21
|
python
|
def getFrontendData(self):
'\n Deliver frontend content of module printers (ajax)\n\n :return: rendered template as string or json dict\n '
return
|
def getFrontendData(self):
'\n Deliver frontend content of module printers (ajax)\n\n :return: rendered template as string or json dict\n '
return <|docstring|>Deliver frontend content of module printers (ajax)
:return: rendered template as string or json dict<|endoftext|>
|
99329d7988f6e00228718a1e317bf2fa4e81280737c4890ce595efd0e29d54da
|
def insert_method(modulename, code, fn, lineno):
'\n Add code of a module should be added. The methods\n will be simply added, no checking will be done\n '
comp = better_compile(code, modulename, fn, lineno=lineno)
better_exec(comp, None, code, fn)
|
Add code of a module should be added. The methods
will be simply added, no checking will be done
|
poky/bitbake/lib/bb/methodpool.py
|
insert_method
|
Eyerunmyden/HWMgmt-MegaRAC-OpenEdition
| 14
|
python
|
def insert_method(modulename, code, fn, lineno):
'\n Add code of a module should be added. The methods\n will be simply added, no checking will be done\n '
comp = better_compile(code, modulename, fn, lineno=lineno)
better_exec(comp, None, code, fn)
|
def insert_method(modulename, code, fn, lineno):
'\n Add code of a module should be added. The methods\n will be simply added, no checking will be done\n '
comp = better_compile(code, modulename, fn, lineno=lineno)
better_exec(comp, None, code, fn)<|docstring|>Add code of a module should be added. The methods
will be simply added, no checking will be done<|endoftext|>
|
08ae9152b282c6ac5e16af413b5e3f570be6385dbc715ef4392b592490752c6d
|
def record_dataset2(data_path):
'Returns an input pipeline Dataset from `filenames`.'
data_files = tf.gfile.Glob(data_path)
file_queue = tf.train.string_input_producer(data_files, shuffle=True)
record_bytes = (((_HEIGHT * _WIDTH) * _DEPTH) + 1)
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
(_, value) = reader.read(file_queue)
return value
|
Returns an input pipeline Dataset from `filenames`.
|
resnet/cifar100_main_test.py
|
record_dataset2
|
IvanWeiZ/CSC2515
| 0
|
python
|
def record_dataset2(data_path):
data_files = tf.gfile.Glob(data_path)
file_queue = tf.train.string_input_producer(data_files, shuffle=True)
record_bytes = (((_HEIGHT * _WIDTH) * _DEPTH) + 1)
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
(_, value) = reader.read(file_queue)
return value
|
def record_dataset2(data_path):
data_files = tf.gfile.Glob(data_path)
file_queue = tf.train.string_input_producer(data_files, shuffle=True)
record_bytes = (((_HEIGHT * _WIDTH) * _DEPTH) + 1)
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
(_, value) = reader.read(file_queue)
return value<|docstring|>Returns an input pipeline Dataset from `filenames`.<|endoftext|>
|
ec7d915e15a9bb501f615593d5623e33a9d1a519ca5559870521794892240fb5
|
def record_dataset(data_path):
'Returns an input pipeline Dataset from `filenames`.'
record_bytes = (((_HEIGHT * _WIDTH) * _DEPTH) + 1)
return tf.data.FixedLengthRecordDataset(data_path, record_bytes)
|
Returns an input pipeline Dataset from `filenames`.
|
resnet/cifar100_main_test.py
|
record_dataset
|
IvanWeiZ/CSC2515
| 0
|
python
|
def record_dataset(data_path):
record_bytes = (((_HEIGHT * _WIDTH) * _DEPTH) + 1)
return tf.data.FixedLengthRecordDataset(data_path, record_bytes)
|
def record_dataset(data_path):
record_bytes = (((_HEIGHT * _WIDTH) * _DEPTH) + 1)
return tf.data.FixedLengthRecordDataset(data_path, record_bytes)<|docstring|>Returns an input pipeline Dataset from `filenames`.<|endoftext|>
|
b9d37f1c626a43c8b41e269b14b85afa26ba53bb48d393f4963490ba548059f8
|
def get_filenames(is_training, data_dir):
    """Returns a list of filenames."""
    data_dir = os.path.join(data_dir, 'cifar-100-binary')
    assert os.path.exists(data_dir), 'Run cifar100_download_and_extract.py first to download and extract the CIFAR-100 data.'
    # Training reads the single combined train file; evaluation reads test.
    split = 'train.bin' if is_training else 'test.bin'
    return [os.path.join(data_dir, split)]
|
Returns a list of filenames.
|
resnet/cifar100_main_test.py
|
get_filenames
|
IvanWeiZ/CSC2515
| 0
|
python
|
def get_filenames(is_training, data_dir):
data_dir = os.path.join(data_dir, 'cifar-100-binary')
assert os.path.exists(data_dir), 'Run cifar100_download_and_extract.py first to download and extract the CIFAR-100 data.'
if is_training:
return [os.path.join(data_dir, 'train.bin')]
else:
return [os.path.join(data_dir, 'test.bin')]
|
def get_filenames(is_training, data_dir):
data_dir = os.path.join(data_dir, 'cifar-100-binary')
assert os.path.exists(data_dir), 'Run cifar100_download_and_extract.py first to download and extract the CIFAR-100 data.'
if is_training:
return [os.path.join(data_dir, 'train.bin')]
else:
return [os.path.join(data_dir, 'test.bin')]<|docstring|>Returns a list of filenames.<|endoftext|>
|
2d4e139829d09bfe713734ab372099fa036fee31784126c8fad28c5384c153a6
|
def parse_record(raw_record):
    """Parse CIFAR-100 image and label from a raw record."""
    image_size = 32
    label_bytes = 1
    # CIFAR-100 binary stores a coarse label byte first; skip it and read
    # the fine label at this offset.
    label_offset = 1
    num_classes = 100
    depth = 3
    image_bytes = image_size * image_size * depth
    record_bytes = label_bytes + label_offset + image_bytes
    # Decode the raw bytes into a flat uint8 vector covering the record.
    record = tf.reshape(tf.decode_raw(raw_record, tf.uint8), [record_bytes])
    # Fine label byte -> int -> one-hot vector.
    label = tf.one_hot(tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32), num_classes)
    # Pixels are stored depth-major (CHW); convert to HWC float image.
    depth_major = tf.reshape(tf.slice(record, [label_offset + label_bytes], [image_bytes]), [depth, image_size, image_size])
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
    return (image, label)
|
Parse CIFAR-100 image and label from a raw record.
|
resnet/cifar100_main_test.py
|
parse_record
|
IvanWeiZ/CSC2515
| 0
|
python
|
def parse_record(raw_record):
image_size = 32
label_bytes = 1
label_offset = 1
num_classes = 100
depth = 3
image_bytes = ((image_size * image_size) * depth)
record_bytes = ((label_bytes + label_offset) + image_bytes)
record = tf.reshape(tf.decode_raw(raw_record, tf.uint8), [record_bytes])
label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
label = tf.one_hot(label, num_classes)
depth_major = tf.reshape(tf.slice(record, [(label_offset + label_bytes)], [image_bytes]), [depth, image_size, image_size])
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
return (image, label)
|
def parse_record(raw_record):
image_size = 32
label_bytes = 1
label_offset = 1
num_classes = 100
depth = 3
image_bytes = ((image_size * image_size) * depth)
record_bytes = ((label_bytes + label_offset) + image_bytes)
record = tf.reshape(tf.decode_raw(raw_record, tf.uint8), [record_bytes])
label = tf.cast(tf.slice(record, [label_offset], [label_bytes]), tf.int32)
label = tf.one_hot(label, num_classes)
depth_major = tf.reshape(tf.slice(record, [(label_offset + label_bytes)], [image_bytes]), [depth, image_size, image_size])
image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)
return (image, label)<|docstring|>Parse CIFAR-100 image and label from a raw record.<|endoftext|>
|
66d24a18ef99a2e1c26c67b2f6b9ed91dea6a0c41436238b5e4c7c6892cdec68
|
def preprocess_image(image, is_training):
    """Preprocess a single image of layout [height, width, depth]."""
    if is_training:
        # Pad to (H+8, W+8), take a random crop back to the original size,
        # then randomly mirror horizontally (standard CIFAR augmentation).
        padded = tf.image.resize_image_with_crop_or_pad(image, _HEIGHT + 8, _WIDTH + 8)
        cropped = tf.random_crop(padded, [_HEIGHT, _WIDTH, _DEPTH])
        image = tf.image.random_flip_left_right(cropped)
    # Per-image zero-mean / unit-variance normalization.
    return tf.image.per_image_standardization(image)
|
Preprocess a single image of layout [height, width, depth].
|
resnet/cifar100_main_test.py
|
preprocess_image
|
IvanWeiZ/CSC2515
| 0
|
python
|
def preprocess_image(image, is_training):
if is_training:
image = tf.image.resize_image_with_crop_or_pad(image, (_HEIGHT + 8), (_WIDTH + 8))
image = tf.random_crop(image, [_HEIGHT, _WIDTH, _DEPTH])
image = tf.image.random_flip_left_right(image)
image = tf.image.per_image_standardization(image)
return image
|
def preprocess_image(image, is_training):
if is_training:
image = tf.image.resize_image_with_crop_or_pad(image, (_HEIGHT + 8), (_WIDTH + 8))
image = tf.random_crop(image, [_HEIGHT, _WIDTH, _DEPTH])
image = tf.image.random_flip_left_right(image)
image = tf.image.per_image_standardization(image)
return image<|docstring|>Preprocess a single image of layout [height, width, depth].<|endoftext|>
|
67f4000be48ab12c2801e27c66dd64c3edf6e6f6aae31ff2db08a5056bb690f8
|
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
    """Input_fn using the tf.data input pipeline for CIFAR-10 dataset.

    Args:
      is_training: A boolean denoting whether the input is for training.
      data_dir: The directory containing the input data.
      batch_size: The number of samples per batch.
      num_epochs: The number of epochs to repeat the dataset.

    Returns:
      A tuple of images and labels.
    """
    dataset = record_dataset(get_filenames(is_training, data_dir))
    if is_training:
        # Shuffle over the full training set for good randomization.
        dataset = dataset.shuffle(buffer_size=_NUM_IMAGES['train'])
    dataset = (dataset
               .map(parse_record)
               .map(lambda image, label: (preprocess_image(image, is_training), label))
               .prefetch(2 * batch_size)
               .repeat(num_epochs)
               .batch(batch_size))
    images, labels = dataset.make_one_shot_iterator().get_next()
    return (images, labels)
|
Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
Returns:
A tuple of images and labels.
|
resnet/cifar100_main_test.py
|
input_fn
|
IvanWeiZ/CSC2515
| 0
|
python
|
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
'Input_fn using the tf.data input pipeline for CIFAR-10 dataset.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: The directory containing the input data.\n batch_size: The number of samples per batch.\n num_epochs: The number of epochs to repeat the dataset.\n\n Returns:\n A tuple of images and labels.\n '
dataset = record_dataset(get_filenames(is_training, data_dir))
if is_training:
dataset = dataset.shuffle(buffer_size=_NUM_IMAGES['train'])
dataset = dataset.map(parse_record)
dataset = dataset.map((lambda image, label: (preprocess_image(image, is_training), label)))
dataset = dataset.prefetch((2 * batch_size))
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
(images, labels) = iterator.get_next()
return (images, labels)
|
def input_fn(is_training, data_dir, batch_size, num_epochs=1):
'Input_fn using the tf.data input pipeline for CIFAR-10 dataset.\n\n Args:\n is_training: A boolean denoting whether the input is for training.\n data_dir: The directory containing the input data.\n batch_size: The number of samples per batch.\n num_epochs: The number of epochs to repeat the dataset.\n\n Returns:\n A tuple of images and labels.\n '
dataset = record_dataset(get_filenames(is_training, data_dir))
if is_training:
dataset = dataset.shuffle(buffer_size=_NUM_IMAGES['train'])
dataset = dataset.map(parse_record)
dataset = dataset.map((lambda image, label: (preprocess_image(image, is_training), label)))
dataset = dataset.prefetch((2 * batch_size))
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
(images, labels) = iterator.get_next()
return (images, labels)<|docstring|>Input_fn using the tf.data input pipeline for CIFAR-10 dataset.
Args:
is_training: A boolean denoting whether the input is for training.
data_dir: The directory containing the input data.
batch_size: The number of samples per batch.
num_epochs: The number of epochs to repeat the dataset.
Returns:
A tuple of images and labels.<|endoftext|>
|
e71ead8edb0bb5c288a4b07136484b0b0ff9ec2abb0c15a9d7a7bbed140513c3
|
def cifar10_model_fn(features, labels, mode, params):
    'Model function for CIFAR-10.'
    tf.summary.image('images', features, max_outputs=6)
    # Build the ResNet v2 graph. NOTE(review): the function is named for
    # CIFAR-10 but _NUM_CLASSES comes from this CIFAR-100 file — confirm.
    network = resnet_model.cifar10_resnet_v2_generator(params['resnet_size'], _NUM_CLASSES, params['data_format'])
    inputs = tf.reshape(features, [(- 1), _HEIGHT, _WIDTH, _DEPTH])
    # Second argument tells batch-norm layers whether we are training.
    logits = network(inputs, (mode == tf.estimator.ModeKeys.TRAIN))
    predictions = {'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')}
    if (mode == tf.estimator.ModeKeys.PREDICT):
        # Prediction mode needs no loss/train op.
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    cross_entropy = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels)
    # Named identity + scalar summary so logging hooks can report the loss.
    tf.identity(cross_entropy, name='cross_entropy')
    tf.summary.scalar('cross_entropy', cross_entropy)
    # Add L2 weight decay over all trainable variables.
    loss = (cross_entropy + (_WEIGHT_DECAY * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])))
    if (mode == tf.estimator.ModeKeys.TRAIN):
        # Scale the 0.1 base learning rate linearly with the batch size.
        initial_learning_rate = ((0.1 * params['batch_size']) / 128)
        batches_per_epoch = (_NUM_IMAGES['train'] / params['batch_size'])
        global_step = tf.train.get_or_create_global_step()
        # Piecewise-constant schedule: 10x decay at epochs 100, 150, 200.
        boundaries = [int((batches_per_epoch * epoch)) for epoch in [100, 150, 200]]
        values = [(initial_learning_rate * decay) for decay in [1, 0.1, 0.01, 0.001]]
        learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
        tf.identity(learning_rate, name='learning_rate')
        tf.summary.scalar('learning_rate', learning_rate)
        optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=_MOMENTUM)
        # Run batch-norm moving-average updates together with the train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss, global_step)
    else:
        train_op = None
    accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), predictions['classes'])
    metrics = {'accuracy': accuracy}
    # Expose running accuracy for logging hooks / TensorBoard.
    tf.identity(accuracy[1], name='train_accuracy')
    tf.summary.scalar('train_accuracy', accuracy[1])
    return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)
|
Model function for CIFAR-10.
|
resnet/cifar100_main_test.py
|
cifar10_model_fn
|
IvanWeiZ/CSC2515
| 0
|
python
|
def cifar10_model_fn(features, labels, mode, params):
tf.summary.image('images', features, max_outputs=6)
network = resnet_model.cifar10_resnet_v2_generator(params['resnet_size'], _NUM_CLASSES, params['data_format'])
inputs = tf.reshape(features, [(- 1), _HEIGHT, _WIDTH, _DEPTH])
logits = network(inputs, (mode == tf.estimator.ModeKeys.TRAIN))
predictions = {'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')}
if (mode == tf.estimator.ModeKeys.PREDICT):
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
cross_entropy = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels)
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
loss = (cross_entropy + (_WEIGHT_DECAY * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])))
if (mode == tf.estimator.ModeKeys.TRAIN):
initial_learning_rate = ((0.1 * params['batch_size']) / 128)
batches_per_epoch = (_NUM_IMAGES['train'] / params['batch_size'])
global_step = tf.train.get_or_create_global_step()
boundaries = [int((batches_per_epoch * epoch)) for epoch in [100, 150, 200]]
values = [(initial_learning_rate * decay) for decay in [1, 0.1, 0.01, 0.001]]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=_MOMENTUM)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), predictions['classes'])
metrics = {'accuracy': accuracy}
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)
|
def cifar10_model_fn(features, labels, mode, params):
tf.summary.image('images', features, max_outputs=6)
network = resnet_model.cifar10_resnet_v2_generator(params['resnet_size'], _NUM_CLASSES, params['data_format'])
inputs = tf.reshape(features, [(- 1), _HEIGHT, _WIDTH, _DEPTH])
logits = network(inputs, (mode == tf.estimator.ModeKeys.TRAIN))
predictions = {'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor')}
if (mode == tf.estimator.ModeKeys.PREDICT):
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
cross_entropy = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels)
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
loss = (cross_entropy + (_WEIGHT_DECAY * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables()])))
if (mode == tf.estimator.ModeKeys.TRAIN):
initial_learning_rate = ((0.1 * params['batch_size']) / 128)
batches_per_epoch = (_NUM_IMAGES['train'] / params['batch_size'])
global_step = tf.train.get_or_create_global_step()
boundaries = [int((batches_per_epoch * epoch)) for epoch in [100, 150, 200]]
values = [(initial_learning_rate * decay) for decay in [1, 0.1, 0.01, 0.001]]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=_MOMENTUM)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer.minimize(loss, global_step)
else:
train_op = None
accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), predictions['classes'])
metrics = {'accuracy': accuracy}
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)<|docstring|>Model function for CIFAR-10.<|endoftext|>
|
d06850cf4a7021d6c63d4f3280ce113573db916a8276236a3abfc80016d745d2
|
def _scrub_generated_timestamps(self, target_workdir):
    """Remove the first line of comment from each file if it contains a timestamp."""
    for (root, _, filenames) in safe_walk(target_workdir):
        for filename in filenames:
            source = os.path.join(root, filename)
            with open(source) as f:
                lines = f.readlines()
            # Bug fix: the original `return` here aborted scrubbing of ALL
            # remaining files as soon as one empty file was seen; an empty
            # file should just be skipped.
            if (len(lines) < 1):
                continue
            with open(source, 'w') as f:
                # Keep the first line only when it is not a timestamp comment.
                if (not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0])):
                    f.write(lines[0])
                for line in lines[1:]:
                    f.write(line)
|
Remove the first line of comment from each file if it contains a timestamp.
|
src/python/pants/backend/codegen/tasks/antlr_gen.py
|
_scrub_generated_timestamps
|
qma/pants
| 0
|
python
|
def _scrub_generated_timestamps(self, target_workdir):
for (root, _, filenames) in safe_walk(target_workdir):
for filename in filenames:
source = os.path.join(root, filename)
with open(source) as f:
lines = f.readlines()
if (len(lines) < 1):
return
with open(source, 'w') as f:
if (not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0])):
f.write(lines[0])
for line in lines[1:]:
f.write(line)
|
def _scrub_generated_timestamps(self, target_workdir):
for (root, _, filenames) in safe_walk(target_workdir):
for filename in filenames:
source = os.path.join(root, filename)
with open(source) as f:
lines = f.readlines()
if (len(lines) < 1):
return
with open(source, 'w') as f:
if (not self._COMMENT_WITH_TIMESTAMP_RE.match(lines[0])):
f.write(lines[0])
for line in lines[1:]:
f.write(line)<|docstring|>Remove the first line of comment from each file if it contains a timestamp.<|endoftext|>
|
52db2bc5c0130002e86edd66e648961a4a7c5753cc8080ec49a1f45f5b3fe7a0
|
def get(self, index, col):
    """Returns dataframe element from ``index`` and ``col``."""
    return self.df.loc[index, col]
|
Returns dataframe element from ``index`` and ``col``.
|
dpipe/dataset/csv.py
|
get
|
samokhinv/deep_pipe
| 38
|
python
|
def get(self, index, col):
return self.df.loc[(index, col)]
|
def get(self, index, col):
return self.df.loc[(index, col)]<|docstring|>Returns dataframe element from ``index`` and ``col``.<|endoftext|>
|
7a857035159166c84a95bed8b46f7668a0cb217237a48aeaeed54dbc30ac51b7
|
def get_global_path(self, index: str, col: str) -> str:
    """
    Get the global path at ``index`` and ``col``.
    Often data frames contain path to data, this is a convenient way to obtain the global path.
    """
    relative_path = self.get(index, col)
    return os.path.join(self.path, relative_path)
|
Get the global path at ``index`` and ``col``.
Often data frames contain path to data, this is a convenient way to obtain the global path.
|
dpipe/dataset/csv.py
|
get_global_path
|
samokhinv/deep_pipe
| 38
|
python
|
def get_global_path(self, index: str, col: str) -> str:
'\n Get the global path at ``index`` and ``col``.\n Often data frames contain path to data, this is a convenient way to obtain the global path.\n '
return os.path.join(self.path, self.get(index, col))
|
def get_global_path(self, index: str, col: str) -> str:
'\n Get the global path at ``index`` and ``col``.\n Often data frames contain path to data, this is a convenient way to obtain the global path.\n '
return os.path.join(self.path, self.get(index, col))<|docstring|>Get the global path at ``index`` and ``col``.
Often data frames contain path to data, this is a convenient way to obtain the global path.<|endoftext|>
|
4c5253d75c70c9ddbf1d8b12079369329e9914d0fec543fc84292f19c4f580c4
|
def load(self, index: str, col: str, loader=None):
    """Loads the object from the path located in ``index`` and ``col`` positions in dataframe."""
    # Fall back to the dataset's default loader when none is supplied.
    chosen_loader = self.loader if loader is None else loader
    return chosen_loader(self.get_global_path(index, col))
|
Loads the object from the path located in ``index`` and ``col`` positions in dataframe.
|
dpipe/dataset/csv.py
|
load
|
samokhinv/deep_pipe
| 38
|
python
|
def load(self, index: str, col: str, loader=None):
if (loader is None):
loader = self.loader
return loader(self.get_global_path(index, col))
|
def load(self, index: str, col: str, loader=None):
if (loader is None):
loader = self.loader
return loader(self.get_global_path(index, col))<|docstring|>Loads the object from the path located in ``index`` and ``col`` positions in dataframe.<|endoftext|>
|
406c44b9447c9294ad76a41298f9df76fd0ef5ab03ed0e85ad10f132e444ecf4
|
def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', learning_rate=0.001, max_iter=1000, verbose=False, update_shape_constants=None):
    """
    :param infr_executor: The MXNet function that computes the training objective.
    :type infr_executor: MXNet Gluon Block
    :param data: a list of observed variables
    :type data: [mxnet.ndarray]
    :param param_dict: The MXNet ParameterDict for Gradient-based optimization
    :type param_dict: mxnet.gluon.ParameterDict
    :param ctx: MXNet context
    :type ctx: mxnet.cpu or mxnet.gpu
    :param optimizer: the choice of optimizer (default: 'adam')
    :type optimizer: str
    :param learning_rate: the learning rate of the gradient optimizer (default: 0.001)
    :type learning_rate: float
    :param max_iter: the maximum number of iterations of gradient optimization
    :type max_iter: int
    :param verbose: whether to print per-iteration messages.
    :type verbose: boolean
    :param update_shape_constants: The callback function to update the shape constants according to the size of minibatch
    :type update_shape_constants: Python function
    """
    # Accept either a ready-made DataLoader or raw arrays to wrap.
    if isinstance(data, mx.gluon.data.DataLoader):
        data_loader = data
    else:
        data_loader = mx.gluon.data.DataLoader(ArrayDataset(*data), batch_size=self.batch_size, shuffle=True, last_batch='rollover')
    trainer = mx.gluon.Trainer(param_dict, optimizer=optimizer, optimizer_params={'learning_rate': learning_rate})
    for e in range(max_iter):
        L_e = 0
        n_batches = 0
        for (i, data_batch) in enumerate(data_loader):
            # Bug fix: `(list or tuple)` evaluates to just `list`, so tuple
            # batches were incorrectly re-wrapped in a one-element list;
            # test against both types.
            if (not isinstance(data_batch, (list, tuple))):
                data_batch = [data_batch]
            if (update_shape_constants is not None):
                update_shape_constants(data_batch)
            with mx.autograd.record():
                (loss, loss_for_gradient) = infr_executor(mx.nd.zeros(1, ctx=ctx), *data_batch)
            loss_for_gradient.backward()
            if verbose:
                print('\repoch {} Iteration {} loss: {}\t\t\t'.format((e + 1), (i + 1), loss.asscalar()), end='')
            trainer.step(batch_size=self.batch_size, ignore_stale_grad=True)
            L_e += loss.asscalar()
            n_batches += 1
        if verbose:
            print('epoch-loss: {} '.format((L_e / n_batches)))
|
:param infr_executor: The MXNet function that computes the training objective.
:type infr_executor: MXNet Gluon Block
:param data: a list of observed variables
:type data: [mxnet.ndarray]
:param param_dict: The MXNet ParameterDict for Gradient-based optimization
:type param_dict: mxnet.gluon.ParameterDict
:param ctx: MXNet context
:type ctx: mxnet.cpu or mxnet.gpu
:param optimizer: the choice of optimizer (default: 'adam')
:type optimizer: str
:param learning_rate: the learning rate of the gradient optimizer (default: 0.001)
:type learning_rate: float
:param max_iter: the maximum number of iterations of gradient optimization
:type max_iter: int
:param verbose: whether to print per-iteration messages.
:type verbose: boolean
:param update_shape_constants: The callback function to update the shape constants according to the size of minibatch
:type update_shape_constants: Python function
|
mxfusion/inference/minibatch_loop.py
|
run
|
JeremiasKnoblauch/MXFusion
| 2
|
python
|
def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', learning_rate=0.001, max_iter=1000, verbose=False, update_shape_constants=None):
"\n :param infr_executor: The MXNet function that computes the training objective.\n :type infr_executor: MXNet Gluon Block\n :param data: a list of observed variables\n :type data: [mxnet.ndarray]\n :param param_dict: The MXNet ParameterDict for Gradient-based optimization\n :type param_dict: mxnet.gluon.ParameterDict\n :param ctx: MXNet context\n :type ctx: mxnet.cpu or mxnet.gpu\n :param optimizer: the choice of optimizer (default: 'adam')\n :type optimizer: str\n :param learning_rate: the learning rate of the gradient optimizer (default: 0.001)\n :type learning_rate: float\n :param max_iter: the maximum number of iterations of gradient optimization\n :type max_iter: int\n :param verbose: whether to print per-iteration messages.\n :type verbose: boolean\n :param update_shape_constants: The callback function to update the shape constants according to the size of minibatch\n :type update_shape_constants: Python function\n "
if isinstance(data, mx.gluon.data.DataLoader):
data_loader = data
else:
data_loader = mx.gluon.data.DataLoader(ArrayDataset(*data), batch_size=self.batch_size, shuffle=True, last_batch='rollover')
trainer = mx.gluon.Trainer(param_dict, optimizer=optimizer, optimizer_params={'learning_rate': learning_rate})
for e in range(max_iter):
L_e = 0
n_batches = 0
for (i, data_batch) in enumerate(data_loader):
if (not isinstance(data_batch, (list or tuple))):
data_batch = [data_batch]
if (update_shape_constants is not None):
update_shape_constants(data_batch)
with mx.autograd.record():
(loss, loss_for_gradient) = infr_executor(mx.nd.zeros(1, ctx=ctx), *data_batch)
loss_for_gradient.backward()
if verbose:
print('\repoch {} Iteration {} loss: {}\t\t\t'.format((e + 1), (i + 1), loss.asscalar()), end=)
trainer.step(batch_size=self.batch_size, ignore_stale_grad=True)
L_e += loss.asscalar()
n_batches += 1
if verbose:
print('epoch-loss: {} '.format((L_e / n_batches)))
|
def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', learning_rate=0.001, max_iter=1000, verbose=False, update_shape_constants=None):
"\n :param infr_executor: The MXNet function that computes the training objective.\n :type infr_executor: MXNet Gluon Block\n :param data: a list of observed variables\n :type data: [mxnet.ndarray]\n :param param_dict: The MXNet ParameterDict for Gradient-based optimization\n :type param_dict: mxnet.gluon.ParameterDict\n :param ctx: MXNet context\n :type ctx: mxnet.cpu or mxnet.gpu\n :param optimizer: the choice of optimizer (default: 'adam')\n :type optimizer: str\n :param learning_rate: the learning rate of the gradient optimizer (default: 0.001)\n :type learning_rate: float\n :param max_iter: the maximum number of iterations of gradient optimization\n :type max_iter: int\n :param verbose: whether to print per-iteration messages.\n :type verbose: boolean\n :param update_shape_constants: The callback function to update the shape constants according to the size of minibatch\n :type update_shape_constants: Python function\n "
if isinstance(data, mx.gluon.data.DataLoader):
data_loader = data
else:
data_loader = mx.gluon.data.DataLoader(ArrayDataset(*data), batch_size=self.batch_size, shuffle=True, last_batch='rollover')
trainer = mx.gluon.Trainer(param_dict, optimizer=optimizer, optimizer_params={'learning_rate': learning_rate})
for e in range(max_iter):
L_e = 0
n_batches = 0
for (i, data_batch) in enumerate(data_loader):
if (not isinstance(data_batch, (list or tuple))):
data_batch = [data_batch]
if (update_shape_constants is not None):
update_shape_constants(data_batch)
with mx.autograd.record():
(loss, loss_for_gradient) = infr_executor(mx.nd.zeros(1, ctx=ctx), *data_batch)
loss_for_gradient.backward()
if verbose:
print('\repoch {} Iteration {} loss: {}\t\t\t'.format((e + 1), (i + 1), loss.asscalar()), end=)
trainer.step(batch_size=self.batch_size, ignore_stale_grad=True)
L_e += loss.asscalar()
n_batches += 1
if verbose:
print('epoch-loss: {} '.format((L_e / n_batches)))<|docstring|>:param infr_executor: The MXNet function that computes the training objective.
:type infr_executor: MXNet Gluon Block
:param data: a list of observed variables
:type data: [mxnet.ndarray]
:param param_dict: The MXNet ParameterDict for Gradient-based optimization
:type param_dict: mxnet.gluon.ParameterDict
:param ctx: MXNet context
:type ctx: mxnet.cpu or mxnet.gpu
:param optimizer: the choice of optimizer (default: 'adam')
:type optimizer: str
:param learning_rate: the learning rate of the gradient optimizer (default: 0.001)
:type learning_rate: float
:param max_iter: the maximum number of iterations of gradient optimization
:type max_iter: int
:param verbose: whether to print per-iteration messages.
:type verbose: boolean
:param update_shape_constants: The callback function to update the shape constants according to the size of minibatch
:type update_shape_constants: Python function<|endoftext|>
|
fc4ac36ae999830463af7f64f8aac16a1242b12961f5ee5c20d9bdbf75efb0ab
|
def main(directory, calibrationfile, calibrate=True, plot=False):
    """Run program.

    Keyword arguments:
    directory -- input directory
    """
    read_from = 'rac'
    CCDitems = read_all_files_in_directory(read_from, directory)
    if not calibrate:
        # No calibration requested: just plot each raw CCD item.
        for CCDitem in list(CCDitems):
            fig = plt.figure()
            ax = fig.gca()
            plotCCDitem(CCDitem, fig, ax, title=CCDitem['channel'])
        return
    for CCDitem in tqdm(CCDitems):
        (image_lsb, image_bias_sub, image_desmeared, image_dark_sub, image_flatf_comp) = L1_calibrate(CCDitem, calibrationfile)
        if plot:
            # One subplot per calibration stage, top to bottom.
            (fig, ax) = plt.subplots(5, 1)
            stages = [
                (image_lsb, 'Original LSB'),
                (image_bias_sub, 'Bias subtracted'),
                (image_desmeared, ' Desmeared LSB'),
                (image_dark_sub, ' Dark current subtracted LSB'),
                (image_flatf_comp, ' Flat field compensated LSB'),
            ]
            for axis, (img, title) in zip(ax, stages):
                plot_CCDimage(img, fig, axis, title)
            fig.suptitle(CCDitem['channel'])
|
Run program.
Keyword arguments:
directory -- input directory
|
src/mats_l1_processing/read_and_calibrate_all_files.py
|
main
|
OleMartinChristensen/MATS-L1-processsing
| 0
|
python
|
def main(directory, calibrationfile, calibrate=True, plot=False):
'Run program.\n\n Keyword arguments:\n directory -- input directory\n '
read_from = 'rac'
CCDitems = read_all_files_in_directory(read_from, directory)
if calibrate:
for CCDitem in tqdm(CCDitems):
(image_lsb, image_bias_sub, image_desmeared, image_dark_sub, image_flatf_comp) = L1_calibrate(CCDitem, calibrationfile)
if plot:
(fig, ax) = plt.subplots(5, 1)
plot_CCDimage(image_lsb, fig, ax[0], 'Original LSB')
plot_CCDimage(image_bias_sub, fig, ax[1], 'Bias subtracted')
plot_CCDimage(image_desmeared, fig, ax[2], ' Desmeared LSB')
plot_CCDimage(image_dark_sub, fig, ax[3], ' Dark current subtracted LSB')
plot_CCDimage(image_flatf_comp, fig, ax[4], ' Flat field compensated LSB')
fig.suptitle(CCDitem['channel'])
else:
for CCDitem in CCDitems[:]:
fig = plt.figure()
ax = fig.gca()
plotCCDitem(CCDitem, fig, ax, title=CCDitem['channel'])
|
def main(directory, calibrationfile, calibrate=True, plot=False):
'Run program.\n\n Keyword arguments:\n directory -- input directory\n '
read_from = 'rac'
CCDitems = read_all_files_in_directory(read_from, directory)
if calibrate:
for CCDitem in tqdm(CCDitems):
(image_lsb, image_bias_sub, image_desmeared, image_dark_sub, image_flatf_comp) = L1_calibrate(CCDitem, calibrationfile)
if plot:
(fig, ax) = plt.subplots(5, 1)
plot_CCDimage(image_lsb, fig, ax[0], 'Original LSB')
plot_CCDimage(image_bias_sub, fig, ax[1], 'Bias subtracted')
plot_CCDimage(image_desmeared, fig, ax[2], ' Desmeared LSB')
plot_CCDimage(image_dark_sub, fig, ax[3], ' Dark current subtracted LSB')
plot_CCDimage(image_flatf_comp, fig, ax[4], ' Flat field compensated LSB')
fig.suptitle(CCDitem['channel'])
else:
for CCDitem in CCDitems[:]:
fig = plt.figure()
ax = fig.gca()
plotCCDitem(CCDitem, fig, ax, title=CCDitem['channel'])<|docstring|>Run program.
Keyword arguments:
directory -- input directory<|endoftext|>
|
bbccc9ff0e5cfe24f7095b3dfec205db4222653dbb9fe1ee067889370b2dff2e
|
def __init__(self, user_id=None, user_created_date_utc=None, last_login_date_utc=None, is_external_partner=None, has_accountant_role=None, month_period=None, number_of_logins=None, number_of_documents_created=None, net_value_documents_created=None, absolute_value_documents_created=None, attached_practices=None, history_records=None):
    'UserResponse - a model defined in OpenAPI'
    # Table of (attribute name, constructor value) pairs, in declaration order.
    field_values = (
        ('user_id', user_id),
        ('user_created_date_utc', user_created_date_utc),
        ('last_login_date_utc', last_login_date_utc),
        ('is_external_partner', is_external_partner),
        ('has_accountant_role', has_accountant_role),
        ('month_period', month_period),
        ('number_of_logins', number_of_logins),
        ('number_of_documents_created', number_of_documents_created),
        ('net_value_documents_created', net_value_documents_created),
        ('absolute_value_documents_created', absolute_value_documents_created),
        ('attached_practices', attached_practices),
        ('history_records', history_records),
    )
    # Initialize every backing attribute to None first, matching the
    # generated OpenAPI model layout.
    for field_name, _ in field_values:
        setattr(self, '_' + field_name, None)
    self.discriminator = None
    # Route non-None constructor arguments through the public attributes
    # (property setters, where defined on the class).
    for field_name, value in field_values:
        if value is not None:
            setattr(self, field_name, value)
|
UserResponse - a model defined in OpenAPI
|
xero_python/finance/models/user_response.py
|
__init__
|
gavinwhyte/xero-python
| 1
|
python
|
def __init__(self, user_id=None, user_created_date_utc=None, last_login_date_utc=None, is_external_partner=None, has_accountant_role=None, month_period=None, number_of_logins=None, number_of_documents_created=None, net_value_documents_created=None, absolute_value_documents_created=None, attached_practices=None, history_records=None):
self._user_id = None
self._user_created_date_utc = None
self._last_login_date_utc = None
self._is_external_partner = None
self._has_accountant_role = None
self._month_period = None
self._number_of_logins = None
self._number_of_documents_created = None
self._net_value_documents_created = None
self._absolute_value_documents_created = None
self._attached_practices = None
self._history_records = None
self.discriminator = None
if (user_id is not None):
self.user_id = user_id
if (user_created_date_utc is not None):
self.user_created_date_utc = user_created_date_utc
if (last_login_date_utc is not None):
self.last_login_date_utc = last_login_date_utc
if (is_external_partner is not None):
self.is_external_partner = is_external_partner
if (has_accountant_role is not None):
self.has_accountant_role = has_accountant_role
if (month_period is not None):
self.month_period = month_period
if (number_of_logins is not None):
self.number_of_logins = number_of_logins
if (number_of_documents_created is not None):
self.number_of_documents_created = number_of_documents_created
if (net_value_documents_created is not None):
self.net_value_documents_created = net_value_documents_created
if (absolute_value_documents_created is not None):
self.absolute_value_documents_created = absolute_value_documents_created
if (attached_practices is not None):
self.attached_practices = attached_practices
if (history_records is not None):
self.history_records = history_records
|
def __init__(self, user_id=None, user_created_date_utc=None, last_login_date_utc=None, is_external_partner=None, has_accountant_role=None, month_period=None, number_of_logins=None, number_of_documents_created=None, net_value_documents_created=None, absolute_value_documents_created=None, attached_practices=None, history_records=None):
self._user_id = None
self._user_created_date_utc = None
self._last_login_date_utc = None
self._is_external_partner = None
self._has_accountant_role = None
self._month_period = None
self._number_of_logins = None
self._number_of_documents_created = None
self._net_value_documents_created = None
self._absolute_value_documents_created = None
self._attached_practices = None
self._history_records = None
self.discriminator = None
if (user_id is not None):
self.user_id = user_id
if (user_created_date_utc is not None):
self.user_created_date_utc = user_created_date_utc
if (last_login_date_utc is not None):
self.last_login_date_utc = last_login_date_utc
if (is_external_partner is not None):
self.is_external_partner = is_external_partner
if (has_accountant_role is not None):
self.has_accountant_role = has_accountant_role
if (month_period is not None):
self.month_period = month_period
if (number_of_logins is not None):
self.number_of_logins = number_of_logins
if (number_of_documents_created is not None):
self.number_of_documents_created = number_of_documents_created
if (net_value_documents_created is not None):
self.net_value_documents_created = net_value_documents_created
if (absolute_value_documents_created is not None):
self.absolute_value_documents_created = absolute_value_documents_created
if (attached_practices is not None):
self.attached_practices = attached_practices
if (history_records is not None):
self.history_records = history_records<|docstring|>UserResponse - a model defined in OpenAPI<|endoftext|>
|
ddb6d9be6c3029c2156c2914eeec04c71e19218deaa6d09184df630799e9e9a9
|
@property
def user_id(self):
'Gets the user_id of this UserResponse. # noqa: E501\n\n The Xero identifier for the user # noqa: E501\n\n :return: The user_id of this UserResponse. # noqa: E501\n :rtype: str\n '
return self._user_id
|
Gets the user_id of this UserResponse. # noqa: E501
The Xero identifier for the user # noqa: E501
:return: The user_id of this UserResponse. # noqa: E501
:rtype: str
|
xero_python/finance/models/user_response.py
|
user_id
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def user_id(self):
'Gets the user_id of this UserResponse. # noqa: E501\n\n The Xero identifier for the user # noqa: E501\n\n :return: The user_id of this UserResponse. # noqa: E501\n :rtype: str\n '
return self._user_id
|
@property
def user_id(self):
'Gets the user_id of this UserResponse. # noqa: E501\n\n The Xero identifier for the user # noqa: E501\n\n :return: The user_id of this UserResponse. # noqa: E501\n :rtype: str\n '
return self._user_id<|docstring|>Gets the user_id of this UserResponse. # noqa: E501
The Xero identifier for the user # noqa: E501
:return: The user_id of this UserResponse. # noqa: E501
:rtype: str<|endoftext|>
|
72cb74bb7bcee67233c19e532b57e47a1492007a29ba6477b7df040925e65a6e
|
@user_id.setter
def user_id(self, user_id):
'Sets the user_id of this UserResponse.\n\n The Xero identifier for the user # noqa: E501\n\n :param user_id: The user_id of this UserResponse. # noqa: E501\n :type: str\n '
self._user_id = user_id
|
Sets the user_id of this UserResponse.
The Xero identifier for the user # noqa: E501
:param user_id: The user_id of this UserResponse. # noqa: E501
:type: str
|
xero_python/finance/models/user_response.py
|
user_id
|
gavinwhyte/xero-python
| 1
|
python
|
@user_id.setter
def user_id(self, user_id):
'Sets the user_id of this UserResponse.\n\n The Xero identifier for the user # noqa: E501\n\n :param user_id: The user_id of this UserResponse. # noqa: E501\n :type: str\n '
self._user_id = user_id
|
@user_id.setter
def user_id(self, user_id):
'Sets the user_id of this UserResponse.\n\n The Xero identifier for the user # noqa: E501\n\n :param user_id: The user_id of this UserResponse. # noqa: E501\n :type: str\n '
self._user_id = user_id<|docstring|>Sets the user_id of this UserResponse.
The Xero identifier for the user # noqa: E501
:param user_id: The user_id of this UserResponse. # noqa: E501
:type: str<|endoftext|>
|
67e9c7c82923f6aba74277c221f38bff09bb79c2810d59bf0519bcfee6dd249f
|
@property
def user_created_date_utc(self):
'Gets the user_created_date_utc of this UserResponse. # noqa: E501\n\n Timestamp of user creation. # noqa: E501\n\n :return: The user_created_date_utc of this UserResponse. # noqa: E501\n :rtype: datetime\n '
return self._user_created_date_utc
|
Gets the user_created_date_utc of this UserResponse. # noqa: E501
Timestamp of user creation. # noqa: E501
:return: The user_created_date_utc of this UserResponse. # noqa: E501
:rtype: datetime
|
xero_python/finance/models/user_response.py
|
user_created_date_utc
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def user_created_date_utc(self):
'Gets the user_created_date_utc of this UserResponse. # noqa: E501\n\n Timestamp of user creation. # noqa: E501\n\n :return: The user_created_date_utc of this UserResponse. # noqa: E501\n :rtype: datetime\n '
return self._user_created_date_utc
|
@property
def user_created_date_utc(self):
'Gets the user_created_date_utc of this UserResponse. # noqa: E501\n\n Timestamp of user creation. # noqa: E501\n\n :return: The user_created_date_utc of this UserResponse. # noqa: E501\n :rtype: datetime\n '
return self._user_created_date_utc<|docstring|>Gets the user_created_date_utc of this UserResponse. # noqa: E501
Timestamp of user creation. # noqa: E501
:return: The user_created_date_utc of this UserResponse. # noqa: E501
:rtype: datetime<|endoftext|>
|
651dab1626bae952051948bfab7b6a47907546783c77e54aeecaa4b9571b5a71
|
@user_created_date_utc.setter
def user_created_date_utc(self, user_created_date_utc):
'Sets the user_created_date_utc of this UserResponse.\n\n Timestamp of user creation. # noqa: E501\n\n :param user_created_date_utc: The user_created_date_utc of this UserResponse. # noqa: E501\n :type: datetime\n '
self._user_created_date_utc = user_created_date_utc
|
Sets the user_created_date_utc of this UserResponse.
Timestamp of user creation. # noqa: E501
:param user_created_date_utc: The user_created_date_utc of this UserResponse. # noqa: E501
:type: datetime
|
xero_python/finance/models/user_response.py
|
user_created_date_utc
|
gavinwhyte/xero-python
| 1
|
python
|
@user_created_date_utc.setter
def user_created_date_utc(self, user_created_date_utc):
'Sets the user_created_date_utc of this UserResponse.\n\n Timestamp of user creation. # noqa: E501\n\n :param user_created_date_utc: The user_created_date_utc of this UserResponse. # noqa: E501\n :type: datetime\n '
self._user_created_date_utc = user_created_date_utc
|
@user_created_date_utc.setter
def user_created_date_utc(self, user_created_date_utc):
'Sets the user_created_date_utc of this UserResponse.\n\n Timestamp of user creation. # noqa: E501\n\n :param user_created_date_utc: The user_created_date_utc of this UserResponse. # noqa: E501\n :type: datetime\n '
self._user_created_date_utc = user_created_date_utc<|docstring|>Sets the user_created_date_utc of this UserResponse.
Timestamp of user creation. # noqa: E501
:param user_created_date_utc: The user_created_date_utc of this UserResponse. # noqa: E501
:type: datetime<|endoftext|>
|
49fee08ce52faaf7db3db549edd7e17c62b462f7f5c37573134aef63d5ae10ce
|
@property
def last_login_date_utc(self):
'Gets the last_login_date_utc of this UserResponse. # noqa: E501\n\n Timestamp of user last login # noqa: E501\n\n :return: The last_login_date_utc of this UserResponse. # noqa: E501\n :rtype: datetime\n '
return self._last_login_date_utc
|
Gets the last_login_date_utc of this UserResponse. # noqa: E501
Timestamp of user last login # noqa: E501
:return: The last_login_date_utc of this UserResponse. # noqa: E501
:rtype: datetime
|
xero_python/finance/models/user_response.py
|
last_login_date_utc
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def last_login_date_utc(self):
'Gets the last_login_date_utc of this UserResponse. # noqa: E501\n\n Timestamp of user last login # noqa: E501\n\n :return: The last_login_date_utc of this UserResponse. # noqa: E501\n :rtype: datetime\n '
return self._last_login_date_utc
|
@property
def last_login_date_utc(self):
'Gets the last_login_date_utc of this UserResponse. # noqa: E501\n\n Timestamp of user last login # noqa: E501\n\n :return: The last_login_date_utc of this UserResponse. # noqa: E501\n :rtype: datetime\n '
return self._last_login_date_utc<|docstring|>Gets the last_login_date_utc of this UserResponse. # noqa: E501
Timestamp of user last login # noqa: E501
:return: The last_login_date_utc of this UserResponse. # noqa: E501
:rtype: datetime<|endoftext|>
|
1b5f5df6506ede3700aa9b89ba8cf01cf7f9c94aca18cd6503989369de151782
|
@last_login_date_utc.setter
def last_login_date_utc(self, last_login_date_utc):
'Sets the last_login_date_utc of this UserResponse.\n\n Timestamp of user last login # noqa: E501\n\n :param last_login_date_utc: The last_login_date_utc of this UserResponse. # noqa: E501\n :type: datetime\n '
self._last_login_date_utc = last_login_date_utc
|
Sets the last_login_date_utc of this UserResponse.
Timestamp of user last login # noqa: E501
:param last_login_date_utc: The last_login_date_utc of this UserResponse. # noqa: E501
:type: datetime
|
xero_python/finance/models/user_response.py
|
last_login_date_utc
|
gavinwhyte/xero-python
| 1
|
python
|
@last_login_date_utc.setter
def last_login_date_utc(self, last_login_date_utc):
'Sets the last_login_date_utc of this UserResponse.\n\n Timestamp of user last login # noqa: E501\n\n :param last_login_date_utc: The last_login_date_utc of this UserResponse. # noqa: E501\n :type: datetime\n '
self._last_login_date_utc = last_login_date_utc
|
@last_login_date_utc.setter
def last_login_date_utc(self, last_login_date_utc):
'Sets the last_login_date_utc of this UserResponse.\n\n Timestamp of user last login # noqa: E501\n\n :param last_login_date_utc: The last_login_date_utc of this UserResponse. # noqa: E501\n :type: datetime\n '
self._last_login_date_utc = last_login_date_utc<|docstring|>Sets the last_login_date_utc of this UserResponse.
Timestamp of user last login # noqa: E501
:param last_login_date_utc: The last_login_date_utc of this UserResponse. # noqa: E501
:type: datetime<|endoftext|>
|
a33679e283a7c3f0898dd3a41599c8c0c1d2978327511155c27eee1c557fae86
|
@property
def is_external_partner(self):
'Gets the is_external_partner of this UserResponse. # noqa: E501\n\n User is external partner. # noqa: E501\n\n :return: The is_external_partner of this UserResponse. # noqa: E501\n :rtype: bool\n '
return self._is_external_partner
|
Gets the is_external_partner of this UserResponse. # noqa: E501
User is external partner. # noqa: E501
:return: The is_external_partner of this UserResponse. # noqa: E501
:rtype: bool
|
xero_python/finance/models/user_response.py
|
is_external_partner
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def is_external_partner(self):
'Gets the is_external_partner of this UserResponse. # noqa: E501\n\n User is external partner. # noqa: E501\n\n :return: The is_external_partner of this UserResponse. # noqa: E501\n :rtype: bool\n '
return self._is_external_partner
|
@property
def is_external_partner(self):
'Gets the is_external_partner of this UserResponse. # noqa: E501\n\n User is external partner. # noqa: E501\n\n :return: The is_external_partner of this UserResponse. # noqa: E501\n :rtype: bool\n '
return self._is_external_partner<|docstring|>Gets the is_external_partner of this UserResponse. # noqa: E501
User is external partner. # noqa: E501
:return: The is_external_partner of this UserResponse. # noqa: E501
:rtype: bool<|endoftext|>
|
333feffd34f9bc1ca7bc89374afeb900d2063ebb9615c6043f67ec08a32ec80a
|
@is_external_partner.setter
def is_external_partner(self, is_external_partner):
'Sets the is_external_partner of this UserResponse.\n\n User is external partner. # noqa: E501\n\n :param is_external_partner: The is_external_partner of this UserResponse. # noqa: E501\n :type: bool\n '
self._is_external_partner = is_external_partner
|
Sets the is_external_partner of this UserResponse.
User is external partner. # noqa: E501
:param is_external_partner: The is_external_partner of this UserResponse. # noqa: E501
:type: bool
|
xero_python/finance/models/user_response.py
|
is_external_partner
|
gavinwhyte/xero-python
| 1
|
python
|
@is_external_partner.setter
def is_external_partner(self, is_external_partner):
'Sets the is_external_partner of this UserResponse.\n\n User is external partner. # noqa: E501\n\n :param is_external_partner: The is_external_partner of this UserResponse. # noqa: E501\n :type: bool\n '
self._is_external_partner = is_external_partner
|
@is_external_partner.setter
def is_external_partner(self, is_external_partner):
'Sets the is_external_partner of this UserResponse.\n\n User is external partner. # noqa: E501\n\n :param is_external_partner: The is_external_partner of this UserResponse. # noqa: E501\n :type: bool\n '
self._is_external_partner = is_external_partner<|docstring|>Sets the is_external_partner of this UserResponse.
User is external partner. # noqa: E501
:param is_external_partner: The is_external_partner of this UserResponse. # noqa: E501
:type: bool<|endoftext|>
|
c5b028705536dca8240105b93f030ca76d4b97bb8d27947346e9ca04a0d2f17b
|
@property
def has_accountant_role(self):
'Gets the has_accountant_role of this UserResponse. # noqa: E501\n\n User has Accountant role. # noqa: E501\n\n :return: The has_accountant_role of this UserResponse. # noqa: E501\n :rtype: bool\n '
return self._has_accountant_role
|
Gets the has_accountant_role of this UserResponse. # noqa: E501
User has Accountant role. # noqa: E501
:return: The has_accountant_role of this UserResponse. # noqa: E501
:rtype: bool
|
xero_python/finance/models/user_response.py
|
has_accountant_role
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def has_accountant_role(self):
'Gets the has_accountant_role of this UserResponse. # noqa: E501\n\n User has Accountant role. # noqa: E501\n\n :return: The has_accountant_role of this UserResponse. # noqa: E501\n :rtype: bool\n '
return self._has_accountant_role
|
@property
def has_accountant_role(self):
'Gets the has_accountant_role of this UserResponse. # noqa: E501\n\n User has Accountant role. # noqa: E501\n\n :return: The has_accountant_role of this UserResponse. # noqa: E501\n :rtype: bool\n '
return self._has_accountant_role<|docstring|>Gets the has_accountant_role of this UserResponse. # noqa: E501
User has Accountant role. # noqa: E501
:return: The has_accountant_role of this UserResponse. # noqa: E501
:rtype: bool<|endoftext|>
|
2eb2aa4e0f806c738dfff950b2525cca115eae8c9d05c9bbde2ab898e527ad59
|
@has_accountant_role.setter
def has_accountant_role(self, has_accountant_role):
'Sets the has_accountant_role of this UserResponse.\n\n User has Accountant role. # noqa: E501\n\n :param has_accountant_role: The has_accountant_role of this UserResponse. # noqa: E501\n :type: bool\n '
self._has_accountant_role = has_accountant_role
|
Sets the has_accountant_role of this UserResponse.
User has Accountant role. # noqa: E501
:param has_accountant_role: The has_accountant_role of this UserResponse. # noqa: E501
:type: bool
|
xero_python/finance/models/user_response.py
|
has_accountant_role
|
gavinwhyte/xero-python
| 1
|
python
|
@has_accountant_role.setter
def has_accountant_role(self, has_accountant_role):
'Sets the has_accountant_role of this UserResponse.\n\n User has Accountant role. # noqa: E501\n\n :param has_accountant_role: The has_accountant_role of this UserResponse. # noqa: E501\n :type: bool\n '
self._has_accountant_role = has_accountant_role
|
@has_accountant_role.setter
def has_accountant_role(self, has_accountant_role):
'Sets the has_accountant_role of this UserResponse.\n\n User has Accountant role. # noqa: E501\n\n :param has_accountant_role: The has_accountant_role of this UserResponse. # noqa: E501\n :type: bool\n '
self._has_accountant_role = has_accountant_role<|docstring|>Sets the has_accountant_role of this UserResponse.
User has Accountant role. # noqa: E501
:param has_accountant_role: The has_accountant_role of this UserResponse. # noqa: E501
:type: bool<|endoftext|>
|
41e8e0f7ce3b6ae1267cacb51f2994f57f5fc281e347e6936b1a58342b48e3c4
|
@property
def month_period(self):
'Gets the month_period of this UserResponse. # noqa: E501\n\n Month period in format yyyy-MM. # noqa: E501\n\n :return: The month_period of this UserResponse. # noqa: E501\n :rtype: str\n '
return self._month_period
|
Gets the month_period of this UserResponse. # noqa: E501
Month period in format yyyy-MM. # noqa: E501
:return: The month_period of this UserResponse. # noqa: E501
:rtype: str
|
xero_python/finance/models/user_response.py
|
month_period
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def month_period(self):
'Gets the month_period of this UserResponse. # noqa: E501\n\n Month period in format yyyy-MM. # noqa: E501\n\n :return: The month_period of this UserResponse. # noqa: E501\n :rtype: str\n '
return self._month_period
|
@property
def month_period(self):
'Gets the month_period of this UserResponse. # noqa: E501\n\n Month period in format yyyy-MM. # noqa: E501\n\n :return: The month_period of this UserResponse. # noqa: E501\n :rtype: str\n '
return self._month_period<|docstring|>Gets the month_period of this UserResponse. # noqa: E501
Month period in format yyyy-MM. # noqa: E501
:return: The month_period of this UserResponse. # noqa: E501
:rtype: str<|endoftext|>
|
afebf8c373f31a39487d1dc1ed2b602f94262db6bb86e7b05768e7c1aa1e051b
|
@month_period.setter
def month_period(self, month_period):
'Sets the month_period of this UserResponse.\n\n Month period in format yyyy-MM. # noqa: E501\n\n :param month_period: The month_period of this UserResponse. # noqa: E501\n :type: str\n '
self._month_period = month_period
|
Sets the month_period of this UserResponse.
Month period in format yyyy-MM. # noqa: E501
:param month_period: The month_period of this UserResponse. # noqa: E501
:type: str
|
xero_python/finance/models/user_response.py
|
month_period
|
gavinwhyte/xero-python
| 1
|
python
|
@month_period.setter
def month_period(self, month_period):
'Sets the month_period of this UserResponse.\n\n Month period in format yyyy-MM. # noqa: E501\n\n :param month_period: The month_period of this UserResponse. # noqa: E501\n :type: str\n '
self._month_period = month_period
|
@month_period.setter
def month_period(self, month_period):
'Sets the month_period of this UserResponse.\n\n Month period in format yyyy-MM. # noqa: E501\n\n :param month_period: The month_period of this UserResponse. # noqa: E501\n :type: str\n '
self._month_period = month_period<|docstring|>Sets the month_period of this UserResponse.
Month period in format yyyy-MM. # noqa: E501
:param month_period: The month_period of this UserResponse. # noqa: E501
:type: str<|endoftext|>
|
c129d60c6fbe8d06dcfe47a148154bbdc5ad75ffb68801fa953380931f3bc7b6
|
@property
def number_of_logins(self):
'Gets the number_of_logins of this UserResponse. # noqa: E501\n\n Number of times the user has logged in. # noqa: E501\n\n :return: The number_of_logins of this UserResponse. # noqa: E501\n :rtype: int\n '
return self._number_of_logins
|
Gets the number_of_logins of this UserResponse. # noqa: E501
Number of times the user has logged in. # noqa: E501
:return: The number_of_logins of this UserResponse. # noqa: E501
:rtype: int
|
xero_python/finance/models/user_response.py
|
number_of_logins
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def number_of_logins(self):
'Gets the number_of_logins of this UserResponse. # noqa: E501\n\n Number of times the user has logged in. # noqa: E501\n\n :return: The number_of_logins of this UserResponse. # noqa: E501\n :rtype: int\n '
return self._number_of_logins
|
@property
def number_of_logins(self):
'Gets the number_of_logins of this UserResponse. # noqa: E501\n\n Number of times the user has logged in. # noqa: E501\n\n :return: The number_of_logins of this UserResponse. # noqa: E501\n :rtype: int\n '
return self._number_of_logins<|docstring|>Gets the number_of_logins of this UserResponse. # noqa: E501
Number of times the user has logged in. # noqa: E501
:return: The number_of_logins of this UserResponse. # noqa: E501
:rtype: int<|endoftext|>
|
78f0a6920f03f934f9ae59c1005f467439159e4b69aa5b49cee38f9fbf5c7498
|
@number_of_logins.setter
def number_of_logins(self, number_of_logins):
'Sets the number_of_logins of this UserResponse.\n\n Number of times the user has logged in. # noqa: E501\n\n :param number_of_logins: The number_of_logins of this UserResponse. # noqa: E501\n :type: int\n '
self._number_of_logins = number_of_logins
|
Sets the number_of_logins of this UserResponse.
Number of times the user has logged in. # noqa: E501
:param number_of_logins: The number_of_logins of this UserResponse. # noqa: E501
:type: int
|
xero_python/finance/models/user_response.py
|
number_of_logins
|
gavinwhyte/xero-python
| 1
|
python
|
@number_of_logins.setter
def number_of_logins(self, number_of_logins):
'Sets the number_of_logins of this UserResponse.\n\n Number of times the user has logged in. # noqa: E501\n\n :param number_of_logins: The number_of_logins of this UserResponse. # noqa: E501\n :type: int\n '
self._number_of_logins = number_of_logins
|
@number_of_logins.setter
def number_of_logins(self, number_of_logins):
'Sets the number_of_logins of this UserResponse.\n\n Number of times the user has logged in. # noqa: E501\n\n :param number_of_logins: The number_of_logins of this UserResponse. # noqa: E501\n :type: int\n '
self._number_of_logins = number_of_logins<|docstring|>Sets the number_of_logins of this UserResponse.
Number of times the user has logged in. # noqa: E501
:param number_of_logins: The number_of_logins of this UserResponse. # noqa: E501
:type: int<|endoftext|>
|
7b855f0f418f5d77f919ca9d3141050e4d66b5f8d9aa33872484a22e5161476f
|
@property
def number_of_documents_created(self):
'Gets the number_of_documents_created of this UserResponse. # noqa: E501\n\n Number of documents created. # noqa: E501\n\n :return: The number_of_documents_created of this UserResponse. # noqa: E501\n :rtype: int\n '
return self._number_of_documents_created
|
Gets the number_of_documents_created of this UserResponse. # noqa: E501
Number of documents created. # noqa: E501
:return: The number_of_documents_created of this UserResponse. # noqa: E501
:rtype: int
|
xero_python/finance/models/user_response.py
|
number_of_documents_created
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def number_of_documents_created(self):
'Gets the number_of_documents_created of this UserResponse. # noqa: E501\n\n Number of documents created. # noqa: E501\n\n :return: The number_of_documents_created of this UserResponse. # noqa: E501\n :rtype: int\n '
return self._number_of_documents_created
|
@property
def number_of_documents_created(self):
'Gets the number_of_documents_created of this UserResponse. # noqa: E501\n\n Number of documents created. # noqa: E501\n\n :return: The number_of_documents_created of this UserResponse. # noqa: E501\n :rtype: int\n '
return self._number_of_documents_created<|docstring|>Gets the number_of_documents_created of this UserResponse. # noqa: E501
Number of documents created. # noqa: E501
:return: The number_of_documents_created of this UserResponse. # noqa: E501
:rtype: int<|endoftext|>
|
9d26f29613c341f318a7a4a2f0062c02daad26aab75ffcb45739cb3afb72793b
|
@number_of_documents_created.setter
def number_of_documents_created(self, number_of_documents_created):
'Sets the number_of_documents_created of this UserResponse.\n\n Number of documents created. # noqa: E501\n\n :param number_of_documents_created: The number_of_documents_created of this UserResponse. # noqa: E501\n :type: int\n '
self._number_of_documents_created = number_of_documents_created
|
Sets the number_of_documents_created of this UserResponse.
Number of documents created. # noqa: E501
:param number_of_documents_created: The number_of_documents_created of this UserResponse. # noqa: E501
:type: int
|
xero_python/finance/models/user_response.py
|
number_of_documents_created
|
gavinwhyte/xero-python
| 1
|
python
|
@number_of_documents_created.setter
def number_of_documents_created(self, number_of_documents_created):
'Sets the number_of_documents_created of this UserResponse.\n\n Number of documents created. # noqa: E501\n\n :param number_of_documents_created: The number_of_documents_created of this UserResponse. # noqa: E501\n :type: int\n '
self._number_of_documents_created = number_of_documents_created
|
@number_of_documents_created.setter
def number_of_documents_created(self, number_of_documents_created):
'Sets the number_of_documents_created of this UserResponse.\n\n Number of documents created. # noqa: E501\n\n :param number_of_documents_created: The number_of_documents_created of this UserResponse. # noqa: E501\n :type: int\n '
self._number_of_documents_created = number_of_documents_created<|docstring|>Sets the number_of_documents_created of this UserResponse.
Number of documents created. # noqa: E501
:param number_of_documents_created: The number_of_documents_created of this UserResponse. # noqa: E501
:type: int<|endoftext|>
|
d48f5db66e666ec8f9fc18d81533d1d95812267632c84f6359bbf5557f289c5c
|
@property
def net_value_documents_created(self):
'Gets the net_value_documents_created of this UserResponse. # noqa: E501\n\n Net value of documents created. # noqa: E501\n\n :return: The net_value_documents_created of this UserResponse. # noqa: E501\n :rtype: float\n '
return self._net_value_documents_created
|
Gets the net_value_documents_created of this UserResponse. # noqa: E501
Net value of documents created. # noqa: E501
:return: The net_value_documents_created of this UserResponse. # noqa: E501
:rtype: float
|
xero_python/finance/models/user_response.py
|
net_value_documents_created
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def net_value_documents_created(self):
'Gets the net_value_documents_created of this UserResponse. # noqa: E501\n\n Net value of documents created. # noqa: E501\n\n :return: The net_value_documents_created of this UserResponse. # noqa: E501\n :rtype: float\n '
return self._net_value_documents_created
|
@property
def net_value_documents_created(self):
'Gets the net_value_documents_created of this UserResponse. # noqa: E501\n\n Net value of documents created. # noqa: E501\n\n :return: The net_value_documents_created of this UserResponse. # noqa: E501\n :rtype: float\n '
return self._net_value_documents_created<|docstring|>Gets the net_value_documents_created of this UserResponse. # noqa: E501
Net value of documents created. # noqa: E501
:return: The net_value_documents_created of this UserResponse. # noqa: E501
:rtype: float<|endoftext|>
|
9e555f794a873ea251725a3ae485567d612833f9edd755f83c1cc96fd2326836
|
@net_value_documents_created.setter
def net_value_documents_created(self, net_value_documents_created):
'Sets the net_value_documents_created of this UserResponse.\n\n Net value of documents created. # noqa: E501\n\n :param net_value_documents_created: The net_value_documents_created of this UserResponse. # noqa: E501\n :type: float\n '
self._net_value_documents_created = net_value_documents_created
|
Sets the net_value_documents_created of this UserResponse.
Net value of documents created. # noqa: E501
:param net_value_documents_created: The net_value_documents_created of this UserResponse. # noqa: E501
:type: float
|
xero_python/finance/models/user_response.py
|
net_value_documents_created
|
gavinwhyte/xero-python
| 1
|
python
|
@net_value_documents_created.setter
def net_value_documents_created(self, net_value_documents_created):
'Sets the net_value_documents_created of this UserResponse.\n\n Net value of documents created. # noqa: E501\n\n :param net_value_documents_created: The net_value_documents_created of this UserResponse. # noqa: E501\n :type: float\n '
self._net_value_documents_created = net_value_documents_created
|
@net_value_documents_created.setter
def net_value_documents_created(self, net_value_documents_created):
'Sets the net_value_documents_created of this UserResponse.\n\n Net value of documents created. # noqa: E501\n\n :param net_value_documents_created: The net_value_documents_created of this UserResponse. # noqa: E501\n :type: float\n '
self._net_value_documents_created = net_value_documents_created<|docstring|>Sets the net_value_documents_created of this UserResponse.
Net value of documents created. # noqa: E501
:param net_value_documents_created: The net_value_documents_created of this UserResponse. # noqa: E501
:type: float<|endoftext|>
|
0b74048b15fc938c2f94756df926ff449e33d88cd4935d851e3fdb8245ef981a
|
@property
def absolute_value_documents_created(self):
'Gets the absolute_value_documents_created of this UserResponse. # noqa: E501\n\n Absolute value of documents created. # noqa: E501\n\n :return: The absolute_value_documents_created of this UserResponse. # noqa: E501\n :rtype: float\n '
return self._absolute_value_documents_created
|
Gets the absolute_value_documents_created of this UserResponse. # noqa: E501
Absolute value of documents created. # noqa: E501
:return: The absolute_value_documents_created of this UserResponse. # noqa: E501
:rtype: float
|
xero_python/finance/models/user_response.py
|
absolute_value_documents_created
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def absolute_value_documents_created(self):
'Gets the absolute_value_documents_created of this UserResponse. # noqa: E501\n\n Absolute value of documents created. # noqa: E501\n\n :return: The absolute_value_documents_created of this UserResponse. # noqa: E501\n :rtype: float\n '
return self._absolute_value_documents_created
|
@property
def absolute_value_documents_created(self):
'Gets the absolute_value_documents_created of this UserResponse. # noqa: E501\n\n Absolute value of documents created. # noqa: E501\n\n :return: The absolute_value_documents_created of this UserResponse. # noqa: E501\n :rtype: float\n '
return self._absolute_value_documents_created<|docstring|>Gets the absolute_value_documents_created of this UserResponse. # noqa: E501
Absolute value of documents created. # noqa: E501
:return: The absolute_value_documents_created of this UserResponse. # noqa: E501
:rtype: float<|endoftext|>
|
55c2aaf65a66e25dee63e7fe504d5b8c80068f46289ddde1cc26ece0b280ff00
|
@absolute_value_documents_created.setter
def absolute_value_documents_created(self, absolute_value_documents_created):
'Sets the absolute_value_documents_created of this UserResponse.\n\n Absolute value of documents created. # noqa: E501\n\n :param absolute_value_documents_created: The absolute_value_documents_created of this UserResponse. # noqa: E501\n :type: float\n '
self._absolute_value_documents_created = absolute_value_documents_created
|
Sets the absolute_value_documents_created of this UserResponse.
Absolute value of documents created. # noqa: E501
:param absolute_value_documents_created: The absolute_value_documents_created of this UserResponse. # noqa: E501
:type: float
|
xero_python/finance/models/user_response.py
|
absolute_value_documents_created
|
gavinwhyte/xero-python
| 1
|
python
|
@absolute_value_documents_created.setter
def absolute_value_documents_created(self, absolute_value_documents_created):
'Sets the absolute_value_documents_created of this UserResponse.\n\n Absolute value of documents created. # noqa: E501\n\n :param absolute_value_documents_created: The absolute_value_documents_created of this UserResponse. # noqa: E501\n :type: float\n '
self._absolute_value_documents_created = absolute_value_documents_created
|
@absolute_value_documents_created.setter
def absolute_value_documents_created(self, absolute_value_documents_created):
'Sets the absolute_value_documents_created of this UserResponse.\n\n Absolute value of documents created. # noqa: E501\n\n :param absolute_value_documents_created: The absolute_value_documents_created of this UserResponse. # noqa: E501\n :type: float\n '
self._absolute_value_documents_created = absolute_value_documents_created<|docstring|>Sets the absolute_value_documents_created of this UserResponse.
Absolute value of documents created. # noqa: E501
:param absolute_value_documents_created: The absolute_value_documents_created of this UserResponse. # noqa: E501
:type: float<|endoftext|>
|
f9f43adbd91b73f0451ac14844ef2db44ae99be5dfd65af7e4741269e577b2e3
|
@property
def attached_practices(self):
'Gets the attached_practices of this UserResponse. # noqa: E501\n\n\n :return: The attached_practices of this UserResponse. # noqa: E501\n :rtype: list[PracticeResponse]\n '
return self._attached_practices
|
Gets the attached_practices of this UserResponse. # noqa: E501
:return: The attached_practices of this UserResponse. # noqa: E501
:rtype: list[PracticeResponse]
|
xero_python/finance/models/user_response.py
|
attached_practices
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def attached_practices(self):
'Gets the attached_practices of this UserResponse. # noqa: E501\n\n\n :return: The attached_practices of this UserResponse. # noqa: E501\n :rtype: list[PracticeResponse]\n '
return self._attached_practices
|
@property
def attached_practices(self):
'Gets the attached_practices of this UserResponse. # noqa: E501\n\n\n :return: The attached_practices of this UserResponse. # noqa: E501\n :rtype: list[PracticeResponse]\n '
return self._attached_practices<|docstring|>Gets the attached_practices of this UserResponse. # noqa: E501
:return: The attached_practices of this UserResponse. # noqa: E501
:rtype: list[PracticeResponse]<|endoftext|>
|
090f26977d52cfef22184234a0def213d9847e7b77e28a2f16431382685b6e55
|
@attached_practices.setter
def attached_practices(self, attached_practices):
'Sets the attached_practices of this UserResponse.\n\n\n :param attached_practices: The attached_practices of this UserResponse. # noqa: E501\n :type: list[PracticeResponse]\n '
self._attached_practices = attached_practices
|
Sets the attached_practices of this UserResponse.
:param attached_practices: The attached_practices of this UserResponse. # noqa: E501
:type: list[PracticeResponse]
|
xero_python/finance/models/user_response.py
|
attached_practices
|
gavinwhyte/xero-python
| 1
|
python
|
@attached_practices.setter
def attached_practices(self, attached_practices):
'Sets the attached_practices of this UserResponse.\n\n\n :param attached_practices: The attached_practices of this UserResponse. # noqa: E501\n :type: list[PracticeResponse]\n '
self._attached_practices = attached_practices
|
@attached_practices.setter
def attached_practices(self, attached_practices):
'Sets the attached_practices of this UserResponse.\n\n\n :param attached_practices: The attached_practices of this UserResponse. # noqa: E501\n :type: list[PracticeResponse]\n '
self._attached_practices = attached_practices<|docstring|>Sets the attached_practices of this UserResponse.
:param attached_practices: The attached_practices of this UserResponse. # noqa: E501
:type: list[PracticeResponse]<|endoftext|>
|
128c4276e6a21914da4ce30bcf9079bdb9dae88b9cba7f49da844791250f5d37
|
@property
def history_records(self):
'Gets the history_records of this UserResponse. # noqa: E501\n\n\n :return: The history_records of this UserResponse. # noqa: E501\n :rtype: list[HistoryRecordResponse]\n '
return self._history_records
|
Gets the history_records of this UserResponse. # noqa: E501
:return: The history_records of this UserResponse. # noqa: E501
:rtype: list[HistoryRecordResponse]
|
xero_python/finance/models/user_response.py
|
history_records
|
gavinwhyte/xero-python
| 1
|
python
|
@property
def history_records(self):
'Gets the history_records of this UserResponse. # noqa: E501\n\n\n :return: The history_records of this UserResponse. # noqa: E501\n :rtype: list[HistoryRecordResponse]\n '
return self._history_records
|
@property
def history_records(self):
'Gets the history_records of this UserResponse. # noqa: E501\n\n\n :return: The history_records of this UserResponse. # noqa: E501\n :rtype: list[HistoryRecordResponse]\n '
return self._history_records<|docstring|>Gets the history_records of this UserResponse. # noqa: E501
:return: The history_records of this UserResponse. # noqa: E501
:rtype: list[HistoryRecordResponse]<|endoftext|>
|
1bd0566c4b4ff613761648ef9bc7da99cd8f63ccbec4c90393680ec7f8496652
|
@history_records.setter
def history_records(self, history_records):
'Sets the history_records of this UserResponse.\n\n\n :param history_records: The history_records of this UserResponse. # noqa: E501\n :type: list[HistoryRecordResponse]\n '
self._history_records = history_records
|
Sets the history_records of this UserResponse.
:param history_records: The history_records of this UserResponse. # noqa: E501
:type: list[HistoryRecordResponse]
|
xero_python/finance/models/user_response.py
|
history_records
|
gavinwhyte/xero-python
| 1
|
python
|
@history_records.setter
def history_records(self, history_records):
'Sets the history_records of this UserResponse.\n\n\n :param history_records: The history_records of this UserResponse. # noqa: E501\n :type: list[HistoryRecordResponse]\n '
self._history_records = history_records
|
@history_records.setter
def history_records(self, history_records):
'Sets the history_records of this UserResponse.\n\n\n :param history_records: The history_records of this UserResponse. # noqa: E501\n :type: list[HistoryRecordResponse]\n '
self._history_records = history_records<|docstring|>Sets the history_records of this UserResponse.
:param history_records: The history_records of this UserResponse. # noqa: E501
:type: list[HistoryRecordResponse]<|endoftext|>
|
805fa10cb871e35a8f48747c7ffbb455ba112cdf9c6c9fa89578cdc971a0ede1
|
def simple_model(input_nodes, dim, n_classes, dm=True):
'\n :param input_nodes: number of nodes in first layer (int)\n :param dim: input shape (tuple)\n :param n_classes: number of classes (int)\n :param dm: show network architecture (boolean)\n :return: keras.Sequential() model\n -------------------------------------\n Layer 1: Dense, Fully connected input layer\n Activation 1: Sigmoid\n Layer 2: Dense, fully connected output layer\n Activation 2: Softmax\n -------------------------------------\n '
model = Sequential()
model.add(Dense(input_nodes, activation='sigmoid', input_shape=dim))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=SGD())
if dm:
model.summary()
return model
|
:param input_nodes: number of nodes in first layer (int)
:param dim: input shape (tuple)
:param n_classes: number of classes (int)
:param dm: show network architecture (boolean)
:return: keras.Sequential() model
-------------------------------------
Layer 1: Dense, Fully connected input layer
Activation 1: Sigmoid
Layer 2: Dense, fully connected output layer
Activation 2: Softmax
-------------------------------------
|
simple/MNIST_simple.py
|
simple_model
|
magluva/INF368
| 0
|
python
|
def simple_model(input_nodes, dim, n_classes, dm=True):
'\n :param input_nodes: number of nodes in first layer (int)\n :param dim: input shape (tuple)\n :param n_classes: number of classes (int)\n :param dm: show network architecture (boolean)\n :return: keras.Sequential() model\n -------------------------------------\n Layer 1: Dense, Fully connected input layer\n Activation 1: Sigmoid\n Layer 2: Dense, fully connected output layer\n Activation 2: Softmax\n -------------------------------------\n '
model = Sequential()
model.add(Dense(input_nodes, activation='sigmoid', input_shape=dim))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=SGD())
if dm:
model.summary()
return model
|
def simple_model(input_nodes, dim, n_classes, dm=True):
'\n :param input_nodes: number of nodes in first layer (int)\n :param dim: input shape (tuple)\n :param n_classes: number of classes (int)\n :param dm: show network architecture (boolean)\n :return: keras.Sequential() model\n -------------------------------------\n Layer 1: Dense, Fully connected input layer\n Activation 1: Sigmoid\n Layer 2: Dense, fully connected output layer\n Activation 2: Softmax\n -------------------------------------\n '
model = Sequential()
model.add(Dense(input_nodes, activation='sigmoid', input_shape=dim))
model.add(Dense(n_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=SGD())
if dm:
model.summary()
return model<|docstring|>:param input_nodes: number of nodes in first layer (int)
:param dim: input shape (tuple)
:param n_classes: number of classes (int)
:param dm: show network architecture (boolean)
:return: keras.Sequential() model
-------------------------------------
Layer 1: Dense, Fully connected input layer
Activation 1: Sigmoid
Layer 2: Dense, fully connected output layer
Activation 2: Softmax
-------------------------------------<|endoftext|>
|
c8ac9b6619701d8e9c6262deef45deaccdbec962da8b26a53e3b84177eb230fe
|
def my_plot(history, file_name):
'\n :param history: training history (dict)\n :param file_name: desired PNG save file name (str)\n :return: matplotlib.pyplot.show()\n -------------------------------------\n Plots train/validation accuracy and\n loss of model.\n -------------------------------------\n '
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.tight_layout()
plt.savefig((file_name + '.png'))
return plt.show()
|
:param history: training history (dict)
:param file_name: desired PNG save file name (str)
:return: matplotlib.pyplot.show()
-------------------------------------
Plots train/validation accuracy and
loss of model.
-------------------------------------
|
simple/MNIST_simple.py
|
my_plot
|
magluva/INF368
| 0
|
python
|
def my_plot(history, file_name):
'\n :param history: training history (dict)\n :param file_name: desired PNG save file name (str)\n :return: matplotlib.pyplot.show()\n -------------------------------------\n Plots train/validation accuracy and\n loss of model.\n -------------------------------------\n '
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.tight_layout()
plt.savefig((file_name + '.png'))
return plt.show()
|
def my_plot(history, file_name):
'\n :param history: training history (dict)\n :param file_name: desired PNG save file name (str)\n :return: matplotlib.pyplot.show()\n -------------------------------------\n Plots train/validation accuracy and\n loss of model.\n -------------------------------------\n '
fig = plt.figure()
plt.subplot(2, 1, 1)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='lower right')
plt.subplot(2, 1, 2)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper right')
plt.tight_layout()
plt.savefig((file_name + '.png'))
return plt.show()<|docstring|>:param history: training history (dict)
:param file_name: desired PNG save file name (str)
:return: matplotlib.pyplot.show()
-------------------------------------
Plots train/validation accuracy and
loss of model.
-------------------------------------<|endoftext|>
|
e12ba139b9deadd969acde68a1b1ea2d60ada10e9fc2fe233f7c34d2c9a5eb0d
|
def save_model(model, model_name, history=None):
'\n :param model: Sequential() model to save\n :param model_name: desired file name (str)\n :param history: training history (dict)\n :return: None\n -------------------------------------\n Saves model as .json file,\n weights as HDF5 and training history\n as a dict in pickle file format.\n -------------------------------------\n '
model_json = model.to_json()
with open((model_name + '.json'), 'w') as json_file:
json_file.write(model_json)
model.save_weights((model_name + '_weights.h5'))
print('\nSaved model to disk as: {}.json, {}_weights.h5'.format(model_name, model_name))
if (history is not None):
file_name = 'simple_train_hist_dict'
with open(file_name, 'wb') as f:
pickle.dump(history.history, f)
print('\nSaved training history to disk as: {}'.format(file_name))
|
:param model: Sequential() model to save
:param model_name: desired file name (str)
:param history: training history (dict)
:return: None
-------------------------------------
Saves model as .json file,
weights as HDF5 and training history
as a dict in pickle file format.
-------------------------------------
|
simple/MNIST_simple.py
|
save_model
|
magluva/INF368
| 0
|
python
|
def save_model(model, model_name, history=None):
'\n :param model: Sequential() model to save\n :param model_name: desired file name (str)\n :param history: training history (dict)\n :return: None\n -------------------------------------\n Saves model as .json file,\n weights as HDF5 and training history\n as a dict in pickle file format.\n -------------------------------------\n '
model_json = model.to_json()
with open((model_name + '.json'), 'w') as json_file:
json_file.write(model_json)
model.save_weights((model_name + '_weights.h5'))
print('\nSaved model to disk as: {}.json, {}_weights.h5'.format(model_name, model_name))
if (history is not None):
file_name = 'simple_train_hist_dict'
with open(file_name, 'wb') as f:
pickle.dump(history.history, f)
print('\nSaved training history to disk as: {}'.format(file_name))
|
def save_model(model, model_name, history=None):
'\n :param model: Sequential() model to save\n :param model_name: desired file name (str)\n :param history: training history (dict)\n :return: None\n -------------------------------------\n Saves model as .json file,\n weights as HDF5 and training history\n as a dict in pickle file format.\n -------------------------------------\n '
model_json = model.to_json()
with open((model_name + '.json'), 'w') as json_file:
json_file.write(model_json)
model.save_weights((model_name + '_weights.h5'))
print('\nSaved model to disk as: {}.json, {}_weights.h5'.format(model_name, model_name))
if (history is not None):
file_name = 'simple_train_hist_dict'
with open(file_name, 'wb') as f:
pickle.dump(history.history, f)
print('\nSaved training history to disk as: {}'.format(file_name))<|docstring|>:param model: Sequential() model to save
:param model_name: desired file name (str)
:param history: training history (dict)
:return: None
-------------------------------------
Saves model as .json file,
weights as HDF5 and training history
as a dict in pickle file format.
-------------------------------------<|endoftext|>
|
85819201ec89d1976037f065ea0a8d5f52894c6c719764ea338509807012f67a
|
def main():
'\n Simple Model\n -------------------------------------\n - Compiles and trains a simple neural network on the MNIST data set of gray-scale images.\n - Plots acc/loss vs. epochs.\n - Saves model data.\n '
model_name = 'MNIST_simple'
img_x = 28
img_y = 28
validation_frac = 12
n_classes = 10
n_epochs = 10
((X_train, y_train), (X_test, y_test)) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
input_shape = ((img_x * img_y),)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train_1h = to_categorical(y_train, 10)
y_test_1h = to_categorical(y_test, 10)
start_idx = (X_train.shape[0] - (X_train.shape[0] // validation_frac))
X_val = X_train[start_idx:]
y_val_1h = y_train_1h[start_idx:]
X_train = X_train[:start_idx]
y_train_1h = y_train_1h[:start_idx]
model = simple_model(32, input_shape, n_classes)
hist = model.fit(X_train, y_train_1h, batch_size=64, epochs=n_epochs, verbose=1, validation_data=(X_val, y_val_1h))
my_plot(hist, model_name)
scores = model.evaluate(x=X_test, y=y_test_1h, batch_size=128, verbose=1)
[print('-', end='') for i in range(20)]
print('\nMetrics: {}'.format(scores))
[print('-', end='') for i in range(20)]
save_model(model, model_name, history=hist)
|
Simple Model
-------------------------------------
- Compiles and trains a simple neural network on the MNIST data set of gray-scale images.
- Plots acc/loss vs. epochs.
- Saves model data.
|
simple/MNIST_simple.py
|
main
|
magluva/INF368
| 0
|
python
|
def main():
'\n Simple Model\n -------------------------------------\n - Compiles and trains a simple neural network on the MNIST data set of gray-scale images.\n - Plots acc/loss vs. epochs.\n - Saves model data.\n '
model_name = 'MNIST_simple'
img_x = 28
img_y = 28
validation_frac = 12
n_classes = 10
n_epochs = 10
((X_train, y_train), (X_test, y_test)) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
input_shape = ((img_x * img_y),)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train_1h = to_categorical(y_train, 10)
y_test_1h = to_categorical(y_test, 10)
start_idx = (X_train.shape[0] - (X_train.shape[0] // validation_frac))
X_val = X_train[start_idx:]
y_val_1h = y_train_1h[start_idx:]
X_train = X_train[:start_idx]
y_train_1h = y_train_1h[:start_idx]
model = simple_model(32, input_shape, n_classes)
hist = model.fit(X_train, y_train_1h, batch_size=64, epochs=n_epochs, verbose=1, validation_data=(X_val, y_val_1h))
my_plot(hist, model_name)
scores = model.evaluate(x=X_test, y=y_test_1h, batch_size=128, verbose=1)
[print('-', end=) for i in range(20)]
print('\nMetrics: {}'.format(scores))
[print('-', end=) for i in range(20)]
save_model(model, model_name, history=hist)
|
def main():
'\n Simple Model\n -------------------------------------\n - Compiles and trains a simple neural network on the MNIST data set of gray-scale images.\n - Plots acc/loss vs. epochs.\n - Saves model data.\n '
model_name = 'MNIST_simple'
img_x = 28
img_y = 28
validation_frac = 12
n_classes = 10
n_epochs = 10
((X_train, y_train), (X_test, y_test)) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
input_shape = ((img_x * img_y),)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
y_train_1h = to_categorical(y_train, 10)
y_test_1h = to_categorical(y_test, 10)
start_idx = (X_train.shape[0] - (X_train.shape[0] // validation_frac))
X_val = X_train[start_idx:]
y_val_1h = y_train_1h[start_idx:]
X_train = X_train[:start_idx]
y_train_1h = y_train_1h[:start_idx]
model = simple_model(32, input_shape, n_classes)
hist = model.fit(X_train, y_train_1h, batch_size=64, epochs=n_epochs, verbose=1, validation_data=(X_val, y_val_1h))
my_plot(hist, model_name)
scores = model.evaluate(x=X_test, y=y_test_1h, batch_size=128, verbose=1)
[print('-', end=) for i in range(20)]
print('\nMetrics: {}'.format(scores))
[print('-', end=) for i in range(20)]
save_model(model, model_name, history=hist)<|docstring|>Simple Model
-------------------------------------
- Compiles and trains a simple neural network on the MNIST data set of gray-scale images.
- Plots acc/loss vs. epochs.
- Saves model data.<|endoftext|>
|
6514562b69beac4d41bb7bebb9c23ff033a74c81b2bf5d042a88ac1c96f67df3
|
def __init__(self):
'Initializes a new instance of the Step object.\n\n '
self.input = {}
self.output = {}
|
Initializes a new instance of the Step object.
|
src/step.py
|
__init__
|
allanwright/media-classifier
| 2
|
python
|
def __init__(self):
'\n\n '
self.input = {}
self.output = {}
|
def __init__(self):
'\n\n '
self.input = {}
self.output = {}<|docstring|>Initializes a new instance of the Step object.<|endoftext|>
|
bfc0ad9bfbd2c416ccc1be8e79892657887072714eabc837251a88f5956f4a0b
|
@abstractmethod
def run(self):
'Runs the pipeline step.\n\n '
|
Runs the pipeline step.
|
src/step.py
|
run
|
allanwright/media-classifier
| 2
|
python
|
@abstractmethod
def run(self):
'\n\n '
|
@abstractmethod
def run(self):
'\n\n '<|docstring|>Runs the pipeline step.<|endoftext|>
|
29ce1d57983ab3979960626c414384513aa5060f111650165c6c399aeefd174e
|
def print(self, message: str, **kwargs):
'Prints a message about the progress of the pipeline step.\n\n Args:\n message (string): The message to print.\n kwargs (args): The arguments used to format the message.\n '
step_name = self.__class__.__name__
message = message.format(**kwargs)
print("Step '{step_name}': {message}".format(step_name=step_name, message=message))
|
Prints a message about the progress of the pipeline step.
Args:
message (string): The message to print.
kwargs (args): The arguments used to format the message.
|
src/step.py
|
print
|
allanwright/media-classifier
| 2
|
python
|
def print(self, message: str, **kwargs):
'Prints a message about the progress of the pipeline step.\n\n Args:\n message (string): The message to print.\n kwargs (args): The arguments used to format the message.\n '
step_name = self.__class__.__name__
message = message.format(**kwargs)
print("Step '{step_name}': {message}".format(step_name=step_name, message=message))
|
def print(self, message: str, **kwargs):
'Prints a message about the progress of the pipeline step.\n\n Args:\n message (string): The message to print.\n kwargs (args): The arguments used to format the message.\n '
step_name = self.__class__.__name__
message = message.format(**kwargs)
print("Step '{step_name}': {message}".format(step_name=step_name, message=message))<|docstring|>Prints a message about the progress of the pipeline step.
Args:
message (string): The message to print.
kwargs (args): The arguments used to format the message.<|endoftext|>
|
9b58e2ebf573eda39d3376f86e7571cefabeb46758c39eb25a264733c366b3c3
|
def print_separator(text: str) -> None:
'Print a nice separator to the console with the given text.\n\n Arguments:\n text -- The texts that should be printed under the separator.\n '
print('')
print(f"[91m{('=' * 80)}[00m")
print(f'[91m= > {text}[00m')
|
Print a nice separator to the console with the given text.
Arguments:
text -- The texts that should be printed under the separator.
|
webias/utils.py
|
print_separator
|
webis-de/IJCAI-21
| 1
|
python
|
def print_separator(text: str) -> None:
'Print a nice separator to the console with the given text.\n\n Arguments:\n text -- The texts that should be printed under the separator.\n '
print()
print(f"[91m{('=' * 80)}[00m")
print(f'[91m= > {text}[00m')
|
def print_separator(text: str) -> None:
'Print a nice separator to the console with the given text.\n\n Arguments:\n text -- The texts that should be printed under the separator.\n '
print()
print(f"[91m{('=' * 80)}[00m")
print(f'[91m= > {text}[00m')<|docstring|>Print a nice separator to the console with the given text.
Arguments:
text -- The texts that should be printed under the separator.<|endoftext|>
|
cf0e152a2b2efa44d513646676a48b9e2c4b01028b4c89ae76b0eabce372bb37
|
def extract_unique_tokens(lexicon: dict) -> list:
'Extract all unique tokens from a lexicon and return it as list.\n\n The lexicon is expected to have the format returned by the `prepare_lexicons` function.\n\n Arguments:\n lexicon -- A dictionary containing all the tokens that should be extracted into a unique list of\n tokens.\n '
all_tokens = []
for (test_type, test_lexicons) in lexicon.items():
for words in test_lexicons.values():
all_tokens.extend(words[0][(- 1)])
return list(set(all_tokens))
|
Extract all unique tokens from a lexicon and return it as list.
The lexicon is expected to have the format returned by the `prepare_lexicons` function.
Arguments:
lexicon -- A dictionary containing all the tokens that should be extracted into a unique list of
tokens.
|
webias/utils.py
|
extract_unique_tokens
|
webis-de/IJCAI-21
| 1
|
python
|
def extract_unique_tokens(lexicon: dict) -> list:
'Extract all unique tokens from a lexicon and return it as list.\n\n The lexicon is expected to have the format returned by the `prepare_lexicons` function.\n\n Arguments:\n lexicon -- A dictionary containing all the tokens that should be extracted into a unique list of\n tokens.\n '
all_tokens = []
for (test_type, test_lexicons) in lexicon.items():
for words in test_lexicons.values():
all_tokens.extend(words[0][(- 1)])
return list(set(all_tokens))
|
def extract_unique_tokens(lexicon: dict) -> list:
'Extract all unique tokens from a lexicon and return it as list.\n\n The lexicon is expected to have the format returned by the `prepare_lexicons` function.\n\n Arguments:\n lexicon -- A dictionary containing all the tokens that should be extracted into a unique list of\n tokens.\n '
all_tokens = []
for (test_type, test_lexicons) in lexicon.items():
for words in test_lexicons.values():
all_tokens.extend(words[0][(- 1)])
return list(set(all_tokens))<|docstring|>Extract all unique tokens from a lexicon and return it as list.
The lexicon is expected to have the format returned by the `prepare_lexicons` function.
Arguments:
lexicon -- A dictionary containing all the tokens that should be extracted into a unique list of
tokens.<|endoftext|>
|
fd699d26bce7d1ec4f4fc8446a710e1e123c15a3650665c2a53cff7781f48bb7
|
def build_word_embedding_cache(lexicon: dict, embedding_model) -> dict:
'Retrieve the word vectors for all tokens in the provided lexicons and cache them.\n\n Return the cache as a dictionary.\n This should decrease the access times in cases where the vectors are requested multiple hundred\n times. Since the runs should all have the same tokens, use the last index of the first run and\n extract all tokens.\n\n Arguments:\n lexicon -- A dictionary containing the tokens that should be cached. Expects the lexcion to be\n in a specific format, as returned by the lexicon preparation function\n `prepare_lexicons`.\n embedding_model -- The model that should be used to retrieve the word vectors.\n '
unique_tokens = extract_unique_tokens(lexicon)
return {key: embedding_model[key] for key in unique_tokens}
|
Retrieve the word vectors for all tokens in the provided lexicons and cache them.
Return the cache as a dictionary.
This should decrease the access times in cases where the vectors are requested multiple hundred
times. Since the runs should all have the same tokens, use the last index of the first run and
extract all tokens.
Arguments:
lexicon -- A dictionary containing the tokens that should be cached. Expects the lexcion to be
in a specific format, as returned by the lexicon preparation function
`prepare_lexicons`.
embedding_model -- The model that should be used to retrieve the word vectors.
|
webias/utils.py
|
build_word_embedding_cache
|
webis-de/IJCAI-21
| 1
|
python
|
def build_word_embedding_cache(lexicon: dict, embedding_model) -> dict:
'Retrieve the word vectors for all tokens in the provided lexicons and cache them.\n\n Return the cache as a dictionary.\n This should decrease the access times in cases where the vectors are requested multiple hundred\n times. Since the runs should all have the same tokens, use the last index of the first run and\n extract all tokens.\n\n Arguments:\n lexicon -- A dictionary containing the tokens that should be cached. Expects the lexcion to be\n in a specific format, as returned by the lexicon preparation function\n `prepare_lexicons`.\n embedding_model -- The model that should be used to retrieve the word vectors.\n '
unique_tokens = extract_unique_tokens(lexicon)
return {key: embedding_model[key] for key in unique_tokens}
|
def build_word_embedding_cache(lexicon: dict, embedding_model) -> dict:
'Retrieve the word vectors for all tokens in the provided lexicons and cache them.\n\n Return the cache as a dictionary.\n This should decrease the access times in cases where the vectors are requested multiple hundred\n times. Since the runs should all have the same tokens, use the last index of the first run and\n extract all tokens.\n\n Arguments:\n lexicon -- A dictionary containing the tokens that should be cached. Expects the lexcion to be\n in a specific format, as returned by the lexicon preparation function\n `prepare_lexicons`.\n embedding_model -- The model that should be used to retrieve the word vectors.\n '
unique_tokens = extract_unique_tokens(lexicon)
return {key: embedding_model[key] for key in unique_tokens}<|docstring|>Retrieve the word vectors for all tokens in the provided lexicons and cache them.
Return the cache as a dictionary.
This should decrease the access times in cases where the vectors are requested multiple hundred
times. Since the runs should all have the same tokens, use the last index of the first run and
extract all tokens.
Arguments:
lexicon -- A dictionary containing the tokens that should be cached. Expects the lexcion to be
in a specific format, as returned by the lexicon preparation function
`prepare_lexicons`.
embedding_model -- The model that should be used to retrieve the word vectors.<|endoftext|>
|
ac4aca0e31717c6cd70ecbd388a274ef7ca8132dadc267c4077dc6f3adf80d3c
|
def _determine_combined_lexicon_eval_lengths(lexicons: list, step_size: int, allow_different_lengths: bool) -> list:
'Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the combined length of all given\n lexicons.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n '
lexicon_lengths = [len(lex) for lex in lexicons]
if allow_different_lengths:
max_lexicon_size = sum(lexicon_lengths)
else:
max_lexicon_size = (min(lexicon_lengths) * len(lexicons))
combined_step_size = (step_size * len(lexicons))
lexicon_eval_lengths = list(range(combined_step_size, (max_lexicon_size + 1), combined_step_size))
if ((max_lexicon_size % combined_step_size) > 0):
lexicon_eval_lengths.append(max_lexicon_size)
return lexicon_eval_lengths
|
Determine the lengths at which the given lexicons should be evaluated at.
Return a list of lexicon sizes.
The lexicon sizes will start at `step_size` and end at the combined length of all given
lexicons.
Arguments:
lexicons -- The lexcions for which the evlauation lengths should be determined.
step_size -- The number of words to be added to each lexicon at each step.
allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths
to be used or not.
|
webias/utils.py
|
_determine_combined_lexicon_eval_lengths
|
webis-de/IJCAI-21
| 1
|
python
|
def _determine_combined_lexicon_eval_lengths(lexicons: list, step_size: int, allow_different_lengths: bool) -> list:
'Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the combined length of all given\n lexicons.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n '
lexicon_lengths = [len(lex) for lex in lexicons]
if allow_different_lengths:
max_lexicon_size = sum(lexicon_lengths)
else:
max_lexicon_size = (min(lexicon_lengths) * len(lexicons))
combined_step_size = (step_size * len(lexicons))
lexicon_eval_lengths = list(range(combined_step_size, (max_lexicon_size + 1), combined_step_size))
if ((max_lexicon_size % combined_step_size) > 0):
lexicon_eval_lengths.append(max_lexicon_size)
return lexicon_eval_lengths
|
def _determine_combined_lexicon_eval_lengths(lexicons: list, step_size: int, allow_different_lengths: bool) -> list:
'Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the combined length of all given\n lexicons.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n '
lexicon_lengths = [len(lex) for lex in lexicons]
if allow_different_lengths:
max_lexicon_size = sum(lexicon_lengths)
else:
max_lexicon_size = (min(lexicon_lengths) * len(lexicons))
combined_step_size = (step_size * len(lexicons))
lexicon_eval_lengths = list(range(combined_step_size, (max_lexicon_size + 1), combined_step_size))
if ((max_lexicon_size % combined_step_size) > 0):
lexicon_eval_lengths.append(max_lexicon_size)
return lexicon_eval_lengths<|docstring|>Determine the lengths at which the given lexicons should be evaluated at.
Return a list of lexicon sizes.
The lexicon sizes will start at `step_size` and end at the combined length of all given
lexicons.
Arguments:
lexicons -- The lexcions for which the evlauation lengths should be determined.
step_size -- The number of words to be added to each lexicon at each step.
allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths
to be used or not.<|endoftext|>
|
3219237e44e418688a2288bd994b36845224a50273755dfded327c3bb195baee
|
def _determine_lexicon_eval_lengths(lexicons: list, step_size: int, allow_different_lengths: bool) -> list:
'Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the length of the longest lexicon if\n different lengths are allowed and the length of the shortest one otherwise.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n '
lexicon_lengths = [len(lex) for lex in lexicons]
if allow_different_lengths:
max_lexicon_size = max(lexicon_lengths)
else:
max_lexicon_size = min(lexicon_lengths)
lexicon_eval_lengths = list(range(step_size, (max_lexicon_size + 1), step_size))
if ((max_lexicon_size % step_size) > 0):
lexicon_eval_lengths.append(max_lexicon_size)
return lexicon_eval_lengths
|
Determine the lengths at which the given lexicons should be evaluated at.
Return a list of lexicon sizes.
The lexicon sizes will start at `step_size` and end at the length of the longest lexicon if
different lengths are allowed and the length of the shortest one otherwise.
Arguments:
lexicons -- The lexcions for which the evlauation lengths should be determined.
step_size -- The number of words to be added to each lexicon at each step.
allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths
to be used or not.
|
webias/utils.py
|
_determine_lexicon_eval_lengths
|
webis-de/IJCAI-21
| 1
|
python
|
def _determine_lexicon_eval_lengths(lexicons: list, step_size: int, allow_different_lengths: bool) -> list:
'Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the length of the longest lexicon if\n different lengths are allowed and the length of the shortest one otherwise.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n '
lexicon_lengths = [len(lex) for lex in lexicons]
if allow_different_lengths:
max_lexicon_size = max(lexicon_lengths)
else:
max_lexicon_size = min(lexicon_lengths)
lexicon_eval_lengths = list(range(step_size, (max_lexicon_size + 1), step_size))
if ((max_lexicon_size % step_size) > 0):
lexicon_eval_lengths.append(max_lexicon_size)
return lexicon_eval_lengths
|
def _determine_lexicon_eval_lengths(lexicons: list, step_size: int, allow_different_lengths: bool) -> list:
'Determine the lengths at which the given lexicons should be evaluated at.\n\n Return a list of lexicon sizes.\n\n The lexicon sizes will start at `step_size` and end at the length of the longest lexicon if\n different lengths are allowed and the length of the shortest one otherwise.\n\n Arguments:\n lexicons -- The lexcions for which the evlauation lengths should be determined.\n step_size -- The number of words to be added to each lexicon at each step.\n allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths\n to be used or not.\n '
lexicon_lengths = [len(lex) for lex in lexicons]
if allow_different_lengths:
max_lexicon_size = max(lexicon_lengths)
else:
max_lexicon_size = min(lexicon_lengths)
lexicon_eval_lengths = list(range(step_size, (max_lexicon_size + 1), step_size))
if ((max_lexicon_size % step_size) > 0):
lexicon_eval_lengths.append(max_lexicon_size)
return lexicon_eval_lengths<|docstring|>Determine the lengths at which the given lexicons should be evaluated at.
Return a list of lexicon sizes.
The lexicon sizes will start at `step_size` and end at the length of the longest lexicon if
different lengths are allowed and the length of the shortest one otherwise.
Arguments:
lexicons -- The lexcions for which the evlauation lengths should be determined.
step_size -- The number of words to be added to each lexicon at each step.
allow_different_lengths -- Whether the evaluation allows for differently sized lexicon lengths
to be used or not.<|endoftext|>
|
7442397439677e82ee01b76ed245f357891dc7e89b2d81367b186fdb586ff6ba
|
def prepare_combined_lexicons(lexicon_1: list, lexicon_2: list, shuffled_runs: int, step_size: int, lowercase: bool, allow_different_lengths: bool=False) -> list:
'Combine and prepare a two lists of tokens for the metric evaluation.\n\n Return a list of shuffled runs for a lexicon that is created by combining all given ones. This\n method ensures that each partial lexicon (parts of the full lexicons at different runs) will\n contain roughly the same number of tokens from each of the lexicons.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one.\n '
prepared_lexicons = ([[t.lower() for t in inner] for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2])
shortest_lexicon = min([len(lex) for lex in prepared_lexicons])
lexicon_lengths = _determine_combined_lexicon_eval_lengths(prepared_lexicons, step_size, allow_different_lengths)
lexicon_runs = []
for run in range(0, shuffled_runs):
shuffled_partials = []
if allow_different_lengths:
lexicons_zip = zip_longest(*prepared_lexicons, fillvalue=None)
lexicons_unpkg = [t for t_tuple in lexicons_zip for t in t_tuple if (t is not None)]
shuffled_lexicons_unpkg = shuffle(lexicons_unpkg, random_state=run)
else:
shuffled_lexicons = [shuffle(lexicon[:shortest_lexicon], random_state=run) for lexicon in prepared_lexicons]
shuffled_lexicons_zip = zip(*shuffled_lexicons)
shuffled_lexicons_unpkg = [t for t_tuple in shuffled_lexicons_zip for t in t_tuple]
for length in lexicon_lengths:
shuffled_partials.append(shuffled_lexicons_unpkg[:length])
lexicon_runs.append(shuffled_partials)
return lexicon_runs
|
Combine and prepare a two lists of tokens for the metric evaluation.
Return a list of shuffled runs for a lexicon that is created by combining all given ones. This
method ensures that each partial lexicon (parts of the full lexicons at different runs) will
contain roughly the same number of tokens from each of the lexicons.
Arguments:
lexicon_1 -- The first list of tokens that should be prepared.
lexicon_2 -- The second list of tokens that should be prepared.
shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.
step_size -- By which size to increase the lexicons for each test.
lowercase -- Whether the tokens should be lowercased or not.
allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,
all lexicons will be trimed to the size of the shortest one.
|
webias/utils.py
|
prepare_combined_lexicons
|
webis-de/IJCAI-21
| 1
|
python
|
def prepare_combined_lexicons(lexicon_1: list, lexicon_2: list, shuffled_runs: int, step_size: int, lowercase: bool, allow_different_lengths: bool=False) -> list:
'Combine and prepare a two lists of tokens for the metric evaluation.\n\n Return a list of shuffled runs for a lexicon that is created by combining all given ones. This\n method ensures that each partial lexicon (parts of the full lexicons at different runs) will\n contain roughly the same number of tokens from each of the lexicons.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one.\n '
prepared_lexicons = ([[t.lower() for t in inner] for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2])
shortest_lexicon = min([len(lex) for lex in prepared_lexicons])
lexicon_lengths = _determine_combined_lexicon_eval_lengths(prepared_lexicons, step_size, allow_different_lengths)
lexicon_runs = []
for run in range(0, shuffled_runs):
shuffled_partials = []
if allow_different_lengths:
lexicons_zip = zip_longest(*prepared_lexicons, fillvalue=None)
lexicons_unpkg = [t for t_tuple in lexicons_zip for t in t_tuple if (t is not None)]
shuffled_lexicons_unpkg = shuffle(lexicons_unpkg, random_state=run)
else:
shuffled_lexicons = [shuffle(lexicon[:shortest_lexicon], random_state=run) for lexicon in prepared_lexicons]
shuffled_lexicons_zip = zip(*shuffled_lexicons)
shuffled_lexicons_unpkg = [t for t_tuple in shuffled_lexicons_zip for t in t_tuple]
for length in lexicon_lengths:
shuffled_partials.append(shuffled_lexicons_unpkg[:length])
lexicon_runs.append(shuffled_partials)
return lexicon_runs
|
def prepare_combined_lexicons(lexicon_1: list, lexicon_2: list, shuffled_runs: int, step_size: int, lowercase: bool, allow_different_lengths: bool=False) -> list:
'Combine and prepare a two lists of tokens for the metric evaluation.\n\n Return a list of shuffled runs for a lexicon that is created by combining all given ones. This\n method ensures that each partial lexicon (parts of the full lexicons at different runs) will\n contain roughly the same number of tokens from each of the lexicons.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one.\n '
prepared_lexicons = ([[t.lower() for t in inner] for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2])
shortest_lexicon = min([len(lex) for lex in prepared_lexicons])
lexicon_lengths = _determine_combined_lexicon_eval_lengths(prepared_lexicons, step_size, allow_different_lengths)
lexicon_runs = []
for run in range(0, shuffled_runs):
shuffled_partials = []
if allow_different_lengths:
lexicons_zip = zip_longest(*prepared_lexicons, fillvalue=None)
lexicons_unpkg = [t for t_tuple in lexicons_zip for t in t_tuple if (t is not None)]
shuffled_lexicons_unpkg = shuffle(lexicons_unpkg, random_state=run)
else:
shuffled_lexicons = [shuffle(lexicon[:shortest_lexicon], random_state=run) for lexicon in prepared_lexicons]
shuffled_lexicons_zip = zip(*shuffled_lexicons)
shuffled_lexicons_unpkg = [t for t_tuple in shuffled_lexicons_zip for t in t_tuple]
for length in lexicon_lengths:
shuffled_partials.append(shuffled_lexicons_unpkg[:length])
lexicon_runs.append(shuffled_partials)
return lexicon_runs<|docstring|>Combine and prepare a two lists of tokens for the metric evaluation.
Return a list of shuffled runs for a lexicon that is created by combining all given ones. This
method ensures that each partial lexicon (parts of the full lexicons at different runs) will
contain roughly the same number of tokens from each of the lexicons.
Arguments:
lexicon_1 -- The first list of tokens that should be prepared.
lexicon_2 -- The second list of tokens that should be prepared.
shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.
step_size -- By which size to increase the lexicons for each test.
lowercase -- Whether the tokens should be lowercased or not.
allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,
all lexicons will be trimed to the size of the shortest one.<|endoftext|>
|
e69f3307a2c63d0dab40136e7d5c56689d474e62038bf4767ef8a0ea8a27a744
|
def prepare_lexicons(lexicon_1: list, lexicon_2: list, shuffled_runs: int, step_size: int, lowercase: bool, allow_different_lengths: bool=False) -> list:
'Prepare a two lists of tokens for the metric evaluation.\n\n Return a tuple of all given lexicons that were shuffled and split separately.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one. If\n `True` the sizes of the lists will be increased proportional to their\n relative length to each other. The shortest list will use\n the defined step size, while the longer lists will use a step size\n that makes them grow proportionally, so that the relative length\n difference is the same for all test runs.\n '
prepared_lexicons = ([[t.lower() for t in inner] for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2])
lexicon_lengths = _determine_lexicon_eval_lengths(prepared_lexicons, step_size, allow_different_lengths)
all_lexicon_runs = []
for lexicon in prepared_lexicons:
lexicon_runs = []
for run in range(0, shuffled_runs):
shuffled_partials = []
if allow_different_lengths:
shuffled_lexicon = shuffle(lexicon, random_state=run)
else:
shuffled_lexicon = shuffle(lexicon[:lexicon_lengths[(- 1)]], random_state=run)
for length in lexicon_lengths:
shuffled_partials.append(shuffled_lexicon[:length])
lexicon_runs.append(shuffled_partials)
all_lexicon_runs.append(lexicon_runs)
return tuple(all_lexicon_runs)
|
Prepare a two lists of tokens for the metric evaluation.
Return a tuple of all given lexicons that were shuffled and split separately.
Arguments:
lexicon_1 -- The first list of tokens that should be prepared.
lexicon_2 -- The second list of tokens that should be prepared.
shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.
step_size -- By which size to increase the lexicons for each test.
lowercase -- Whether the tokens should be lowercased or not.
allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,
all lexicons will be trimed to the size of the shortest one. If
`True` the sizes of the lists will be increased proportional to their
relative length to each other. The shortest list will use
the defined step size, while the longer lists will use a step size
that makes them grow proportionally, so that the relative length
difference is the same for all test runs.
|
webias/utils.py
|
prepare_lexicons
|
webis-de/IJCAI-21
| 1
|
python
|
def prepare_lexicons(lexicon_1: list, lexicon_2: list, shuffled_runs: int, step_size: int, lowercase: bool, allow_different_lengths: bool=False) -> list:
'Prepare a two lists of tokens for the metric evaluation.\n\n Return a tuple of all given lexicons that were shuffled and split separately.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one. If\n `True` the sizes of the lists will be increased proportional to their\n relative length to each other. The shortest list will use\n the defined step size, while the longer lists will use a step size\n that makes them grow proportionally, so that the relative length\n difference is the same for all test runs.\n '
prepared_lexicons = ([[t.lower() for t in inner] for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2])
lexicon_lengths = _determine_lexicon_eval_lengths(prepared_lexicons, step_size, allow_different_lengths)
all_lexicon_runs = []
for lexicon in prepared_lexicons:
lexicon_runs = []
for run in range(0, shuffled_runs):
shuffled_partials = []
if allow_different_lengths:
shuffled_lexicon = shuffle(lexicon, random_state=run)
else:
shuffled_lexicon = shuffle(lexicon[:lexicon_lengths[(- 1)]], random_state=run)
for length in lexicon_lengths:
shuffled_partials.append(shuffled_lexicon[:length])
lexicon_runs.append(shuffled_partials)
all_lexicon_runs.append(lexicon_runs)
return tuple(all_lexicon_runs)
|
def prepare_lexicons(lexicon_1: list, lexicon_2: list, shuffled_runs: int, step_size: int, lowercase: bool, allow_different_lengths: bool=False) -> list:
'Prepare a two lists of tokens for the metric evaluation.\n\n Return a tuple of all given lexicons that were shuffled and split separately.\n\n Arguments:\n lexicon_1 -- The first list of tokens that should be prepared.\n lexicon_2 -- The second list of tokens that should be prepared.\n shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.\n step_size -- By which size to increase the lexicons for each test.\n lowercase -- Whether the tokens should be lowercased or not.\n allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,\n all lexicons will be trimed to the size of the shortest one. If\n `True` the sizes of the lists will be increased proportional to their\n relative length to each other. The shortest list will use\n the defined step size, while the longer lists will use a step size\n that makes them grow proportionally, so that the relative length\n difference is the same for all test runs.\n '
prepared_lexicons = ([[t.lower() for t in inner] for inner in [lexicon_1, lexicon_2]] if lowercase else [lexicon_1, lexicon_2])
lexicon_lengths = _determine_lexicon_eval_lengths(prepared_lexicons, step_size, allow_different_lengths)
all_lexicon_runs = []
for lexicon in prepared_lexicons:
lexicon_runs = []
for run in range(0, shuffled_runs):
shuffled_partials = []
if allow_different_lengths:
shuffled_lexicon = shuffle(lexicon, random_state=run)
else:
shuffled_lexicon = shuffle(lexicon[:lexicon_lengths[(- 1)]], random_state=run)
for length in lexicon_lengths:
shuffled_partials.append(shuffled_lexicon[:length])
lexicon_runs.append(shuffled_partials)
all_lexicon_runs.append(lexicon_runs)
return tuple(all_lexicon_runs)<|docstring|>Prepare a two lists of tokens for the metric evaluation.
Return a tuple of all given lexicons that were shuffled and split separately.
Arguments:
lexicon_1 -- The first list of tokens that should be prepared.
lexicon_2 -- The second list of tokens that should be prepared.
shuffled_runs -- How many shuffled lexcions to prepare. Each run will have a different shuffle.
step_size -- By which size to increase the lexicons for each test.
lowercase -- Whether the tokens should be lowercased or not.
allow_different_lengths -- Whether to allow for different lexicon lengths or not. If `False`,
all lexicons will be trimed to the size of the shortest one. If
`True` the sizes of the lists will be increased proportional to their
relative length to each other. The shortest list will use
the defined step size, while the longer lists will use a step size
that makes them grow proportionally, so that the relative length
difference is the same for all test runs.<|endoftext|>
|
8e966aa7c01caaf326efe64fa505acba6b3ab4eda651ce9a82a0357a10bdac40
|
def __init__(self, family=None, *args, **kwargs):
'\n Initialize.\n\n :param family: IP address family (default: INET, possible: INET6)\n '
super(IPDetector_Socket, self).__init__(*args, family=family, **kwargs)
|
Initialize.
:param family: IP address family (default: INET, possible: INET6)
|
dyndnsc/detector/socket_ip.py
|
__init__
|
infothrill/python-dyndnsc
| 35
|
python
|
def __init__(self, family=None, *args, **kwargs):
'\n Initialize.\n\n :param family: IP address family (default: INET, possible: INET6)\n '
super(IPDetector_Socket, self).__init__(*args, family=family, **kwargs)
|
def __init__(self, family=None, *args, **kwargs):
'\n Initialize.\n\n :param family: IP address family (default: INET, possible: INET6)\n '
super(IPDetector_Socket, self).__init__(*args, family=family, **kwargs)<|docstring|>Initialize.
:param family: IP address family (default: INET, possible: INET6)<|endoftext|>
|
a53095f371ef61cf7be55a0806d4c39b50d400259ba0630f8399e6044a489a53
|
def can_detect_offline(self):
'Return False, this detector works offline.'
return False
|
Return False, this detector works offline.
|
dyndnsc/detector/socket_ip.py
|
can_detect_offline
|
infothrill/python-dyndnsc
| 35
|
python
|
def can_detect_offline(self):
return False
|
def can_detect_offline(self):
return False<|docstring|>Return False, this detector works offline.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.