code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
n1 = len(s)
n2 = len(t)
if n1 != n2:
return False
else:
map1 = {}
map2 = {}
for i in range(n1):
if s[i] not in map1 and t[i] not in map2:
map1.update({s[i]: t[i]})
map2.update({t[i]: s[i]})
elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i
] or map2[t[i]] != s[i]:
return False
return True
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
n1 = len(s)
n2 = len(t)
if n1 != n2:
return False
else:
map1 = {}
map2 = {}
for i in range(n1):
if s[i] not in map1 and t[i] not in map2:
map1.update({s[i]: t[i]})
map2.update({t[i]: s[i]})
elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i
] or map2[t[i]] != s[i]:
return False
return True
<|reserved_special_token_0|>
print(solution.isIsomorphic(s, t))
<|reserved_special_token_1|>
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
n1 = len(s)
n2 = len(t)
if n1 != n2:
return False
else:
map1 = {}
map2 = {}
for i in range(n1):
if s[i] not in map1 and t[i] not in map2:
map1.update({s[i]: t[i]})
map2.update({t[i]: s[i]})
elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i
] or map2[t[i]] != s[i]:
return False
return True
# Ad-hoc smoke test: 'bb' vs 'ab' is not isomorphic, since 'b' would have
# to map to both 'a' and 'b'.
solution = Solution()
s = 'bb'
t = 'ab'
print(solution.isIsomorphic(s, t))  # expected output: False
|
flexible
|
{
"blob_id": "7fdddf98fc7b588e9b8816ffa22bc24f715d7efe",
"index": 5210,
"step-1": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Solution(object):\n\n def isIsomorphic(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n n1 = len(s)\n n2 = len(t)\n if n1 != n2:\n return False\n else:\n map1 = {}\n map2 = {}\n for i in range(n1):\n if s[i] not in map1 and t[i] not in map2:\n map1.update({s[i]: t[i]})\n map2.update({t[i]: s[i]})\n elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i\n ] or map2[t[i]] != s[i]:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def isIsomorphic(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n n1 = len(s)\n n2 = len(t)\n if n1 != n2:\n return False\n else:\n map1 = {}\n map2 = {}\n for i in range(n1):\n if s[i] not in map1 and t[i] not in map2:\n map1.update({s[i]: t[i]})\n map2.update({t[i]: s[i]})\n elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i\n ] or map2[t[i]] != s[i]:\n return False\n return True\n\n\n<mask token>\nprint(solution.isIsomorphic(s, t))\n",
"step-4": "class Solution(object):\n\n def isIsomorphic(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n n1 = len(s)\n n2 = len(t)\n if n1 != n2:\n return False\n else:\n map1 = {}\n map2 = {}\n for i in range(n1):\n if s[i] not in map1 and t[i] not in map2:\n map1.update({s[i]: t[i]})\n map2.update({t[i]: s[i]})\n elif s[i] not in map1 or t[i] not in map2 or map1[s[i]] != t[i\n ] or map2[t[i]] != s[i]:\n return False\n return True\n\n\nsolution = Solution()\ns = 'bb'\nt = 'ab'\nprint(solution.isIsomorphic(s, t))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
Написать программу, которая принимает строку
и выводит строку без пробелов и ее длину.
Для удаления пробелов реализовать доп функцию.
"""
|
flexible
|
{
"blob_id": "1eab2ddda6fdd71db372e978caa6e7d24c7fe78e",
"index": 7724,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n Написать программу, которая принимает строку\n и выводит строку без пробелов и ее длину.\n Для удаления пробелов реализовать доп функцию.\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class ConfigApp(admin.MConfigHandler):
<|reserved_special_token_0|>
def handleList(self, confInfo):
confDict = self.readConf('appsetup')
if None != confDict:
for stanza, settings in confDict.items():
for key, val in settings.items():
if key in ['api_key'] and val in [None, '']:
val = ''
confInfo[stanza].append(key, val)
def handleEdit(self, confInfo):
name = self.callerArgs.id
args = self.callerArgs
self.writeConf('appsetup', 'app_config', self.callerArgs.data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConfigApp(admin.MConfigHandler):
    """Splunk setup-page REST handler that round-trips the app's appsetup.conf."""

    def setup(self):
        """Register the optional arguments accepted by the edit action."""
        if self.requestedAction == admin.ACTION_EDIT:
            for myarg in ['api_key']:
                self.supportedArgs.addOptArg(myarg)

    def handleList(self, confInfo):
        """Populate *confInfo* with every stanza/key pair from appsetup.conf."""
        confDict = self.readConf('appsetup')
        if confDict is not None:  # idiomatic None test (was `None != confDict`)
            for stanza, settings in confDict.items():
                for key, val in settings.items():
                    # Surface an unset api_key as an empty string.
                    if key in ['api_key'] and val in [None, '']:
                        val = ''
                    confInfo[stanza].append(key, val)

    def handleEdit(self, confInfo):
        """Persist the submitted settings under the [app_config] stanza."""
        # Dropped the unused `name`/`args` locals from the original.
        self.writeConf('appsetup', 'app_config', self.callerArgs.data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConfigApp(admin.MConfigHandler):
    """REST handler behind the app's setup page (reads/writes appsetup.conf)."""

    def setup(self):
        # Only the edit action takes arguments; api_key is optional.
        if self.requestedAction == admin.ACTION_EDIT:
            for arg_name in ['api_key']:
                self.supportedArgs.addOptArg(arg_name)

    def handleList(self, confInfo):
        # Copy every stanza's settings into the response object.
        conf = self.readConf('appsetup')
        if None != conf:
            for stanza_name, stanza_settings in conf.items():
                for setting_key, setting_val in stanza_settings.items():
                    # An unset api_key is reported as the empty string.
                    if setting_key in ['api_key'] and setting_val in [None, '']:
                        setting_val = ''
                    confInfo[stanza_name].append(setting_key, setting_val)

    def handleEdit(self, confInfo):
        # Attribute reads kept for parity with the original implementation.
        caller_id = self.callerArgs.id
        caller_args = self.callerArgs
        self.writeConf('appsetup', 'app_config', self.callerArgs.data)
# Register this handler with Splunk's admin framework (no entity context).
admin.init(ConfigApp, admin.CONTEXT_NONE)
<|reserved_special_token_1|>
import splunk.admin as admin
import splunk.entity as en
class ConfigApp(admin.MConfigHandler):
    """Splunk admin REST handler for the app setup endpoint (appsetup.conf)."""
    def setup(self):
        # Declare the optional argument(s) accepted when editing the config.
        if self.requestedAction == admin.ACTION_EDIT:
            for myarg in ['api_key']:
                self.supportedArgs.addOptArg(myarg)
    def handleList(self, confInfo):
        # Expose the current appsetup.conf contents to the caller.
        confDict = self.readConf('appsetup')
        if None != confDict:
            for stanza, settings in confDict.items():
                for key, val in settings.items():
                    # An unset api_key is surfaced as an empty string.
                    if key in ['api_key'] and val in [None, '']:
                        val = ''
                    confInfo[stanza].append(key, val)
    def handleEdit(self, confInfo):
        # Persist the submitted values under the [app_config] stanza.
        name = self.callerArgs.id
        args = self.callerArgs
        self.writeConf('appsetup', 'app_config', self.callerArgs.data)
# Register this handler with Splunk's admin framework (no entity context).
admin.init(ConfigApp, admin.CONTEXT_NONE)
<|reserved_special_token_1|>
import splunk.admin as admin
import splunk.entity as en
class ConfigApp(admin.MConfigHandler):
    """Handler for the app setup endpoint; round-trips appsetup.conf."""

    def setup(self):
        """Advertise the optional arguments the edit action accepts."""
        if self.requestedAction == admin.ACTION_EDIT:
            for optional in ['api_key']:
                self.supportedArgs.addOptArg(optional)

    def handleList(self, confInfo):
        """Report every stanza of appsetup.conf back to the caller."""
        stanzas = self.readConf("appsetup")
        if None != stanzas:
            for title, body in stanzas.items():
                for field, value in body.items():
                    # Surface an unset api_key as the empty string.
                    if field in ['api_key'] and value in [None, '']:
                        value = ''
                    confInfo[title].append(field, value)

    def handleEdit(self, confInfo):
        """Save the submitted settings under [app_config]."""
        name = self.callerArgs.id
        args = self.callerArgs
        self.writeConf('appsetup', 'app_config', self.callerArgs.data)
# Register this handler with Splunk's admin framework (no entity context).
admin.init(ConfigApp, admin.CONTEXT_NONE)
|
flexible
|
{
"blob_id": "8d6c58e9ef4e14a089a7eb33a92214d081ed7692",
"index": 8462,
"step-1": "<mask token>\n\n\nclass ConfigApp(admin.MConfigHandler):\n <mask token>\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConfigApp(admin.MConfigHandler):\n\n def setup(self):\n if self.requestedAction == admin.ACTION_EDIT:\n for myarg in ['api_key']:\n self.supportedArgs.addOptArg(myarg)\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ConfigApp(admin.MConfigHandler):\n\n def setup(self):\n if self.requestedAction == admin.ACTION_EDIT:\n for myarg in ['api_key']:\n self.supportedArgs.addOptArg(myarg)\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\nadmin.init(ConfigApp, admin.CONTEXT_NONE)\n",
"step-4": "import splunk.admin as admin\nimport splunk.entity as en\n\n\nclass ConfigApp(admin.MConfigHandler):\n\n def setup(self):\n if self.requestedAction == admin.ACTION_EDIT:\n for myarg in ['api_key']:\n self.supportedArgs.addOptArg(myarg)\n\n def handleList(self, confInfo):\n confDict = self.readConf('appsetup')\n if None != confDict:\n for stanza, settings in confDict.items():\n for key, val in settings.items():\n if key in ['api_key'] and val in [None, '']:\n val = ''\n confInfo[stanza].append(key, val)\n\n def handleEdit(self, confInfo):\n name = self.callerArgs.id\n args = self.callerArgs\n self.writeConf('appsetup', 'app_config', self.callerArgs.data)\n\n\nadmin.init(ConfigApp, admin.CONTEXT_NONE)\n",
"step-5": "import splunk.admin as admin\nimport splunk.entity as en\n \nclass ConfigApp(admin.MConfigHandler):\n\tdef setup(self):\n\t\tif self.requestedAction == admin.ACTION_EDIT:\n\t\t\tfor myarg in ['api_key']:\n\t\t\t\tself.supportedArgs.addOptArg(myarg)\n \n\tdef handleList(self, confInfo):\n\t\tconfDict = self.readConf(\"appsetup\")\n\t\tif None != confDict:\n\t\t\tfor stanza, settings in confDict.items():\n\t\t\t\tfor key, val in settings.items():\n\t\t\t\t\tif key in ['api_key'] and val in [None, '']:\n\t\t\t\t\t\tval = ''\n\t\t\t\t\tconfInfo[stanza].append(key, val)\n \n\tdef handleEdit(self, confInfo):\n\t\tname = self.callerArgs.id\n\t\targs = self.callerArgs\n\t\tself.writeConf('appsetup', 'app_config', self.callerArgs.data)\n \nadmin.init(ConfigApp, admin.CONTEXT_NONE)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from __future__ import absolute_import, division, print_function
import numbers
import torch
from torch.distributions import constraints
from pyro.distributions.distribution import Distribution
from pyro.distributions.score_parts import ScoreParts
from pyro.distributions.util import broadcast_shape, sum_rightmost
class TorchDistributionMixin(Distribution):
    """
    Mixin to provide Pyro compatibility for PyTorch distributions.
    You should instead use `TorchDistribution` for new distribution classes.
    This is mainly useful for wrapping existing PyTorch distributions for
    use in Pyro. Derived classes must first inherit from
    :class:`torch.distributions.distribution.Distribution` and then inherit
    from :class:`TorchDistributionMixin`.
    """
    def __call__(self, sample_shape=torch.Size()):
        """
        Samples a random value.
        This is reparameterized whenever possible, calling
        :meth:`~torch.distributions.distribution.Distribution.rsample` for
        reparameterized distributions and
        :meth:`~torch.distributions.distribution.Distribution.sample` for
        non-reparameterized distributions.
        :param sample_shape: the size of the iid batch to be drawn from the
            distribution.
        :type sample_shape: torch.Size
        :return: A random value or batch of random values (if parameters are
            batched). The shape of the result should be `self.shape()`.
        :rtype: torch.Tensor
        """
        # Prefer the pathwise (reparameterized) sampler so gradients can
        # propagate through the sample when the subclass supports it.
        return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)
    @property
    def event_dim(self):
        """
        :return: Number of dimensions of individual events.
        :rtype: int
        """
        return len(self.event_shape)
    def shape(self, sample_shape=torch.Size()):
        """
        The tensor shape of samples from this distribution.
        Samples are of shape::
            d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
        :param sample_shape: the size of the iid batch to be drawn from the
            distribution.
        :type sample_shape: torch.Size
        :return: Tensor shape of samples.
        :rtype: torch.Size
        """
        return sample_shape + self.batch_shape + self.event_shape
    def expand(self, batch_shape):
        """
        Expands a distribution to a desired
        :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
        Note that this is more general than :meth:`expand_by` because
        ``d.expand_by(sample_shape)`` can be reduced to
        ``d.expand(sample_shape + d.batch_shape)``.
        :param torch.Size batch_shape: The target ``batch_shape``. This must
            compatible with ``self.batch_shape`` similar to the requirements
            of :func:`torch.Tensor.expand`: the target ``batch_shape`` must
            be at least as long as ``self.batch_shape``, and for each
            non-singleton dim of ``self.batch_shape``, ``batch_shape`` must
            either agree or be set to ``-1``.
        :return: An expanded version of this distribution.
        :rtype: :class:`ReshapedDistribution`
        """
        batch_shape = list(batch_shape)
        if len(batch_shape) < len(self.batch_shape):
            raise ValueError("Expected len(batch_shape) >= len(self.batch_shape), "
                             "actual {} vs {}".format(len(batch_shape), len(self.batch_shape)))
        # check sizes of existing dims
        for dim in range(-1, -1 - len(self.batch_shape), -1):
            if batch_shape[dim] == -1:
                # -1 means "keep this dim as-is".
                batch_shape[dim] = self.batch_shape[dim]
            elif batch_shape[dim] != self.batch_shape[dim]:
                if self.batch_shape[dim] != 1:
                    raise ValueError("Cannot broadcast dim {} of size {} to size {}".format(
                        dim, self.batch_shape[dim], batch_shape[dim]))
                else:
                    # Broadcasting a size-1 batch dim is not implemented yet.
                    raise NotImplementedError("https://github.com/uber/pyro/issues/1119")
        # Whatever extends to the left of self.batch_shape becomes iid sample dims.
        sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]
        return self.expand_by(sample_shape)
    def expand_by(self, sample_shape):
        """
        Expands a distribution by adding ``sample_shape`` to the left side of
        its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
        To expand internal dims of ``self.batch_shape`` from 1 to something
        larger, use :meth:`expand` instead.
        :param torch.Size sample_shape: The size of the iid batch to be drawn
            from the distribution.
        :return: An expanded version of this distribution.
        :rtype: :class:`ReshapedDistribution`
        """
        return ReshapedDistribution(self, sample_shape=sample_shape)
    def reshape(self, sample_shape=None, extra_event_dims=None):
        # Deprecated entry point: fail loudly and point callers at the
        # replacement API instead of guessing at the old semantics.
        raise Exception('''
            .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into
            .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')
    def independent(self, reinterpreted_batch_ndims=None):
        """
        Reinterprets the ``n`` rightmost dimensions of this distributions
        :attr:`~torch.distributions.distribution.Distribution.batch_shape`
        as event dims, adding them to the left side of
        :attr:`~torch.distributions.distribution.Distribution.event_shape`.
        Example::
            >>> [d1.batch_shape, d1.event_shape]
            [torch.Size((2, 3)), torch.Size((4, 5))]
            >>> d2 = d1.independent(1)
            >>> [d2.batch_shape, d2.event_shape]
            [torch.Size((2,)), torch.Size((3, 4, 5))]
            >>> d3 = d1.independent(2)
            >>> [d3.batch_shape, d3.event_shape]
            [torch.Size(()), torch.Size((2, 3, 4, 5))]
        :param int reinterpreted_batch_ndims: The number of batch dimensions
            to reinterpret as event dimensions.
        :return: A reshaped version of this distribution.
        :rtype: :class:`ReshapedDistribution`
        """
        if reinterpreted_batch_ndims is None:
            # Default: absorb *all* batch dims into the event shape.
            reinterpreted_batch_ndims = len(self.batch_shape)
        # TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)
        return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)
    def mask(self, mask):
        """
        Masks a distribution by a zero-one tensor that is broadcastable to the
        distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
        :param torch.Tensor mask: A zero-one valued float tensor.
        :return: A masked copy of this distribution.
        :rtype: :class:`MaskedDistribution`
        """
        return MaskedDistribution(self, mask)
class TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):
    """
    Base class for PyTorch-compatible distributions with Pyro support.
    This should be the base class for almost all new Pyro distributions.
    .. note::
        Parameters and data should be of type :class:`~torch.Tensor`
        and all methods return type :class:`~torch.Tensor` unless
        otherwise noted.
    **Tensor Shapes**:
    TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::
        x = d.sample(sample_shape)
        assert x.shape == d.shape(sample_shape)
    Pyro follows the same distribution shape semantics as PyTorch. It distinguishes
    between three different roles for tensor shapes of samples:
    - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.
      This is taken as an argument by the distribution's `sample` method.
    - *batch shape* corresponds to non-identical (independent) parameterizations of
      the distribution, inferred from the distribution's parameter shapes. This is
      fixed for a distribution instance.
    - *event shape* corresponds to the event dimensions of the distribution, which
      is fixed for a distribution class. These are collapsed when we try to score
      a sample from the distribution via `d.log_prob(x)`.
    These shapes are related by the equation::
        assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape
    Distributions provide a vectorized
    :meth`~torch.distributions.distribution.Distribution.log_prob` method that
    evaluates the log probability density of each event in a batch
    independently, returning a tensor of shape
    ``sample_shape + d.batch_shape``::
        x = d.sample(sample_shape)
        assert x.shape == d.shape(sample_shape)
        log_p = d.log_prob(x)
        assert log_p.shape == sample_shape + d.batch_shape
    **Implementing New Distributions**:
    Derived classes must implement the methods
    :meth:`~torch.distributions.distribution.Distribution.sample`
    (or :meth:`~torch.distributions.distribution.Distribution.rsample` if
    ``.has_rsample == True``) and
    :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must
    implement the properties
    :attr:`~torch.distributions.distribution.Distribution.batch_shape`,
    and :attr:`~torch.distributions.distribution.Distribution.event_shape`.
    Discrete classes may also implement the
    :meth:`~torch.distributions.distribution.Distribution.enumerate_support`
    method to improve gradient estimates and set
    ``.has_enumerate_support = True``.
    """
    pass  # all behavior comes from torch.distributions.Distribution + the mixin
class ReshapedDistribution(TorchDistribution):
    """
    Reshapes a distribution by adding ``sample_shape`` to its total shape
    and adding ``reinterpreted_batch_ndims`` to its
    :attr:`~torch.distributions.distribution.Distribution.event_shape`.
    :param torch.Size sample_shape: The size of the iid batch to be drawn from
        the distribution.
    :param int reinterpreted_batch_ndims: The number of extra event dimensions that will
        be considered dependent.
    """
    arg_constraints = {}
    def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):
        sample_shape = torch.Size(sample_shape)
        if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):
            raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '
                             'actual {} vs {}'.format(reinterpreted_batch_ndims,
                                                      len(sample_shape + base_dist.batch_shape)))
        self.base_dist = base_dist
        self.sample_shape = sample_shape
        self.reinterpreted_batch_ndims = reinterpreted_batch_ndims
        # Split the total shape into (batch, event): the rightmost
        # reinterpreted_batch_ndims batch dims are moved into the event shape.
        shape = sample_shape + base_dist.batch_shape + base_dist.event_shape
        batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)
        batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]
        super(ReshapedDistribution, self).__init__(batch_shape, event_shape)
    def expand_by(self, sample_shape):
        # Prepend the new sample dims to the existing ones instead of
        # wrapping a second ReshapedDistribution around this one.
        base_dist = self.base_dist
        sample_shape = torch.Size(sample_shape) + self.sample_shape
        reinterpreted_batch_ndims = self.reinterpreted_batch_ndims
        return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)
    def independent(self, reinterpreted_batch_ndims=None):
        if reinterpreted_batch_ndims is None:
            reinterpreted_batch_ndims = len(self.batch_shape)
        # Accumulate onto the existing count rather than nesting wrappers.
        base_dist = self.base_dist
        sample_shape = self.sample_shape
        reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims
        return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)
    @property
    def has_rsample(self):
        return self.base_dist.has_rsample
    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support
    @constraints.dependent_property
    def support(self):
        return self.base_dist.support
    def sample(self, sample_shape=torch.Size()):
        return self.base_dist.sample(sample_shape + self.sample_shape)
    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape + self.sample_shape)
    def log_prob(self, value):
        # Sum out the reinterpreted event dims, then broadcast the result up
        # to the batch shape implied jointly by self and value.
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
        return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)
    def score_parts(self, value):
        shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])
        log_prob, score_function, entropy_term = self.base_dist.score_parts(value)
        log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)
        # score_function / entropy_term may be plain numbers (e.g. 0) for
        # fully reparameterized terms; only tensor-valued parts are reshaped.
        if not isinstance(score_function, numbers.Number):
            score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)
        if not isinstance(entropy_term, numbers.Number):
            entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)
        return ScoreParts(log_prob, score_function, entropy_term)
    def enumerate_support(self):
        if self.reinterpreted_batch_ndims:
            raise NotImplementedError("Pyro does not enumerate over cartesian products")
        samples = self.base_dist.enumerate_support()
        if not self.sample_shape:
            return samples
        # Shift enumeration dim to correct location.
        enum_shape, base_shape = samples.shape[:1], samples.shape[1:]
        samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)
        samples = samples.expand(enum_shape + self.sample_shape + base_shape)
        return samples
    @property
    def mean(self):
        return self.base_dist.mean.expand(self.batch_shape + self.event_shape)
    @property
    def variance(self):
        return self.base_dist.variance.expand(self.batch_shape + self.event_shape)
class MaskedDistribution(TorchDistribution):
    """
    Masks a distribution by a zero-one tensor that is broadcastable to the
    distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.
    :param torch.Tensor mask: A zero-one valued float tensor.
    """
    arg_constraints = {}
    def __init__(self, base_dist, mask):
        # The mask may broadcast up to the base batch shape but never past it.
        if broadcast_shape(mask.shape, base_dist.batch_shape) != base_dist.batch_shape:
            raise ValueError("Expected mask.shape to be broadcastable to base_dist.batch_shape, "
                             "actual {} vs {}".format(mask.shape, base_dist.batch_shape))
        self.base_dist = base_dist
        self._mask = mask
        super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)
    @property
    def has_rsample(self):
        return self.base_dist.has_rsample
    @property
    def has_enumerate_support(self):
        return self.base_dist.has_enumerate_support
    @constraints.dependent_property
    def support(self):
        return self.base_dist.support
    def sample(self, sample_shape=torch.Size()):
        return self.base_dist.sample(sample_shape)
    def rsample(self, sample_shape=torch.Size()):
        return self.base_dist.rsample(sample_shape)
    def log_prob(self, value):
        # Entries where the mask is zero contribute nothing to the log density.
        return self.base_dist.log_prob(value) * self._mask
    def score_parts(self, value):
        # NOTE(review): relies on ScoreParts supporting elementwise `*` with a
        # tensor mask -- confirm against the ScoreParts implementation.
        return self.base_dist.score_parts(value) * self._mask
    def enumerate_support(self):
        return self.base_dist.enumerate_support()
    @property
    def mean(self):
        return self.base_dist.mean
    @property
    def variance(self):
        return self.base_dist.variance
|
normal
|
{
"blob_id": "0f0ea6f07f9a082042ed9aff7a95d372c32b5a13",
"index": 1897,
"step-1": "<mask token>\n\n\nclass ReshapedDistribution(TorchDistribution):\n <mask token>\n <mask token>\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n <mask 
token>\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != 
base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-2": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n <mask token>\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n <mask token>\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n <mask token>\n <mask token>\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. 
This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n 
arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + 
self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, 
base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-3": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(\n sample_shape)\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n <mask token>\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n <mask token>\n <mask token>\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. 
note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n 
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, 
reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = 
samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-4": "<mask token>\n\n\nclass TorchDistributionMixin(Distribution):\n <mask token>\n\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(\n sample_shape)\n <mask token>\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n\n def expand(self, batch_shape):\n \"\"\"\n Expands a distribution to a desired\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n Note that this is more general than :meth:`expand_by` because\n ``d.expand_by(sample_shape)`` can be reduced to\n ``d.expand(sample_shape + d.batch_shape)``.\n\n :param torch.Size batch_shape: The target ``batch_shape``. 
This must\n compatible with ``self.batch_shape`` similar to the requirements\n of :func:`torch.Tensor.expand`: the target ``batch_shape`` must\n be at least as long as ``self.batch_shape``, and for each\n non-singleton dim of ``self.batch_shape``, ``batch_shape`` must\n either agree or be set to ``-1``.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\n 'Expected len(batch_shape) >= len(self.batch_shape), actual {} vs {}'\n .format(len(batch_shape), len(self.batch_shape)))\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\n 'Cannot broadcast dim {} of size {} to size {}'.\n format(dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\n 'https://github.com/uber/pyro/issues/1119')\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)\n <mask token>\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception(\n \"\"\"\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n .expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).\"\"\"\n )\n\n def independent(self, reinterpreted_batch_ndims=None):\n \"\"\"\n Reinterprets the ``n`` rightmost dimensions of this distributions\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`\n as event dims, adding them to the left side of\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n Example::\n\n >>> [d1.batch_shape, d1.event_shape]\n [torch.Size((2, 3)), torch.Size((4, 5))]\n >>> d2 = d1.independent(1)\n >>> [d2.batch_shape, d2.event_shape]\n [torch.Size((2,)), torch.Size((3, 4, 5))]\n >>> d3 = d1.independent(2)\n >>> 
[d3.batch_shape, d3.event_shape]\n [torch.Size(()), torch.Size((2, 3, 4, 5))]\n\n :param int reinterpreted_batch_ndims: The number of batch dimensions\n to reinterpret as event dimensions.\n :return: A reshaped version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n return ReshapedDistribution(self, reinterpreted_batch_ndims=\n reinterpreted_batch_ndims)\n\n def mask(self, mask):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n :return: A masked copy of this distribution.\n :rtype: :class:`MaskedDistribution`\n \"\"\"\n return MaskedDistribution(self, mask)\n\n\nclass TorchDistribution(torch.distributions.Distribution,\n TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. 
This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n 
arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(),\n reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape\n ):\n raise ValueError(\n 'Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), actual {} vs {}'\n .format(reinterpreted_batch_ndims, len(sample_shape +\n base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.\n event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n def independent(self, reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = (self.reinterpreted_batch_ndims +\n reinterpreted_batch_ndims)\n return ReshapedDistribution(base_dist, sample_shape,\n reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + 
self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.\n reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() -\n self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(\n value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims\n ).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.\n reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.\n reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\n 'Pyro does not enumerate over cartesian products')\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape\n ) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.\n event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, 
base_dist.batch_shape\n ) != base_dist.batch_shape:\n raise ValueError(\n 'Expected mask.shape to be broadcastable to base_dist.batch_shape, actual {} vs {}'\n .format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape,\n base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return self.base_dist.variance\n",
"step-5": "from __future__ import absolute_import, division, print_function\n\nimport numbers\n\nimport torch\nfrom torch.distributions import constraints\n\nfrom pyro.distributions.distribution import Distribution\nfrom pyro.distributions.score_parts import ScoreParts\nfrom pyro.distributions.util import broadcast_shape, sum_rightmost\n\n\nclass TorchDistributionMixin(Distribution):\n \"\"\"\n Mixin to provide Pyro compatibility for PyTorch distributions.\n\n You should instead use `TorchDistribution` for new distribution classes.\n\n This is mainly useful for wrapping existing PyTorch distributions for\n use in Pyro. Derived classes must first inherit from\n :class:`torch.distributions.distribution.Distribution` and then inherit\n from :class:`TorchDistributionMixin`.\n \"\"\"\n def __call__(self, sample_shape=torch.Size()):\n \"\"\"\n Samples a random value.\n\n This is reparameterized whenever possible, calling\n :meth:`~torch.distributions.distribution.Distribution.rsample` for\n reparameterized distributions and\n :meth:`~torch.distributions.distribution.Distribution.sample` for\n non-reparameterized distributions.\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: A random value or batch of random values (if parameters are\n batched). 
The shape of the result should be `self.shape()`.\n :rtype: torch.Tensor\n \"\"\"\n return self.rsample(sample_shape) if self.has_rsample else self.sample(sample_shape)\n\n @property\n def event_dim(self):\n \"\"\"\n :return: Number of dimensions of individual events.\n :rtype: int\n \"\"\"\n return len(self.event_shape)\n\n def shape(self, sample_shape=torch.Size()):\n \"\"\"\n The tensor shape of samples from this distribution.\n\n Samples are of shape::\n\n d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n :param sample_shape: the size of the iid batch to be drawn from the\n distribution.\n :type sample_shape: torch.Size\n :return: Tensor shape of samples.\n :rtype: torch.Size\n \"\"\"\n return sample_shape + self.batch_shape + self.event_shape\n\n def expand(self, batch_shape):\n \"\"\"\n Expands a distribution to a desired\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n Note that this is more general than :meth:`expand_by` because\n ``d.expand_by(sample_shape)`` can be reduced to\n ``d.expand(sample_shape + d.batch_shape)``.\n\n :param torch.Size batch_shape: The target ``batch_shape``. 
This must\n compatible with ``self.batch_shape`` similar to the requirements\n of :func:`torch.Tensor.expand`: the target ``batch_shape`` must\n be at least as long as ``self.batch_shape``, and for each\n non-singleton dim of ``self.batch_shape``, ``batch_shape`` must\n either agree or be set to ``-1``.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n batch_shape = list(batch_shape)\n if len(batch_shape) < len(self.batch_shape):\n raise ValueError(\"Expected len(batch_shape) >= len(self.batch_shape), \"\n \"actual {} vs {}\".format(len(batch_shape), len(self.batch_shape)))\n # check sizes of existing dims\n for dim in range(-1, -1 - len(self.batch_shape), -1):\n if batch_shape[dim] == -1:\n batch_shape[dim] = self.batch_shape[dim]\n elif batch_shape[dim] != self.batch_shape[dim]:\n if self.batch_shape[dim] != 1:\n raise ValueError(\"Cannot broadcast dim {} of size {} to size {}\".format(\n dim, self.batch_shape[dim], batch_shape[dim]))\n else:\n raise NotImplementedError(\"https://github.com/uber/pyro/issues/1119\")\n sample_shape = batch_shape[:len(batch_shape) - len(self.batch_shape)]\n return self.expand_by(sample_shape)\n\n def expand_by(self, sample_shape):\n \"\"\"\n Expands a distribution by adding ``sample_shape`` to the left side of\n its :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n To expand internal dims of ``self.batch_shape`` from 1 to something\n larger, use :meth:`expand` instead.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn\n from the distribution.\n :return: An expanded version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n return ReshapedDistribution(self, sample_shape=sample_shape)\n\n def reshape(self, sample_shape=None, extra_event_dims=None):\n raise Exception('''\n .reshape(sample_shape=s, extra_event_dims=n) was renamed and split into\n 
.expand_by(sample_shape=s).independent(reinterpreted_batch_ndims=n).''')\n\n def independent(self, reinterpreted_batch_ndims=None):\n \"\"\"\n Reinterprets the ``n`` rightmost dimensions of this distributions\n :attr:`~torch.distributions.distribution.Distribution.batch_shape`\n as event dims, adding them to the left side of\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n Example::\n\n >>> [d1.batch_shape, d1.event_shape]\n [torch.Size((2, 3)), torch.Size((4, 5))]\n >>> d2 = d1.independent(1)\n >>> [d2.batch_shape, d2.event_shape]\n [torch.Size((2,)), torch.Size((3, 4, 5))]\n >>> d3 = d1.independent(2)\n >>> [d3.batch_shape, d3.event_shape]\n [torch.Size(()), torch.Size((2, 3, 4, 5))]\n\n :param int reinterpreted_batch_ndims: The number of batch dimensions\n to reinterpret as event dimensions.\n :return: A reshaped version of this distribution.\n :rtype: :class:`ReshapedDistribution`\n \"\"\"\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n # TODO return pyro.distributions.torch.Independent(self, reinterpreted_batch_ndims)\n return ReshapedDistribution(self, reinterpreted_batch_ndims=reinterpreted_batch_ndims)\n\n def mask(self, mask):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distributions :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n :return: A masked copy of this distribution.\n :rtype: :class:`MaskedDistribution`\n \"\"\"\n return MaskedDistribution(self, mask)\n\n\nclass TorchDistribution(torch.distributions.Distribution, TorchDistributionMixin):\n \"\"\"\n Base class for PyTorch-compatible distributions with Pyro support.\n\n This should be the base class for almost all new Pyro distributions.\n\n .. 
note::\n\n Parameters and data should be of type :class:`~torch.Tensor`\n and all methods return type :class:`~torch.Tensor` unless\n otherwise noted.\n\n **Tensor Shapes**:\n\n TorchDistributions provide a method ``.shape()`` for the tensor shape of samples::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n\n Pyro follows the same distribution shape semantics as PyTorch. It distinguishes\n between three different roles for tensor shapes of samples:\n\n - *sample shape* corresponds to the shape of the iid samples drawn from the distribution.\n This is taken as an argument by the distribution's `sample` method.\n - *batch shape* corresponds to non-identical (independent) parameterizations of\n the distribution, inferred from the distribution's parameter shapes. This is\n fixed for a distribution instance.\n - *event shape* corresponds to the event dimensions of the distribution, which\n is fixed for a distribution class. These are collapsed when we try to score\n a sample from the distribution via `d.log_prob(x)`.\n\n These shapes are related by the equation::\n\n assert d.shape(sample_shape) == sample_shape + d.batch_shape + d.event_shape\n\n Distributions provide a vectorized\n :meth`~torch.distributions.distribution.Distribution.log_prob` method that\n evaluates the log probability density of each event in a batch\n independently, returning a tensor of shape\n ``sample_shape + d.batch_shape``::\n\n x = d.sample(sample_shape)\n assert x.shape == d.shape(sample_shape)\n log_p = d.log_prob(x)\n assert log_p.shape == sample_shape + d.batch_shape\n\n **Implementing New Distributions**:\n\n Derived classes must implement the methods\n :meth:`~torch.distributions.distribution.Distribution.sample`\n (or :meth:`~torch.distributions.distribution.Distribution.rsample` if\n ``.has_rsample == True``) and\n :meth:`~torch.distributions.distribution.Distribution.log_prob`, and must\n implement the properties\n 
:attr:`~torch.distributions.distribution.Distribution.batch_shape`,\n and :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n Discrete classes may also implement the\n :meth:`~torch.distributions.distribution.Distribution.enumerate_support`\n method to improve gradient estimates and set\n ``.has_enumerate_support = True``.\n \"\"\"\n pass\n\n\nclass ReshapedDistribution(TorchDistribution):\n \"\"\"\n Reshapes a distribution by adding ``sample_shape`` to its total shape\n and adding ``reinterpreted_batch_ndims`` to its\n :attr:`~torch.distributions.distribution.Distribution.event_shape`.\n\n :param torch.Size sample_shape: The size of the iid batch to be drawn from\n the distribution.\n :param int reinterpreted_batch_ndims: The number of extra event dimensions that will\n be considered dependent.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, sample_shape=torch.Size(), reinterpreted_batch_ndims=0):\n sample_shape = torch.Size(sample_shape)\n if reinterpreted_batch_ndims > len(sample_shape + base_dist.batch_shape):\n raise ValueError('Expected reinterpreted_batch_ndims <= len(sample_shape + base_dist.batch_shape), '\n 'actual {} vs {}'.format(reinterpreted_batch_ndims,\n len(sample_shape + base_dist.batch_shape)))\n self.base_dist = base_dist\n self.sample_shape = sample_shape\n self.reinterpreted_batch_ndims = reinterpreted_batch_ndims\n shape = sample_shape + base_dist.batch_shape + base_dist.event_shape\n batch_dim = len(shape) - reinterpreted_batch_ndims - len(base_dist.event_shape)\n batch_shape, event_shape = shape[:batch_dim], shape[batch_dim:]\n super(ReshapedDistribution, self).__init__(batch_shape, event_shape)\n\n def expand_by(self, sample_shape):\n base_dist = self.base_dist\n sample_shape = torch.Size(sample_shape) + self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)\n\n def independent(self, 
reinterpreted_batch_ndims=None):\n if reinterpreted_batch_ndims is None:\n reinterpreted_batch_ndims = len(self.batch_shape)\n base_dist = self.base_dist\n sample_shape = self.sample_shape\n reinterpreted_batch_ndims = self.reinterpreted_batch_ndims + reinterpreted_batch_ndims\n return ReshapedDistribution(base_dist, sample_shape, reinterpreted_batch_ndims)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape + self.sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape + self.sample_shape)\n\n def log_prob(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])\n return sum_rightmost(self.base_dist.log_prob(value), self.reinterpreted_batch_ndims).expand(shape)\n\n def score_parts(self, value):\n shape = broadcast_shape(self.batch_shape, value.shape[:value.dim() - self.event_dim])\n log_prob, score_function, entropy_term = self.base_dist.score_parts(value)\n log_prob = sum_rightmost(log_prob, self.reinterpreted_batch_ndims).expand(shape)\n if not isinstance(score_function, numbers.Number):\n score_function = sum_rightmost(score_function, self.reinterpreted_batch_ndims).expand(shape)\n if not isinstance(entropy_term, numbers.Number):\n entropy_term = sum_rightmost(entropy_term, self.reinterpreted_batch_ndims).expand(shape)\n return ScoreParts(log_prob, score_function, entropy_term)\n\n def enumerate_support(self):\n if self.reinterpreted_batch_ndims:\n raise NotImplementedError(\"Pyro does not enumerate over cartesian products\")\n\n samples = self.base_dist.enumerate_support()\n if not self.sample_shape:\n return samples\n\n # Shift enumeration dim to correct location.\n 
enum_shape, base_shape = samples.shape[:1], samples.shape[1:]\n samples = samples.reshape(enum_shape + (1,) * len(self.sample_shape) + base_shape)\n samples = samples.expand(enum_shape + self.sample_shape + base_shape)\n return samples\n\n @property\n def mean(self):\n return self.base_dist.mean.expand(self.batch_shape + self.event_shape)\n\n @property\n def variance(self):\n return self.base_dist.variance.expand(self.batch_shape + self.event_shape)\n\n\nclass MaskedDistribution(TorchDistribution):\n \"\"\"\n Masks a distribution by a zero-one tensor that is broadcastable to the\n distribution's :attr:`~torch.distributions.distribution.Distribution.batch_shape`.\n\n :param torch.Tensor mask: A zero-one valued float tensor.\n \"\"\"\n arg_constraints = {}\n\n def __init__(self, base_dist, mask):\n if broadcast_shape(mask.shape, base_dist.batch_shape) != base_dist.batch_shape:\n raise ValueError(\"Expected mask.shape to be broadcastable to base_dist.batch_shape, \"\n \"actual {} vs {}\".format(mask.shape, base_dist.batch_shape))\n self.base_dist = base_dist\n self._mask = mask\n super(MaskedDistribution, self).__init__(base_dist.batch_shape, base_dist.event_shape)\n\n @property\n def has_rsample(self):\n return self.base_dist.has_rsample\n\n @property\n def has_enumerate_support(self):\n return self.base_dist.has_enumerate_support\n\n @constraints.dependent_property\n def support(self):\n return self.base_dist.support\n\n def sample(self, sample_shape=torch.Size()):\n return self.base_dist.sample(sample_shape)\n\n def rsample(self, sample_shape=torch.Size()):\n return self.base_dist.rsample(sample_shape)\n\n def log_prob(self, value):\n return self.base_dist.log_prob(value) * self._mask\n\n def score_parts(self, value):\n return self.base_dist.score_parts(value) * self._mask\n\n def enumerate_support(self):\n return self.base_dist.enumerate_support()\n\n @property\n def mean(self):\n return self.base_dist.mean\n\n @property\n def variance(self):\n return 
self.base_dist.variance\n",
"step-ids": [
27,
35,
36,
39,
44
]
}
|
[
27,
35,
36,
39,
44
] |
###########Seq_Profile_Blast_Parser_tool################################
import csv
import time
import re
import os
import sys
from collections import Counter
import operator
from fractions import *
import glob
import ntpath
from collections import defaultdict
path = open('config.txt').read().splitlines()[0].split('=')[-1]
rootDir = '.'
blast_files = []
curdir = os.getcwd()
curdir_up = '/'.join(curdir.split('/')[:-1])
for dirName, subdirList, fileList in os.walk(rootDir, topdown = False):
for fname in fileList:
if fname.startswith("S.A"):
fname = os.path.join(dirName, fname)
blast_files.append(fname)
print 'Module1'
print ' step 1.1 : Parsing the input Blastp files'
for blastfiles in blast_files[:]:
if 'Prot' not in blastfiles:
qids=[]
query_lengths = []
counter = 0
seqsas = []
file1 = open(blastfiles,'r').read()
queries = file1.split('Query=')
datas = queries[1:]
for item in datas[:]:
lines = item.split('\n')
qid = item.split()[0]
qids.append(qid)
for line in lines[:]:
if line.startswith('Length='):
query_lengths.append(int(line.split('=')[-1]))
break
for i,data in enumerate(datas[:]):
lines = data.split('\n')
record = False
for line in lines[:]:
if line.startswith(">") :
tmp = line.split(">")
tmp_name = tmp[1]
tmp_name1 = tmp_name.split("[")
tmp_hit = ''.join(tmp_name[0:-1])
if 'Staphylococcus' in line:
record = True
else:
record = False
if line.startswith(" Score") and record:
tmp = line.strip().split()
tmp_score_s = tmp[2]
tmp_score = float(tmp_score_s)
tmp_evalue = float(tmp[7].replace(",",""))
seqsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])
if line.startswith(" Identities")and counter <len(seqsas) and record:
tmp = line.strip().split()
tmp_id = tmp[3]
tmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')
ids = int(tmp_ids)
tmp_po = tmp[7]
tmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')
pos = int(tmp_pos)
tmp_gap = tmp[11]
tmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')
gaps_percent = int(tmp_gaps)
gap_number = int(tmp[10].split('/')[0])
alignment_length = int(tmp[10].split('/')[-1])
coverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)
seqsas[counter].append(ids)
seqsas[counter].append(pos)
seqsas[counter].append(gaps_percent)
seqsas[counter].append(gap_number)
seqsas[counter].append(alignment_length)
seqsas[counter].append(coverage_percent)
counter+=1
path1 = '%s/RESULT/MODULE1/P1' % curdir_up
if not os.path.exists(path1):
os.makedirs(path1)
file_name = ntpath.basename('blast_out1%s' % blastfiles) + '.txt'
with open(os.path.join(path1,file_name),'w') as out1:
for item in seqsas[:]:
item = '\t'.join([str(x) for x in item])
out1.write('%s\n' %item)
out1.close()
else:
strsas = []
qids=[]
query_lengths = []
counter = 0
file2 = open(blastfiles,'r').read()
queries = file2.split('Query=')
datas = queries[1:]
for item in datas[:]:
lines = item.split('\n')
qid = item.split()[0]
qids.append(qid)
for line in lines[:]:
if line.startswith('Length='):
query_lengths.append(int(line.split('=')[-1]))
break
for i,data in enumerate(datas[:]):
lines = data.split('\n')
record = False
for line in lines[:]:
if line.startswith(">") :
tmp = line.split(">")
tmp_name = tmp[1]
tmp_hit = tmp_name.split("|")[0]
if line.startswith(" Score") :
tmp = line.strip().split()
tmp_score_s = tmp[2]
tmp_score = float(tmp_score_s)
tmp_evalue = float(tmp[7].replace(",",""))
strsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])
if line.startswith(" Identities") and counter < len(strsas):
tmp = line.strip().split()
tmp_id = tmp[3]
tmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')
ids = int(tmp_ids)
tmp_po = tmp[7]
tmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')
pos = int(tmp_pos)
tmp_gap = tmp[11]
tmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')
gaps_percent = int(tmp_gaps)
gap_number_1 = Fraction(tmp[10])
gap_number = int(tmp[10].split('/')[0])
alignment_length = int(tmp[10].split('/')[-1])
coverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)
strsas[counter].append(ids)
strsas[counter].append(pos)
strsas[counter].append(gaps_percent)
strsas[counter].append(gap_number)
strsas[counter].append(alignment_length)
strsas[counter].append(coverage_percent)
counter +=1
path1 = '%s/RESULT/MODULE1/P1' %curdir_up
if not os.path.exists(path1):
os.makedirs(path1)
prot_file_name = ntpath.basename('prot_blast_out1%s' % blastfiles) + '.txt'
with open(os.path.join(path1,prot_file_name),'w') as out2:
for item in strsas[:]:
item = '\t'.join([str(x) for x in item])
out2.write('%s\n' %item)
out2.close()
def parser2():
os.chdir('%s/RESULT/MODULE1/P1' %curdir_up)
for file1 in glob.glob('*.txt'):
file_s = open(file1).readlines()
prepsas = []
for item in file_s[:]:
item = item.strip().split('\t')
hit = item[1]
e = float(item[3])
ids = int(item[4])
cov = float(item[9])
if e <=1e-10 and ids >= 35 and cov >= 75:
prepsas.append(item)
if len(item) < 10:
print 'not match'
prot_file_name_s = str(file1)
path2 = '%s/RESULT/MODULE1/P2' %curdir_up
if not os.path.exists(path2):
os.makedirs(path2)
with open(os.path.join(path2,prot_file_name_s),'w') as prepsas1:
for hits in prepsas[:]:
hits = '\t'.join([str(x) for x in hits])
prepsas1.write('%s\n' %hits)
prepsas1.close()
def parser3():
os.chdir('%s/RESULT/MODULE1/P2' %curdir_up)
for file2 in glob.glob('*.txt'):
file3 =open(file2).readlines()
d = {}
for filters in file3[:]:
key, value = filters.strip("\n").split("\t")[0],filters.strip("\n").split("\t")[1:]
key = key.strip('\t')
value = [str(x)[0:]for x in value]
if key not in d:
d[key] = [value]
elif key in d and len(d[key]) <= 250:
d[key].append(value)
prot_file_name_s = str(file2)
path2 = '%s/RESULT/MODULE1/P3' %curdir_up
if not os.path.exists(path2):
os.makedirs(path2)
with open(os.path.join(path2,prot_file_name_s),'w') as fp:
for item in d.keys()[:]:
line = item
hits = d[item]
for hit in hits:
hit2 = ','.join(hit)
line += '\t%s' % hit2
fp.write("%s\n" % line)
parser2()
parser3()
|
normal
|
{
"blob_id": "7eb8fe491a88bcfadf2a38eaa158b74b21514a1c",
"index": 8431,
"step-1": "###########Seq_Profile_Blast_Parser_tool################################\nimport csv\nimport time\nimport re\nimport os\nimport sys\nfrom collections import Counter\nimport operator\nfrom fractions import *\nimport glob\nimport ntpath\nfrom collections import defaultdict\n\n\n\npath = open('config.txt').read().splitlines()[0].split('=')[-1]\n\nrootDir = '.'\t\t\nblast_files = []\ncurdir = os.getcwd()\ncurdir_up = '/'.join(curdir.split('/')[:-1])\n\n\nfor dirName, subdirList, fileList in os.walk(rootDir, topdown = False):\n\t\n\tfor fname in fileList:\n\t\t\n\t\tif fname.startswith(\"S.A\"):\n\t\t\t\n\t\t\tfname = os.path.join(dirName, fname)\n\t\t\tblast_files.append(fname)\n\nprint 'Module1'\nprint '\t\tstep 1.1 : Parsing the input Blastp files'\t\n\n\nfor blastfiles in blast_files[:]:\n\n\tif 'Prot' not in blastfiles:\n\t\t\n\t\tqids=[]\n\t\tquery_lengths = []\n\t\tcounter = 0\n\t\tseqsas = []\n\t\tfile1 = open(blastfiles,'r').read()\n\t\tqueries = file1.split('Query=')\n\t\tdatas = queries[1:]\t\n\t\t\n\t\tfor item in datas[:]:\n\t\t\t\n\t\t\t\tlines = item.split('\\n')\n\t\t\t\tqid = item.split()[0]\n\t\t\t\tqids.append(qid)\n\t\t\t\t\n\t\t\t\tfor line in lines[:]:\n\t\t\t\t\t\n\t\t\t\t\tif line.startswith('Length='):\n\t\t\t\t\t\t\n\t\t\t\t\t\tquery_lengths.append(int(line.split('=')[-1]))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\tfor i,data in enumerate(datas[:]):\n\t\t\t\n\t\t\tlines = data.split('\\n')\n\t\t\trecord = False\n\t\t\t\n\t\t\tfor line in lines[:]:\n\t\t\t\t\n\t\t\t\tif line.startswith(\">\") :\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.split(\">\")\n\t\t\t\t\ttmp_name = tmp[1]\n\t\t\t\t\ttmp_name1 = tmp_name.split(\"[\")\t\n\t\t\t\t\ttmp_hit = ''.join(tmp_name[0:-1])\n\t\t\t\t\t\n\t\t\t\t\tif 'Staphylococcus' in line:\n\t\t\t\t\t\t\n\t\t\t\t\t\trecord = True\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\t\n\t\t\t\t\t\trecord = False\n\t\t\t\t\t\t\n\t\t\t\tif line.startswith(\" Score\") and record:\n\t\t\t\t\t\n\t\t\t\t\ttmp = 
line.strip().split()\n\t\t\t\t\ttmp_score_s = tmp[2]\n\t\t\t\t\ttmp_score = float(tmp_score_s)\n\t\t\t\t\ttmp_evalue = float(tmp[7].replace(\",\",\"\"))\n\t\t\t\t\tseqsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])\n\t\t\n\t\t\t\tif line.startswith(\" Identities\")and counter <len(seqsas) and record:\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split() \n\t\t\t\t\ttmp_id = tmp[3]\n\t\t\t\t\ttmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tids = int(tmp_ids)\n\t\t\t\t\ttmp_po = tmp[7]\n\t\t\t\t\ttmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tpos = int(tmp_pos)\n\t\t\t\t\ttmp_gap = tmp[11]\n\t\t\t\t\ttmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tgaps_percent = int(tmp_gaps)\n\t\t\t\t\tgap_number = int(tmp[10].split('/')[0])\n\t\t\t\t\talignment_length = int(tmp[10].split('/')[-1])\n\t\t\t\t\tcoverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 2)\n\t\t\t\t\tseqsas[counter].append(ids)\n\t\t\t\t\tseqsas[counter].append(pos)\n\t\t\t\t\tseqsas[counter].append(gaps_percent)\n\t\t\t\t\tseqsas[counter].append(gap_number)\n\t\t\t\t\tseqsas[counter].append(alignment_length)\n\t\t\t\t\tseqsas[counter].append(coverage_percent)\n\t\t\t\t\tcounter+=1\n\t\t\t\t\t\n\t\tpath1 = '%s/RESULT/MODULE1/P1' % curdir_up\n\n\t\tif not os.path.exists(path1):\n\t\t\t\n\t\t\tos.makedirs(path1)\n\t\tfile_name = ntpath.basename('blast_out1%s' % blastfiles) + '.txt'\n\t\t\n\t\twith open(os.path.join(path1,file_name),'w') as out1:\n\t\t\t\n\t\t\tfor item in seqsas[:]:\n\t\t\t\t\n\t\t\t\titem = '\\t'.join([str(x) for x in item])\n\t\t\t\tout1.write('%s\\n' %item)\n\t\t\t\t\n\t\t\tout1.close()\n\t\t\t\t\t\n\telse:\n\t\t\n\t\tstrsas = []\n\t\tqids=[]\n\t\tquery_lengths = []\n\t\tcounter = 0\n\t\tfile2 = open(blastfiles,'r').read()\n\t\tqueries = file2.split('Query=')\n\t\tdatas = queries[1:]\t\n\t\t\n\t\tfor item in 
datas[:]:\n\t\t\t\n\t\t\t\tlines = item.split('\\n')\n\t\t\t\tqid = item.split()[0]\n\t\t\t\tqids.append(qid)\n\t\t\t\t\n\t\t\t\tfor line in lines[:]:\n\t\t\t\t\t\n\t\t\t\t\tif line.startswith('Length='):\n\t\t\t\t\t\t\n\t\t\t\t\t\tquery_lengths.append(int(line.split('=')[-1]))\n\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\n\t\tfor i,data in enumerate(datas[:]):\n\t\t\t\n\t\t\tlines = data.split('\\n')\n\t\t\trecord = False\n\t\t\t\n\t\t\tfor line in lines[:]:\n\t\t\t\t\n\t\t\t\tif line.startswith(\">\") :\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.split(\">\")\n\t\t\t\t\ttmp_name = tmp[1]\n\t\t\t\t\ttmp_hit = tmp_name.split(\"|\")[0]\n\t\t\t\n\t\t\t\t\t\n\t\t\t\tif line.startswith(\" Score\") :\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split()\n\t\t\t\t\ttmp_score_s = tmp[2]\n\t\t\t\t\ttmp_score = float(tmp_score_s)\n\t\t\t\t\ttmp_evalue = float(tmp[7].replace(\",\",\"\"))\n\t\t\t\t\tstrsas.append([qids[i],tmp_hit,tmp_score,tmp_evalue])\n\t\t\n\t\t\t\tif line.startswith(\" Identities\") and counter < len(strsas):\n\t\t\t\t\t\n\t\t\t\t\ttmp = line.strip().split()\n\t\t\t\t\ttmp_id = tmp[3]\n\t\t\t\t\ttmp_ids = tmp_id.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tids = int(tmp_ids)\n\t\t\t\t\ttmp_po = tmp[7]\n\t\t\t\t\ttmp_pos = tmp_po.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tpos = int(tmp_pos)\n\t\t\t\t\ttmp_gap = tmp[11]\n\t\t\t\t\ttmp_gaps = tmp_gap.replace('(','').replace(')','').replace('%','').replace(',','')\n\t\t\t\t\tgaps_percent = int(tmp_gaps)\n\t\t\t\t\tgap_number_1 = Fraction(tmp[10])\n\t\t\t\t\tgap_number = int(tmp[10].split('/')[0])\n\t\t\t\t\talignment_length = int(tmp[10].split('/')[-1])\n\t\t\t\t\tcoverage_percent = round(float((alignment_length - gap_number))/query_lengths[i] * 100, 
2)\n\t\t\t\t\tstrsas[counter].append(ids)\n\t\t\t\t\tstrsas[counter].append(pos)\n\t\t\t\t\tstrsas[counter].append(gaps_percent)\n\t\t\t\t\tstrsas[counter].append(gap_number)\n\t\t\t\t\tstrsas[counter].append(alignment_length)\n\t\t\t\t\tstrsas[counter].append(coverage_percent)\n\t\t\t\t\tcounter +=1\n\t\t\t\t\t\n\t\tpath1 = '%s/RESULT/MODULE1/P1' %curdir_up\n\t\t\n\t\tif not os.path.exists(path1):\n\t\t\t\n\t\t\tos.makedirs(path1)\n\t\tprot_file_name = ntpath.basename('prot_blast_out1%s' % blastfiles) + '.txt'\n\t\t\n\t\twith open(os.path.join(path1,prot_file_name),'w') as out2:\n\t\t\t\n\t\t\tfor item in strsas[:]:\n\t\t\t\t\n\t\t\t\titem = '\\t'.join([str(x) for x in item])\n\t\t\t\tout2.write('%s\\n' %item)\n\t\t\t\t\n\t\t\tout2.close()\n\t\t\ndef parser2():\n\t\n\t\tos.chdir('%s/RESULT/MODULE1/P1' %curdir_up)\n\t\t\n\t\tfor file1 in glob.glob('*.txt'):\n\t\t\tfile_s = open(file1).readlines()\n\t\t\tprepsas = []\n\t\t\t\n\t\t\tfor item in file_s[:]:\n\t\t\t\t\n\t\t\t\titem = item.strip().split('\\t')\n\t\t\t\thit = item[1]\n\t\t\t\te = float(item[3])\n\t\t\t\tids = int(item[4])\n\t\t\t\tcov = float(item[9])\n\t\t\t\tif e <=1e-10 and ids >= 35 and cov >= 75:\n\t\t\t\t\t\n\t\t\t\t\tprepsas.append(item)\n\t\t\t\t\t\t\n\t\t\t\tif len(item) < 10:\n\t\t\t\t\t\n\t\t\t\t\tprint 'not match'\n\t\t\t\n\t\t\tprot_file_name_s = str(file1) \n\t\t\t\n\t\t\tpath2 = '%s/RESULT/MODULE1/P2' %curdir_up\n\t\t\t\n\t\t\tif not os.path.exists(path2):\n\t\t\t\t\n\t\t\t\tos.makedirs(path2)\n\t\t\n\t\t\twith open(os.path.join(path2,prot_file_name_s),'w') as prepsas1:\n\t\t\t\t\n\t\t\t\tfor hits in prepsas[:]:\n\t\t\t\t\t\n\t\t\t\t\thits = '\\t'.join([str(x) for x in hits])\n\t\t\t\t\tprepsas1.write('%s\\n' %hits)\n\t\t\t\t\t\n\t\t\t\tprepsas1.close()\n\t\t\n\t\t\t\t\ndef parser3():\n\t\n\tos.chdir('%s/RESULT/MODULE1/P2' %curdir_up)\n\t\n\tfor file2 in glob.glob('*.txt'):\n\t\t\n\t\tfile3 =open(file2).readlines()\n\t\td = {}\n\t\t\n\t\tfor filters in file3[:]:\n\t\t\t\n\t\t\tkey, value = 
filters.strip(\"\\n\").split(\"\\t\")[0],filters.strip(\"\\n\").split(\"\\t\")[1:]\n\t\t\tkey = key.strip('\\t')\n\t\t\tvalue = [str(x)[0:]for x in value]\n\t\t\t\n\t\t\tif key not in d:\n\t\t\t\t\n\t\t\t\td[key] = [value]\n\t\t\t\t\n\t\t\telif key in d and len(d[key]) <= 250:\n\t\t\t\t\n\t\t\t\td[key].append(value)\n\t\t\t\t\n\t\tprot_file_name_s = str(file2) \n\t\t\n\t\tpath2 = '%s/RESULT/MODULE1/P3' %curdir_up\n\t\t\n\t\tif not os.path.exists(path2):\n\t\t\t\n\t\t\tos.makedirs(path2)\t\n\t\t\t\t\n\t\twith open(os.path.join(path2,prot_file_name_s),'w') as fp:\n\t\t\t\n\t\t\tfor item in d.keys()[:]:\n\t\t\t\t\n\t\t\t\tline = item\n\t\t\t\thits = d[item]\n\n\t\t\t\tfor hit in hits:\n\t\t\t\t\t\n\t\t\t\t\thit2 = ','.join(hit)\n\t\t\t\t\tline += '\\t%s' % hit2\n\t\t\t\t\t\n\t\t\t\tfp.write(\"%s\\n\" % line)\n\n\t\nparser2()\nparser3()\n\t\n\t\n\t\n\t\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''It can be seen that the number, 125874, and its double, 251748, contain
exactly the same digits, but in a different order.
Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain
the same digits.
'''
import common
import itertools
def digits(x):
return set(int(d) for d in str(x))
common.assertEquals(digits(125874), digits(251748))
def same_digits_as_multiples(x, multiples):
d = digits(x)
# duplicate digits are implicitly forbidden
if len(d) != len(str(x)): return False
for i in multiples:
if d != digits(i*x):
return False
return True
common.assertEquals(True, same_digits_as_multiples(125874, [2]))
common.assertEquals(False, same_digits_as_multiples(123456, [2]))
def euler052():
multiples = range(2,7)
for i in itertools.count(10**5): # solution must have at least 6 digits
if same_digits_as_multiples(i, multiples):
return i
common.submit(euler052(), expected=142857)
|
normal
|
{
"blob_id": "2ec8b9a92f8dd42faf99f0cd569ebf356e12c1d6",
"index": 8042,
"step-1": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\n<mask token>\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\n<mask token>\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\n<mask token>\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\ncommon.assertEquals(digits(125874), digits(251748))\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\ncommon.submit(euler052(), expected=142857)\n",
"step-4": "<mask token>\nimport common\nimport itertools\n\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\n\ncommon.assertEquals(digits(125874), digits(251748))\n\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n if len(d) != len(str(x)):\n return False\n for i in multiples:\n if d != digits(i * x):\n return False\n return True\n\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\n\ndef euler052():\n multiples = range(2, 7)\n for i in itertools.count(10 ** 5):\n if same_digits_as_multiples(i, multiples):\n return i\n\n\ncommon.submit(euler052(), expected=142857)\n",
"step-5": "'''It can be seen that the number, 125874, and its double, 251748, contain\nexactly the same digits, but in a different order.\n\nFind the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain\nthe same digits.\n'''\n\nimport common\nimport itertools\n\ndef digits(x):\n return set(int(d) for d in str(x))\n\ncommon.assertEquals(digits(125874), digits(251748))\n\ndef same_digits_as_multiples(x, multiples):\n d = digits(x)\n\n # duplicate digits are implicitly forbidden\n if len(d) != len(str(x)): return False\n\n for i in multiples:\n if d != digits(i*x):\n return False\n return True\n\ncommon.assertEquals(True, same_digits_as_multiples(125874, [2]))\ncommon.assertEquals(False, same_digits_as_multiples(123456, [2]))\n\ndef euler052():\n multiples = range(2,7)\n for i in itertools.count(10**5): # solution must have at least 6 digits\n if same_digits_as_multiples(i, multiples):\n return i\n\ncommon.submit(euler052(), expected=142857)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
n,m=map(int,input().split())
l=list(map(int,input().split()))
t=0
result=[0 for i in range(0,n)]
result.insert(0,1)
while(t<m):
#print(t)
for i in range(l[t],n+1):
result[i]=result[i]+result[i-l[t]]
t=t+1
print(result[-1])
0 1 2 3 4
1 [1,1,1,1,1]
2 [1 1 2 2 3]
3 [1 1 2 3 4]
|
normal
|
{
"blob_id": "56640454efce16e0c873d557ac130775a4a2ad8d",
"index": 6734,
"step-1": "n,m=map(int,input().split())\r\nl=list(map(int,input().split()))\r\nt=0\r\nresult=[0 for i in range(0,n)]\r\nresult.insert(0,1)\r\nwhile(t<m):\r\n #print(t)\r\n for i in range(l[t],n+1):\r\n result[i]=result[i]+result[i-l[t]] \r\n t=t+1\r\nprint(result[-1])\r\n \r\n 0 1 2 3 4\r\n1 [1,1,1,1,1] \r\n2 [1 1 2 2 3]\r\n3 [1 1 2 3 4]\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==
current_year, wall))
tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
morph = pymorphy2.MorphAnalyzer()
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'
), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].
tag, tokenizer.tokenize(sentence.replace('\xa0', ' '))))
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)
)
if not top_words:
return
def color_func(word, font_size, position, orientation, random_state=
None, **kwargs):
return 'rgb(0, 0, 0)'
sw = stopwords.words('russian') + stopwords.words('english') + remove_words
wordcloud = WordCloud(max_words=50, max_font_size=500, background_color
='white', margin=5, width=1000, height=1000, stopwords=sw,
prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(
top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3
).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
img_arr.seek(0)
return img_arr, wall, top_words
<|reserved_special_token_0|>
def worker(q, old=False):
while True:
item = q.get()
try:
item[0](*item[1], **item[2])
except Exception:
pass
q.task_done()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('Agg')
print('Connecting to VK...', end=' ')
<|reserved_special_token_0|>
print('Done')
print('Connecting to MongoDB...', end=' ')
<|reserved_special_token_0|>
print('Done')
<|reserved_special_token_0|>
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==
current_year, wall))
tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
morph = pymorphy2.MorphAnalyzer()
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'
), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].
tag, tokenizer.tokenize(sentence.replace('\xa0', ' '))))
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)
)
if not top_words:
return
def color_func(word, font_size, position, orientation, random_state=
None, **kwargs):
return 'rgb(0, 0, 0)'
sw = stopwords.words('russian') + stopwords.words('english') + remove_words
wordcloud = WordCloud(max_words=50, max_font_size=500, background_color
='white', margin=5, width=1000, height=1000, stopwords=sw,
prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(
top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3
).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
img_arr.seek(0)
return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
if user_id in processing:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Подожди, я составляю твое облако тегов')
return
if message.lower() != 'облако':
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово "облако" без кавычек 🙃'
)
return
processing.append(user_id)
print('Generating cloud for', user_id)
try:
if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'
)
processing.remove(user_id)
print('Removed (1) cloud from processing for', user_id)
time.sleep(5)
return
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'
)
user = vk.users.get(user_ids=user_id)[0]
user_id = user['id']
name = user['first_name'] + ' ' + user['last_name']
clouded = cloud(user_id)
if not clouded:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'
)
processing.remove(user_id)
print('Removed (2) cloud from processing for', user_id)
time.sleep(5)
return
clouded, wall, top_words = clouded
photo = vk_upload.photo(clouded, album_id=config.album_id, group_id
=config.group_id)[0]
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.
format(photo['owner_id'], photo['id']))
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Не забудь поделиться с друзьями 😉')
post_id = None
if len(top_words) > 100:
try:
post_id = vk.wall.post(owner_id='-{}'.format(config.
group_id), from_group=1, message=
'Облако тегов для *id{}({})'.format(user_id, name),
attachments='photo{}_{}'.format(photo['owner_id'],
photo['id']))['post_id']
except Exception as e:
processing.remove(user_id)
print(e)
if send:
vk_group.messages.send(user_id=user_id, random_id=
random.randint(0, 99999999), message=
'Похоже, я превысил лимит количества постов на сегодня 😭'
)
vk_group.messages.send(user_id=user_id, random_id=
random.randint(0, 99999999), message=
'Создай новое облако завтра, и я выложу его на стену группы 😎'
)
print('Removed (3) cloud from processing for', user_id)
if post_id:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), attachment='wall{}_{}'.format(
photo['owner_id'], post_id))
processing.remove(user_id)
print('Finished cloud for', user_id)
except Exception as e:
processing.remove(user_id)
print('Finished cloud for', user_id, 'with error')
raise e
def worker(q, old=False):
while True:
item = q.get()
try:
item[0](*item[1], **item[2])
except Exception:
pass
q.task_done()
if __name__ == '__main__':
q = Queue()
for i in range(10):
t = Thread(target=worker, args=(q,))
t.setDaemon(True)
t.start()
print('Initializing longpoll connection...', end=' ')
longpoll = VkLongPoll(vk_group_session)
print('Done')
for event in longpoll.listen():
if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event
.user_id not in processing):
print(event.user_id, event.text)
q.put((send_cloud, (event.user_id, event.text), {}))
q.join()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
matplotlib.use('Agg')
print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')
print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')
remove_words = ['год']
DIR = os.path.dirname(__file__)
processing = []
current_year = datetime.now().year - 1 if datetime.now(
).month != 12 else datetime.now().year
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==
current_year, wall))
tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
morph = pymorphy2.MorphAnalyzer()
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'
), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].
tag, tokenizer.tokenize(sentence.replace('\xa0', ' '))))
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)
)
if not top_words:
return
def color_func(word, font_size, position, orientation, random_state=
None, **kwargs):
return 'rgb(0, 0, 0)'
sw = stopwords.words('russian') + stopwords.words('english') + remove_words
wordcloud = WordCloud(max_words=50, max_font_size=500, background_color
='white', margin=5, width=1000, height=1000, stopwords=sw,
prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(
top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3
).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
img_arr.seek(0)
return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
if user_id in processing:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Подожди, я составляю твое облако тегов')
return
if message.lower() != 'облако':
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово "облако" без кавычек 🙃'
)
return
processing.append(user_id)
print('Generating cloud for', user_id)
try:
if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'
)
processing.remove(user_id)
print('Removed (1) cloud from processing for', user_id)
time.sleep(5)
return
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'
)
user = vk.users.get(user_ids=user_id)[0]
user_id = user['id']
name = user['first_name'] + ' ' + user['last_name']
clouded = cloud(user_id)
if not clouded:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'
)
processing.remove(user_id)
print('Removed (2) cloud from processing for', user_id)
time.sleep(5)
return
clouded, wall, top_words = clouded
photo = vk_upload.photo(clouded, album_id=config.album_id, group_id
=config.group_id)[0]
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.
format(photo['owner_id'], photo['id']))
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Не забудь поделиться с друзьями 😉')
post_id = None
if len(top_words) > 100:
try:
post_id = vk.wall.post(owner_id='-{}'.format(config.
group_id), from_group=1, message=
'Облако тегов для *id{}({})'.format(user_id, name),
attachments='photo{}_{}'.format(photo['owner_id'],
photo['id']))['post_id']
except Exception as e:
processing.remove(user_id)
print(e)
if send:
vk_group.messages.send(user_id=user_id, random_id=
random.randint(0, 99999999), message=
'Похоже, я превысил лимит количества постов на сегодня 😭'
)
vk_group.messages.send(user_id=user_id, random_id=
random.randint(0, 99999999), message=
'Создай новое облако завтра, и я выложу его на стену группы 😎'
)
print('Removed (3) cloud from processing for', user_id)
if post_id:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), attachment='wall{}_{}'.format(
photo['owner_id'], post_id))
processing.remove(user_id)
print('Finished cloud for', user_id)
except Exception as e:
processing.remove(user_id)
print('Finished cloud for', user_id, 'with error')
raise e
def worker(q, old=False):
while True:
item = q.get()
try:
item[0](*item[1], **item[2])
except Exception:
pass
q.task_done()
if __name__ == '__main__':
q = Queue()
for i in range(10):
t = Thread(target=worker, args=(q,))
t.setDaemon(True)
t.start()
print('Initializing longpoll connection...', end=' ')
longpoll = VkLongPoll(vk_group_session)
print('Done')
for event in longpoll.listen():
if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event
.user_id not in processing):
print(event.user_id, event.text)
q.put((send_cloud, (event.user_id, event.text), {}))
q.join()
<|reserved_special_token_1|>
import _thread
import os
from queue import Queue
from threading import Thread
import random
import io
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from datetime import datetime, timedelta
import time
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pymorphy2
from pymongo import MongoClient
import config
import matplotlib
matplotlib.use('Agg')
print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')
print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')
remove_words = ['год']
DIR = os.path.dirname(__file__)
processing = []
current_year = datetime.now().year - 1 if datetime.now(
).month != 12 else datetime.now().year
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==
current_year, wall))
tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
morph = pymorphy2.MorphAnalyzer()
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'
), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].
tag, tokenizer.tokenize(sentence.replace('\xa0', ' '))))
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)
)
if not top_words:
return
def color_func(word, font_size, position, orientation, random_state=
None, **kwargs):
return 'rgb(0, 0, 0)'
sw = stopwords.words('russian') + stopwords.words('english') + remove_words
wordcloud = WordCloud(max_words=50, max_font_size=500, background_color
='white', margin=5, width=1000, height=1000, stopwords=sw,
prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(
top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3
).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
img_arr.seek(0)
return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
if user_id in processing:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Подожди, я составляю твое облако тегов')
return
if message.lower() != 'облако':
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово "облако" без кавычек 🙃'
)
return
processing.append(user_id)
print('Generating cloud for', user_id)
try:
if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'
)
processing.remove(user_id)
print('Removed (1) cloud from processing for', user_id)
time.sleep(5)
return
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'
)
user = vk.users.get(user_ids=user_id)[0]
user_id = user['id']
name = user['first_name'] + ' ' + user['last_name']
clouded = cloud(user_id)
if not clouded:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'
)
processing.remove(user_id)
print('Removed (2) cloud from processing for', user_id)
time.sleep(5)
return
clouded, wall, top_words = clouded
photo = vk_upload.photo(clouded, album_id=config.album_id, group_id
=config.group_id)[0]
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.
format(photo['owner_id'], photo['id']))
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), message=
'Не забудь поделиться с друзьями 😉')
post_id = None
if len(top_words) > 100:
try:
post_id = vk.wall.post(owner_id='-{}'.format(config.
group_id), from_group=1, message=
'Облако тегов для *id{}({})'.format(user_id, name),
attachments='photo{}_{}'.format(photo['owner_id'],
photo['id']))['post_id']
except Exception as e:
processing.remove(user_id)
print(e)
if send:
vk_group.messages.send(user_id=user_id, random_id=
random.randint(0, 99999999), message=
'Похоже, я превысил лимит количества постов на сегодня 😭'
)
vk_group.messages.send(user_id=user_id, random_id=
random.randint(0, 99999999), message=
'Создай новое облако завтра, и я выложу его на стену группы 😎'
)
print('Removed (3) cloud from processing for', user_id)
if post_id:
if send:
vk_group.messages.send(user_id=user_id, random_id=random.
randint(0, 99999999), attachment='wall{}_{}'.format(
photo['owner_id'], post_id))
processing.remove(user_id)
print('Finished cloud for', user_id)
except Exception as e:
processing.remove(user_id)
print('Finished cloud for', user_id, 'with error')
raise e
def worker(q, old=False):
while True:
item = q.get()
try:
item[0](*item[1], **item[2])
except Exception:
pass
q.task_done()
if __name__ == '__main__':
q = Queue()
for i in range(10):
t = Thread(target=worker, args=(q,))
t.setDaemon(True)
t.start()
print('Initializing longpoll connection...', end=' ')
longpoll = VkLongPoll(vk_group_session)
print('Done')
for event in longpoll.listen():
if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event
.user_id not in processing):
print(event.user_id, event.text)
q.put((send_cloud, (event.user_id, event.text), {}))
q.join()
<|reserved_special_token_1|>
import _thread
import os
from queue import Queue
from threading import Thread
import random
import io
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from datetime import datetime, timedelta
import time
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from wordcloud import WordCloud
import pymorphy2
from pymongo import MongoClient
import config
import matplotlib
matplotlib.use('Agg')
print('Connecting to VK...', end=' ')
vk_group_session = vk_api.VkApi(token=config.vk_community_token)
vk_group = vk_group_session.get_api()
vk_session = vk_api.VkApi(token=config.vk_user_token)
tools = vk_api.VkTools(vk_session)
vk = vk_session.get_api()
vk_upload = vk_api.VkUpload(vk_session)
print('Done')
print('Connecting to MongoDB...', end=' ')
collection = MongoClient(config.mongo_host)[config.mongo_db]['photos']
print('Done')
remove_words = ['год']
DIR = os.path.dirname(__file__)
processing = []
current_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year
def cloud(user_id):
wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']
wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))
tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')
morph = pymorphy2.MorphAnalyzer()
def transform(sentence):
return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),
filter(
lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,
tokenizer.tokenize(sentence.replace('\xa0', ' '))
)
)
top_words = []
for post in wall:
if 'text' in post:
top_words.extend(transform(post['text']))
if 'copy_history' in post:
for copy in post['copy_history']:
if 'text' in copy:
top_words.extend(transform(copy['text']))
top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))
if not top_words:
return
# def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
# return "hsl(%d, 100%%, %d%%)" % (random.randint(0, 360), random.randint(20, 50))
def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
return "rgb(0, 0, 0)"
sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)
wordcloud = WordCloud(
max_words=50,
max_font_size=500,
background_color='white',
margin=5,
width=1000,
height=1000,
stopwords=sw,
prefer_horizontal=0.7,
font_path='font.ttf'
).generate(' '.join(top_words).lower())
wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()
img_arr = io.BytesIO()
wordcloud.save(img_arr, format='PNG')
img_arr.seek(0)
return img_arr, wall, top_words
def send_cloud(user_id, message, send=True):
if user_id in processing:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message=f'Подожди, я составляю твое облако тегов')
return
if message.lower() != 'облако':
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message=f'Если ты хочешь получить свое облако тегов за {current_year} '
'год, отправь мне слово "облако" без кавычек 🙃')
return
processing.append(user_id)
print('Generating cloud for', user_id)
try:
# if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):
# vk_group.messages.send(user_id=user_id,
# random_id=random.randint(0, 99999999),
# message='Чтобы составить облако тегов, '
# 'подпишись на меня https://vk.com/wwcloud 🙄')
# time.sleep(1)
# vk_group.messages.send(user_id=user_id,
# random_id=random.randint(0, 99999999),
# message='Когда будешь готов, снова отправь кодовое слово "облако" 😊')
# processing.remove(user_id)
# time.sleep(5)
# return
if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Похоже, у тебя недостаточно записей на стене '
'для составления облака тегов☹️')
processing.remove(user_id)
print('Removed (1) cloud from processing for', user_id)
time.sleep(5)
return
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')
user = vk.users.get(user_ids=user_id)[0]
user_id = user['id']
name = user['first_name'] + ' ' + user['last_name']
clouded = cloud(user_id)
if not clouded:
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Похоже, у тебя недостаточно записей на стене '
'для составления облака тегов ☹️')
processing.remove(user_id)
print('Removed (2) cloud from processing for', user_id)
time.sleep(5)
return
clouded, wall, top_words = clouded
photo = vk_upload.photo(
clouded,
album_id=config.album_id,
group_id=config.group_id
)[0]
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999), message='А вот и твое облако тегов! 🌍',
attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999), message='Не забудь поделиться с друзьями 😉')
post_id = None
if len(top_words) > 100:
try:
post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,
message='Облако тегов для *id{}({})'.format(user_id, name),
attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']
except Exception as e:
processing.remove(user_id)
print(e)
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Похоже, я превысил лимит количества постов на сегодня 😭')
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
message='Создай новое облако завтра, и я выложу его на стену группы 😎')
print('Removed (3) cloud from processing for', user_id)
if post_id:
# collection.insert({
# 'user_id': user_id,
# 'owner_id': photo['owner_id'],
# 'id': photo['id'],
# 'post': post_id,
# 'timestamp': time.time(),
# 'length': len(top_words)
# })
if send:
vk_group.messages.send(user_id=user_id,
random_id=random.randint(0, 99999999),
attachment='wall{}_{}'.format(photo['owner_id'], post_id))
# else:
# collection.insert({
# 'user_id': user_id,
# 'owner_id': photo['owner_id'],
# 'id': photo['id'],
# 'timestamp': time.time(),
# 'length': len(top_words)
# })
# if send:
# vk_group.messages.send(
# user_id=user_id,
# random_id=random.randint(0, 99999999),
# message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',
# attachment='wall-136503501_467'
# )
processing.remove(user_id)
print('Finished cloud for', user_id)
except Exception as e:
processing.remove(user_id)
print('Finished cloud for', user_id, 'with error')
raise e
def worker(q, old=False):
    """Run forever, executing callables pulled from the shared queue.

    Each queue item is a tuple ``(callable, args_tuple, kwargs_dict)``.
    A failing task no longer dies silently (the original had a bare
    ``except Exception: pass``): its traceback is printed, but the worker
    thread keeps running either way.

    :param q: ``queue.Queue`` of ``(callable, args, kwargs)`` work items
    :param old: unused; kept only for backward compatibility with callers
    """
    while True:
        # Blocking get: idle workers simply sleep here.
        item = q.get()
        try:
            item[0](*item[1], **item[2])
        except Exception:
            # Surface the failure instead of swallowing it, but never let
            # a bad task kill the worker thread.
            import traceback
            traceback.print_exc()
        finally:
            # Always acknowledge — even on failure or a malformed item —
            # so q.join() in the main thread cannot deadlock.
            q.task_done()
if __name__ == '__main__':
    # Pool of daemon workers draining the shared task queue, so several
    # clouds can be generated concurrently without blocking the longpoll loop.
    q = Queue()
    for _ in range(10):
        # daemon=True via the constructor replaces the deprecated
        # Thread.setDaemon(); daemon threads let the process exit cleanly.
        t = Thread(target=worker, args=(q,), daemon=True)
        t.start()

    print('Initializing longpoll connection...', end=' ')
    longpoll = VkLongPoll(vk_group_session)
    print('Done')

    for event in longpoll.listen():
        # React only to fresh incoming messages from users whose cloud is
        # not already being generated (see the `processing` list).
        if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:
            print(event.user_id, event.text)
            q.put((send_cloud, (event.user_id, event.text), {}))
    q.join()  # unreachable in practice: listen() loops forever
|
flexible
|
{
"blob_id": "03ce69924c885e59e40689dc63e50d54b89649f7",
"index": 2924,
"step-1": "<mask token>\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\n<mask token>\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\n<mask token>\n",
"step-2": "<mask token>\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\n<mask token>\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\n<mask token>\nprint('Done')\n<mask token>\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, 
random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 
🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-3": "<mask token>\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\nprocessing = []\ncurrent_year = datetime.now().year - 1 if datetime.now(\n ).month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n 
).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 
🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-4": "import _thread\nimport os\nfrom queue import Queue\nfrom threading import Thread\nimport random\nimport io\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom datetime import datetime, timedelta\nimport time\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport pymorphy2\nfrom pymongo import MongoClient\nimport config\nimport matplotlib\nmatplotlib.use('Agg')\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\nprocessing = []\ncurrent_year = datetime.now().year - 1 if datetime.now(\n ).month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year ==\n current_year, wall))\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'\n ), filter(lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].\n tag, tokenizer.tokenize(sentence.replace('\\xa0', ' '))))\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words)\n )\n if not top_words:\n return\n\n def color_func(word, font_size, position, orientation, random_state=\n 
None, **kwargs):\n return 'rgb(0, 0, 0)'\n sw = stopwords.words('russian') + stopwords.words('english') + remove_words\n wordcloud = WordCloud(max_words=50, max_font_size=500, background_color\n ='white', margin=5, width=1000, height=1000, stopwords=sw,\n prefer_horizontal=0.7, font_path='font.ttf').generate(' '.join(\n top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3\n ).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Если ты хочешь получить свое облако тегов за {current_year} год, отправь мне слово \"облако\" без кавычек 🙃'\n )\n return\n processing.append(user_id)\n print('Generating cloud for', user_id)\n try:\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для составления облака тегов☹️'\n )\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋'\n )\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Похоже, у тебя недостаточно записей на стене для 
составления облака тегов ☹️'\n )\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(clouded, album_id=config.album_id, group_id\n =config.group_id)[0]\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'А вот и твое облако тегов! 🌍', attachment='photo{}_{}'.\n format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), message=\n 'Не забудь поделиться с друзьями 😉')\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.\n group_id), from_group=1, message=\n 'Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'],\n photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Похоже, я превысил лимит количества постов на сегодня 😭'\n )\n vk_group.messages.send(user_id=user_id, random_id=\n random.randint(0, 99999999), message=\n 'Создай новое облако завтра, и я выложу его на стену группы 😎'\n )\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n if send:\n vk_group.messages.send(user_id=user_id, random_id=random.\n randint(0, 99999999), attachment='wall{}_{}'.format(\n photo['owner_id'], post_id))\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n item = q.get()\n try:\n item[0](*item[1], **item[2])\n except Exception:\n pass\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n 
print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n for event in longpoll.listen():\n if (event.to_me and event.type == VkEventType.MESSAGE_NEW and event\n .user_id not in processing):\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-5": "import _thread\nimport os\nfrom queue import Queue\nfrom threading import Thread\nimport random\nimport io\nimport vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom datetime import datetime, timedelta\nimport time\nfrom nltk.tokenize import RegexpTokenizer\nfrom nltk.corpus import stopwords\nfrom wordcloud import WordCloud\nimport pymorphy2\nfrom pymongo import MongoClient\nimport config\nimport matplotlib\n\nmatplotlib.use('Agg')\n\nprint('Connecting to VK...', end=' ')\nvk_group_session = vk_api.VkApi(token=config.vk_community_token)\nvk_group = vk_group_session.get_api()\nvk_session = vk_api.VkApi(token=config.vk_user_token)\ntools = vk_api.VkTools(vk_session)\nvk = vk_session.get_api()\nvk_upload = vk_api.VkUpload(vk_session)\nprint('Done')\n\nprint('Connecting to MongoDB...', end=' ')\ncollection = MongoClient(config.mongo_host)[config.mongo_db]['photos']\nprint('Done')\n\nremove_words = ['год']\nDIR = os.path.dirname(__file__)\n\nprocessing = []\n\ncurrent_year = datetime.now().year - 1 if datetime.now().month != 12 else datetime.now().year\n\n\ndef cloud(user_id):\n wall = tools.get_all('wall.get', 100, {'owner_id': user_id})['items']\n wall = list(filter(lambda x: datetime.fromtimestamp(x['date']).year == current_year, wall))\n\n tokenizer = RegexpTokenizer('[а-яА-ЯёЁ]+')\n morph = pymorphy2.MorphAnalyzer()\n\n def transform(sentence):\n return map(lambda x: morph.parse(x)[0].normal_form.replace('ё', 'е'),\n filter(\n lambda x: len(x) > 2 and 'NOUN' in morph.parse(x)[0].tag,\n tokenizer.tokenize(sentence.replace('\\xa0', ' '))\n )\n )\n\n top_words = []\n for post in wall:\n if 'text' in post:\n top_words.extend(transform(post['text']))\n if 'copy_history' in post:\n for copy in post['copy_history']:\n if 'text' in copy:\n top_words.extend(transform(copy['text']))\n top_words = list(filter(lambda x: x.lower() not in remove_words, top_words))\n if not top_words:\n return\n\n # def color_func(word, font_size, position, orientation, 
random_state=None, **kwargs):\n # return \"hsl(%d, 100%%, %d%%)\" % (random.randint(0, 360), random.randint(20, 50))\n\n def color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n return \"rgb(0, 0, 0)\"\n\n sw = (stopwords.words('russian') + stopwords.words('english') + remove_words)\n wordcloud = WordCloud(\n max_words=50,\n max_font_size=500,\n background_color='white',\n margin=5,\n width=1000,\n height=1000,\n stopwords=sw,\n prefer_horizontal=0.7,\n font_path='font.ttf'\n ).generate(' '.join(top_words).lower())\n wordcloud = wordcloud.recolor(color_func=color_func, random_state=3).to_image()\n img_arr = io.BytesIO()\n wordcloud.save(img_arr, format='PNG')\n img_arr.seek(0)\n return img_arr, wall, top_words\n\n\ndef send_cloud(user_id, message, send=True):\n if user_id in processing:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Подожди, я составляю твое облако тегов')\n return\n if message.lower() != 'облако':\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Если ты хочешь получить свое облако тегов за {current_year} '\n 'год, отправь мне слово \"облако\" без кавычек 🙃')\n return\n\n processing.append(user_id)\n\n print('Generating cloud for', user_id)\n try:\n # if not vk.groups.isMember(group_id=config.group_id, user_id=user_id):\n # vk_group.messages.send(user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Чтобы составить облако тегов, '\n # 'подпишись на меня https://vk.com/wwcloud 🙄')\n # time.sleep(1)\n # vk_group.messages.send(user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Когда будешь готов, снова отправь кодовое слово \"облако\" 😊')\n # processing.remove(user_id)\n # time.sleep(5)\n # return\n if len(vk.wall.get(owner_id=user_id, count=1)['items']) == 0:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n 
message='Похоже, у тебя недостаточно записей на стене '\n 'для составления облака тегов☹️')\n processing.remove(user_id)\n print('Removed (1) cloud from processing for', user_id)\n time.sleep(5)\n return\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message=f'Посмотрим, что тебя интересовало в {current_year} году больше всего 😋')\n user = vk.users.get(user_ids=user_id)[0]\n user_id = user['id']\n name = user['first_name'] + ' ' + user['last_name']\n clouded = cloud(user_id)\n if not clouded:\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, у тебя недостаточно записей на стене '\n 'для составления облака тегов ☹️')\n processing.remove(user_id)\n print('Removed (2) cloud from processing for', user_id)\n time.sleep(5)\n return\n clouded, wall, top_words = clouded\n photo = vk_upload.photo(\n clouded,\n album_id=config.album_id,\n group_id=config.group_id\n )[0]\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999), message='А вот и твое облако тегов! 
🌍',\n attachment='photo{}_{}'.format(photo['owner_id'], photo['id']))\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999), message='Не забудь поделиться с друзьями 😉')\n\n post_id = None\n if len(top_words) > 100:\n try:\n post_id = vk.wall.post(owner_id='-{}'.format(config.group_id), from_group=1,\n message='Облако тегов для *id{}({})'.format(user_id, name),\n attachments='photo{}_{}'.format(photo['owner_id'], photo['id']))['post_id']\n except Exception as e:\n processing.remove(user_id)\n print(e)\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Похоже, я превысил лимит количества постов на сегодня 😭')\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n message='Создай новое облако завтра, и я выложу его на стену группы 😎')\n print('Removed (3) cloud from processing for', user_id)\n if post_id:\n # collection.insert({\n # 'user_id': user_id,\n # 'owner_id': photo['owner_id'],\n # 'id': photo['id'],\n # 'post': post_id,\n # 'timestamp': time.time(),\n # 'length': len(top_words)\n # })\n if send:\n vk_group.messages.send(user_id=user_id,\n random_id=random.randint(0, 99999999),\n attachment='wall{}_{}'.format(photo['owner_id'], post_id))\n # else:\n # collection.insert({\n # 'user_id': user_id,\n # 'owner_id': photo['owner_id'],\n # 'id': photo['id'],\n # 'timestamp': time.time(),\n # 'length': len(top_words)\n # })\n\n # if send:\n # vk_group.messages.send(\n # user_id=user_id,\n # random_id=random.randint(0, 99999999),\n # message='Кстати, у нас в группе проходит конкурс, советую принять участие 😉',\n # attachment='wall-136503501_467'\n # )\n\n processing.remove(user_id)\n print('Finished cloud for', user_id)\n except Exception as e:\n processing.remove(user_id)\n print('Finished cloud for', user_id, 'with error')\n raise e\n\n\ndef worker(q, old=False):\n while True:\n # Получаем задание из очереди\n item = q.get()\n try:\n item[0](*item[1], 
**item[2])\n except Exception:\n pass\n # Сообщаем о выполненном задании\n q.task_done()\n\n\nif __name__ == '__main__':\n q = Queue()\n for i in range(10):\n t = Thread(target=worker, args=(q,))\n t.setDaemon(True)\n t.start()\n\n print('Initializing longpoll connection...', end=' ')\n longpoll = VkLongPoll(vk_group_session)\n print('Done')\n\n for event in longpoll.listen():\n if event.to_me and event.type == VkEventType.MESSAGE_NEW and event.user_id not in processing:\n print(event.user_id, event.text)\n q.put((send_cloud, (event.user_id, event.text), {}))\n q.join()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
# Project-local helper module (not stdlib); provides the text utilities this
# data module is consumed with — TODO confirm what `text` exposes.
import text
# Placeholder value used where a field in the vocabulary data has no content.
nan=""
# Maps section labels ('1.1' … '1.21', '2.1' … '2.15') to an integer index,
# with 'start'/'end' sentinels mapped to -1/-2.  NOTE(review): the values
# appear to be cumulative word-count boundaries into the vocabulary list that
# follows (entries there are tagged '1_1', '1_2', …) — confirm against the
# code that reads this dict before relying on that interpretation.
section_words = {'start': -1, '1.1': 17, '1.2': 38, '1.3': 55, '1.4': 76, '1.5': 95, '1.6': 114, '1.7': 133, '1.8': 151, '1.9': 170, '1.10': 190, '1.11': 209, '1.12': 233, '1.13': 257, '1.14': 277, '1.15': 299, '1.16': 320, '1.17': 341, '1.18': 364, '1.19': 385, '1.20': 405, '1.21': 428, '2.1': 451, '2.2': 474, '2.3': 495, '2.4': 521, '2.5': 542, '2.6': 564, '2.7': 587, '2.8': 611, '2.9': 633, '2.10': 653, '2.11': 674, '2.12': 695, '2.13': 718, '2.14': 740, '2.15': 760, 'end': -2}
the_text = [('AGRICOLA', 0, 'agricola', 'farmer', '', '1_1', 1), ('AMBVLO', 1, 'ambulo', 'to walk', '', '1_1', 2), ('AMO', 2, 'amo', 'to love', '', '1_1', 2), ('AQVA', 3, 'aqua', 'water', '', '1_1', 1), ('ATHLETA', 4, 'athleta', 'athlete', '', '1_1', 1), ('BENE', 5, 'bene', 'well', '', '1_1', 1), ('CVRO', 6, 'curo', 'to take care for/of', '', '1_1', 2), ('ET/2', 7, 'et', 'and', '', '1_1', 1), ('FILIA', 8, 'filia', 'daughter', '', '1_1', 1), ('ITAQVE', 9, 'itaque', 'and so', '', '1_1', 1), ('LVPA', 10, 'lupa', 'she–wolf', '', '1_1', 1), ('NAVTA', 11, 'nauta', 'sailor', '', '1_1', 1), ('POETA', 12, 'poeta', 'poet', '', '1_1', 1), ('POSTEA', 13, 'postea', 'afterwards', '', '1_1', 1), ('PVELLA', 14, 'puella', 'girl', '', '1_1', 1), ('ROMA/N', 15, 'Roma', 'Rome', '', '1_1', 1), ('SVM/1', 16, 'sum', 'to be', '', '1_1', 3), ('TERRA', 17, 'terra', 'land', '', '1_1', 1), ('AMBVLO', 18, 'ambulo', 'to walk', '', '1_2', 2), ('AMO', 19, 'amo', 'to love', '', '1_2', 2), ('CVRO', 20, 'curo', 'to take care for/of', '', '1_2', 2), ('SVM/1', 21, 'sum', 'to be', '', '1_2', 3), ('DEBEO', 22, 'debeo', 'ought, must, should; to owe', '', '1_2', 1), ('DIV', 23, 'diu', 'for a long time', '', '1_2', 1), ('EGO', 24, 'ego', 'I', '', '1_2', 3), ('EXSPECTO', 25, 'exspecto', 'to wait for, await, expect', '', '1_2', 1), ('FABVLA/1', 26, 'fabula', 'story', '', '1_2', 1), ('FORMA', 27, 'forma', 'form,appearance', '', '1_2', 1), ('HABEO', 28, 'habeo', 'to have', '', '1_2', 1), ('HABITO', 29, 'habito', 'to live, dwell', '', '1_2', 1), ('NARRO', 30, 'narro', 'to tell', '', '1_2', 1), ('NON', 31, 'non', 'not', '', '1_2', 1), ('NVNC', 32, 'nunc', 'now', '', '1_2', 1), ('PARO/2', 33, 'paro', 'to prepare, get ready, design', '', '1_2', 2), ('PATRIA', 34, 'patria', 'fatherland', '', '1_2', 1), ('TENEO', 35, 'teneo', 'to hold', '', '1_2', 1), ('TV', 36, 'tu', 'you', '', '1_2', 3), ('VIDEO', 37, 'video', 'to see', '', '1_2', 1), ('VOCO', 38, 'voco', 'to call', '', '1_2', 1), ('EGO', 39, 'ego', 'I', '', 
'1_3', 3), ('TV', 40, 'tu', 'you', '', '1_3', 3), ('AGER', 41, 'ager', 'field', '', '1_3', 1), ('AMICVS/1', 42, 'amicus', 'friend', '', '1_3', 1), ('ANIMVS', 43, 'animus', 'spirit, soul, mind', '', '1_3', 1), ('CASA', 44, 'casa', 'little house, cottage', '', '1_3', 1), ('CVM/2', 45, 'cum', 'with (w/ abl.)', '', '1_3', 1), ('DEINDE', 46, 'deinde', 'then', '', '1_3', 1), ('DOMVS', 47, 'domus', 'home', '', '1_3', 2), ('FILIVS', 48, 'filius', 'son', '', '1_3', 1), ('IN', 49, 'in', 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_3', 2), ('PVER', 50, 'puer', 'boy', '', '1_3', 1), ('RIVVS', 51, 'rivus', 'brook, stream', '', '1_3', 1), ('TIMEO', 52, 'timeo', 'to fear, to be afraid', '', '1_3', 1), ('VALDE', 53, 'valde', 'very, exceedingly', '', '1_3', 1), ('VIA', 54, 'via', 'road', '', '1_3', 1), ('VIR', 55, 'vir', 'man', '', '1_3', 1), ('IN', 56, 'in', 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_4', 2), ('AD/2', 57, 'ad', 'into, towards, to (w/ acc.)', '', '1_4', 1), ('ARMATVS/2', 58, 'armatus', 'armed', '', '1_4', 1), ('AVTEM', 59, 'autem', 'however', '', '1_4', 1), ('BELLVM', 60, 'bellum', 'war', '', '1_4', 1), ('BONVS', 61, 'bonus', 'good', '', '1_4', 1), ('CASTRA/2', 62, 'castra', 'camp', '', '1_4', 1), ('DO', 63, 'do', 'to give', '', '1_4', 1), ('DOLVS', 64, 'dolus', 'trickery, deception', '', '1_4', 1), ('EX', 65, 'e', 'from, out of (w/ abl.)', '', '1_4', 1), ('INTRO/2', 66, 'intro', 'to enter', '', '1_4', 1), ('IVBEO', 67, 'iubeo', 'to order somebody (acc.) 
to do something (inf.)', '', '1_4', 1), ('IVSTVS', 68, 'iustus', 'legitimate, open, just', '', '1_4', 1), ('MAGNVS', 69, 'magnus', 'large, great, important', '', '1_4', 1), ('MALVS/3', 70, 'malus', 'bad', '', '1_4', 1), ('PRAECLARVS', 71, 'praeclarus', 'famous, distinguished', '', '1_4', 1), ('PRAEMIVM', 72, 'praemium', 'reward', '', '1_4', 1), ('ROMANVS/A', 73, 'Romanus', 'Roman; the Romans (pl.)', '', '1_4', 1), ('SED', 74, 'sed', 'but', '', '1_4', 1), ('VENENVM', 75, 'venenum', 'poison', '', '1_4', 1), ('VINCVLVM', 76, 'vinculum', 'chain, fetter', '', '1_4', 1), ('PARO/2', 77, 'paro', 'to prepare, get ready, design', '', '1_5', 2), ('AB', 78, 'a', 'by, from (w/ abl.)', '', '1_5', 1), ('AVXILIVM', 79, 'auxilium', 'help', '', '1_5', 1), ('COGITO', 80, 'cogito', 'to think', '', '1_5', 1), ('CONSILIVM', 81, 'consilium', 'plan, advice', '', '1_5', 2), ('DE', 82, 'de', 'about, concerning, down from (w/ abl.)', '', '1_5', 1), ('DOLEO', 83, 'doleo', 'to feel pain, to be hurt', '', '1_5', 1), ('EPISTOLA', 84, 'epistula', 'letter', '', '1_5', 1), ('FAMILIA', 85, 'familia', 'family, household', '', '1_5', 1), ('GAVDIVM', 86, 'gaudium', 'joy', '', '1_5', 1), ('LACRIMA', 87, 'lacrima', 'tear', '', '1_5', 1), ('LONGE', 88, 'longe', 'far', '', '1_5', 1), ('LONGVS', 89, 'longus', 'long', '', '1_5', 1), ('MISER', 90, 'miser', 'wretched, sad, miserable', '', '1_5', 1), ('NAM', 91, 'nam', 'for, in fact', '', '1_5', 1), ('NONSOLVM', 92, 'non', 'not only…, but also…', '', '1_5', 1), ('PVLCHER', 93, 'pulcher', 'beautiful, nice', '', '1_5', 1), ('SEMPER', 94, 'semper', 'always', '', '1_5', 1), ('TAMEN', 95, 'tamen', 'however', '', '1_5', 2), ('SVM/1', 96, 'sum', 'to be', '', '1_6', 3), ('DOCEO', 97, 'doceo', 'to teach', '', '1_6', 1), ('DVM/2', 98, 'dum', 'while', '', '1_6', 1), ('EXEMPLVM', 99, 'exemplum', 'example', '', '1_6', 1), ('FIRMO', 100, 'firmo', 'to strengthen', '', '1_6', 1), ('IACEO', 101, 'iaceo', 'to lie down, to be inert', '', '1_6', 1), ('IVDICO', 102, 'iudico', 'to 
judge', '', '1_6', 1), ('LIBER/1', 103, 'liber', 'book', '', '1_6', 1), ('LITTERA', 104, 'littera', 'letter of the alphabet (sing.); literature, letter (pl.)', '', '1_6', 1), ('MANEO', 105, 'maneo', 'to remain', '', '1_6', 1), ('MEMORIA', 106, 'memoria', 'memory', '', '1_6', 1), ('MVLTVS', 107, 'multus', 'much, many', '', '1_6', 1), ('POSSVM/1', 108, 'possum', 'to be able, can', '', '1_6', 1), ('PROPTER/2', 109, 'propter', 'because of, on account of (w/ acc.)', '', '1_6', 1), ('SAEPE', 110, 'saepe', 'often', '', '1_6', 1), ('SERVO', 111, 'servo', 'to save, preserve', '', '1_6', 1), ('SOLEO', 112, 'soleo', 'to be accustomed (w/ inf.)', '', '1_6', 1), ('TENEBRAE', 113, 'tenebrae', 'shadows, darkness (pl.)', '', '1_6', 1), ('VITA', 114, 'vita', 'life', '', '1_6', 1), ('AESTIMO', 115, 'aestimo', 'to regard, esteem', '', '1_7', 1), ('AESTIMOVNIVSASSIS', 116, 'aestimo', 'I do not care a bit ', '', '1_7', 1), ('AMOR', 117, 'amor', 'love', '', '1_7', 1), ('DELICIA/1', 118, 'delicia', 'delight, pet', '', '1_7', 1), ('DIGITVS', 119, 'digitus', 'finger', '', '1_7', 1), ('DOMINA', 120, 'domina', 'mistress', '', '1_7', 1), ('GREMIVM', 121, 'gremium', 'lap', '', '1_7', 1), ('INVIDEO', 122, 'invideo', 'to envy', '', '1_7', 1), ('MEVS', 123, 'meus', 'my', '', '1_7', 1), ('OCVLVS', 124, 'oculus', 'eye', '', '1_7', 1), ('PASSER', 125, 'passer', 'sparrow', '', '1_7', 1), ('PAX', 126, 'pax', 'peace; favor', '', '1_7', 1), ('PVTO', 127, 'puto', 'to think', '', '1_7', 1), ('SENEX/1', 128, 'senex', 'old man', '', '1_7', 1), ('SESESE', 129, 'se', 'him/her/itself', '', '1_7', 1), ('SEVERVS', 130, 'severus', 'serious', '', '1_7', 1), ('SOROR', 131, 'soror', 'sister', '', '1_7', 1), ('SVI/1', 132, 'sui', 'him–/her–/itself', '', '1_7', 1), ('VERBVM', 133, 'verbum', 'word', '', '1_7', 1), ('CONTRA/2', 134, 'contra', 'against (w/ acc.)', '', '1_8', 1), ('DECERNO', 135, 'decerno', 'to decide, determine (often w/ inf.)', '', '1_8', 1), ('DICO/2', 136, 'dico', 'to say', '', '1_8', 1), ('DVX', 137, 
'dux', 'leader, general', '', '1_8', 1), ('FORTITVDO', 138, 'fortitudo', 'courage', '', '1_8', 1), ('HOMO', 139, 'homo', 'man, human being; people (pl.)', '', '1_8', 1), ('INTELLIGO', 140, 'intellego', 'to understand', '', '1_8', 1), ('LIBERO', 141, 'libero', 'to free someone (acc.) from something (abl.)', '', '1_8', 1), ('MILES', 142, 'miles', 'soldier', '', '1_8', 1), ('NAVIGO', 143, 'navigo', 'to sail, navigate', '', '1_8', 1), ('ORACVLVM', 144, 'oraculum', 'oracle, prophecy', '', '1_8', 1), ('PETO', 145, 'peto', 'to seek', '', '1_8', 1), ('REX', 146, 'rex', 'king', '', '1_8', 1), ('TANDEM', 147, 'tandem', 'finally', '', '1_8', 1), ('TEMPLVM', 148, 'templum', 'temple', '', '1_8', 1), ('TIMOR', 149, 'timor', 'fear', '', '1_8', 1), ('TVM', 150, 'tum', 'then, at that time', '', '1_8', 2), ('VINCO', 151, 'vinco', 'to conquer', '', '1_8', 1), ('ANIMAL', 152, 'animal', 'animal', '', '1_9', 1), ('ARMA', 153, 'arma', 'weapons (pl.)', '', '1_9', 1), ('AVDIO', 154, 'audio', 'to hear, listen', '', '1_9', 1), ('CAPVT', 155, 'caput', 'head', '', '1_9', 1), ('CIVIS', 156, 'civis', 'citizen', '', '1_9', 1), ('CONSVL', 157, 'consul', 'consul', '', '1_9', 1), ('CORPVS', 158, 'corpus', 'body', '', '1_9', 1), ('CREDO', 159, 'credo', 'to believe somebody (w/ dat.)', '', '1_9', 1), ('EXEMPLAR', 160, 'exemplar', 'example', '', '1_9', 1), ('GERO', 161, 'gero', 'to carry; to behave (w/ se)', '', '1_9', 2), ('MARE', 162, 'mare', 'sea', '', '1_9', 1), ('MORS', 163, 'mors', 'death', '', '1_9', 1), ('MVLIER', 164, 'mulier', 'woman', '', '1_9', 1), ('ORATIO', 165, 'oratio', 'oration, speech', '', '1_9', 1), ('SCIO', 166, 'scio', 'to know', '', '1_9', 1), ('SENTIO', 167, 'sentio', 'to perceive', '', '1_9', 1), ('TEMPVS/1', 168, 'tempus', 'time', '', '1_9', 1), ('VENIO', 169, 'venio', 'to come', '', '1_9', 1), ('VRBS', 170, 'urbs', 'city', '', '1_9', 1), ('ACER/2', 171, 'acer', 'keen, fierce', '', '1_10', 1), ('AEDIFICO', 172, 'aedifico', 'to build', '', '1_10', 1), ('CAPIO/2', 173, 'capio', 
'to take, adopt, capture', '', '1_10', 1), ('CELEBER', 174, 'celeber', 'renowned, well–known, crowded', '', '1_10', 1), ('CVPIO', 175, 'cupio', 'to desire, want', '', '1_10', 1), ('DELEO', 176, 'deleo', 'to destroy', '', '1_10', 1), ('DEVS', 177, 'deus', 'god', '', '1_10', 1), ('DONVM', 178, 'donum', 'gift', '', '1_10', 1), ('EQVVS', 179, 'equus', 'horse', '', '1_10', 1), ('FELIX', 180, 'felix', 'fortunate, happy', '', '1_10', 1), ('FLAMMA', 181, 'flamma', 'flame', '', '1_10', 1), ('FORTIS', 182, 'fortis', 'brave, strong', '', '1_10', 1), ('FVGIO', 183, 'fugio', 'to flee, run away', '', '1_10', 1), ('HOSTIS', 184, 'hostis', 'enemy', '', '1_10', 1), ('MOVEO', 185, 'moveo', 'to move', '', '1_10', 1), ('NEC/2', 186, 'nec', 'nor; and not', '', '1_10', 2), ('NOX', 187, 'nox', 'night', '', '1_10', 1), ('PAVCI', 188, 'paucus', 'few', '', '1_10', 1), ('PERICVLVM', 189, 'periculum', 'danger', '', '1_10', 1), ('PVGNO', 190, 'pugno', 'to fight', '', '1_10', 1), ('AGO', 191, 'ago', 'to drive, lead, do, behave', '', '1_11', 1), ('ARDEO', 192, 'ardeo', 'to burn, be on fire', '', '1_11', 1), ('CONSPICIO', 193, 'conspicio', 'to look at, observe', '', '1_11', 1), ('CRVDELIS', 194, 'crudelis', 'cruel', '', '1_11', 1), ('DOLOR', 195, 'dolor', 'grief, pain', '', '1_11', 1), ('ITA', 196, 'ita', 'yes', '', '1_11', 2), ('MINIME', 197, 'minime', 'No', '', '1_11', 1), ('MITTO', 198, 'mitto', 'to send', '', '1_11', 1), ('NE/2', 199, 'ne', '(added to the first word of a question', '', '1_11', 1), ('NOVVS', 200, 'novus', 'new', '', '1_11', 1), ('PARVM/2', 201, 'parum', 'too little', '', '1_11', 2), ('QVE', 202, 'que', 'and', '', '1_11', 1), ('QVOQVE', 203, 'quoque', 'also', '', '1_11', 1), ('REGINA', 204, 'regina', 'queen', '', '1_11', 1), ('RELINQVO', 205, 'relinquo', 'to abandon', '', '1_11', 1), ('SILVA', 206, 'silva', 'forest', '', '1_11', 1), ('SPELVNCA', 207, 'spelunca', 'cave', '', '1_11', 1), ('TEMPESTAS', 208, 'tempestas', 'season', '', '1_11', 1), ('VNA', 209, 'una', 'together', '', 
'1_11', 1), ('BELLVMGERO', 210, 'bellum', 'to wage war', '', '1_12', 1), ('CONSVMO', 211, 'consumo', 'to consume', '', '1_12', 1), ('DEXTERA', 212, 'dextera', 'right hand', '', '1_12', 1), ('FACIO', 213, 'facio', 'to do, make', '', '1_12', 1), ('IBI', 214, 'ibi', 'there', '', '1_12', 1), ('IGNIS', 215, 'ignis', 'fire', '', '1_12', 1), ('INQVIO', 216, 'inquam', 'to say (used with direct speech)', '', '1_12', 3), ('IRA', 217, 'ira', 'anger', '', '1_12', 1), ('IS', 218, 'is', 's/he/it, this, that', '', '1_12', 1), ('NOMEN', 219, 'nomen', 'name', '', '1_12', 1), ('NOS', 220, 'nos', 'we; us', '', '1_12', 1), ('NOSTER', 221, 'noster', 'our, ours', '', '1_12', 1), ('OCCIDO/2', 222, 'occido', 'to strike down, knock down', '', '1_12', 1), ('OSTENDO', 223, 'ostendo', 'to show', '', '1_12', 1), ('PONO', 224, 'pono', 'to place', '', '1_12', 1), ('PROPE/2', 225, 'prope', 'near', '', '1_12', 2), ('PROVIRIBVS', 226, 'pro', 'with all one’s might', '', '1_12', 1), ('SIMILIS', 227, 'similis', 'similar', '', '1_12', 1), ('STATIM', 228, 'statim', 'immediately', '', '1_12', 1), ('TANTVS', 229, 'tantus', 'so much', '', '1_12', 1), ('TVVS', 230, 'tuus', 'your', '', '1_12', 1), ('VESTER', 231, 'vester', 'your', '', '1_12', 1), ('VIS', 232, 'vis', 'force', '', '1_12', 1), ('VOS', 233, 'vos', 'you', '', '1_12', 1), ('EGO', 234, 'ego', 'I', '', '1_13', 3), ('TV', 235, 'tu', 'you', '', '1_13', 3), ('TVM', 236, 'tum', 'then, at that time', '', '1_13', 2), ('ALIVS', 237, 'alius', 'another, other', '', '1_13', 1), ('APVD', 238, 'apud', 'at the house of (w/ acc.)', '', '1_13', 1), ('ATQVE/1', 239, 'atque', 'as', '', '1_13', 2), ('DISCEDO/1', 240, 'discedo', 'to leave, withdraw, go away', '', '1_13', 1), ('DIVES', 241, 'dives', 'rich', '', '1_13', 1), ('DOCTVS', 242, 'doctus', 'learned', '', '1_13', 1), ('DVCO', 243, 'duco', 'to lead, take', '', '1_13', 2), ('ENIM/2', 244, 'enim', 'for, in fact', '', '1_13', 1), ('IVDEX', 245, 'iudex', 'judge', '', '1_13', 1), ('LICET/1', 246, 'licet', 'it is 
allowed, permitted (for someone)(to do something)(w/ dat. and inf.) ', '', '1_13', 1), ('NIHIL', 247, 'nihil', 'nothing', '', '1_13', 1), ('NOLO', 248, 'nolo', 'not to want, to be unwilling', '', '1_13', 2), ('OMNIS', 249, 'omnis', 'each, every, all', '', '1_13', 1), ('PRO/1', 250, 'pro', 'for, on behalf of (w/ abl.)', '', '1_13', 1), ('QVID', 251, 'quid', 'what; why', '', '1_13', 1), ('RESPONDEO', 252, 'respondeo', 'to answer', '', '1_13', 1), ('ROGO', 253, 'rogo', 'to ask', '', '1_13', 1), ('SVVS', 254, 'suus', 'his, her, its, their', '', '1_13', 2), ('TANTVM/2', 255, 'tantum', 'only', '', '1_13', 1), ('VALE', 256, 'vale', 'to greetings! farewell!', '', '1_13', 1), ('VALEO', 257, 'valeo', 'to be able (w/ inf.); to be in good health', '', '1_13', 3), ('ALBVS', 258, 'albus', 'white', '', '1_14', 1), ('ARBOR', 259, 'arbor', 'tree', '', '1_14', 1), ('CADO', 260, 'cado', 'to fall', '', '1_14', 1), ('COMEDO/2', 261, 'comedo', 'to eat', '', '1_14', 1), ('CONVENIO', 262, 'convenio', 'to meet', '', '1_14', 1), ('FLVO', 263, 'fluo', 'to flow', '', '1_14', 1), ('GLADIVS', 264, 'gladius', 'sword', '', '1_14', 1), ('IAM', 265, 'iam', 'already, yet', '', '1_14', 1), ('MOX', 266, 'mox', 'soon', '', '1_14', 1), ('ODIVM', 267, 'odium', 'hatred', '', '1_14', 2), ('OS/1', 268, 'os', 'mouth', '', '1_14', 1), ('PARENS/1', 269, 'parens', 'parent', '', '1_14', 1), ('PECTVS', 270, 'pectus', 'chest', '', '1_14', 1), ('PER', 271, 'per', 'through (w/ acc.)', '', '1_14', 1), ('PRIMVS', 272, 'primus', 'first', '', '1_14', 1), ('QVI/1', 273, 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) ', '', '1_14', 2), ('RVBER', 274, 'ruber', 'red', '', '1_14', 1), ('SANGVIS', 275, 'sanguis', 'blood', '', '1_14', 1), ('SEPARO/2', 276, 'separo', 'to separate, divide', '', '1_14', 1), ('TANGO', 277, 'tango', 'to touch', '', '1_14', 1), ('INQVIO', 278, 'inquam', 'to say (used with direct speech)', '', '1_15', 3), ('QVI/1', 279, 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) 
', '', '1_15', 2), ('ANTE/2', 280, 'ante', 'in front of (w/ acc.)', '', '1_15', 1), ('ARGVMENTVM', 281, 'argumentum', 'proof, indication, argument', '', '1_15', 1), ('CVR/1', 282, 'cur', 'why', '', '1_15', 1), ('DIFFICILIS', 283, 'difficilis', 'difficult', '', '1_15', 1), ('ECCE', 284, 'ecce', 'look here!', '', '1_15', 1), ('ETIAM', 285, 'etiam', 'even, also', '', '1_15', 1), ('FORSITAN', 286, 'forsan', 'perhaps', '', '1_15', 1), ('NEGLIGO', 287, 'neglego', 'to neglect', '', '1_15', 1), ('PARVVS/2', 288, 'parvus', 'small', '', '1_15', 1), ('QVIS/1', 289, 'quis', 'who? which? (inter. pronoun)', '', '1_15', 1), ('RVSTICVS/2', 290, 'rusticus', 'rural, rustic', '', '1_15', 1), ('SAXVM', 291, 'saxum', 'stone, rock', '', '1_15', 1), ('SENECTVS/1', 292, 'senectus', 'old age', '', '1_15', 1), ('SICVT/1', 293, 'sicut', 'just as', '', '1_15', 1), ('STO', 294, 'sto', 'stand', '', '1_15', 1), ('VBIQVE', 295, 'ubique', 'everywhere', '', '1_15', 1), ('VERVS', 296, 'verus', 'real, true', '', '1_15', 1), ('VETVSTVS', 297, 'vetustus', 'old', '', '1_15', 1), ('VILLA', 298, 'villa', 'estate', '', '1_15', 1), ('VMQVAM', 299, 'umquam', 'ever', '', '1_15', 1), ('AVVNCVLVS', 300, 'avunculus', 'uncle', '', '1_16', 1), ('CAELVM/1', 301, 'caelum', 'sky, heaven, weather', '', '1_16', 1), ('CAVSA', 302, 'causa', 'cause, reason', '', '1_16', 1), ('CINIS', 303, 'cinis', 'ash', '', '1_16', 1), ('CLADES', 304, 'clades', 'disaster', '', '1_16', 1), ('CLASSIS', 305, 'classis', 'fleet, class (of people)', '', '1_16', 1), ('FEMINA', 306, 'femina', 'woman', '', '1_16', 1), ('FVMVS', 307, 'fumus', 'smoke', '', '1_16', 1), ('FVNESTVS', 308, 'funestus', 'deadly', '', '1_16', 1), ('IGITVR', 309, 'igitur', 'therefore', '', '1_16', 1), ('INCENDIVM', 310, 'incendium', 'conflagration, eruption', '', '1_16', 1), ('LEGO/2', 311, 'lego', 'to read, choose', '', '1_16', 1), ('LITVS/2', 312, 'litus', 'shore', '', '1_16', 1), ('MATER', 313, 'mater', 'mother', '', '1_16', 1), ('MONS', 314, 'mons', 'mountain', '', 
'1_16', 1), ('NAVIS', 315, 'navis', 'ship', '', '1_16', 1), ('NVBES', 316, 'nubes', 'cloud', '', '1_16', 1), ('NVMQVAM', 317, 'numquam', 'never', '', '1_16', 1), ('OPPRIMO', 318, 'opprimo', 'to overwhelm, suppress', '', '1_16', 1), ('PARS', 319, 'pars', 'part', '', '1_16', 1), ('STVDEO', 320, 'studeo', 'to study, be eager for, be interested in (w/ dat.)', '', '1_16', 1), ('DOMVS', 321, 'domus', 'home', '', '1_17', 2), ('ALO', 322, 'alo', 'to feed, nourish', '', '1_17', 1), ('AMITTO', 323, 'amitto', 'to lose', '', '1_17', 1), ('CORNV', 324, 'cornu', 'horn', '', '1_17', 1), ('CORRIPIO', 325, 'corripio', 'to seize, occupy, engulf', '', '1_17', 1), ('CVRRO', 326, 'curro', 'to run', '', '1_17', 1), ('DEVASTO', 327, 'devasto', 'to lay waste', '', '1_17', 1), ('EXSTINGVO', 328, 'exstinguo', 'to extinguish', '', '1_17', 1), ('FACILE', 329, 'facile', 'easliy', '', '1_17', 1), ('IACIO', 330, 'iacio', 'to throw', '', '1_17', 1), ('IMPERATOR', 331, 'imperator', 'general, emperor', '', '1_17', 1), ('IMPETVS', 332, 'impetus', 'impetus, force, attack', '', '1_17', 1), ('INITIVM', 333, 'initium', 'beginning', '', '1_17', 1), ('IVSSVS', 334, 'iussus', 'order', '', '1_17', 1), ('LOCVS', 335, 'locus', 'place (sing.); passages of a book (m. pl.); geographical places(n. 
pl.)', '', '1_17', 1), ('MANVS/1', 336, 'manus', 'hand', '', '1_17', 1), ('MVRVS', 337, 'murus', 'wall', '', '1_17', 1), ('SINE', 338, 'sine', 'without (w/ abl.)', '', '1_17', 1), ('TEMPTO', 339, 'tempto', 'to try', '', '1_17', 1), ('TVMVLTVS', 340, 'tumultus', 'confusion', '', '1_17', 1), ('VENTVS', 341, 'ventus', 'wind', '', '1_17', 1), ('DVCO', 342, 'duco', 'to lead, take', '', '1_18', 2), ('ITA', 343, 'ita', 'yes', '', '1_18', 2), ('COLO/2', 344, 'colo', 'to worship, cultivate', '', '1_18', 1), ('CVM/3', 345, 'cum', 'when, after', '', '1_18', 2), ('DEA', 346, 'dea', 'goddess', '', '1_18', 1), ('DIES', 347, 'dies', 'day', '', '1_18', 1), ('DORMIO', 348, 'dormio', 'to sleep', '', '1_18', 1), ('EXCITO/1', 349, 'excito', 'to awaken, rouse, stir up', '', '1_18', 1), ('EXCLAMO', 350, 'exclamo', 'to exclaim', '', '1_18', 1), ('FACIES', 351, 'facies', 'face', '', '1_18', 1), ('FATVM', 352, 'fatum', 'fate, destiny', '', '1_18', 1), ('MARITVS/1', 353, 'maritus', 'husband', '', '1_18', 1), ('MERIDIES', 354, 'meridies', 'midday', '', '1_18', 2), ('MVLTVM/2', 355, 'multum', 'much', '', '1_18', 1), ('OCCVLTO', 356, 'occulto', 'to hide', '', '1_18', 1), ('PATER', 357, 'pater', 'father', '', '1_18', 2), ('POST/2', 358, 'post', 'after (w/ acc.)', '', '1_18', 1), ('QVAERO', 359, 'quaero', 'to look for, search', '', '1_18', 1), ('RES', 360, 'res', 'thing, matter', '', '1_18', 1), ('SI/2', 361, 'si', 'if', '', '1_18', 1), ('SOMNVS', 362, 'somnus', 'sleep', '', '1_18', 1), ('TAM', 363, 'tam', 'so ', '', '1_18', 1), ('VXOR', 364, 'uxor', 'wife', '', '1_18', 2), ('BARBA', 365, 'barba', 'beard', '', '1_19', 1), ('CARO/1', 366, 'caro', 'meat, flesh', '', '1_19', 1), ('CELERITER', 367, 'celeriter', 'swiftly', '', '1_19', 1), ('COQVO', 368, 'coquo', 'to cook', '', '1_19', 1), ('CRESCO', 369, 'cresco', 'to grow', '', '1_19', 1), ('FEROX', 370, 'ferox', 'fierce, ferocious', '', '1_19', 1), ('FORIS/2', 371, 'foris', 'outside, in the open', '', '1_19', 1), ('HERBA', 372, 'herba', 'plant, 
vegetation', '', '1_19', 1), ('HIC/1', 373, 'hic', 'this', '', '1_19', 1), ('INTER', 374, 'inter', 'between, among (w/ acc.)', '', '1_19', 1), ('PELLIS', 375, 'pellis', 'skin, hide', '', '1_19', 1), ('POSTQVAM', 376, 'postquam', 'after', '', '1_19', 1), ('PROELIVM', 377, 'proelium', 'battle, combat', '', '1_19', 1), ('SANO', 378, 'sano', 'to heal', '', '1_19', 1), ('SEDEO', 379, 'sedeo', 'to sit', '', '1_19', 1), ('TERO', 380, 'tero', 'to wear out, rub', '', '1_19', 1), ('TERRIBILIS', 381, 'terribilis', 'terrifying', '', '1_19', 1), ('VESTIMENTVM', 382, 'vestimentum', 'garment, clothes (pl.)', '', '1_19', 1), ('VIVO', 383, 'vivo', 'to live', '', '1_19', 1), ('VVLNERO', 384, 'vulnero', 'to wound', '', '1_19', 1), ('VVLNVS', 385, 'vulnus', 'wound', '', '1_19', 1), ('ABVNDO', 386, 'abundo', 'to abound with (w/ abl.)', '', '1_20', 1), ('ADOLESCENS/2', 387, 'adulescens', 'young man, young lady', '', '1_20', 1), ('AEQVVS', 388, 'aequus', 'even', '', '1_20', 1), ('COR', 389, 'cor', 'heart', '', '1_20', 1), ('DELECTO', 390, 'delecto', 'to delight, please', '', '1_20', 1), ('DIVINVS/2', 391, 'divinus', 'divine', '', '1_20', 1), ('EGEO', 392, 'egeo', 'to lack something (abl.)', '', '1_20', 1), ('FVR', 393, 'fur', 'thief', '', '1_20', 1), ('FVRTVM', 394, 'furtum', 'theft', '', '1_20', 1), ('HVMANVS', 395, 'humanus', 'human', '', '1_20', 1), ('ILLE', 396, 'ille', 'that', '', '1_20', 1), ('INIQVITAS', 397, 'iniquitas', 'injustice', '', '1_20', 1), ('LEX', 398, 'lex', 'law', '', '1_20', 1), ('LVDO', 399, 'ludo', 'to play', '', '1_20', 1), ('NOCTV', 400, 'noctu', 'during the night', '', '1_20', 1), ('PAENE', 401, 'paene', 'almost', '', '1_20', 1), ('PAVPER', 402, 'pauper', 'poor', '', '1_20', 1), ('PLENVS', 403, 'plenus', 'full of (w/ gen. 
or abl.)', '', '1_20', 1), ('POMVM', 404, 'pomum', 'fruit', '', '1_20', 1), ('PVNIO', 405, 'punio', 'to punish', '', '1_20', 1), ('ACCIPIO', 406, 'accipio', 'to accept, receive', '', '1_21', 1), ('ACCVSO', 407, 'accuso', 'to accuse someone (acc.) of something (gen.)', '', '1_21', 1), ('ALIENVS/2', 408, 'alienus', 'foreign to, inconsistent with (w/ a/ab and abl.)', '', '1_21', 1), ('AXIS', 409, 'axis', 'axle, axis', '', '1_21', 1), ('CIRCVM/2', 410, 'circum', 'around (w/ acc.)', '', '1_21', 1), ('CONSTANTIA', 411, 'constantia', 'constancy', '', '1_21', 1), ('DESCENDO', 412, 'descendo', 'to descend', '', '1_21', 1), ('DIVITIAE', 413, 'divitia', 'wealth, riches (pl.)', '', '1_21', 1), ('ERIPIO', 414, 'eripio', 'to snatch away', '', '1_21', 1), ('ERRO/2', 415, 'erro', 'to wander, make a mistake', '', '1_21', 1), ('EXTERNVS', 416, 'externus', 'outward, external', '', '1_21', 1), ('FORTVNA', 417, 'fortuna', 'fortune, the goddess Fortune', '', '1_21', 1), ('FVTVRVS', 418, 'futurus', 'about to be (from sum)', '', '1_21', 1), ('HONOR', 419, 'honor', 'honor, public office or distinction', '', '1_21', 1), ('MVTO/2', 420, 'muto', 'to change', '', '1_21', 1), ('POSSIDEO', 421, 'possideo', 'to possess', '', '1_21', 1), ('PROCERTO', 422, 'pro', 'for certain, for sure', '', '1_21', 1), ('RECIPIO', 423, 'recipio', 'to take back', '', '1_21', 2), ('REPREHENDO', 424, 'reprehendo', 'to blame, rebuke', '', '1_21', 1), ('ROTA', 425, 'rota', 'wheel', '', '1_21', 1), ('TOLLO', 426, 'tollo', 'to lift up, raise; to destroy', '', '1_21', 1), ('VERSO', 427, 'verso', 'to turn', '', '1_21', 1), ('VLLVS', 428, 'ullus', 'any', '', '1_21', 1), ('CONSILIVM', 429, 'consilium', 'plan, advice', '', '2_1', 2), ('MERIDIES', 430, 'meridies', 'midday', '', '2_1', 2), ('PROPE/2', 431, 'prope', 'near', '', '2_1', 2), ('ASPICIO', 432, 'aspicio', 'to look at, catch a glimpse of', '', '2_1', 1), ('ETET', 433, 'et', 'both…and…', '', '2_1', 1), ('GENS', 434, 'gens', 'tribe, population', '', '2_1', 1), ('GIGNO', 
435, 'gigno', 'to give birth, produce', '', '2_1', 1), ('HODIE', 436, 'hodie', 'today', '', '2_1', 1), ('INCOLA', 437, 'incola', 'inhabitant', '', '2_1', 1), ('INSVLA', 438, 'insula', 'island', '', '2_1', 2), ('INVENIO', 439, 'invenio', 'to come upon, find', '', '2_1', 1), ('MOS', 440, 'mos', 'custom, habit; morals (pl.)', '', '2_1', 1), ('MVNDVS/1', 441, 'mundus', 'world', '', '2_1', 1), ('NE/4', 442, 'ne', 'that not, not to, lest ', '', '2_1', 3), ('OCCVPO/2', 443, 'occupo', 'to occupy', '', '2_1', 1), ('ORTVS', 444, 'ortus', 'origin, beginning, raising', '', '2_1', 1), ('PISCIS', 445, 'piscis', 'a fish', '', '2_1', 1), ('PROCVL', 446, 'procul', 'far, far away', '', '2_1', 1), ('PROMITTO', 447, 'promitto', 'to promise', '', '2_1', 1), ('SEPTENTRIONALIS', 448, 'septentrionalis', 'northern', '', '2_1', 1), ('SITVS/2', 449, 'situs', 'located, situated', '', '2_1', 1), ('SOL', 450, 'sol', 'sun', '', '2_1', 1), ('VTINAM', 451, 'utinam', 'if only', '', '2_1', 2), ('GERO', 452, 'gero', 'to carry; to behave (w/ se)', '', '2_2', 2), ('ODIVM', 453, 'odium', 'hatred', '', '2_2', 2), ('VALEO', 454, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_2', 3), ('ALTVS', 455, 'altus', 'tall, deep', '', '2_2', 1), ('ANNVS', 456, 'annus', 'year', '', '2_2', 1), ('ARGENTVM', 457, 'argentum', 'silver', '', '2_2', 1), ('AVRVM', 458, 'aurum', 'gold', '', '2_2', 1), ('BREVIS', 459, 'brevis', 'short', '', '2_2', 1), ('CLARVS', 460, 'clarus', 'clear, distinguished', '', '2_2', 1), ('CVSTOS', 461, 'custos', 'guard', '', '2_2', 1), ('EQVES', 462, 'eques', 'horseman', '', '2_2', 1), ('FINIS', 463, 'finis', 'end', '', '2_2', 1), ('GRAVIS', 464, 'gravis', 'serious, heavy', '', '2_2', 1), ('INTERDVM', 465, 'interdum', 'sometimes', '', '2_2', 1), ('LIS', 466, 'lis', 'dispute, quarrel', '', '2_2', 1), ('MANE/2', 467, 'mane', 'in the morning', '', '2_2', 1), ('ODIOHABEO', 468, 'odio', 'to hate somebody', '', '2_2', 1), ('SINO', 469, 'sino', 'to allow somebody (acc.) 
to do something (inf.)', '', '2_2', 1), ('VEL/1', 470, 'vel', 'or', '', '2_2', 1), ('VESTIS', 471, 'vestis', 'clothes, attire', '', '2_2', 1), ('VOX', 472, 'vox', 'voice', '', '2_2', 1), ('VT/4', 473, 'ut', 'that, to, in order to, so that', '', '2_2', 4), ('VVLTVS', 474, 'vultus', 'face', '', '2_2', 1), ('VXOR', 475, 'uxor', 'wife', '', '2_3', 2), ('AT/2', 476, 'at', 'but', '', '2_3', 1), ('CONIVX', 477, 'coniunx', 'spouse', '', '2_3', 1), ('DISCIPVLA', 478, 'discipula', 'student', '', '2_3', 1), ('DISCO', 479, 'disco', 'to learn', '', '2_3', 1), ('DOMINVS', 480, 'dominus', 'master, lord', '', '2_3', 1), ('FAMA', 481, 'fama', 'fame, name, reputation', '', '2_3', 1), ('FRATER', 482, 'frater', 'brother', '', '2_3', 1), ('IMPROBVS', 483, 'improbus', 'wicked, bad', '', '2_3', 1), ('IVNGO', 484, 'iungo', 'to join', '', '2_3', 1), ('MAGISTER', 485, 'magister', 'teacher', '', '2_3', 1), ('MATRIMONIVM', 486, 'matrimonium', 'marriage', '', '2_3', 1), ('NE/4', 487, 'ne', 'that not, not to, lest ', '', '2_3', 3), ('NVSQVAM', 488, 'nusquam', 'nowhere', '', '2_3', 1), ('PARIO/2', 489, 'pario', 'to give birth to', '', '2_3', 1), ('PERDO', 490, 'perdo', 'to lose, waste', '', '2_3', 1), ('SALVS', 491, 'salus', 'health, welfare', '', '2_3', 1), ('SALVTEMDICERE', 492, 'salutem', 'to greet (customary opening to letter) ', '', '2_3', 1), ('SCRIBO', 493, 'scribo', 'to write', '', '2_3', 1), ('VT/4', 494, 'ut', 'that, to, in order to, so that', '', '2_3', 4), ('VXOREMDEDVCERE', 495, 'uxorem', 'to marry a woman, to take as a wife', '', '2_3', 1), ('NEC/2', 496, 'nec', 'nor; and not', '', '2_4', 2), ('RECIPIO', 497, 'recipio', 'to take back', '', '2_4', 2), ('AGMEN', 498, 'agmen', 'marching column', '', '2_4', 1), ('APERIO', 499, 'aperio', 'to open', '', '2_4', 1), ('COEPIO', 500, 'coepi', 'to begin (w/ inf.)', '', '2_4', 1), ('DEFENDO', 501, 'defendo', 'to defend', '', '2_4', 1), ('EDO/1', 502, 'edo', 'to produce, give forth', '', '2_4', 1), ('EXTRA/2', 503, 'extra', 'outside of (w/ 
acc.)', '', '2_4', 1), ('FVRO', 504, 'furo', 'to rage, be insane', '', '2_4', 1), ('INGENS', 505, 'ingens', 'huge', '', '2_4', 1), ('INVADO/2', 506, 'invado', 'to burst in', '', '2_4', 1), ('LIGNEVS', 507, 'ligneus', 'made of wood', '', '2_4', 1), ('NEQVENEC', 508, 'neque', 'neither..nor…', '', '2_4', 1), ('PARCO', 509, 'parco', 'to spare somebody/thing (w/ dat.)', '', '2_4', 1), ('PONS', 510, 'pons', 'bridge', '', '2_4', 1), ('PORTA', 511, 'porta', 'gate', '', '2_4', 1), ('PRIMO', 512, 'primo', 'at first', '', '2_4', 1), ('QVAM/1', 513, 'quam', 'than (w/ comp. words)', '', '2_4', 2), ('QVANTVS/1', 514, 'quantus', 'how great, how much (inter. or rel. adj.)', '', '2_4', 1), ('RESISTO', 515, 'resisto', 'to resist (w/ dat.)', '', '2_4', 1), ('SIMVL/1', 516, 'simul', 'at the same time', '', '2_4', 1), ('TVTVS', 517, 'tutus', 'safe', '', '2_4', 1), ('VACVVS', 518, 'vacuus', 'empty of (w/ abl.)', '', '2_4', 1), ('VALEO', 519, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_4', 3), ('VICTOR', 520, 'victor', 'victor', '', '2_4', 1), ('VTINAM', 521, 'utinam', 'if only', '', '2_4', 2), ('BIBO/2', 522, 'bibo', 'to drink', '', '2_5', 1), ('CARMEN/1', 523, 'carmen', 'song, poem', '', '2_5', 1), ('CIBVS', 524, 'cibus', 'food', '', '2_5', 1), ('DVLCIS', 525, 'dulcis', 'sweet', '', '2_5', 1), ('FLVMEN', 526, 'flumen', 'river', '', '2_5', 1), ('IMMEMOR', 527, 'immemor', 'forgetful of (w/ gen.)', '', '2_5', 1), ('IOCVS', 528, 'iocus', 'joke', '', '2_5', 1), ('IVVENTVS', 529, 'iuventus', 'youth', '', '2_5', 1), ('LEVIS/1', 530, 'levis', 'light', '', '2_5', 1), ('MENS', 531, 'mens', 'mind, spirit', '', '2_5', 1), ('NE/4', 532, 'ne', 'that not, not to, lest ', '', '2_5', 3), ('ORO', 533, 'oro', 'to ask, entreat', '', '2_5', 1), ('PLACEO', 534, 'placeo', 'to please, be agreeable to somebody', '', '2_5', 1), ('PROXIMVS/2', 535, 'proximus', 'nearest', '', '2_5', 1), ('TAMQVAM/2', 536, 'tam', 'so…as…', '', '2_5', 1), ('VEHEMENS', 537, 'vehemens', 'violent, vehement', '', 
'2_5', 1), ('VETVS', 538, 'vetus', 'old', '', '2_5', 1), ('VINVM', 539, 'vinum', 'wine', '', '2_5', 1), ('VIRTVS', 540, 'virtus', 'courage, virtue', '', '2_5', 1), ('VITIVM', 541, 'vitium', 'vice', '', '2_5', 1), ('VT/4', 542, 'ut', 'that, to, in order to, so that', '', '2_5', 4), ('PATER', 543, 'pater', 'father', '', '2_6', 2), ('DECIPIO', 544, 'decipio', 'to deceive', '', '2_6', 1), ('DILIGO/3', 545, 'diligo', 'to love, esteem highly', '', '2_6', 1), ('DVO', 546, 'duo', 'two', '', '2_6', 1), ('EXERCITVS/1', 547, 'exercitus', 'army', '', '2_6', 1), ('FIDELIS/2', 548, 'fidelis', 'faithful, loyal', '', '2_6', 1), ('HERES', 549, 'heres', 'heir', '', '2_6', 1), ('IMPERIVM', 550, 'imperium', 'rule, empire, power', '', '2_6', 1), ('INOPIA', 551, 'inopia', 'helplessness, want', '', '2_6', 1), ('LAVDO', 552, 'laudo', 'to praise', '', '2_6', 1), ('NECESSEEST', 553, 'necesse', 'it is necessary for someone (dat.) to do something (inf.)', '', '2_6', 1), ('NEMO', 554, 'nemo', 'no one', '', '2_6', 1), ('PAVLO', 555, 'paulo', 'a little bit, to a small extent', '', '2_6', 1), ('QVAM/1', 556, 'quam', 'than (w/ comp. 
words)', '', '2_6', 2), ('QVANTVM/3', 557, 'quantum', 'to what extent, how much', '', '2_6', 1), ('RESTITVO', 558, 'restituo', 'to restore', '', '2_6', 1), ('SATIS/2', 559, 'satis', 'enough, sufficiently', '', '2_6', 1), ('SECVNDVS/1', 560, 'secundus', 'second', '', '2_6', 1), ('TERTIVS', 561, 'tertius', 'third', '', '2_6', 1), ('TRES', 562, 'tres', 'three', '', '2_6', 1), ('TRISTIS', 563, 'tristis', 'sad', '', '2_6', 1), ('VEHEMENTER', 564, 'vehementer', 'strongly, vehemently', '', '2_6', 1), ('NOLO', 565, 'nolo', 'not to want, to be unwilling', '', '2_7', 2), ('AETAS', 566, 'aetas', 'age', '', '2_7', 1), ('FIDES/2', 567, 'fides', 'faith', '', '2_7', 1), ('FVNDO/2', 568, 'fundo', 'to pour', '', '2_7', 1), ('GLORIA', 569, 'gloria', 'glory', '', '2_7', 1), ('LIBERTAS', 570, 'libertas', 'freedom', '', '2_7', 1), ('LVMEN', 571, 'lumen', 'light', '', '2_7', 1), ('MALO', 572, 'malo', 'to prefer', '', '2_7', 1), ('ORNATVS/1', 573, 'ornatus', 'adorned, ornate, elaborate', '', '2_7', 1), ('OTIVM', 574, 'otium', 'leisure, free time', '', '2_7', 1), ('POTENS', 575, 'potens', 'powerful', '', '2_7', 1), ('PVBLICVS/2', 576, 'publicus', 'common', '', '2_7', 1), ('QVALIS/1', 577, 'qualis', 'what sort of? (inter. adj.)', '', '2_7', 1), ('RESPVBLICA', 578, 'res', 'state', '', '2_7', 1), ('STVDIOSVS', 579, 'studiosus', 'fond of (w/ gen.)', '', '2_7', 1), ('TAMQVAM/1', 580, 'tamquam', 'as', '', '2_7', 1), ('TOT', 581, 'tot', 'so many', '', '2_7', 1), ('TRAHO', 582, 'traho', 'to drag, draw', '', '2_7', 1), ('VBI/1', 583, 'ubi', 'where? (inter. 
adv)', '', '2_7', 1), ('VIX', 584, 'vix', 'hardly', '', '2_7', 1), ('VNVS', 585, 'unus', 'one', '', '2_7', 1), ('VOLO/3', 586, 'volo', 'to want', '', '2_7', 1), ('VTILIS', 587, 'utilis', 'useful', '', '2_7', 1), ('ADHVC', 588, 'adhuc', 'still, up to this time', '', '2_8', 1), ('ANTIQVVS', 589, 'antiquus', 'ancient', '', '2_8', 1), ('ARS', 590, 'ars', 'science, art, skill', '', '2_8', 1), ('DOMINOR', 591, 'dominor', 'to dominate, rule', '', '2_8', 1), ('HORTOR', 592, 'hortor', 'to exhort, urge', '', '2_8', 1), ('LATINE', 593, 'Latine', 'in Latin', '', '2_8', 1), ('LATINVS/A', 594, 'Latinus', 'Latin, pertaining to Latin', '', '2_8', 1), ('LINGVA', 595, 'lingua', 'language; tongue', '', '2_8', 1), ('LOQVOR', 596, 'loquor', 'to speak', '', '2_8', 1), ('MAGIS/2', 597, 'magis', 'more', '', '2_8', 1), ('MAIOR', 598, 'maior', 'bigger; greater', '', '2_8', 1), ('MAXIMVS', 599, 'maximus', 'greatest', '', '2_8', 1), ('MELIOR', 600, 'melior', 'better', '', '2_8', 1), ('MINIMVS', 601, 'minimus', 'smallest', '', '2_8', 1), ('MINVS', 602, 'minus', 'less', '', '2_8', 2), ('OPTIMVS', 603, 'optimus', 'best', '', '2_8', 1), ('PARTIOR', 604, 'partior', 'to divide, distribute', '', '2_8', 1), ('PATIOR', 605, 'patior', 'to endure, tolerate, suffer', '', '2_8', 1), ('PEIOR', 606, 'peior', 'worse', '', '2_8', 1), ('PESSIMVS', 607, 'pessimus', 'worst', '', '2_8', 1), ('PLVRIMVS', 608, 'plurimus', 'most', '', '2_8', 1), ('PLVS', 609, 'plus', 'more', '', '2_8', 1), ('SEQVOR', 610, 'sequor', 'to follow', '', '2_8', 1), ('VEREOR', 611, 'vereor', 'to fear, respect', '', '2_8', 1), ('ADDO', 612, 'addo', 'to add', '', '2_9', 1), ('AVRIS', 613, 'auris', 'ear', '', '2_9', 1), ('CONOR', 614, 'conor', 'to try', '', '2_9', 1), ('DEMITTO', 615, 'demitto', 'to send down', '', '2_9', 1), ('DISSIMILIS', 616, 'dissimilis', 'dissimilar ', '', '2_9', 1), ('FACILIS', 617, 'facilis', 'easy', '', '2_9', 1), ('FERO', 618, 'fero', 'to carry, bear', '', '2_9', 1), ('FIO', 619, 'fio', 'to be made, become; 
(impersonally) to happen', '', '2_9', 1), ('FRIGVS', 620, 'frigus', 'cold', '', '2_9', 1), ('GENVS/1', 621, 'genus', 'kind', '', '2_9', 1), ('GLACIES', 622, 'glacies', 'ice', '', '2_9', 1), ('GRACILIS', 623, 'gracilis', 'slender', '', '2_9', 1), ('HVMILIS', 624, 'humilis', 'low, humble', '', '2_9', 1), ('ITER', 625, 'iter', 'road, trip ', '', '2_9', 1), ('LABOR/2', 626, 'labor', 'to slide, slip, glide down', '', '2_9', 1), ('MODEROR', 627, 'moderor', 'to manage, direct, guide', '', '2_9', 1), ('NIX', 628, 'nix', 'snow', '', '2_9', 1), ('ONVS', 629, 'onus', 'weight, burden', '', '2_9', 1), ('PERVENIO', 630, 'pervenio', 'to arrive', '', '2_9', 1), ('PROGREDIOR', 631, 'progredior', 'to go forward, proceed', '', '2_9', 1), ('QVOTIENS/2', 632, 'quotiens', 'as often as', '', '2_9', 1), ('SIMVLAC/2', 633, 'simulac', 'as soon as', '', '2_9', 1), ('SVVS', 634, 'suus', 'his, her, its, their', '', '2_10', 2), ('AEDES', 635, 'aedes', 'temple; pl. dwelling, house', '', '2_10', 1), ('EO/1', 636, 'eo', 'to go ', '', '2_10', 1), ('IVCVNDVS', 637, 'iucundus', 'pleasant, nice', '', '2_10', 1), ('LABOR/1', 638, 'labor', 'labor, toil', '', '2_10', 1), ('LAEDO', 639, 'laedo', 'to harm', '', '2_10', 1), ('LIBER/2', 640, 'liber', 'free', '', '2_10', 1), ('LVCRVM', 641, 'lucrum', 'profit, gain', '', '2_10', 1), ('MARITIMVS', 642, 'maritimus', 'maritime', '', '2_10', 1), ('MODVS', 643, 'modus', 'way, method, manner', '', '2_10', 1), ('PAVLISPER', 644, 'paulisper', 'for a little while', '', '2_10', 1), ('PECVNIA', 645, 'pecunia', 'money', '', '2_10', 1), ('PLACIDVS', 646, 'placidus', 'peaceful, calm', '', '2_10', 1), ('POTIVS', 647, 'potius', 'rather', '', '2_10', 1), ('PROSPER', 648, 'prosper', 'fortunate, prosperous', '', '2_10', 1), ('REDDO', 649, 'reddo', 'to give back', '', '2_10', 1), ('SARCINA', 650, 'sarcina', 'burden, baggage', '', '2_10', 1), ('SCELESTVS', 651, 'scelestus', 'wicked', '', '2_10', 1), ('SEMEL', 652, 'semel', 'once', '', '2_10', 1), ('SERENVS', 653, 'serenus', 'calm, 
clear', '', '2_10', 1), ('PARVM/2', 654, 'parum', 'too little', '', '2_11', 2), ('ALTER', 655, 'alter', 'the other (of two)', '', '2_11', 1), ('GEMMA', 656, 'gemma', 'gem, precious stone', '', '2_11', 1), ('LEGATVS', 657, 'legatus', 'ambassador', '', '2_11', 1), ('MAGNIHABEO', 658, 'magni', 'to esteem a lot', '', '2_11', 1), ('MINVS', 659, 'minus', 'less', '', '2_11', 2), ('NESCIO', 660, 'nescio', 'not to know', '', '2_11', 1), ('NEVTER', 661, 'neuter', 'neither, none (of two)', '', '2_11', 1), ('NVLLVS', 662, 'nullus', 'none', '', '2_11', 1), ('OPERAEPRETIVMEST', 663, 'operae', 'it is worthwhile', '', '2_11', 1), ('POPVLVS/1', 664, 'populus', 'a people, populace', '', '2_11', 1), ('QVOMODO/1', 665, 'quomodo', 'how', '', '2_11', 1), ('SALVTO', 666, 'saluto', 'to greet ', '', '2_11', 1), ('SERVVS/1', 667, 'servus', 'slave, servant', '', '2_11', 1), ('SOLVS', 668, 'solus', 'alone, only', '', '2_11', 1), ('SPECTO', 669, 'specto', 'to watch', '', '2_11', 1), ('TACEO', 670, 'taceo', 'to be silent, keep quiet', '', '2_11', 1), ('TOTVS', 671, 'totus', 'whole, entire', '', '2_11', 1), ('TVRPIS', 672, 'turpis', 'shameful, disgraceful', '', '2_11', 1), ('VTER/4', 673, 'uter', 'who, which (of two)?', '', '2_11', 1), ('VTOR', 674, 'utor', 'to use (w/ abl.)', '', '2_11', 1), ('CVM/3', 675, 'cum', 'when, after', '', '2_12', 2), ('INQVIO', 676, 'inquam', 'to say (used with direct speech)', '', '2_12', 3), ('TAMEN', 677, 'tamen', 'however', '', '2_12', 2), ('CARVS', 678, 'carus', 'dear', '', '2_12', 1), ('INSVLA', 679, 'insula', 'island', '', '2_12', 2), ('MORIOR', 680, 'morior', 'to die', '', '2_12', 1), ('NIMIS', 681, 'nimis', 'too much', '', '2_12', 1), ('NISI', 682, 'nisi', 'if not, unless', '', '2_12', 1), ('OFFICIVM', 683, 'officium', 'duty', '', '2_12', 1), ('ORBIS', 684, 'orbis', 'circle', '', '2_12', 1), ('ORBISTERRARVM', 685, 'orbis', 'the earth, the world', '', '2_12', 1), ('PROBO', 686, 'probo', 'to approve ', '', '2_12', 1), ('QVAMQVAM/2', 687, 'quamquam', 'although', 
'', '2_12', 1), ('QVAMVIS/1', 688, 'quamvis', 'although', '', '2_12', 1), ('QVIA', 689, 'quia', 'because', '', '2_12', 1), ('QVIDEM', 690, 'quidem', 'indeed', '', '2_12', 1), ('QVOD/1', 691, 'quod', 'because', '', '2_12', 1), ('SENTENTIA', 692, 'sententia', 'opinion, point of view', '', '2_12', 1), ('SORS', 693, 'sors', 'lot', '', '2_12', 1), ('SPERO', 694, 'spero', 'to hope', '', '2_12', 1), ('SPES', 695, 'spes', 'hope', '', '2_12', 1), ('ATQVE/1', 696, 'atque', 'as', '', '2_13', 2), ('ABSENS', 697, 'absens', 'away, absent', '', '2_13', 1), ('ABSVM/1', 698, 'absum', 'to be away', '', '2_13', 1), ('BENEVOLENTIA', 699, 'benevolentia', 'good will', '', '2_13', 1), ('DECLARO', 700, 'declaro', 'to demonstrate, show, make known, reveal', '', '2_13', 1), ('IDEM', 701, 'idem', 'the same', '', '2_13', 1), ('IPSE', 702, 'ipse', 'self', '', '2_13', 1), ('IRASCOR', 703, 'irascor', 'to be angry at (w/ dat.)', '', '2_13', 1), ('ISTE', 704, 'iste', 'that (of yours)', '', '2_13', 1), ('MIROR', 705, 'miror', 'to marvel, be surprised at', '', '2_13', 1), ('MVLTITVDO', 706, 'multitudo', 'crowd, throng', '', '2_13', 1), ('NEGO', 707, 'nego', 'to deny ', '', '2_13', 1), ('NVMERO/1', 708, 'numero', 'to number, count among', '', '2_13', 1), ('OFFENDO', 709, 'offendo', ']to happen upon, offend', '', '2_13', 1), ('REDEO/1', 710, 'redeo', 'to go back, return', '', '2_13', 1), ('REFERO', 711, 'refero', 'to carry back, report', '', '2_13', 1), ('SOCIVS/1', 712, 'socius', 'associate, partner, ally', '', '2_13', 1), ('TALIS', 713, 'talis', 'such a', '', '2_13', 1), ('TVRRIS', 714, 'turris', 'tower', '', '2_13', 1), ('VENIA', 715, 'venia', 'pardon, indulgence, forgiveness', '', '2_13', 1), ('VERSOR', 716, 'versor', 'to be situated in, be occupied in ', '', '2_13', 1), ('VIRGA', 717, 'virga', 'twig, stick', '', '2_13', 1), ('VOLVNTAS', 718, 'voluntas', 'will', '', '2_13', 1), ('AFFIRMO', 719, 'affirmo', 'to assert, maintain', '', '2_14', 1), ('CIRCVMEO/1', 720, 'circumeo', 'to go around', '', 
'2_14', 1), ('CONTINEO', 721, 'contineo', 'to hold, keep together, contain', '', '2_14', 1), ('COTIDIANVS', 722, 'cottidianus', 'of every day, daily', '', '2_14', 1), ('ELEMENTVM', 723, 'elementum', 'element', '', '2_14', 1), ('ERGO/2', 724, 'ergo', 'therefore', '', '2_14', 1), ('GRAVITAS', 725, 'gravitas', 'weight, gravity', '', '2_14', 1), ('IMMENSVS', 726, 'immensus', 'immeasurable, immense, endless', '', '2_14', 1), ('INFINITVS', 727, 'infinitus', 'boundless, unlimited', '', '2_14', 1), ('MAXIME', 728, 'maxime', 'most', '', '2_14', 1), ('MEDIVS', 729, 'medius', 'middle', '', '2_14', 1), ('MOTVS', 730, 'motus', 'motion, movement', '', '2_14', 1), ('MVLTO/2', 731, 'multo', 'by much', '', '2_14', 1), ('NATVRA', 732, 'natura', 'nature', '', '2_14', 1), ('NECESSARIO', 733, 'necessario', 'necessarily', '', '2_14', 1), ('PERPERAM', 734, 'perperam', 'wrongly, incorrectly', '', '2_14', 1), ('PONDVS', 735, 'pondus', 'weight', '', '2_14', 1), ('PRAESERTIM', 736, 'praesertim', 'especially', '', '2_14', 1), ('QVIES', 737, 'quies', 'rest, repose', '', '2_14', 1), ('VNDIQVE', 738, 'undique', 'from all parts, from everywhere', '', '2_14', 1), ('VOLVO', 739, 'volvo', 'to turn round', '', '2_14', 1), ('VT/4', 740, 'ut', 'that, to, in order to, so that', '', '2_14', 4), ('ANIMADVERTO', 741, 'animadverto', 'to notice', '', '2_15', 1), ('APPROPINQVO', 742, 'appropinquo', 'to approach (w/ dat or ad + acc.)', '', '2_15', 1), ('CERNO', 743, 'cerno', 'to see, distinguish with the eyes', '', '2_15', 1), ('CIRCA/2', 744, 'circa', 'around (w/ acc.)', '', '2_15', 1), ('CLAMO', 745, 'clamo', 'to shout, scream', '', '2_15', 1), ('FINGO', 746, 'fingo', 'to imagine, form in the mind', '', '2_15', 1), ('IMPINGO', 747, 'impingo', 'to push, strike, inflict', '', '2_15', 1), ('INFLIGO', 748, 'infligo', 'to strike on or against, inflict', '', '2_15', 1), ('ITERVM', 749, 'iterum', 'again', '', '2_15', 1), ('OPPIDVM', 750, 'oppidum', 'town', '', '2_15', 1), ('PERCVTIO', 751, 'percutio', 'to strike 
through ', '', '2_15', 1), ('PRAEDITVS', 752, 'praeditus', 'endowed with, possessed of (w/ abl.)', '', '2_15', 1), ('REPELLO', 753, 'repello', 'to push back, thrust back', '', '2_15', 1), ('RIDEO', 754, 'rideo', 'to laugh', '', '2_15', 1), ('RVMPO', 755, 'rumpo', 'to break, tear', '', '2_15', 1), ('SEDES', 756, 'sedes', 'seat, abode', '', '2_15', 1), ('SIC', 757, 'sic', 'in such a way', '', '2_15', 1), ('SIDVS', 758, 'sidus', 'constellation', '', '2_15', 1), ('TELVM', 759, 'telum', 'spear, javelin', '', '2_15', 1), ('VEHO', 760, 'veho', 'to drive, carry', '', '2_15', 1)]
section_list ={'1.1': 'start', '1.2': '1.1', '1.3': '1.2', '1.4': '1.3', '1.5': '1.4', '1.6': '1.5', '1.7': '1.6', '1.8': '1.7', '1.9': '1.8', '1.10': '1.9', '1.11': '1.10', '1.12': '1.11', '1.13': '1.12', '1.14': '1.13', '1.15': '1.14', '1.16': '1.15', '1.17': '1.16', '1.18': '1.17', '1.19': '1.18', '1.20': '1.19', '1.21': '1.20', '2.1': '1.21', '2.2': '2.1', '2.3': '2.2', '2.4': '2.3', '2.5': '2.4', '2.6': '2.5', '2.7': '2.6', '2.8': '2.7', '2.9': '2.8', '2.10': '2.9', '2.11': '2.10', '2.12': '2.11', '2.13': '2.12', '2.14': '2.13', '2.15': '2.14', 'end': '2.15', 'start': 'start'}
title = "Latin for the New Millennium Vols 1 and 2 (Tunberg-Minkova)"
section_level = 2
language = "Latin"
book = text.Text(title, section_words, the_text, section_list, section_level, language, True, False)
|
normal
|
{
"blob_id": "8a0c0f5ca6a965e07f59a6c88d4dd335310cbdfc",
"index": 9530,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnan = ''\nsection_words = {'start': -1, '1.1': 17, '1.2': 38, '1.3': 55, '1.4': 76,\n '1.5': 95, '1.6': 114, '1.7': 133, '1.8': 151, '1.9': 170, '1.10': 190,\n '1.11': 209, '1.12': 233, '1.13': 257, '1.14': 277, '1.15': 299, '1.16':\n 320, '1.17': 341, '1.18': 364, '1.19': 385, '1.20': 405, '1.21': 428,\n '2.1': 451, '2.2': 474, '2.3': 495, '2.4': 521, '2.5': 542, '2.6': 564,\n '2.7': 587, '2.8': 611, '2.9': 633, '2.10': 653, '2.11': 674, '2.12': \n 695, '2.13': 718, '2.14': 740, '2.15': 760, 'end': -2}\nthe_text = [('AGRICOLA', 0, 'agricola', 'farmer', '', '1_1', 1), ('AMBVLO',\n 1, 'ambulo', 'to walk', '', '1_1', 2), ('AMO', 2, 'amo', 'to love', '',\n '1_1', 2), ('AQVA', 3, 'aqua', 'water', '', '1_1', 1), ('ATHLETA', 4,\n 'athleta', 'athlete', '', '1_1', 1), ('BENE', 5, 'bene', 'well', '',\n '1_1', 1), ('CVRO', 6, 'curo', 'to take care for/of', '', '1_1', 2), (\n 'ET/2', 7, 'et', 'and', '', '1_1', 1), ('FILIA', 8, 'filia', 'daughter',\n '', '1_1', 1), ('ITAQVE', 9, 'itaque', 'and so', '', '1_1', 1), ('LVPA',\n 10, 'lupa', 'she–wolf', '', '1_1', 1), ('NAVTA', 11, 'nauta', 'sailor',\n '', '1_1', 1), ('POETA', 12, 'poeta', 'poet', '', '1_1', 1), ('POSTEA',\n 13, 'postea', 'afterwards', '', '1_1', 1), ('PVELLA', 14, 'puella',\n 'girl', '', '1_1', 1), ('ROMA/N', 15, 'Roma', 'Rome', '', '1_1', 1), (\n 'SVM/1', 16, 'sum', 'to be', '', '1_1', 3), ('TERRA', 17, 'terra',\n 'land', '', '1_1', 1), ('AMBVLO', 18, 'ambulo', 'to walk', '', '1_2', 2\n ), ('AMO', 19, 'amo', 'to love', '', '1_2', 2), ('CVRO', 20, 'curo',\n 'to take care for/of', '', '1_2', 2), ('SVM/1', 21, 'sum', 'to be', '',\n '1_2', 3), ('DEBEO', 22, 'debeo', 'ought, must, should; to owe', '',\n '1_2', 1), ('DIV', 23, 'diu', 'for a long time', '', '1_2', 1), ('EGO',\n 24, 'ego', 'I', '', '1_2', 3), ('EXSPECTO', 25, 'exspecto',\n 'to wait for, await, expect', '', '1_2', 1), ('FABVLA/1', 26, 'fabula',\n 'story', '', '1_2', 1), ('FORMA', 27, 'forma', 'form,appearance', '',\n '1_2', 1), 
('HABEO', 28, 'habeo', 'to have', '', '1_2', 1), ('HABITO', \n 29, 'habito', 'to live, dwell', '', '1_2', 1), ('NARRO', 30, 'narro',\n 'to tell', '', '1_2', 1), ('NON', 31, 'non', 'not', '', '1_2', 1), (\n 'NVNC', 32, 'nunc', 'now', '', '1_2', 1), ('PARO/2', 33, 'paro',\n 'to prepare, get ready, design', '', '1_2', 2), ('PATRIA', 34, 'patria',\n 'fatherland', '', '1_2', 1), ('TENEO', 35, 'teneo', 'to hold', '',\n '1_2', 1), ('TV', 36, 'tu', 'you', '', '1_2', 3), ('VIDEO', 37, 'video',\n 'to see', '', '1_2', 1), ('VOCO', 38, 'voco', 'to call', '', '1_2', 1),\n ('EGO', 39, 'ego', 'I', '', '1_3', 3), ('TV', 40, 'tu', 'you', '',\n '1_3', 3), ('AGER', 41, 'ager', 'field', '', '1_3', 1), ('AMICVS/1', 42,\n 'amicus', 'friend', '', '1_3', 1), ('ANIMVS', 43, 'animus',\n 'spirit, soul, mind', '', '1_3', 1), ('CASA', 44, 'casa',\n 'little house, cottage', '', '1_3', 1), ('CVM/2', 45, 'cum',\n 'with (w/ abl.)', '', '1_3', 1), ('DEINDE', 46, 'deinde', 'then', '',\n '1_3', 1), ('DOMVS', 47, 'domus', 'home', '', '1_3', 2), ('FILIVS', 48,\n 'filius', 'son', '', '1_3', 1), ('IN', 49, 'in',\n 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_3', 2), ('PVER',\n 50, 'puer', 'boy', '', '1_3', 1), ('RIVVS', 51, 'rivus',\n 'brook, stream', '', '1_3', 1), ('TIMEO', 52, 'timeo',\n 'to fear, to be afraid', '', '1_3', 1), ('VALDE', 53, 'valde',\n 'very, exceedingly', '', '1_3', 1), ('VIA', 54, 'via', 'road', '',\n '1_3', 1), ('VIR', 55, 'vir', 'man', '', '1_3', 1), ('IN', 56, 'in',\n 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_4', 2), ('AD/2',\n 57, 'ad', 'into, towards, to (w/ acc.)', '', '1_4', 1), ('ARMATVS/2', \n 58, 'armatus', 'armed', '', '1_4', 1), ('AVTEM', 59, 'autem', 'however',\n '', '1_4', 1), ('BELLVM', 60, 'bellum', 'war', '', '1_4', 1), ('BONVS',\n 61, 'bonus', 'good', '', '1_4', 1), ('CASTRA/2', 62, 'castra', 'camp',\n '', '1_4', 1), ('DO', 63, 'do', 'to give', '', '1_4', 1), ('DOLVS', 64,\n 'dolus', 'trickery, deception', '', '1_4', 1), ('EX', 65, 'e',\n 
'from, out of (w/ abl.)', '', '1_4', 1), ('INTRO/2', 66, 'intro',\n 'to enter', '', '1_4', 1), ('IVBEO', 67, 'iubeo',\n 'to order somebody (acc.) to do something (inf.)', '', '1_4', 1), (\n 'IVSTVS', 68, 'iustus', 'legitimate, open, just', '', '1_4', 1), (\n 'MAGNVS', 69, 'magnus', 'large, great, important', '', '1_4', 1), (\n 'MALVS/3', 70, 'malus', 'bad', '', '1_4', 1), ('PRAECLARVS', 71,\n 'praeclarus', 'famous, distinguished', '', '1_4', 1), ('PRAEMIVM', 72,\n 'praemium', 'reward', '', '1_4', 1), ('ROMANVS/A', 73, 'Romanus',\n 'Roman; the Romans (pl.)', '', '1_4', 1), ('SED', 74, 'sed', 'but', '',\n '1_4', 1), ('VENENVM', 75, 'venenum', 'poison', '', '1_4', 1), (\n 'VINCVLVM', 76, 'vinculum', 'chain, fetter', '', '1_4', 1), ('PARO/2', \n 77, 'paro', 'to prepare, get ready, design', '', '1_5', 2), ('AB', 78,\n 'a', 'by, from (w/ abl.)', '', '1_5', 1), ('AVXILIVM', 79, 'auxilium',\n 'help', '', '1_5', 1), ('COGITO', 80, 'cogito', 'to think', '', '1_5', \n 1), ('CONSILIVM', 81, 'consilium', 'plan, advice', '', '1_5', 2), ('DE',\n 82, 'de', 'about, concerning, down from (w/ abl.)', '', '1_5', 1), (\n 'DOLEO', 83, 'doleo', 'to feel pain, to be hurt', '', '1_5', 1), (\n 'EPISTOLA', 84, 'epistula', 'letter', '', '1_5', 1), ('FAMILIA', 85,\n 'familia', 'family, household', '', '1_5', 1), ('GAVDIVM', 86,\n 'gaudium', 'joy', '', '1_5', 1), ('LACRIMA', 87, 'lacrima', 'tear', '',\n '1_5', 1), ('LONGE', 88, 'longe', 'far', '', '1_5', 1), ('LONGVS', 89,\n 'longus', 'long', '', '1_5', 1), ('MISER', 90, 'miser',\n 'wretched, sad, miserable', '', '1_5', 1), ('NAM', 91, 'nam',\n 'for, in fact', '', '1_5', 1), ('NONSOLVM', 92, 'non',\n 'not only…, but also…', '', '1_5', 1), ('PVLCHER', 93, 'pulcher',\n 'beautiful, nice', '', '1_5', 1), ('SEMPER', 94, 'semper', 'always', '',\n '1_5', 1), ('TAMEN', 95, 'tamen', 'however', '', '1_5', 2), ('SVM/1', \n 96, 'sum', 'to be', '', '1_6', 3), ('DOCEO', 97, 'doceo', 'to teach',\n '', '1_6', 1), ('DVM/2', 98, 'dum', 'while', '', '1_6', 1), 
('EXEMPLVM',\n 99, 'exemplum', 'example', '', '1_6', 1), ('FIRMO', 100, 'firmo',\n 'to strengthen', '', '1_6', 1), ('IACEO', 101, 'iaceo',\n 'to lie down, to be inert', '', '1_6', 1), ('IVDICO', 102, 'iudico',\n 'to judge', '', '1_6', 1), ('LIBER/1', 103, 'liber', 'book', '', '1_6',\n 1), ('LITTERA', 104, 'littera',\n 'letter of the alphabet (sing.); literature, letter (pl.)', '', '1_6', \n 1), ('MANEO', 105, 'maneo', 'to remain', '', '1_6', 1), ('MEMORIA', 106,\n 'memoria', 'memory', '', '1_6', 1), ('MVLTVS', 107, 'multus',\n 'much, many', '', '1_6', 1), ('POSSVM/1', 108, 'possum',\n 'to be able, can', '', '1_6', 1), ('PROPTER/2', 109, 'propter',\n 'because of, on account of (w/ acc.)', '', '1_6', 1), ('SAEPE', 110,\n 'saepe', 'often', '', '1_6', 1), ('SERVO', 111, 'servo',\n 'to save, preserve', '', '1_6', 1), ('SOLEO', 112, 'soleo',\n 'to be accustomed (w/ inf.)', '', '1_6', 1), ('TENEBRAE', 113,\n 'tenebrae', 'shadows, darkness (pl.)', '', '1_6', 1), ('VITA', 114,\n 'vita', 'life', '', '1_6', 1), ('AESTIMO', 115, 'aestimo',\n 'to regard, esteem', '', '1_7', 1), ('AESTIMOVNIVSASSIS', 116,\n 'aestimo', 'I do not care a bit ', '', '1_7', 1), ('AMOR', 117, 'amor',\n 'love', '', '1_7', 1), ('DELICIA/1', 118, 'delicia', 'delight, pet', '',\n '1_7', 1), ('DIGITVS', 119, 'digitus', 'finger', '', '1_7', 1), (\n 'DOMINA', 120, 'domina', 'mistress', '', '1_7', 1), ('GREMIVM', 121,\n 'gremium', 'lap', '', '1_7', 1), ('INVIDEO', 122, 'invideo', 'to envy',\n '', '1_7', 1), ('MEVS', 123, 'meus', 'my', '', '1_7', 1), ('OCVLVS', \n 124, 'oculus', 'eye', '', '1_7', 1), ('PASSER', 125, 'passer',\n 'sparrow', '', '1_7', 1), ('PAX', 126, 'pax', 'peace; favor', '', '1_7',\n 1), ('PVTO', 127, 'puto', 'to think', '', '1_7', 1), ('SENEX/1', 128,\n 'senex', 'old man', '', '1_7', 1), ('SESESE', 129, 'se',\n 'him/her/itself', '', '1_7', 1), ('SEVERVS', 130, 'severus', 'serious',\n '', '1_7', 1), ('SOROR', 131, 'soror', 'sister', '', '1_7', 1), (\n 'SVI/1', 132, 'sui', 'him–/her–/itself', 
'', '1_7', 1), ('VERBVM', 133,\n 'verbum', 'word', '', '1_7', 1), ('CONTRA/2', 134, 'contra',\n 'against (w/ acc.)', '', '1_8', 1), ('DECERNO', 135, 'decerno',\n 'to decide, determine (often w/ inf.)', '', '1_8', 1), ('DICO/2', 136,\n 'dico', 'to say', '', '1_8', 1), ('DVX', 137, 'dux', 'leader, general',\n '', '1_8', 1), ('FORTITVDO', 138, 'fortitudo', 'courage', '', '1_8', 1),\n ('HOMO', 139, 'homo', 'man, human being; people (pl.)', '', '1_8', 1),\n ('INTELLIGO', 140, 'intellego', 'to understand', '', '1_8', 1), (\n 'LIBERO', 141, 'libero', 'to free someone (acc.) from something (abl.)',\n '', '1_8', 1), ('MILES', 142, 'miles', 'soldier', '', '1_8', 1), (\n 'NAVIGO', 143, 'navigo', 'to sail, navigate', '', '1_8', 1), (\n 'ORACVLVM', 144, 'oraculum', 'oracle, prophecy', '', '1_8', 1), ('PETO',\n 145, 'peto', 'to seek', '', '1_8', 1), ('REX', 146, 'rex', 'king', '',\n '1_8', 1), ('TANDEM', 147, 'tandem', 'finally', '', '1_8', 1), (\n 'TEMPLVM', 148, 'templum', 'temple', '', '1_8', 1), ('TIMOR', 149,\n 'timor', 'fear', '', '1_8', 1), ('TVM', 150, 'tum',\n 'then, at that time', '', '1_8', 2), ('VINCO', 151, 'vinco',\n 'to conquer', '', '1_8', 1), ('ANIMAL', 152, 'animal', 'animal', '',\n '1_9', 1), ('ARMA', 153, 'arma', 'weapons (pl.)', '', '1_9', 1), (\n 'AVDIO', 154, 'audio', 'to hear, listen', '', '1_9', 1), ('CAPVT', 155,\n 'caput', 'head', '', '1_9', 1), ('CIVIS', 156, 'civis', 'citizen', '',\n '1_9', 1), ('CONSVL', 157, 'consul', 'consul', '', '1_9', 1), ('CORPVS',\n 158, 'corpus', 'body', '', '1_9', 1), ('CREDO', 159, 'credo',\n 'to believe somebody (w/ dat.)', '', '1_9', 1), ('EXEMPLAR', 160,\n 'exemplar', 'example', '', '1_9', 1), ('GERO', 161, 'gero',\n 'to carry; to behave (w/ se)', '', '1_9', 2), ('MARE', 162, 'mare',\n 'sea', '', '1_9', 1), ('MORS', 163, 'mors', 'death', '', '1_9', 1), (\n 'MVLIER', 164, 'mulier', 'woman', '', '1_9', 1), ('ORATIO', 165,\n 'oratio', 'oration, speech', '', '1_9', 1), ('SCIO', 166, 'scio',\n 'to know', '', '1_9', 1), 
('SENTIO', 167, 'sentio', 'to perceive', '',\n '1_9', 1), ('TEMPVS/1', 168, 'tempus', 'time', '', '1_9', 1), ('VENIO',\n 169, 'venio', 'to come', '', '1_9', 1), ('VRBS', 170, 'urbs', 'city',\n '', '1_9', 1), ('ACER/2', 171, 'acer', 'keen, fierce', '', '1_10', 1),\n ('AEDIFICO', 172, 'aedifico', 'to build', '', '1_10', 1), ('CAPIO/2', \n 173, 'capio', 'to take, adopt, capture', '', '1_10', 1), ('CELEBER', \n 174, 'celeber', 'renowned, well–known, crowded', '', '1_10', 1), (\n 'CVPIO', 175, 'cupio', 'to desire, want', '', '1_10', 1), ('DELEO', 176,\n 'deleo', 'to destroy', '', '1_10', 1), ('DEVS', 177, 'deus', 'god', '',\n '1_10', 1), ('DONVM', 178, 'donum', 'gift', '', '1_10', 1), ('EQVVS', \n 179, 'equus', 'horse', '', '1_10', 1), ('FELIX', 180, 'felix',\n 'fortunate, happy', '', '1_10', 1), ('FLAMMA', 181, 'flamma', 'flame',\n '', '1_10', 1), ('FORTIS', 182, 'fortis', 'brave, strong', '', '1_10', \n 1), ('FVGIO', 183, 'fugio', 'to flee, run away', '', '1_10', 1), (\n 'HOSTIS', 184, 'hostis', 'enemy', '', '1_10', 1), ('MOVEO', 185,\n 'moveo', 'to move', '', '1_10', 1), ('NEC/2', 186, 'nec',\n 'nor; and not', '', '1_10', 2), ('NOX', 187, 'nox', 'night', '', '1_10',\n 1), ('PAVCI', 188, 'paucus', 'few', '', '1_10', 1), ('PERICVLVM', 189,\n 'periculum', 'danger', '', '1_10', 1), ('PVGNO', 190, 'pugno',\n 'to fight', '', '1_10', 1), ('AGO', 191, 'ago',\n 'to drive, lead, do, behave', '', '1_11', 1), ('ARDEO', 192, 'ardeo',\n 'to burn, be on fire', '', '1_11', 1), ('CONSPICIO', 193, 'conspicio',\n 'to look at, observe', '', '1_11', 1), ('CRVDELIS', 194, 'crudelis',\n 'cruel', '', '1_11', 1), ('DOLOR', 195, 'dolor', 'grief, pain', '',\n '1_11', 1), ('ITA', 196, 'ita', 'yes', '', '1_11', 2), ('MINIME', 197,\n 'minime', 'No', '', '1_11', 1), ('MITTO', 198, 'mitto', 'to send', '',\n '1_11', 1), ('NE/2', 199, 'ne',\n '(added to the first word of a question', '', '1_11', 1), ('NOVVS', 200,\n 'novus', 'new', '', '1_11', 1), ('PARVM/2', 201, 'parum', 'too little',\n '', '1_11', 
2), ('QVE', 202, 'que', 'and', '', '1_11', 1), ('QVOQVE', \n 203, 'quoque', 'also', '', '1_11', 1), ('REGINA', 204, 'regina',\n 'queen', '', '1_11', 1), ('RELINQVO', 205, 'relinquo', 'to abandon', '',\n '1_11', 1), ('SILVA', 206, 'silva', 'forest', '', '1_11', 1), (\n 'SPELVNCA', 207, 'spelunca', 'cave', '', '1_11', 1), ('TEMPESTAS', 208,\n 'tempestas', 'season', '', '1_11', 1), ('VNA', 209, 'una', 'together',\n '', '1_11', 1), ('BELLVMGERO', 210, 'bellum', 'to wage war', '', '1_12',\n 1), ('CONSVMO', 211, 'consumo', 'to consume', '', '1_12', 1), (\n 'DEXTERA', 212, 'dextera', 'right hand', '', '1_12', 1), ('FACIO', 213,\n 'facio', 'to do, make', '', '1_12', 1), ('IBI', 214, 'ibi', 'there', '',\n '1_12', 1), ('IGNIS', 215, 'ignis', 'fire', '', '1_12', 1), ('INQVIO', \n 216, 'inquam', 'to say (used with direct speech)', '', '1_12', 3), (\n 'IRA', 217, 'ira', 'anger', '', '1_12', 1), ('IS', 218, 'is',\n 's/he/it, this, that', '', '1_12', 1), ('NOMEN', 219, 'nomen', 'name',\n '', '1_12', 1), ('NOS', 220, 'nos', 'we; us', '', '1_12', 1), ('NOSTER',\n 221, 'noster', 'our, ours', '', '1_12', 1), ('OCCIDO/2', 222, 'occido',\n 'to strike down, knock down', '', '1_12', 1), ('OSTENDO', 223,\n 'ostendo', 'to show', '', '1_12', 1), ('PONO', 224, 'pono', 'to place',\n '', '1_12', 1), ('PROPE/2', 225, 'prope', 'near', '', '1_12', 2), (\n 'PROVIRIBVS', 226, 'pro', 'with all one’s might', '', '1_12', 1), (\n 'SIMILIS', 227, 'similis', 'similar', '', '1_12', 1), ('STATIM', 228,\n 'statim', 'immediately', '', '1_12', 1), ('TANTVS', 229, 'tantus',\n 'so much', '', '1_12', 1), ('TVVS', 230, 'tuus', 'your', '', '1_12', 1),\n ('VESTER', 231, 'vester', 'your', '', '1_12', 1), ('VIS', 232, 'vis',\n 'force', '', '1_12', 1), ('VOS', 233, 'vos', 'you', '', '1_12', 1), (\n 'EGO', 234, 'ego', 'I', '', '1_13', 3), ('TV', 235, 'tu', 'you', '',\n '1_13', 3), ('TVM', 236, 'tum', 'then, at that time', '', '1_13', 2), (\n 'ALIVS', 237, 'alius', 'another, other', '', '1_13', 1), ('APVD', 238,\n 
'apud', 'at the house of (w/ acc.)', '', '1_13', 1), ('ATQVE/1', 239,\n 'atque', 'as', '', '1_13', 2), ('DISCEDO/1', 240, 'discedo',\n 'to leave, withdraw, go away', '', '1_13', 1), ('DIVES', 241, 'dives',\n 'rich', '', '1_13', 1), ('DOCTVS', 242, 'doctus', 'learned', '', '1_13',\n 1), ('DVCO', 243, 'duco', 'to lead, take', '', '1_13', 2), ('ENIM/2', \n 244, 'enim', 'for, in fact', '', '1_13', 1), ('IVDEX', 245, 'iudex',\n 'judge', '', '1_13', 1), ('LICET/1', 246, 'licet',\n 'it is allowed, permitted (for someone)(to do something)(w/ dat. and inf.) '\n , '', '1_13', 1), ('NIHIL', 247, 'nihil', 'nothing', '', '1_13', 1), (\n 'NOLO', 248, 'nolo', 'not to want, to be unwilling', '', '1_13', 2), (\n 'OMNIS', 249, 'omnis', 'each, every, all', '', '1_13', 1), ('PRO/1', \n 250, 'pro', 'for, on behalf of (w/ abl.)', '', '1_13', 1), ('QVID', 251,\n 'quid', 'what; why', '', '1_13', 1), ('RESPONDEO', 252, 'respondeo',\n 'to answer', '', '1_13', 1), ('ROGO', 253, 'rogo', 'to ask', '', '1_13',\n 1), ('SVVS', 254, 'suus', 'his, her, its, their', '', '1_13', 2), (\n 'TANTVM/2', 255, 'tantum', 'only', '', '1_13', 1), ('VALE', 256, 'vale',\n 'to greetings! 
farewell!', '', '1_13', 1), ('VALEO', 257, 'valeo',\n 'to be able (w/ inf.); to be in good health', '', '1_13', 3), ('ALBVS',\n 258, 'albus', 'white', '', '1_14', 1), ('ARBOR', 259, 'arbor', 'tree',\n '', '1_14', 1), ('CADO', 260, 'cado', 'to fall', '', '1_14', 1), (\n 'COMEDO/2', 261, 'comedo', 'to eat', '', '1_14', 1), ('CONVENIO', 262,\n 'convenio', 'to meet', '', '1_14', 1), ('FLVO', 263, 'fluo', 'to flow',\n '', '1_14', 1), ('GLADIVS', 264, 'gladius', 'sword', '', '1_14', 1), (\n 'IAM', 265, 'iam', 'already, yet', '', '1_14', 1), ('MOX', 266, 'mox',\n 'soon', '', '1_14', 1), ('ODIVM', 267, 'odium', 'hatred', '', '1_14', 2\n ), ('OS/1', 268, 'os', 'mouth', '', '1_14', 1), ('PARENS/1', 269,\n 'parens', 'parent', '', '1_14', 1), ('PECTVS', 270, 'pectus', 'chest',\n '', '1_14', 1), ('PER', 271, 'per', 'through (w/ acc.)', '', '1_14', 1),\n ('PRIMVS', 272, 'primus', 'first', '', '1_14', 1), ('QVI/1', 273, 'qui',\n 'who, which (rel. pronoun); what? which? (inter. adj.) ', '', '1_14', 2\n ), ('RVBER', 274, 'ruber', 'red', '', '1_14', 1), ('SANGVIS', 275,\n 'sanguis', 'blood', '', '1_14', 1), ('SEPARO/2', 276, 'separo',\n 'to separate, divide', '', '1_14', 1), ('TANGO', 277, 'tango',\n 'to touch', '', '1_14', 1), ('INQVIO', 278, 'inquam',\n 'to say (used with direct speech)', '', '1_15', 3), ('QVI/1', 279,\n 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) ', '',\n '1_15', 2), ('ANTE/2', 280, 'ante', 'in front of (w/ acc.)', '', '1_15',\n 1), ('ARGVMENTVM', 281, 'argumentum', 'proof, indication, argument', '',\n '1_15', 1), ('CVR/1', 282, 'cur', 'why', '', '1_15', 1), ('DIFFICILIS',\n 283, 'difficilis', 'difficult', '', '1_15', 1), ('ECCE', 284, 'ecce',\n 'look here!', '', '1_15', 1), ('ETIAM', 285, 'etiam', 'even, also', '',\n '1_15', 1), ('FORSITAN', 286, 'forsan', 'perhaps', '', '1_15', 1), (\n 'NEGLIGO', 287, 'neglego', 'to neglect', '', '1_15', 1), ('PARVVS/2', \n 288, 'parvus', 'small', '', '1_15', 1), ('QVIS/1', 289, 'quis',\n 'who? which? (inter. 
pronoun)', '', '1_15', 1), ('RVSTICVS/2', 290,\n 'rusticus', 'rural, rustic', '', '1_15', 1), ('SAXVM', 291, 'saxum',\n 'stone, rock', '', '1_15', 1), ('SENECTVS/1', 292, 'senectus',\n 'old age', '', '1_15', 1), ('SICVT/1', 293, 'sicut', 'just as', '',\n '1_15', 1), ('STO', 294, 'sto', 'stand', '', '1_15', 1), ('VBIQVE', 295,\n 'ubique', 'everywhere', '', '1_15', 1), ('VERVS', 296, 'verus',\n 'real, true', '', '1_15', 1), ('VETVSTVS', 297, 'vetustus', 'old', '',\n '1_15', 1), ('VILLA', 298, 'villa', 'estate', '', '1_15', 1), ('VMQVAM',\n 299, 'umquam', 'ever', '', '1_15', 1), ('AVVNCVLVS', 300, 'avunculus',\n 'uncle', '', '1_16', 1), ('CAELVM/1', 301, 'caelum',\n 'sky, heaven, weather', '', '1_16', 1), ('CAVSA', 302, 'causa',\n 'cause, reason', '', '1_16', 1), ('CINIS', 303, 'cinis', 'ash', '',\n '1_16', 1), ('CLADES', 304, 'clades', 'disaster', '', '1_16', 1), (\n 'CLASSIS', 305, 'classis', 'fleet, class (of people)', '', '1_16', 1),\n ('FEMINA', 306, 'femina', 'woman', '', '1_16', 1), ('FVMVS', 307,\n 'fumus', 'smoke', '', '1_16', 1), ('FVNESTVS', 308, 'funestus',\n 'deadly', '', '1_16', 1), ('IGITVR', 309, 'igitur', 'therefore', '',\n '1_16', 1), ('INCENDIVM', 310, 'incendium', 'conflagration, eruption',\n '', '1_16', 1), ('LEGO/2', 311, 'lego', 'to read, choose', '', '1_16', \n 1), ('LITVS/2', 312, 'litus', 'shore', '', '1_16', 1), ('MATER', 313,\n 'mater', 'mother', '', '1_16', 1), ('MONS', 314, 'mons', 'mountain', '',\n '1_16', 1), ('NAVIS', 315, 'navis', 'ship', '', '1_16', 1), ('NVBES', \n 316, 'nubes', 'cloud', '', '1_16', 1), ('NVMQVAM', 317, 'numquam',\n 'never', '', '1_16', 1), ('OPPRIMO', 318, 'opprimo',\n 'to overwhelm, suppress', '', '1_16', 1), ('PARS', 319, 'pars', 'part',\n '', '1_16', 1), ('STVDEO', 320, 'studeo',\n 'to study, be eager for, be interested in (w/ dat.)', '', '1_16', 1), (\n 'DOMVS', 321, 'domus', 'home', '', '1_17', 2), ('ALO', 322, 'alo',\n 'to feed, nourish', '', '1_17', 1), ('AMITTO', 323, 'amitto', 'to lose',\n '', '1_17', 1), 
('CORNV', 324, 'cornu', 'horn', '', '1_17', 1), (\n 'CORRIPIO', 325, 'corripio', 'to seize, occupy, engulf', '', '1_17', 1),\n ('CVRRO', 326, 'curro', 'to run', '', '1_17', 1), ('DEVASTO', 327,\n 'devasto', 'to lay waste', '', '1_17', 1), ('EXSTINGVO', 328,\n 'exstinguo', 'to extinguish', '', '1_17', 1), ('FACILE', 329, 'facile',\n 'easliy', '', '1_17', 1), ('IACIO', 330, 'iacio', 'to throw', '',\n '1_17', 1), ('IMPERATOR', 331, 'imperator', 'general, emperor', '',\n '1_17', 1), ('IMPETVS', 332, 'impetus', 'impetus, force, attack', '',\n '1_17', 1), ('INITIVM', 333, 'initium', 'beginning', '', '1_17', 1), (\n 'IVSSVS', 334, 'iussus', 'order', '', '1_17', 1), ('LOCVS', 335,\n 'locus',\n 'place (sing.); passages of a book (m. pl.); geographical places(n. pl.)',\n '', '1_17', 1), ('MANVS/1', 336, 'manus', 'hand', '', '1_17', 1), (\n 'MVRVS', 337, 'murus', 'wall', '', '1_17', 1), ('SINE', 338, 'sine',\n 'without (w/ abl.)', '', '1_17', 1), ('TEMPTO', 339, 'tempto', 'to try',\n '', '1_17', 1), ('TVMVLTVS', 340, 'tumultus', 'confusion', '', '1_17', \n 1), ('VENTVS', 341, 'ventus', 'wind', '', '1_17', 1), ('DVCO', 342,\n 'duco', 'to lead, take', '', '1_18', 2), ('ITA', 343, 'ita', 'yes', '',\n '1_18', 2), ('COLO/2', 344, 'colo', 'to worship, cultivate', '', '1_18',\n 1), ('CVM/3', 345, 'cum', 'when, after', '', '1_18', 2), ('DEA', 346,\n 'dea', 'goddess', '', '1_18', 1), ('DIES', 347, 'dies', 'day', '',\n '1_18', 1), ('DORMIO', 348, 'dormio', 'to sleep', '', '1_18', 1), (\n 'EXCITO/1', 349, 'excito', 'to awaken, rouse, stir up', '', '1_18', 1),\n ('EXCLAMO', 350, 'exclamo', 'to exclaim', '', '1_18', 1), ('FACIES', \n 351, 'facies', 'face', '', '1_18', 1), ('FATVM', 352, 'fatum',\n 'fate, destiny', '', '1_18', 1), ('MARITVS/1', 353, 'maritus',\n 'husband', '', '1_18', 1), ('MERIDIES', 354, 'meridies', 'midday', '',\n '1_18', 2), ('MVLTVM/2', 355, 'multum', 'much', '', '1_18', 1), (\n 'OCCVLTO', 356, 'occulto', 'to hide', '', '1_18', 1), ('PATER', 357,\n 'pater', 'father', 
'', '1_18', 2), ('POST/2', 358, 'post',\n 'after (w/ acc.)', '', '1_18', 1), ('QVAERO', 359, 'quaero',\n 'to look for, search', '', '1_18', 1), ('RES', 360, 'res',\n 'thing, matter', '', '1_18', 1), ('SI/2', 361, 'si', 'if', '', '1_18', \n 1), ('SOMNVS', 362, 'somnus', 'sleep', '', '1_18', 1), ('TAM', 363,\n 'tam', 'so ', '', '1_18', 1), ('VXOR', 364, 'uxor', 'wife', '', '1_18',\n 2), ('BARBA', 365, 'barba', 'beard', '', '1_19', 1), ('CARO/1', 366,\n 'caro', 'meat, flesh', '', '1_19', 1), ('CELERITER', 367, 'celeriter',\n 'swiftly', '', '1_19', 1), ('COQVO', 368, 'coquo', 'to cook', '',\n '1_19', 1), ('CRESCO', 369, 'cresco', 'to grow', '', '1_19', 1), (\n 'FEROX', 370, 'ferox', 'fierce, ferocious', '', '1_19', 1), ('FORIS/2',\n 371, 'foris', 'outside, in the open', '', '1_19', 1), ('HERBA', 372,\n 'herba', 'plant, vegetation', '', '1_19', 1), ('HIC/1', 373, 'hic',\n 'this', '', '1_19', 1), ('INTER', 374, 'inter',\n 'between, among (w/ acc.)', '', '1_19', 1), ('PELLIS', 375, 'pellis',\n 'skin, hide', '', '1_19', 1), ('POSTQVAM', 376, 'postquam', 'after', '',\n '1_19', 1), ('PROELIVM', 377, 'proelium', 'battle, combat', '', '1_19',\n 1), ('SANO', 378, 'sano', 'to heal', '', '1_19', 1), ('SEDEO', 379,\n 'sedeo', 'to sit', '', '1_19', 1), ('TERO', 380, 'tero',\n 'to wear out, rub', '', '1_19', 1), ('TERRIBILIS', 381, 'terribilis',\n 'terrifying', '', '1_19', 1), ('VESTIMENTVM', 382, 'vestimentum',\n 'garment, clothes (pl.)', '', '1_19', 1), ('VIVO', 383, 'vivo',\n 'to live', '', '1_19', 1), ('VVLNERO', 384, 'vulnero', 'to wound', '',\n '1_19', 1), ('VVLNVS', 385, 'vulnus', 'wound', '', '1_19', 1), (\n 'ABVNDO', 386, 'abundo', 'to abound with (w/ abl.)', '', '1_20', 1), (\n 'ADOLESCENS/2', 387, 'adulescens', 'young man, young lady', '', '1_20',\n 1), ('AEQVVS', 388, 'aequus', 'even', '', '1_20', 1), ('COR', 389,\n 'cor', 'heart', '', '1_20', 1), ('DELECTO', 390, 'delecto',\n 'to delight, please', '', '1_20', 1), ('DIVINVS/2', 391, 'divinus',\n 'divine', '', '1_20', 1), 
('EGEO', 392, 'egeo',\n 'to lack something (abl.)', '', '1_20', 1), ('FVR', 393, 'fur', 'thief',\n '', '1_20', 1), ('FVRTVM', 394, 'furtum', 'theft', '', '1_20', 1), (\n 'HVMANVS', 395, 'humanus', 'human', '', '1_20', 1), ('ILLE', 396,\n 'ille', 'that', '', '1_20', 1), ('INIQVITAS', 397, 'iniquitas',\n 'injustice', '', '1_20', 1), ('LEX', 398, 'lex', 'law', '', '1_20', 1),\n ('LVDO', 399, 'ludo', 'to play', '', '1_20', 1), ('NOCTV', 400, 'noctu',\n 'during the night', '', '1_20', 1), ('PAENE', 401, 'paene', 'almost',\n '', '1_20', 1), ('PAVPER', 402, 'pauper', 'poor', '', '1_20', 1), (\n 'PLENVS', 403, 'plenus', 'full of (w/ gen. or abl.)', '', '1_20', 1), (\n 'POMVM', 404, 'pomum', 'fruit', '', '1_20', 1), ('PVNIO', 405, 'punio',\n 'to punish', '', '1_20', 1), ('ACCIPIO', 406, 'accipio',\n 'to accept, receive', '', '1_21', 1), ('ACCVSO', 407, 'accuso',\n 'to accuse someone (acc.) of something (gen.)', '', '1_21', 1), (\n 'ALIENVS/2', 408, 'alienus',\n 'foreign to, inconsistent with (w/ a/ab and abl.)', '', '1_21', 1), (\n 'AXIS', 409, 'axis', 'axle, axis', '', '1_21', 1), ('CIRCVM/2', 410,\n 'circum', 'around (w/ acc.)', '', '1_21', 1), ('CONSTANTIA', 411,\n 'constantia', 'constancy', '', '1_21', 1), ('DESCENDO', 412, 'descendo',\n 'to descend', '', '1_21', 1), ('DIVITIAE', 413, 'divitia',\n 'wealth, riches (pl.)', '', '1_21', 1), ('ERIPIO', 414, 'eripio',\n 'to snatch away', '', '1_21', 1), ('ERRO/2', 415, 'erro',\n 'to wander, make a mistake', '', '1_21', 1), ('EXTERNVS', 416,\n 'externus', 'outward, external', '', '1_21', 1), ('FORTVNA', 417,\n 'fortuna', 'fortune, the goddess Fortune', '', '1_21', 1), ('FVTVRVS', \n 418, 'futurus', 'about to be (from sum)', '', '1_21', 1), ('HONOR', 419,\n 'honor', 'honor, public office or distinction', '', '1_21', 1), (\n 'MVTO/2', 420, 'muto', 'to change', '', '1_21', 1), ('POSSIDEO', 421,\n 'possideo', 'to possess', '', '1_21', 1), ('PROCERTO', 422, 'pro',\n 'for certain, for sure', '', '1_21', 1), ('RECIPIO', 423, 
'recipio',\n 'to take back', '', '1_21', 2), ('REPREHENDO', 424, 'reprehendo',\n 'to blame, rebuke', '', '1_21', 1), ('ROTA', 425, 'rota', 'wheel', '',\n '1_21', 1), ('TOLLO', 426, 'tollo', 'to lift up, raise; to destroy', '',\n '1_21', 1), ('VERSO', 427, 'verso', 'to turn', '', '1_21', 1), ('VLLVS',\n 428, 'ullus', 'any', '', '1_21', 1), ('CONSILIVM', 429, 'consilium',\n 'plan, advice', '', '2_1', 2), ('MERIDIES', 430, 'meridies', 'midday',\n '', '2_1', 2), ('PROPE/2', 431, 'prope', 'near', '', '2_1', 2), (\n 'ASPICIO', 432, 'aspicio', 'to look at, catch a glimpse of', '', '2_1',\n 1), ('ETET', 433, 'et', 'both…and…', '', '2_1', 1), ('GENS', 434,\n 'gens', 'tribe, population', '', '2_1', 1), ('GIGNO', 435, 'gigno',\n 'to give birth, produce', '', '2_1', 1), ('HODIE', 436, 'hodie',\n 'today', '', '2_1', 1), ('INCOLA', 437, 'incola', 'inhabitant', '',\n '2_1', 1), ('INSVLA', 438, 'insula', 'island', '', '2_1', 2), (\n 'INVENIO', 439, 'invenio', 'to come upon, find', '', '2_1', 1), ('MOS',\n 440, 'mos', 'custom, habit; morals (pl.)', '', '2_1', 1), ('MVNDVS/1', \n 441, 'mundus', 'world', '', '2_1', 1), ('NE/4', 442, 'ne',\n 'that not, not to, lest ', '', '2_1', 3), ('OCCVPO/2', 443, 'occupo',\n 'to occupy', '', '2_1', 1), ('ORTVS', 444, 'ortus',\n 'origin, beginning, raising', '', '2_1', 1), ('PISCIS', 445, 'piscis',\n 'a fish', '', '2_1', 1), ('PROCVL', 446, 'procul', 'far, far away', '',\n '2_1', 1), ('PROMITTO', 447, 'promitto', 'to promise', '', '2_1', 1), (\n 'SEPTENTRIONALIS', 448, 'septentrionalis', 'northern', '', '2_1', 1), (\n 'SITVS/2', 449, 'situs', 'located, situated', '', '2_1', 1), ('SOL', \n 450, 'sol', 'sun', '', '2_1', 1), ('VTINAM', 451, 'utinam', 'if only',\n '', '2_1', 2), ('GERO', 452, 'gero', 'to carry; to behave (w/ se)', '',\n '2_2', 2), ('ODIVM', 453, 'odium', 'hatred', '', '2_2', 2), ('VALEO', \n 454, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_2',\n 3), ('ALTVS', 455, 'altus', 'tall, deep', '', '2_2', 1), ('ANNVS', 456,\n 
'annus', 'year', '', '2_2', 1), ('ARGENTVM', 457, 'argentum', 'silver',\n '', '2_2', 1), ('AVRVM', 458, 'aurum', 'gold', '', '2_2', 1), ('BREVIS',\n 459, 'brevis', 'short', '', '2_2', 1), ('CLARVS', 460, 'clarus',\n 'clear, distinguished', '', '2_2', 1), ('CVSTOS', 461, 'custos',\n 'guard', '', '2_2', 1), ('EQVES', 462, 'eques', 'horseman', '', '2_2', \n 1), ('FINIS', 463, 'finis', 'end', '', '2_2', 1), ('GRAVIS', 464,\n 'gravis', 'serious, heavy', '', '2_2', 1), ('INTERDVM', 465, 'interdum',\n 'sometimes', '', '2_2', 1), ('LIS', 466, 'lis', 'dispute, quarrel', '',\n '2_2', 1), ('MANE/2', 467, 'mane', 'in the morning', '', '2_2', 1), (\n 'ODIOHABEO', 468, 'odio', 'to hate somebody', '', '2_2', 1), ('SINO', \n 469, 'sino', 'to allow somebody (acc.) to do something (inf.)', '',\n '2_2', 1), ('VEL/1', 470, 'vel', 'or', '', '2_2', 1), ('VESTIS', 471,\n 'vestis', 'clothes, attire', '', '2_2', 1), ('VOX', 472, 'vox', 'voice',\n '', '2_2', 1), ('VT/4', 473, 'ut', 'that, to, in order to, so that', '',\n '2_2', 4), ('VVLTVS', 474, 'vultus', 'face', '', '2_2', 1), ('VXOR', \n 475, 'uxor', 'wife', '', '2_3', 2), ('AT/2', 476, 'at', 'but', '',\n '2_3', 1), ('CONIVX', 477, 'coniunx', 'spouse', '', '2_3', 1), (\n 'DISCIPVLA', 478, 'discipula', 'student', '', '2_3', 1), ('DISCO', 479,\n 'disco', 'to learn', '', '2_3', 1), ('DOMINVS', 480, 'dominus',\n 'master, lord', '', '2_3', 1), ('FAMA', 481, 'fama',\n 'fame, name, reputation', '', '2_3', 1), ('FRATER', 482, 'frater',\n 'brother', '', '2_3', 1), ('IMPROBVS', 483, 'improbus', 'wicked, bad',\n '', '2_3', 1), ('IVNGO', 484, 'iungo', 'to join', '', '2_3', 1), (\n 'MAGISTER', 485, 'magister', 'teacher', '', '2_3', 1), ('MATRIMONIVM', \n 486, 'matrimonium', 'marriage', '', '2_3', 1), ('NE/4', 487, 'ne',\n 'that not, not to, lest ', '', '2_3', 3), ('NVSQVAM', 488, 'nusquam',\n 'nowhere', '', '2_3', 1), ('PARIO/2', 489, 'pario', 'to give birth to',\n '', '2_3', 1), ('PERDO', 490, 'perdo', 'to lose, waste', '', '2_3', 1),\n ('SALVS', 
491, 'salus', 'health, welfare', '', '2_3', 1), (\n 'SALVTEMDICERE', 492, 'salutem',\n 'to greet (customary opening to letter) ', '', '2_3', 1), ('SCRIBO', \n 493, 'scribo', 'to write', '', '2_3', 1), ('VT/4', 494, 'ut',\n 'that, to, in order to, so that', '', '2_3', 4), ('VXOREMDEDVCERE', 495,\n 'uxorem', 'to marry a woman, to take as a wife', '', '2_3', 1), (\n 'NEC/2', 496, 'nec', 'nor; and not', '', '2_4', 2), ('RECIPIO', 497,\n 'recipio', 'to take back', '', '2_4', 2), ('AGMEN', 498, 'agmen',\n 'marching column', '', '2_4', 1), ('APERIO', 499, 'aperio', 'to open',\n '', '2_4', 1), ('COEPIO', 500, 'coepi', 'to begin (w/ inf.)', '', '2_4',\n 1), ('DEFENDO', 501, 'defendo', 'to defend', '', '2_4', 1), ('EDO/1', \n 502, 'edo', 'to produce, give forth', '', '2_4', 1), ('EXTRA/2', 503,\n 'extra', 'outside of (w/ acc.)', '', '2_4', 1), ('FVRO', 504, 'furo',\n 'to rage, be insane', '', '2_4', 1), ('INGENS', 505, 'ingens', 'huge',\n '', '2_4', 1), ('INVADO/2', 506, 'invado', 'to burst in', '', '2_4', 1),\n ('LIGNEVS', 507, 'ligneus', 'made of wood', '', '2_4', 1), ('NEQVENEC',\n 508, 'neque', 'neither..nor…', '', '2_4', 1), ('PARCO', 509, 'parco',\n 'to spare somebody/thing (w/ dat.)', '', '2_4', 1), ('PONS', 510,\n 'pons', 'bridge', '', '2_4', 1), ('PORTA', 511, 'porta', 'gate', '',\n '2_4', 1), ('PRIMO', 512, 'primo', 'at first', '', '2_4', 1), ('QVAM/1',\n 513, 'quam', 'than (w/ comp. words)', '', '2_4', 2), ('QVANTVS/1', 514,\n 'quantus', 'how great, how much (inter. or rel. 
adj.)', '', '2_4', 1),\n ('RESISTO', 515, 'resisto', 'to resist (w/ dat.)', '', '2_4', 1), (\n 'SIMVL/1', 516, 'simul', 'at the same time', '', '2_4', 1), ('TVTVS', \n 517, 'tutus', 'safe', '', '2_4', 1), ('VACVVS', 518, 'vacuus',\n 'empty of (w/ abl.)', '', '2_4', 1), ('VALEO', 519, 'valeo',\n 'to be able (w/ inf.); to be in good health', '', '2_4', 3), ('VICTOR',\n 520, 'victor', 'victor', '', '2_4', 1), ('VTINAM', 521, 'utinam',\n 'if only', '', '2_4', 2), ('BIBO/2', 522, 'bibo', 'to drink', '', '2_5',\n 1), ('CARMEN/1', 523, 'carmen', 'song, poem', '', '2_5', 1), ('CIBVS', \n 524, 'cibus', 'food', '', '2_5', 1), ('DVLCIS', 525, 'dulcis', 'sweet',\n '', '2_5', 1), ('FLVMEN', 526, 'flumen', 'river', '', '2_5', 1), (\n 'IMMEMOR', 527, 'immemor', 'forgetful of (w/ gen.)', '', '2_5', 1), (\n 'IOCVS', 528, 'iocus', 'joke', '', '2_5', 1), ('IVVENTVS', 529,\n 'iuventus', 'youth', '', '2_5', 1), ('LEVIS/1', 530, 'levis', 'light',\n '', '2_5', 1), ('MENS', 531, 'mens', 'mind, spirit', '', '2_5', 1), (\n 'NE/4', 532, 'ne', 'that not, not to, lest ', '', '2_5', 3), ('ORO', \n 533, 'oro', 'to ask, entreat', '', '2_5', 1), ('PLACEO', 534, 'placeo',\n 'to please, be agreeable to somebody', '', '2_5', 1), ('PROXIMVS/2', \n 535, 'proximus', 'nearest', '', '2_5', 1), ('TAMQVAM/2', 536, 'tam',\n 'so…as…', '', '2_5', 1), ('VEHEMENS', 537, 'vehemens',\n 'violent, vehement', '', '2_5', 1), ('VETVS', 538, 'vetus', 'old', '',\n '2_5', 1), ('VINVM', 539, 'vinum', 'wine', '', '2_5', 1), ('VIRTVS', \n 540, 'virtus', 'courage, virtue', '', '2_5', 1), ('VITIVM', 541,\n 'vitium', 'vice', '', '2_5', 1), ('VT/4', 542, 'ut',\n 'that, to, in order to, so that', '', '2_5', 4), ('PATER', 543, 'pater',\n 'father', '', '2_6', 2), ('DECIPIO', 544, 'decipio', 'to deceive', '',\n '2_6', 1), ('DILIGO/3', 545, 'diligo', 'to love, esteem highly', '',\n '2_6', 1), ('DVO', 546, 'duo', 'two', '', '2_6', 1), ('EXERCITVS/1', \n 547, 'exercitus', 'army', '', '2_6', 1), ('FIDELIS/2', 548, 'fidelis',\n 
'faithful, loyal', '', '2_6', 1), ('HERES', 549, 'heres', 'heir', '',\n '2_6', 1), ('IMPERIVM', 550, 'imperium', 'rule, empire, power', '',\n '2_6', 1), ('INOPIA', 551, 'inopia', 'helplessness, want', '', '2_6', 1\n ), ('LAVDO', 552, 'laudo', 'to praise', '', '2_6', 1), ('NECESSEEST', \n 553, 'necesse',\n 'it is necessary for someone (dat.) to do something (inf.)', '', '2_6',\n 1), ('NEMO', 554, 'nemo', 'no one', '', '2_6', 1), ('PAVLO', 555,\n 'paulo', 'a little bit, to a small extent', '', '2_6', 1), ('QVAM/1', \n 556, 'quam', 'than (w/ comp. words)', '', '2_6', 2), ('QVANTVM/3', 557,\n 'quantum', 'to what extent, how much', '', '2_6', 1), ('RESTITVO', 558,\n 'restituo', 'to restore', '', '2_6', 1), ('SATIS/2', 559, 'satis',\n 'enough, sufficiently', '', '2_6', 1), ('SECVNDVS/1', 560, 'secundus',\n 'second', '', '2_6', 1), ('TERTIVS', 561, 'tertius', 'third', '', '2_6',\n 1), ('TRES', 562, 'tres', 'three', '', '2_6', 1), ('TRISTIS', 563,\n 'tristis', 'sad', '', '2_6', 1), ('VEHEMENTER', 564, 'vehementer',\n 'strongly, vehemently', '', '2_6', 1), ('NOLO', 565, 'nolo',\n 'not to want, to be unwilling', '', '2_7', 2), ('AETAS', 566, 'aetas',\n 'age', '', '2_7', 1), ('FIDES/2', 567, 'fides', 'faith', '', '2_7', 1),\n ('FVNDO/2', 568, 'fundo', 'to pour', '', '2_7', 1), ('GLORIA', 569,\n 'gloria', 'glory', '', '2_7', 1), ('LIBERTAS', 570, 'libertas',\n 'freedom', '', '2_7', 1), ('LVMEN', 571, 'lumen', 'light', '', '2_7', 1\n ), ('MALO', 572, 'malo', 'to prefer', '', '2_7', 1), ('ORNATVS/1', 573,\n 'ornatus', 'adorned, ornate, elaborate', '', '2_7', 1), ('OTIVM', 574,\n 'otium', 'leisure, free time', '', '2_7', 1), ('POTENS', 575, 'potens',\n 'powerful', '', '2_7', 1), ('PVBLICVS/2', 576, 'publicus', 'common', '',\n '2_7', 1), ('QVALIS/1', 577, 'qualis', 'what sort of? (inter. 
adj.)',\n '', '2_7', 1), ('RESPVBLICA', 578, 'res', 'state', '', '2_7', 1), (\n 'STVDIOSVS', 579, 'studiosus', 'fond of (w/ gen.)', '', '2_7', 1), (\n 'TAMQVAM/1', 580, 'tamquam', 'as', '', '2_7', 1), ('TOT', 581, 'tot',\n 'so many', '', '2_7', 1), ('TRAHO', 582, 'traho', 'to drag, draw', '',\n '2_7', 1), ('VBI/1', 583, 'ubi', 'where? (inter. adv)', '', '2_7', 1),\n ('VIX', 584, 'vix', 'hardly', '', '2_7', 1), ('VNVS', 585, 'unus',\n 'one', '', '2_7', 1), ('VOLO/3', 586, 'volo', 'to want', '', '2_7', 1),\n ('VTILIS', 587, 'utilis', 'useful', '', '2_7', 1), ('ADHVC', 588,\n 'adhuc', 'still, up to this time', '', '2_8', 1), ('ANTIQVVS', 589,\n 'antiquus', 'ancient', '', '2_8', 1), ('ARS', 590, 'ars',\n 'science, art, skill', '', '2_8', 1), ('DOMINOR', 591, 'dominor',\n 'to dominate, rule', '', '2_8', 1), ('HORTOR', 592, 'hortor',\n 'to exhort, urge', '', '2_8', 1), ('LATINE', 593, 'Latine', 'in Latin',\n '', '2_8', 1), ('LATINVS/A', 594, 'Latinus',\n 'Latin, pertaining to Latin', '', '2_8', 1), ('LINGVA', 595, 'lingua',\n 'language; tongue', '', '2_8', 1), ('LOQVOR', 596, 'loquor', 'to speak',\n '', '2_8', 1), ('MAGIS/2', 597, 'magis', 'more', '', '2_8', 1), (\n 'MAIOR', 598, 'maior', 'bigger; greater', '', '2_8', 1), ('MAXIMVS', \n 599, 'maximus', 'greatest', '', '2_8', 1), ('MELIOR', 600, 'melior',\n 'better', '', '2_8', 1), ('MINIMVS', 601, 'minimus', 'smallest', '',\n '2_8', 1), ('MINVS', 602, 'minus', 'less', '', '2_8', 2), ('OPTIMVS', \n 603, 'optimus', 'best', '', '2_8', 1), ('PARTIOR', 604, 'partior',\n 'to divide, distribute', '', '2_8', 1), ('PATIOR', 605, 'patior',\n 'to endure, tolerate, suffer', '', '2_8', 1), ('PEIOR', 606, 'peior',\n 'worse', '', '2_8', 1), ('PESSIMVS', 607, 'pessimus', 'worst', '',\n '2_8', 1), ('PLVRIMVS', 608, 'plurimus', 'most', '', '2_8', 1), ('PLVS',\n 609, 'plus', 'more', '', '2_8', 1), ('SEQVOR', 610, 'sequor',\n 'to follow', '', '2_8', 1), ('VEREOR', 611, 'vereor',\n 'to fear, respect', '', '2_8', 1), ('ADDO', 612, 'addo', 'to 
add', '',\n '2_9', 1), ('AVRIS', 613, 'auris', 'ear', '', '2_9', 1), ('CONOR', 614,\n 'conor', 'to try', '', '2_9', 1), ('DEMITTO', 615, 'demitto',\n 'to send down', '', '2_9', 1), ('DISSIMILIS', 616, 'dissimilis',\n 'dissimilar ', '', '2_9', 1), ('FACILIS', 617, 'facilis', 'easy', '',\n '2_9', 1), ('FERO', 618, 'fero', 'to carry, bear', '', '2_9', 1), (\n 'FIO', 619, 'fio', 'to be made, become; (impersonally) to happen', '',\n '2_9', 1), ('FRIGVS', 620, 'frigus', 'cold', '', '2_9', 1), ('GENVS/1',\n 621, 'genus', 'kind', '', '2_9', 1), ('GLACIES', 622, 'glacies', 'ice',\n '', '2_9', 1), ('GRACILIS', 623, 'gracilis', 'slender', '', '2_9', 1),\n ('HVMILIS', 624, 'humilis', 'low, humble', '', '2_9', 1), ('ITER', 625,\n 'iter', 'road, trip ', '', '2_9', 1), ('LABOR/2', 626, 'labor',\n 'to slide, slip, glide down', '', '2_9', 1), ('MODEROR', 627, 'moderor',\n 'to manage, direct, guide', '', '2_9', 1), ('NIX', 628, 'nix', 'snow',\n '', '2_9', 1), ('ONVS', 629, 'onus', 'weight, burden', '', '2_9', 1), (\n 'PERVENIO', 630, 'pervenio', 'to arrive', '', '2_9', 1), ('PROGREDIOR',\n 631, 'progredior', 'to go forward, proceed', '', '2_9', 1), (\n 'QVOTIENS/2', 632, 'quotiens', 'as often as', '', '2_9', 1), (\n 'SIMVLAC/2', 633, 'simulac', 'as soon as', '', '2_9', 1), ('SVVS', 634,\n 'suus', 'his, her, its, their', '', '2_10', 2), ('AEDES', 635, 'aedes',\n 'temple; pl. 
dwelling, house', '', '2_10', 1), ('EO/1', 636, 'eo',\n 'to go ', '', '2_10', 1), ('IVCVNDVS', 637, 'iucundus',\n 'pleasant, nice', '', '2_10', 1), ('LABOR/1', 638, 'labor',\n 'labor, toil', '', '2_10', 1), ('LAEDO', 639, 'laedo', 'to harm', '',\n '2_10', 1), ('LIBER/2', 640, 'liber', 'free', '', '2_10', 1), ('LVCRVM',\n 641, 'lucrum', 'profit, gain', '', '2_10', 1), ('MARITIMVS', 642,\n 'maritimus', 'maritime', '', '2_10', 1), ('MODVS', 643, 'modus',\n 'way, method, manner', '', '2_10', 1), ('PAVLISPER', 644, 'paulisper',\n 'for a little while', '', '2_10', 1), ('PECVNIA', 645, 'pecunia',\n 'money', '', '2_10', 1), ('PLACIDVS', 646, 'placidus', 'peaceful, calm',\n '', '2_10', 1), ('POTIVS', 647, 'potius', 'rather', '', '2_10', 1), (\n 'PROSPER', 648, 'prosper', 'fortunate, prosperous', '', '2_10', 1), (\n 'REDDO', 649, 'reddo', 'to give back', '', '2_10', 1), ('SARCINA', 650,\n 'sarcina', 'burden, baggage', '', '2_10', 1), ('SCELESTVS', 651,\n 'scelestus', 'wicked', '', '2_10', 1), ('SEMEL', 652, 'semel', 'once',\n '', '2_10', 1), ('SERENVS', 653, 'serenus', 'calm, clear', '', '2_10', \n 1), ('PARVM/2', 654, 'parum', 'too little', '', '2_11', 2), ('ALTER', \n 655, 'alter', 'the other (of two)', '', '2_11', 1), ('GEMMA', 656,\n 'gemma', 'gem, precious stone', '', '2_11', 1), ('LEGATVS', 657,\n 'legatus', 'ambassador', '', '2_11', 1), ('MAGNIHABEO', 658, 'magni',\n 'to esteem a lot', '', '2_11', 1), ('MINVS', 659, 'minus', 'less', '',\n '2_11', 2), ('NESCIO', 660, 'nescio', 'not to know', '', '2_11', 1), (\n 'NEVTER', 661, 'neuter', 'neither, none (of two)', '', '2_11', 1), (\n 'NVLLVS', 662, 'nullus', 'none', '', '2_11', 1), ('OPERAEPRETIVMEST', \n 663, 'operae', 'it is worthwhile', '', '2_11', 1), ('POPVLVS/1', 664,\n 'populus', 'a people, populace', '', '2_11', 1), ('QVOMODO/1', 665,\n 'quomodo', 'how', '', '2_11', 1), ('SALVTO', 666, 'saluto', 'to greet ',\n '', '2_11', 1), ('SERVVS/1', 667, 'servus', 'slave, servant', '',\n '2_11', 1), ('SOLVS', 668, 'solus', 
'alone, only', '', '2_11', 1), (\n 'SPECTO', 669, 'specto', 'to watch', '', '2_11', 1), ('TACEO', 670,\n 'taceo', 'to be silent, keep quiet', '', '2_11', 1), ('TOTVS', 671,\n 'totus', 'whole, entire', '', '2_11', 1), ('TVRPIS', 672, 'turpis',\n 'shameful, disgraceful', '', '2_11', 1), ('VTER/4', 673, 'uter',\n 'who, which (of two)?', '', '2_11', 1), ('VTOR', 674, 'utor',\n 'to use (w/ abl.)', '', '2_11', 1), ('CVM/3', 675, 'cum', 'when, after',\n '', '2_12', 2), ('INQVIO', 676, 'inquam',\n 'to say (used with direct speech)', '', '2_12', 3), ('TAMEN', 677,\n 'tamen', 'however', '', '2_12', 2), ('CARVS', 678, 'carus', 'dear', '',\n '2_12', 1), ('INSVLA', 679, 'insula', 'island', '', '2_12', 2), (\n 'MORIOR', 680, 'morior', 'to die', '', '2_12', 1), ('NIMIS', 681,\n 'nimis', 'too much', '', '2_12', 1), ('NISI', 682, 'nisi',\n 'if not, unless', '', '2_12', 1), ('OFFICIVM', 683, 'officium', 'duty',\n '', '2_12', 1), ('ORBIS', 684, 'orbis', 'circle', '', '2_12', 1), (\n 'ORBISTERRARVM', 685, 'orbis', 'the earth, the world', '', '2_12', 1),\n ('PROBO', 686, 'probo', 'to approve ', '', '2_12', 1), ('QVAMQVAM/2', \n 687, 'quamquam', 'although', '', '2_12', 1), ('QVAMVIS/1', 688,\n 'quamvis', 'although', '', '2_12', 1), ('QVIA', 689, 'quia', 'because',\n '', '2_12', 1), ('QVIDEM', 690, 'quidem', 'indeed', '', '2_12', 1), (\n 'QVOD/1', 691, 'quod', 'because', '', '2_12', 1), ('SENTENTIA', 692,\n 'sententia', 'opinion, point of view', '', '2_12', 1), ('SORS', 693,\n 'sors', 'lot', '', '2_12', 1), ('SPERO', 694, 'spero', 'to hope', '',\n '2_12', 1), ('SPES', 695, 'spes', 'hope', '', '2_12', 1), ('ATQVE/1', \n 696, 'atque', 'as', '', '2_13', 2), ('ABSENS', 697, 'absens',\n 'away, absent', '', '2_13', 1), ('ABSVM/1', 698, 'absum', 'to be away',\n '', '2_13', 1), ('BENEVOLENTIA', 699, 'benevolentia', 'good will', '',\n '2_13', 1), ('DECLARO', 700, 'declaro',\n 'to demonstrate, show, make known, reveal', '', '2_13', 1), ('IDEM', \n 701, 'idem', 'the same', '', '2_13', 1), ('IPSE', 
702, 'ipse', 'self',\n '', '2_13', 1), ('IRASCOR', 703, 'irascor', 'to be angry at (w/ dat.)',\n '', '2_13', 1), ('ISTE', 704, 'iste', 'that (of yours)', '', '2_13', 1),\n ('MIROR', 705, 'miror', 'to marvel, be surprised at', '', '2_13', 1), (\n 'MVLTITVDO', 706, 'multitudo', 'crowd, throng', '', '2_13', 1), ('NEGO',\n 707, 'nego', 'to deny ', '', '2_13', 1), ('NVMERO/1', 708, 'numero',\n 'to number, count among', '', '2_13', 1), ('OFFENDO', 709, 'offendo',\n ']to happen upon, offend', '', '2_13', 1), ('REDEO/1', 710, 'redeo',\n 'to go back, return', '', '2_13', 1), ('REFERO', 711, 'refero',\n 'to carry back, report', '', '2_13', 1), ('SOCIVS/1', 712, 'socius',\n 'associate, partner, ally', '', '2_13', 1), ('TALIS', 713, 'talis',\n 'such a', '', '2_13', 1), ('TVRRIS', 714, 'turris', 'tower', '', '2_13',\n 1), ('VENIA', 715, 'venia', 'pardon, indulgence, forgiveness', '',\n '2_13', 1), ('VERSOR', 716, 'versor',\n 'to be situated in, be occupied in ', '', '2_13', 1), ('VIRGA', 717,\n 'virga', 'twig, stick', '', '2_13', 1), ('VOLVNTAS', 718, 'voluntas',\n 'will', '', '2_13', 1), ('AFFIRMO', 719, 'affirmo',\n 'to assert, maintain', '', '2_14', 1), ('CIRCVMEO/1', 720, 'circumeo',\n 'to go around', '', '2_14', 1), ('CONTINEO', 721, 'contineo',\n 'to hold, keep together, contain', '', '2_14', 1), ('COTIDIANVS', 722,\n 'cottidianus', 'of every day, daily', '', '2_14', 1), ('ELEMENTVM', 723,\n 'elementum', 'element', '', '2_14', 1), ('ERGO/2', 724, 'ergo',\n 'therefore', '', '2_14', 1), ('GRAVITAS', 725, 'gravitas',\n 'weight, gravity', '', '2_14', 1), ('IMMENSVS', 726, 'immensus',\n 'immeasurable, immense, endless', '', '2_14', 1), ('INFINITVS', 727,\n 'infinitus', 'boundless, unlimited', '', '2_14', 1), ('MAXIME', 728,\n 'maxime', 'most', '', '2_14', 1), ('MEDIVS', 729, 'medius', 'middle',\n '', '2_14', 1), ('MOTVS', 730, 'motus', 'motion, movement', '', '2_14',\n 1), ('MVLTO/2', 731, 'multo', 'by much', '', '2_14', 1), ('NATVRA', 732,\n 'natura', 'nature', '', '2_14', 
1), ('NECESSARIO', 733, 'necessario',\n 'necessarily', '', '2_14', 1), ('PERPERAM', 734, 'perperam',\n 'wrongly, incorrectly', '', '2_14', 1), ('PONDVS', 735, 'pondus',\n 'weight', '', '2_14', 1), ('PRAESERTIM', 736, 'praesertim',\n 'especially', '', '2_14', 1), ('QVIES', 737, 'quies', 'rest, repose',\n '', '2_14', 1), ('VNDIQVE', 738, 'undique',\n 'from all parts, from everywhere', '', '2_14', 1), ('VOLVO', 739,\n 'volvo', 'to turn round', '', '2_14', 1), ('VT/4', 740, 'ut',\n 'that, to, in order to, so that', '', '2_14', 4), ('ANIMADVERTO', 741,\n 'animadverto', 'to notice', '', '2_15', 1), ('APPROPINQVO', 742,\n 'appropinquo', 'to approach (w/ dat or ad + acc.)', '', '2_15', 1), (\n 'CERNO', 743, 'cerno', 'to see, distinguish with the eyes', '', '2_15',\n 1), ('CIRCA/2', 744, 'circa', 'around (w/ acc.)', '', '2_15', 1), (\n 'CLAMO', 745, 'clamo', 'to shout, scream', '', '2_15', 1), ('FINGO', \n 746, 'fingo', 'to imagine, form in the mind', '', '2_15', 1), (\n 'IMPINGO', 747, 'impingo', 'to push, strike, inflict', '', '2_15', 1),\n ('INFLIGO', 748, 'infligo', 'to strike on or against, inflict', '',\n '2_15', 1), ('ITERVM', 749, 'iterum', 'again', '', '2_15', 1), (\n 'OPPIDVM', 750, 'oppidum', 'town', '', '2_15', 1), ('PERCVTIO', 751,\n 'percutio', 'to strike through ', '', '2_15', 1), ('PRAEDITVS', 752,\n 'praeditus', 'endowed with, possessed of (w/ abl.)', '', '2_15', 1), (\n 'REPELLO', 753, 'repello', 'to push back, thrust back', '', '2_15', 1),\n ('RIDEO', 754, 'rideo', 'to laugh', '', '2_15', 1), ('RVMPO', 755,\n 'rumpo', 'to break, tear', '', '2_15', 1), ('SEDES', 756, 'sedes',\n 'seat, abode', '', '2_15', 1), ('SIC', 757, 'sic', 'in such a way', '',\n '2_15', 1), ('SIDVS', 758, 'sidus', 'constellation', '', '2_15', 1), (\n 'TELVM', 759, 'telum', 'spear, javelin', '', '2_15', 1), ('VEHO', 760,\n 'veho', 'to drive, carry', '', '2_15', 1)]\nsection_list = {'1.1': 'start', '1.2': '1.1', '1.3': '1.2', '1.4': '1.3',\n '1.5': '1.4', '1.6': '1.5', '1.7': '1.6', 
'1.8': '1.7', '1.9': '1.8',\n '1.10': '1.9', '1.11': '1.10', '1.12': '1.11', '1.13': '1.12', '1.14':\n '1.13', '1.15': '1.14', '1.16': '1.15', '1.17': '1.16', '1.18': '1.17',\n '1.19': '1.18', '1.20': '1.19', '1.21': '1.20', '2.1': '1.21', '2.2':\n '2.1', '2.3': '2.2', '2.4': '2.3', '2.5': '2.4', '2.6': '2.5', '2.7':\n '2.6', '2.8': '2.7', '2.9': '2.8', '2.10': '2.9', '2.11': '2.10',\n '2.12': '2.11', '2.13': '2.12', '2.14': '2.13', '2.15': '2.14', 'end':\n '2.15', 'start': 'start'}\ntitle = 'Latin for the New Millennium Vols 1 and 2 (Tunberg-Minkova)'\nsection_level = 2\nlanguage = 'Latin'\nbook = text.Text(title, section_words, the_text, section_list,\n section_level, language, True, False)\n",
"step-3": "import text\nnan = ''\nsection_words = {'start': -1, '1.1': 17, '1.2': 38, '1.3': 55, '1.4': 76,\n '1.5': 95, '1.6': 114, '1.7': 133, '1.8': 151, '1.9': 170, '1.10': 190,\n '1.11': 209, '1.12': 233, '1.13': 257, '1.14': 277, '1.15': 299, '1.16':\n 320, '1.17': 341, '1.18': 364, '1.19': 385, '1.20': 405, '1.21': 428,\n '2.1': 451, '2.2': 474, '2.3': 495, '2.4': 521, '2.5': 542, '2.6': 564,\n '2.7': 587, '2.8': 611, '2.9': 633, '2.10': 653, '2.11': 674, '2.12': \n 695, '2.13': 718, '2.14': 740, '2.15': 760, 'end': -2}\nthe_text = [('AGRICOLA', 0, 'agricola', 'farmer', '', '1_1', 1), ('AMBVLO',\n 1, 'ambulo', 'to walk', '', '1_1', 2), ('AMO', 2, 'amo', 'to love', '',\n '1_1', 2), ('AQVA', 3, 'aqua', 'water', '', '1_1', 1), ('ATHLETA', 4,\n 'athleta', 'athlete', '', '1_1', 1), ('BENE', 5, 'bene', 'well', '',\n '1_1', 1), ('CVRO', 6, 'curo', 'to take care for/of', '', '1_1', 2), (\n 'ET/2', 7, 'et', 'and', '', '1_1', 1), ('FILIA', 8, 'filia', 'daughter',\n '', '1_1', 1), ('ITAQVE', 9, 'itaque', 'and so', '', '1_1', 1), ('LVPA',\n 10, 'lupa', 'she–wolf', '', '1_1', 1), ('NAVTA', 11, 'nauta', 'sailor',\n '', '1_1', 1), ('POETA', 12, 'poeta', 'poet', '', '1_1', 1), ('POSTEA',\n 13, 'postea', 'afterwards', '', '1_1', 1), ('PVELLA', 14, 'puella',\n 'girl', '', '1_1', 1), ('ROMA/N', 15, 'Roma', 'Rome', '', '1_1', 1), (\n 'SVM/1', 16, 'sum', 'to be', '', '1_1', 3), ('TERRA', 17, 'terra',\n 'land', '', '1_1', 1), ('AMBVLO', 18, 'ambulo', 'to walk', '', '1_2', 2\n ), ('AMO', 19, 'amo', 'to love', '', '1_2', 2), ('CVRO', 20, 'curo',\n 'to take care for/of', '', '1_2', 2), ('SVM/1', 21, 'sum', 'to be', '',\n '1_2', 3), ('DEBEO', 22, 'debeo', 'ought, must, should; to owe', '',\n '1_2', 1), ('DIV', 23, 'diu', 'for a long time', '', '1_2', 1), ('EGO',\n 24, 'ego', 'I', '', '1_2', 3), ('EXSPECTO', 25, 'exspecto',\n 'to wait for, await, expect', '', '1_2', 1), ('FABVLA/1', 26, 'fabula',\n 'story', '', '1_2', 1), ('FORMA', 27, 'forma', 'form,appearance', '',\n '1_2', 1), 
('HABEO', 28, 'habeo', 'to have', '', '1_2', 1), ('HABITO', \n 29, 'habito', 'to live, dwell', '', '1_2', 1), ('NARRO', 30, 'narro',\n 'to tell', '', '1_2', 1), ('NON', 31, 'non', 'not', '', '1_2', 1), (\n 'NVNC', 32, 'nunc', 'now', '', '1_2', 1), ('PARO/2', 33, 'paro',\n 'to prepare, get ready, design', '', '1_2', 2), ('PATRIA', 34, 'patria',\n 'fatherland', '', '1_2', 1), ('TENEO', 35, 'teneo', 'to hold', '',\n '1_2', 1), ('TV', 36, 'tu', 'you', '', '1_2', 3), ('VIDEO', 37, 'video',\n 'to see', '', '1_2', 1), ('VOCO', 38, 'voco', 'to call', '', '1_2', 1),\n ('EGO', 39, 'ego', 'I', '', '1_3', 3), ('TV', 40, 'tu', 'you', '',\n '1_3', 3), ('AGER', 41, 'ager', 'field', '', '1_3', 1), ('AMICVS/1', 42,\n 'amicus', 'friend', '', '1_3', 1), ('ANIMVS', 43, 'animus',\n 'spirit, soul, mind', '', '1_3', 1), ('CASA', 44, 'casa',\n 'little house, cottage', '', '1_3', 1), ('CVM/2', 45, 'cum',\n 'with (w/ abl.)', '', '1_3', 1), ('DEINDE', 46, 'deinde', 'then', '',\n '1_3', 1), ('DOMVS', 47, 'domus', 'home', '', '1_3', 2), ('FILIVS', 48,\n 'filius', 'son', '', '1_3', 1), ('IN', 49, 'in',\n 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_3', 2), ('PVER',\n 50, 'puer', 'boy', '', '1_3', 1), ('RIVVS', 51, 'rivus',\n 'brook, stream', '', '1_3', 1), ('TIMEO', 52, 'timeo',\n 'to fear, to be afraid', '', '1_3', 1), ('VALDE', 53, 'valde',\n 'very, exceedingly', '', '1_3', 1), ('VIA', 54, 'via', 'road', '',\n '1_3', 1), ('VIR', 55, 'vir', 'man', '', '1_3', 1), ('IN', 56, 'in',\n 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_4', 2), ('AD/2',\n 57, 'ad', 'into, towards, to (w/ acc.)', '', '1_4', 1), ('ARMATVS/2', \n 58, 'armatus', 'armed', '', '1_4', 1), ('AVTEM', 59, 'autem', 'however',\n '', '1_4', 1), ('BELLVM', 60, 'bellum', 'war', '', '1_4', 1), ('BONVS',\n 61, 'bonus', 'good', '', '1_4', 1), ('CASTRA/2', 62, 'castra', 'camp',\n '', '1_4', 1), ('DO', 63, 'do', 'to give', '', '1_4', 1), ('DOLVS', 64,\n 'dolus', 'trickery, deception', '', '1_4', 1), ('EX', 65, 'e',\n 
'from, out of (w/ abl.)', '', '1_4', 1), ('INTRO/2', 66, 'intro',\n 'to enter', '', '1_4', 1), ('IVBEO', 67, 'iubeo',\n 'to order somebody (acc.) to do something (inf.)', '', '1_4', 1), (\n 'IVSTVS', 68, 'iustus', 'legitimate, open, just', '', '1_4', 1), (\n 'MAGNVS', 69, 'magnus', 'large, great, important', '', '1_4', 1), (\n 'MALVS/3', 70, 'malus', 'bad', '', '1_4', 1), ('PRAECLARVS', 71,\n 'praeclarus', 'famous, distinguished', '', '1_4', 1), ('PRAEMIVM', 72,\n 'praemium', 'reward', '', '1_4', 1), ('ROMANVS/A', 73, 'Romanus',\n 'Roman; the Romans (pl.)', '', '1_4', 1), ('SED', 74, 'sed', 'but', '',\n '1_4', 1), ('VENENVM', 75, 'venenum', 'poison', '', '1_4', 1), (\n 'VINCVLVM', 76, 'vinculum', 'chain, fetter', '', '1_4', 1), ('PARO/2', \n 77, 'paro', 'to prepare, get ready, design', '', '1_5', 2), ('AB', 78,\n 'a', 'by, from (w/ abl.)', '', '1_5', 1), ('AVXILIVM', 79, 'auxilium',\n 'help', '', '1_5', 1), ('COGITO', 80, 'cogito', 'to think', '', '1_5', \n 1), ('CONSILIVM', 81, 'consilium', 'plan, advice', '', '1_5', 2), ('DE',\n 82, 'de', 'about, concerning, down from (w/ abl.)', '', '1_5', 1), (\n 'DOLEO', 83, 'doleo', 'to feel pain, to be hurt', '', '1_5', 1), (\n 'EPISTOLA', 84, 'epistula', 'letter', '', '1_5', 1), ('FAMILIA', 85,\n 'familia', 'family, household', '', '1_5', 1), ('GAVDIVM', 86,\n 'gaudium', 'joy', '', '1_5', 1), ('LACRIMA', 87, 'lacrima', 'tear', '',\n '1_5', 1), ('LONGE', 88, 'longe', 'far', '', '1_5', 1), ('LONGVS', 89,\n 'longus', 'long', '', '1_5', 1), ('MISER', 90, 'miser',\n 'wretched, sad, miserable', '', '1_5', 1), ('NAM', 91, 'nam',\n 'for, in fact', '', '1_5', 1), ('NONSOLVM', 92, 'non',\n 'not only…, but also…', '', '1_5', 1), ('PVLCHER', 93, 'pulcher',\n 'beautiful, nice', '', '1_5', 1), ('SEMPER', 94, 'semper', 'always', '',\n '1_5', 1), ('TAMEN', 95, 'tamen', 'however', '', '1_5', 2), ('SVM/1', \n 96, 'sum', 'to be', '', '1_6', 3), ('DOCEO', 97, 'doceo', 'to teach',\n '', '1_6', 1), ('DVM/2', 98, 'dum', 'while', '', '1_6', 1), 
('EXEMPLVM',\n 99, 'exemplum', 'example', '', '1_6', 1), ('FIRMO', 100, 'firmo',\n 'to strengthen', '', '1_6', 1), ('IACEO', 101, 'iaceo',\n 'to lie down, to be inert', '', '1_6', 1), ('IVDICO', 102, 'iudico',\n 'to judge', '', '1_6', 1), ('LIBER/1', 103, 'liber', 'book', '', '1_6',\n 1), ('LITTERA', 104, 'littera',\n 'letter of the alphabet (sing.); literature, letter (pl.)', '', '1_6', \n 1), ('MANEO', 105, 'maneo', 'to remain', '', '1_6', 1), ('MEMORIA', 106,\n 'memoria', 'memory', '', '1_6', 1), ('MVLTVS', 107, 'multus',\n 'much, many', '', '1_6', 1), ('POSSVM/1', 108, 'possum',\n 'to be able, can', '', '1_6', 1), ('PROPTER/2', 109, 'propter',\n 'because of, on account of (w/ acc.)', '', '1_6', 1), ('SAEPE', 110,\n 'saepe', 'often', '', '1_6', 1), ('SERVO', 111, 'servo',\n 'to save, preserve', '', '1_6', 1), ('SOLEO', 112, 'soleo',\n 'to be accustomed (w/ inf.)', '', '1_6', 1), ('TENEBRAE', 113,\n 'tenebrae', 'shadows, darkness (pl.)', '', '1_6', 1), ('VITA', 114,\n 'vita', 'life', '', '1_6', 1), ('AESTIMO', 115, 'aestimo',\n 'to regard, esteem', '', '1_7', 1), ('AESTIMOVNIVSASSIS', 116,\n 'aestimo', 'I do not care a bit ', '', '1_7', 1), ('AMOR', 117, 'amor',\n 'love', '', '1_7', 1), ('DELICIA/1', 118, 'delicia', 'delight, pet', '',\n '1_7', 1), ('DIGITVS', 119, 'digitus', 'finger', '', '1_7', 1), (\n 'DOMINA', 120, 'domina', 'mistress', '', '1_7', 1), ('GREMIVM', 121,\n 'gremium', 'lap', '', '1_7', 1), ('INVIDEO', 122, 'invideo', 'to envy',\n '', '1_7', 1), ('MEVS', 123, 'meus', 'my', '', '1_7', 1), ('OCVLVS', \n 124, 'oculus', 'eye', '', '1_7', 1), ('PASSER', 125, 'passer',\n 'sparrow', '', '1_7', 1), ('PAX', 126, 'pax', 'peace; favor', '', '1_7',\n 1), ('PVTO', 127, 'puto', 'to think', '', '1_7', 1), ('SENEX/1', 128,\n 'senex', 'old man', '', '1_7', 1), ('SESESE', 129, 'se',\n 'him/her/itself', '', '1_7', 1), ('SEVERVS', 130, 'severus', 'serious',\n '', '1_7', 1), ('SOROR', 131, 'soror', 'sister', '', '1_7', 1), (\n 'SVI/1', 132, 'sui', 'him–/her–/itself', 
'', '1_7', 1), ('VERBVM', 133,\n 'verbum', 'word', '', '1_7', 1), ('CONTRA/2', 134, 'contra',\n 'against (w/ acc.)', '', '1_8', 1), ('DECERNO', 135, 'decerno',\n 'to decide, determine (often w/ inf.)', '', '1_8', 1), ('DICO/2', 136,\n 'dico', 'to say', '', '1_8', 1), ('DVX', 137, 'dux', 'leader, general',\n '', '1_8', 1), ('FORTITVDO', 138, 'fortitudo', 'courage', '', '1_8', 1),\n ('HOMO', 139, 'homo', 'man, human being; people (pl.)', '', '1_8', 1),\n ('INTELLIGO', 140, 'intellego', 'to understand', '', '1_8', 1), (\n 'LIBERO', 141, 'libero', 'to free someone (acc.) from something (abl.)',\n '', '1_8', 1), ('MILES', 142, 'miles', 'soldier', '', '1_8', 1), (\n 'NAVIGO', 143, 'navigo', 'to sail, navigate', '', '1_8', 1), (\n 'ORACVLVM', 144, 'oraculum', 'oracle, prophecy', '', '1_8', 1), ('PETO',\n 145, 'peto', 'to seek', '', '1_8', 1), ('REX', 146, 'rex', 'king', '',\n '1_8', 1), ('TANDEM', 147, 'tandem', 'finally', '', '1_8', 1), (\n 'TEMPLVM', 148, 'templum', 'temple', '', '1_8', 1), ('TIMOR', 149,\n 'timor', 'fear', '', '1_8', 1), ('TVM', 150, 'tum',\n 'then, at that time', '', '1_8', 2), ('VINCO', 151, 'vinco',\n 'to conquer', '', '1_8', 1), ('ANIMAL', 152, 'animal', 'animal', '',\n '1_9', 1), ('ARMA', 153, 'arma', 'weapons (pl.)', '', '1_9', 1), (\n 'AVDIO', 154, 'audio', 'to hear, listen', '', '1_9', 1), ('CAPVT', 155,\n 'caput', 'head', '', '1_9', 1), ('CIVIS', 156, 'civis', 'citizen', '',\n '1_9', 1), ('CONSVL', 157, 'consul', 'consul', '', '1_9', 1), ('CORPVS',\n 158, 'corpus', 'body', '', '1_9', 1), ('CREDO', 159, 'credo',\n 'to believe somebody (w/ dat.)', '', '1_9', 1), ('EXEMPLAR', 160,\n 'exemplar', 'example', '', '1_9', 1), ('GERO', 161, 'gero',\n 'to carry; to behave (w/ se)', '', '1_9', 2), ('MARE', 162, 'mare',\n 'sea', '', '1_9', 1), ('MORS', 163, 'mors', 'death', '', '1_9', 1), (\n 'MVLIER', 164, 'mulier', 'woman', '', '1_9', 1), ('ORATIO', 165,\n 'oratio', 'oration, speech', '', '1_9', 1), ('SCIO', 166, 'scio',\n 'to know', '', '1_9', 1), 
('SENTIO', 167, 'sentio', 'to perceive', '',\n '1_9', 1), ('TEMPVS/1', 168, 'tempus', 'time', '', '1_9', 1), ('VENIO',\n 169, 'venio', 'to come', '', '1_9', 1), ('VRBS', 170, 'urbs', 'city',\n '', '1_9', 1), ('ACER/2', 171, 'acer', 'keen, fierce', '', '1_10', 1),\n ('AEDIFICO', 172, 'aedifico', 'to build', '', '1_10', 1), ('CAPIO/2', \n 173, 'capio', 'to take, adopt, capture', '', '1_10', 1), ('CELEBER', \n 174, 'celeber', 'renowned, well–known, crowded', '', '1_10', 1), (\n 'CVPIO', 175, 'cupio', 'to desire, want', '', '1_10', 1), ('DELEO', 176,\n 'deleo', 'to destroy', '', '1_10', 1), ('DEVS', 177, 'deus', 'god', '',\n '1_10', 1), ('DONVM', 178, 'donum', 'gift', '', '1_10', 1), ('EQVVS', \n 179, 'equus', 'horse', '', '1_10', 1), ('FELIX', 180, 'felix',\n 'fortunate, happy', '', '1_10', 1), ('FLAMMA', 181, 'flamma', 'flame',\n '', '1_10', 1), ('FORTIS', 182, 'fortis', 'brave, strong', '', '1_10', \n 1), ('FVGIO', 183, 'fugio', 'to flee, run away', '', '1_10', 1), (\n 'HOSTIS', 184, 'hostis', 'enemy', '', '1_10', 1), ('MOVEO', 185,\n 'moveo', 'to move', '', '1_10', 1), ('NEC/2', 186, 'nec',\n 'nor; and not', '', '1_10', 2), ('NOX', 187, 'nox', 'night', '', '1_10',\n 1), ('PAVCI', 188, 'paucus', 'few', '', '1_10', 1), ('PERICVLVM', 189,\n 'periculum', 'danger', '', '1_10', 1), ('PVGNO', 190, 'pugno',\n 'to fight', '', '1_10', 1), ('AGO', 191, 'ago',\n 'to drive, lead, do, behave', '', '1_11', 1), ('ARDEO', 192, 'ardeo',\n 'to burn, be on fire', '', '1_11', 1), ('CONSPICIO', 193, 'conspicio',\n 'to look at, observe', '', '1_11', 1), ('CRVDELIS', 194, 'crudelis',\n 'cruel', '', '1_11', 1), ('DOLOR', 195, 'dolor', 'grief, pain', '',\n '1_11', 1), ('ITA', 196, 'ita', 'yes', '', '1_11', 2), ('MINIME', 197,\n 'minime', 'No', '', '1_11', 1), ('MITTO', 198, 'mitto', 'to send', '',\n '1_11', 1), ('NE/2', 199, 'ne',\n '(added to the first word of a question', '', '1_11', 1), ('NOVVS', 200,\n 'novus', 'new', '', '1_11', 1), ('PARVM/2', 201, 'parum', 'too little',\n '', '1_11', 
2), ('QVE', 202, 'que', 'and', '', '1_11', 1), ('QVOQVE', \n 203, 'quoque', 'also', '', '1_11', 1), ('REGINA', 204, 'regina',\n 'queen', '', '1_11', 1), ('RELINQVO', 205, 'relinquo', 'to abandon', '',\n '1_11', 1), ('SILVA', 206, 'silva', 'forest', '', '1_11', 1), (\n 'SPELVNCA', 207, 'spelunca', 'cave', '', '1_11', 1), ('TEMPESTAS', 208,\n 'tempestas', 'season', '', '1_11', 1), ('VNA', 209, 'una', 'together',\n '', '1_11', 1), ('BELLVMGERO', 210, 'bellum', 'to wage war', '', '1_12',\n 1), ('CONSVMO', 211, 'consumo', 'to consume', '', '1_12', 1), (\n 'DEXTERA', 212, 'dextera', 'right hand', '', '1_12', 1), ('FACIO', 213,\n 'facio', 'to do, make', '', '1_12', 1), ('IBI', 214, 'ibi', 'there', '',\n '1_12', 1), ('IGNIS', 215, 'ignis', 'fire', '', '1_12', 1), ('INQVIO', \n 216, 'inquam', 'to say (used with direct speech)', '', '1_12', 3), (\n 'IRA', 217, 'ira', 'anger', '', '1_12', 1), ('IS', 218, 'is',\n 's/he/it, this, that', '', '1_12', 1), ('NOMEN', 219, 'nomen', 'name',\n '', '1_12', 1), ('NOS', 220, 'nos', 'we; us', '', '1_12', 1), ('NOSTER',\n 221, 'noster', 'our, ours', '', '1_12', 1), ('OCCIDO/2', 222, 'occido',\n 'to strike down, knock down', '', '1_12', 1), ('OSTENDO', 223,\n 'ostendo', 'to show', '', '1_12', 1), ('PONO', 224, 'pono', 'to place',\n '', '1_12', 1), ('PROPE/2', 225, 'prope', 'near', '', '1_12', 2), (\n 'PROVIRIBVS', 226, 'pro', 'with all one’s might', '', '1_12', 1), (\n 'SIMILIS', 227, 'similis', 'similar', '', '1_12', 1), ('STATIM', 228,\n 'statim', 'immediately', '', '1_12', 1), ('TANTVS', 229, 'tantus',\n 'so much', '', '1_12', 1), ('TVVS', 230, 'tuus', 'your', '', '1_12', 1),\n ('VESTER', 231, 'vester', 'your', '', '1_12', 1), ('VIS', 232, 'vis',\n 'force', '', '1_12', 1), ('VOS', 233, 'vos', 'you', '', '1_12', 1), (\n 'EGO', 234, 'ego', 'I', '', '1_13', 3), ('TV', 235, 'tu', 'you', '',\n '1_13', 3), ('TVM', 236, 'tum', 'then, at that time', '', '1_13', 2), (\n 'ALIVS', 237, 'alius', 'another, other', '', '1_13', 1), ('APVD', 238,\n 
'apud', 'at the house of (w/ acc.)', '', '1_13', 1), ('ATQVE/1', 239,\n 'atque', 'as', '', '1_13', 2), ('DISCEDO/1', 240, 'discedo',\n 'to leave, withdraw, go away', '', '1_13', 1), ('DIVES', 241, 'dives',\n 'rich', '', '1_13', 1), ('DOCTVS', 242, 'doctus', 'learned', '', '1_13',\n 1), ('DVCO', 243, 'duco', 'to lead, take', '', '1_13', 2), ('ENIM/2', \n 244, 'enim', 'for, in fact', '', '1_13', 1), ('IVDEX', 245, 'iudex',\n 'judge', '', '1_13', 1), ('LICET/1', 246, 'licet',\n 'it is allowed, permitted (for someone)(to do something)(w/ dat. and inf.) '\n , '', '1_13', 1), ('NIHIL', 247, 'nihil', 'nothing', '', '1_13', 1), (\n 'NOLO', 248, 'nolo', 'not to want, to be unwilling', '', '1_13', 2), (\n 'OMNIS', 249, 'omnis', 'each, every, all', '', '1_13', 1), ('PRO/1', \n 250, 'pro', 'for, on behalf of (w/ abl.)', '', '1_13', 1), ('QVID', 251,\n 'quid', 'what; why', '', '1_13', 1), ('RESPONDEO', 252, 'respondeo',\n 'to answer', '', '1_13', 1), ('ROGO', 253, 'rogo', 'to ask', '', '1_13',\n 1), ('SVVS', 254, 'suus', 'his, her, its, their', '', '1_13', 2), (\n 'TANTVM/2', 255, 'tantum', 'only', '', '1_13', 1), ('VALE', 256, 'vale',\n 'to greetings! 
farewell!', '', '1_13', 1), ('VALEO', 257, 'valeo',\n 'to be able (w/ inf.); to be in good health', '', '1_13', 3), ('ALBVS',\n 258, 'albus', 'white', '', '1_14', 1), ('ARBOR', 259, 'arbor', 'tree',\n '', '1_14', 1), ('CADO', 260, 'cado', 'to fall', '', '1_14', 1), (\n 'COMEDO/2', 261, 'comedo', 'to eat', '', '1_14', 1), ('CONVENIO', 262,\n 'convenio', 'to meet', '', '1_14', 1), ('FLVO', 263, 'fluo', 'to flow',\n '', '1_14', 1), ('GLADIVS', 264, 'gladius', 'sword', '', '1_14', 1), (\n 'IAM', 265, 'iam', 'already, yet', '', '1_14', 1), ('MOX', 266, 'mox',\n 'soon', '', '1_14', 1), ('ODIVM', 267, 'odium', 'hatred', '', '1_14', 2\n ), ('OS/1', 268, 'os', 'mouth', '', '1_14', 1), ('PARENS/1', 269,\n 'parens', 'parent', '', '1_14', 1), ('PECTVS', 270, 'pectus', 'chest',\n '', '1_14', 1), ('PER', 271, 'per', 'through (w/ acc.)', '', '1_14', 1),\n ('PRIMVS', 272, 'primus', 'first', '', '1_14', 1), ('QVI/1', 273, 'qui',\n 'who, which (rel. pronoun); what? which? (inter. adj.) ', '', '1_14', 2\n ), ('RVBER', 274, 'ruber', 'red', '', '1_14', 1), ('SANGVIS', 275,\n 'sanguis', 'blood', '', '1_14', 1), ('SEPARO/2', 276, 'separo',\n 'to separate, divide', '', '1_14', 1), ('TANGO', 277, 'tango',\n 'to touch', '', '1_14', 1), ('INQVIO', 278, 'inquam',\n 'to say (used with direct speech)', '', '1_15', 3), ('QVI/1', 279,\n 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) ', '',\n '1_15', 2), ('ANTE/2', 280, 'ante', 'in front of (w/ acc.)', '', '1_15',\n 1), ('ARGVMENTVM', 281, 'argumentum', 'proof, indication, argument', '',\n '1_15', 1), ('CVR/1', 282, 'cur', 'why', '', '1_15', 1), ('DIFFICILIS',\n 283, 'difficilis', 'difficult', '', '1_15', 1), ('ECCE', 284, 'ecce',\n 'look here!', '', '1_15', 1), ('ETIAM', 285, 'etiam', 'even, also', '',\n '1_15', 1), ('FORSITAN', 286, 'forsan', 'perhaps', '', '1_15', 1), (\n 'NEGLIGO', 287, 'neglego', 'to neglect', '', '1_15', 1), ('PARVVS/2', \n 288, 'parvus', 'small', '', '1_15', 1), ('QVIS/1', 289, 'quis',\n 'who? which? (inter. 
pronoun)', '', '1_15', 1), ('RVSTICVS/2', 290,\n 'rusticus', 'rural, rustic', '', '1_15', 1), ('SAXVM', 291, 'saxum',\n 'stone, rock', '', '1_15', 1), ('SENECTVS/1', 292, 'senectus',\n 'old age', '', '1_15', 1), ('SICVT/1', 293, 'sicut', 'just as', '',\n '1_15', 1), ('STO', 294, 'sto', 'stand', '', '1_15', 1), ('VBIQVE', 295,\n 'ubique', 'everywhere', '', '1_15', 1), ('VERVS', 296, 'verus',\n 'real, true', '', '1_15', 1), ('VETVSTVS', 297, 'vetustus', 'old', '',\n '1_15', 1), ('VILLA', 298, 'villa', 'estate', '', '1_15', 1), ('VMQVAM',\n 299, 'umquam', 'ever', '', '1_15', 1), ('AVVNCVLVS', 300, 'avunculus',\n 'uncle', '', '1_16', 1), ('CAELVM/1', 301, 'caelum',\n 'sky, heaven, weather', '', '1_16', 1), ('CAVSA', 302, 'causa',\n 'cause, reason', '', '1_16', 1), ('CINIS', 303, 'cinis', 'ash', '',\n '1_16', 1), ('CLADES', 304, 'clades', 'disaster', '', '1_16', 1), (\n 'CLASSIS', 305, 'classis', 'fleet, class (of people)', '', '1_16', 1),\n ('FEMINA', 306, 'femina', 'woman', '', '1_16', 1), ('FVMVS', 307,\n 'fumus', 'smoke', '', '1_16', 1), ('FVNESTVS', 308, 'funestus',\n 'deadly', '', '1_16', 1), ('IGITVR', 309, 'igitur', 'therefore', '',\n '1_16', 1), ('INCENDIVM', 310, 'incendium', 'conflagration, eruption',\n '', '1_16', 1), ('LEGO/2', 311, 'lego', 'to read, choose', '', '1_16', \n 1), ('LITVS/2', 312, 'litus', 'shore', '', '1_16', 1), ('MATER', 313,\n 'mater', 'mother', '', '1_16', 1), ('MONS', 314, 'mons', 'mountain', '',\n '1_16', 1), ('NAVIS', 315, 'navis', 'ship', '', '1_16', 1), ('NVBES', \n 316, 'nubes', 'cloud', '', '1_16', 1), ('NVMQVAM', 317, 'numquam',\n 'never', '', '1_16', 1), ('OPPRIMO', 318, 'opprimo',\n 'to overwhelm, suppress', '', '1_16', 1), ('PARS', 319, 'pars', 'part',\n '', '1_16', 1), ('STVDEO', 320, 'studeo',\n 'to study, be eager for, be interested in (w/ dat.)', '', '1_16', 1), (\n 'DOMVS', 321, 'domus', 'home', '', '1_17', 2), ('ALO', 322, 'alo',\n 'to feed, nourish', '', '1_17', 1), ('AMITTO', 323, 'amitto', 'to lose',\n '', '1_17', 1), 
('CORNV', 324, 'cornu', 'horn', '', '1_17', 1), (\n 'CORRIPIO', 325, 'corripio', 'to seize, occupy, engulf', '', '1_17', 1),\n ('CVRRO', 326, 'curro', 'to run', '', '1_17', 1), ('DEVASTO', 327,\n 'devasto', 'to lay waste', '', '1_17', 1), ('EXSTINGVO', 328,\n 'exstinguo', 'to extinguish', '', '1_17', 1), ('FACILE', 329, 'facile',\n 'easliy', '', '1_17', 1), ('IACIO', 330, 'iacio', 'to throw', '',\n '1_17', 1), ('IMPERATOR', 331, 'imperator', 'general, emperor', '',\n '1_17', 1), ('IMPETVS', 332, 'impetus', 'impetus, force, attack', '',\n '1_17', 1), ('INITIVM', 333, 'initium', 'beginning', '', '1_17', 1), (\n 'IVSSVS', 334, 'iussus', 'order', '', '1_17', 1), ('LOCVS', 335,\n 'locus',\n 'place (sing.); passages of a book (m. pl.); geographical places(n. pl.)',\n '', '1_17', 1), ('MANVS/1', 336, 'manus', 'hand', '', '1_17', 1), (\n 'MVRVS', 337, 'murus', 'wall', '', '1_17', 1), ('SINE', 338, 'sine',\n 'without (w/ abl.)', '', '1_17', 1), ('TEMPTO', 339, 'tempto', 'to try',\n '', '1_17', 1), ('TVMVLTVS', 340, 'tumultus', 'confusion', '', '1_17', \n 1), ('VENTVS', 341, 'ventus', 'wind', '', '1_17', 1), ('DVCO', 342,\n 'duco', 'to lead, take', '', '1_18', 2), ('ITA', 343, 'ita', 'yes', '',\n '1_18', 2), ('COLO/2', 344, 'colo', 'to worship, cultivate', '', '1_18',\n 1), ('CVM/3', 345, 'cum', 'when, after', '', '1_18', 2), ('DEA', 346,\n 'dea', 'goddess', '', '1_18', 1), ('DIES', 347, 'dies', 'day', '',\n '1_18', 1), ('DORMIO', 348, 'dormio', 'to sleep', '', '1_18', 1), (\n 'EXCITO/1', 349, 'excito', 'to awaken, rouse, stir up', '', '1_18', 1),\n ('EXCLAMO', 350, 'exclamo', 'to exclaim', '', '1_18', 1), ('FACIES', \n 351, 'facies', 'face', '', '1_18', 1), ('FATVM', 352, 'fatum',\n 'fate, destiny', '', '1_18', 1), ('MARITVS/1', 353, 'maritus',\n 'husband', '', '1_18', 1), ('MERIDIES', 354, 'meridies', 'midday', '',\n '1_18', 2), ('MVLTVM/2', 355, 'multum', 'much', '', '1_18', 1), (\n 'OCCVLTO', 356, 'occulto', 'to hide', '', '1_18', 1), ('PATER', 357,\n 'pater', 'father', 
'', '1_18', 2), ('POST/2', 358, 'post',\n 'after (w/ acc.)', '', '1_18', 1), ('QVAERO', 359, 'quaero',\n 'to look for, search', '', '1_18', 1), ('RES', 360, 'res',\n 'thing, matter', '', '1_18', 1), ('SI/2', 361, 'si', 'if', '', '1_18', \n 1), ('SOMNVS', 362, 'somnus', 'sleep', '', '1_18', 1), ('TAM', 363,\n 'tam', 'so ', '', '1_18', 1), ('VXOR', 364, 'uxor', 'wife', '', '1_18',\n 2), ('BARBA', 365, 'barba', 'beard', '', '1_19', 1), ('CARO/1', 366,\n 'caro', 'meat, flesh', '', '1_19', 1), ('CELERITER', 367, 'celeriter',\n 'swiftly', '', '1_19', 1), ('COQVO', 368, 'coquo', 'to cook', '',\n '1_19', 1), ('CRESCO', 369, 'cresco', 'to grow', '', '1_19', 1), (\n 'FEROX', 370, 'ferox', 'fierce, ferocious', '', '1_19', 1), ('FORIS/2',\n 371, 'foris', 'outside, in the open', '', '1_19', 1), ('HERBA', 372,\n 'herba', 'plant, vegetation', '', '1_19', 1), ('HIC/1', 373, 'hic',\n 'this', '', '1_19', 1), ('INTER', 374, 'inter',\n 'between, among (w/ acc.)', '', '1_19', 1), ('PELLIS', 375, 'pellis',\n 'skin, hide', '', '1_19', 1), ('POSTQVAM', 376, 'postquam', 'after', '',\n '1_19', 1), ('PROELIVM', 377, 'proelium', 'battle, combat', '', '1_19',\n 1), ('SANO', 378, 'sano', 'to heal', '', '1_19', 1), ('SEDEO', 379,\n 'sedeo', 'to sit', '', '1_19', 1), ('TERO', 380, 'tero',\n 'to wear out, rub', '', '1_19', 1), ('TERRIBILIS', 381, 'terribilis',\n 'terrifying', '', '1_19', 1), ('VESTIMENTVM', 382, 'vestimentum',\n 'garment, clothes (pl.)', '', '1_19', 1), ('VIVO', 383, 'vivo',\n 'to live', '', '1_19', 1), ('VVLNERO', 384, 'vulnero', 'to wound', '',\n '1_19', 1), ('VVLNVS', 385, 'vulnus', 'wound', '', '1_19', 1), (\n 'ABVNDO', 386, 'abundo', 'to abound with (w/ abl.)', '', '1_20', 1), (\n 'ADOLESCENS/2', 387, 'adulescens', 'young man, young lady', '', '1_20',\n 1), ('AEQVVS', 388, 'aequus', 'even', '', '1_20', 1), ('COR', 389,\n 'cor', 'heart', '', '1_20', 1), ('DELECTO', 390, 'delecto',\n 'to delight, please', '', '1_20', 1), ('DIVINVS/2', 391, 'divinus',\n 'divine', '', '1_20', 1), 
('EGEO', 392, 'egeo',\n 'to lack something (abl.)', '', '1_20', 1), ('FVR', 393, 'fur', 'thief',\n '', '1_20', 1), ('FVRTVM', 394, 'furtum', 'theft', '', '1_20', 1), (\n 'HVMANVS', 395, 'humanus', 'human', '', '1_20', 1), ('ILLE', 396,\n 'ille', 'that', '', '1_20', 1), ('INIQVITAS', 397, 'iniquitas',\n 'injustice', '', '1_20', 1), ('LEX', 398, 'lex', 'law', '', '1_20', 1),\n ('LVDO', 399, 'ludo', 'to play', '', '1_20', 1), ('NOCTV', 400, 'noctu',\n 'during the night', '', '1_20', 1), ('PAENE', 401, 'paene', 'almost',\n '', '1_20', 1), ('PAVPER', 402, 'pauper', 'poor', '', '1_20', 1), (\n 'PLENVS', 403, 'plenus', 'full of (w/ gen. or abl.)', '', '1_20', 1), (\n 'POMVM', 404, 'pomum', 'fruit', '', '1_20', 1), ('PVNIO', 405, 'punio',\n 'to punish', '', '1_20', 1), ('ACCIPIO', 406, 'accipio',\n 'to accept, receive', '', '1_21', 1), ('ACCVSO', 407, 'accuso',\n 'to accuse someone (acc.) of something (gen.)', '', '1_21', 1), (\n 'ALIENVS/2', 408, 'alienus',\n 'foreign to, inconsistent with (w/ a/ab and abl.)', '', '1_21', 1), (\n 'AXIS', 409, 'axis', 'axle, axis', '', '1_21', 1), ('CIRCVM/2', 410,\n 'circum', 'around (w/ acc.)', '', '1_21', 1), ('CONSTANTIA', 411,\n 'constantia', 'constancy', '', '1_21', 1), ('DESCENDO', 412, 'descendo',\n 'to descend', '', '1_21', 1), ('DIVITIAE', 413, 'divitia',\n 'wealth, riches (pl.)', '', '1_21', 1), ('ERIPIO', 414, 'eripio',\n 'to snatch away', '', '1_21', 1), ('ERRO/2', 415, 'erro',\n 'to wander, make a mistake', '', '1_21', 1), ('EXTERNVS', 416,\n 'externus', 'outward, external', '', '1_21', 1), ('FORTVNA', 417,\n 'fortuna', 'fortune, the goddess Fortune', '', '1_21', 1), ('FVTVRVS', \n 418, 'futurus', 'about to be (from sum)', '', '1_21', 1), ('HONOR', 419,\n 'honor', 'honor, public office or distinction', '', '1_21', 1), (\n 'MVTO/2', 420, 'muto', 'to change', '', '1_21', 1), ('POSSIDEO', 421,\n 'possideo', 'to possess', '', '1_21', 1), ('PROCERTO', 422, 'pro',\n 'for certain, for sure', '', '1_21', 1), ('RECIPIO', 423, 
'recipio',\n 'to take back', '', '1_21', 2), ('REPREHENDO', 424, 'reprehendo',\n 'to blame, rebuke', '', '1_21', 1), ('ROTA', 425, 'rota', 'wheel', '',\n '1_21', 1), ('TOLLO', 426, 'tollo', 'to lift up, raise; to destroy', '',\n '1_21', 1), ('VERSO', 427, 'verso', 'to turn', '', '1_21', 1), ('VLLVS',\n 428, 'ullus', 'any', '', '1_21', 1), ('CONSILIVM', 429, 'consilium',\n 'plan, advice', '', '2_1', 2), ('MERIDIES', 430, 'meridies', 'midday',\n '', '2_1', 2), ('PROPE/2', 431, 'prope', 'near', '', '2_1', 2), (\n 'ASPICIO', 432, 'aspicio', 'to look at, catch a glimpse of', '', '2_1',\n 1), ('ETET', 433, 'et', 'both…and…', '', '2_1', 1), ('GENS', 434,\n 'gens', 'tribe, population', '', '2_1', 1), ('GIGNO', 435, 'gigno',\n 'to give birth, produce', '', '2_1', 1), ('HODIE', 436, 'hodie',\n 'today', '', '2_1', 1), ('INCOLA', 437, 'incola', 'inhabitant', '',\n '2_1', 1), ('INSVLA', 438, 'insula', 'island', '', '2_1', 2), (\n 'INVENIO', 439, 'invenio', 'to come upon, find', '', '2_1', 1), ('MOS',\n 440, 'mos', 'custom, habit; morals (pl.)', '', '2_1', 1), ('MVNDVS/1', \n 441, 'mundus', 'world', '', '2_1', 1), ('NE/4', 442, 'ne',\n 'that not, not to, lest ', '', '2_1', 3), ('OCCVPO/2', 443, 'occupo',\n 'to occupy', '', '2_1', 1), ('ORTVS', 444, 'ortus',\n 'origin, beginning, raising', '', '2_1', 1), ('PISCIS', 445, 'piscis',\n 'a fish', '', '2_1', 1), ('PROCVL', 446, 'procul', 'far, far away', '',\n '2_1', 1), ('PROMITTO', 447, 'promitto', 'to promise', '', '2_1', 1), (\n 'SEPTENTRIONALIS', 448, 'septentrionalis', 'northern', '', '2_1', 1), (\n 'SITVS/2', 449, 'situs', 'located, situated', '', '2_1', 1), ('SOL', \n 450, 'sol', 'sun', '', '2_1', 1), ('VTINAM', 451, 'utinam', 'if only',\n '', '2_1', 2), ('GERO', 452, 'gero', 'to carry; to behave (w/ se)', '',\n '2_2', 2), ('ODIVM', 453, 'odium', 'hatred', '', '2_2', 2), ('VALEO', \n 454, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_2',\n 3), ('ALTVS', 455, 'altus', 'tall, deep', '', '2_2', 1), ('ANNVS', 456,\n 
'annus', 'year', '', '2_2', 1), ('ARGENTVM', 457, 'argentum', 'silver',\n '', '2_2', 1), ('AVRVM', 458, 'aurum', 'gold', '', '2_2', 1), ('BREVIS',\n 459, 'brevis', 'short', '', '2_2', 1), ('CLARVS', 460, 'clarus',\n 'clear, distinguished', '', '2_2', 1), ('CVSTOS', 461, 'custos',\n 'guard', '', '2_2', 1), ('EQVES', 462, 'eques', 'horseman', '', '2_2', \n 1), ('FINIS', 463, 'finis', 'end', '', '2_2', 1), ('GRAVIS', 464,\n 'gravis', 'serious, heavy', '', '2_2', 1), ('INTERDVM', 465, 'interdum',\n 'sometimes', '', '2_2', 1), ('LIS', 466, 'lis', 'dispute, quarrel', '',\n '2_2', 1), ('MANE/2', 467, 'mane', 'in the morning', '', '2_2', 1), (\n 'ODIOHABEO', 468, 'odio', 'to hate somebody', '', '2_2', 1), ('SINO', \n 469, 'sino', 'to allow somebody (acc.) to do something (inf.)', '',\n '2_2', 1), ('VEL/1', 470, 'vel', 'or', '', '2_2', 1), ('VESTIS', 471,\n 'vestis', 'clothes, attire', '', '2_2', 1), ('VOX', 472, 'vox', 'voice',\n '', '2_2', 1), ('VT/4', 473, 'ut', 'that, to, in order to, so that', '',\n '2_2', 4), ('VVLTVS', 474, 'vultus', 'face', '', '2_2', 1), ('VXOR', \n 475, 'uxor', 'wife', '', '2_3', 2), ('AT/2', 476, 'at', 'but', '',\n '2_3', 1), ('CONIVX', 477, 'coniunx', 'spouse', '', '2_3', 1), (\n 'DISCIPVLA', 478, 'discipula', 'student', '', '2_3', 1), ('DISCO', 479,\n 'disco', 'to learn', '', '2_3', 1), ('DOMINVS', 480, 'dominus',\n 'master, lord', '', '2_3', 1), ('FAMA', 481, 'fama',\n 'fame, name, reputation', '', '2_3', 1), ('FRATER', 482, 'frater',\n 'brother', '', '2_3', 1), ('IMPROBVS', 483, 'improbus', 'wicked, bad',\n '', '2_3', 1), ('IVNGO', 484, 'iungo', 'to join', '', '2_3', 1), (\n 'MAGISTER', 485, 'magister', 'teacher', '', '2_3', 1), ('MATRIMONIVM', \n 486, 'matrimonium', 'marriage', '', '2_3', 1), ('NE/4', 487, 'ne',\n 'that not, not to, lest ', '', '2_3', 3), ('NVSQVAM', 488, 'nusquam',\n 'nowhere', '', '2_3', 1), ('PARIO/2', 489, 'pario', 'to give birth to',\n '', '2_3', 1), ('PERDO', 490, 'perdo', 'to lose, waste', '', '2_3', 1),\n ('SALVS', 
491, 'salus', 'health, welfare', '', '2_3', 1), (\n 'SALVTEMDICERE', 492, 'salutem',\n 'to greet (customary opening to letter) ', '', '2_3', 1), ('SCRIBO', \n 493, 'scribo', 'to write', '', '2_3', 1), ('VT/4', 494, 'ut',\n 'that, to, in order to, so that', '', '2_3', 4), ('VXOREMDEDVCERE', 495,\n 'uxorem', 'to marry a woman, to take as a wife', '', '2_3', 1), (\n 'NEC/2', 496, 'nec', 'nor; and not', '', '2_4', 2), ('RECIPIO', 497,\n 'recipio', 'to take back', '', '2_4', 2), ('AGMEN', 498, 'agmen',\n 'marching column', '', '2_4', 1), ('APERIO', 499, 'aperio', 'to open',\n '', '2_4', 1), ('COEPIO', 500, 'coepi', 'to begin (w/ inf.)', '', '2_4',\n 1), ('DEFENDO', 501, 'defendo', 'to defend', '', '2_4', 1), ('EDO/1', \n 502, 'edo', 'to produce, give forth', '', '2_4', 1), ('EXTRA/2', 503,\n 'extra', 'outside of (w/ acc.)', '', '2_4', 1), ('FVRO', 504, 'furo',\n 'to rage, be insane', '', '2_4', 1), ('INGENS', 505, 'ingens', 'huge',\n '', '2_4', 1), ('INVADO/2', 506, 'invado', 'to burst in', '', '2_4', 1),\n ('LIGNEVS', 507, 'ligneus', 'made of wood', '', '2_4', 1), ('NEQVENEC',\n 508, 'neque', 'neither..nor…', '', '2_4', 1), ('PARCO', 509, 'parco',\n 'to spare somebody/thing (w/ dat.)', '', '2_4', 1), ('PONS', 510,\n 'pons', 'bridge', '', '2_4', 1), ('PORTA', 511, 'porta', 'gate', '',\n '2_4', 1), ('PRIMO', 512, 'primo', 'at first', '', '2_4', 1), ('QVAM/1',\n 513, 'quam', 'than (w/ comp. words)', '', '2_4', 2), ('QVANTVS/1', 514,\n 'quantus', 'how great, how much (inter. or rel. 
adj.)', '', '2_4', 1),\n ('RESISTO', 515, 'resisto', 'to resist (w/ dat.)', '', '2_4', 1), (\n 'SIMVL/1', 516, 'simul', 'at the same time', '', '2_4', 1), ('TVTVS', \n 517, 'tutus', 'safe', '', '2_4', 1), ('VACVVS', 518, 'vacuus',\n 'empty of (w/ abl.)', '', '2_4', 1), ('VALEO', 519, 'valeo',\n 'to be able (w/ inf.); to be in good health', '', '2_4', 3), ('VICTOR',\n 520, 'victor', 'victor', '', '2_4', 1), ('VTINAM', 521, 'utinam',\n 'if only', '', '2_4', 2), ('BIBO/2', 522, 'bibo', 'to drink', '', '2_5',\n 1), ('CARMEN/1', 523, 'carmen', 'song, poem', '', '2_5', 1), ('CIBVS', \n 524, 'cibus', 'food', '', '2_5', 1), ('DVLCIS', 525, 'dulcis', 'sweet',\n '', '2_5', 1), ('FLVMEN', 526, 'flumen', 'river', '', '2_5', 1), (\n 'IMMEMOR', 527, 'immemor', 'forgetful of (w/ gen.)', '', '2_5', 1), (\n 'IOCVS', 528, 'iocus', 'joke', '', '2_5', 1), ('IVVENTVS', 529,\n 'iuventus', 'youth', '', '2_5', 1), ('LEVIS/1', 530, 'levis', 'light',\n '', '2_5', 1), ('MENS', 531, 'mens', 'mind, spirit', '', '2_5', 1), (\n 'NE/4', 532, 'ne', 'that not, not to, lest ', '', '2_5', 3), ('ORO', \n 533, 'oro', 'to ask, entreat', '', '2_5', 1), ('PLACEO', 534, 'placeo',\n 'to please, be agreeable to somebody', '', '2_5', 1), ('PROXIMVS/2', \n 535, 'proximus', 'nearest', '', '2_5', 1), ('TAMQVAM/2', 536, 'tam',\n 'so…as…', '', '2_5', 1), ('VEHEMENS', 537, 'vehemens',\n 'violent, vehement', '', '2_5', 1), ('VETVS', 538, 'vetus', 'old', '',\n '2_5', 1), ('VINVM', 539, 'vinum', 'wine', '', '2_5', 1), ('VIRTVS', \n 540, 'virtus', 'courage, virtue', '', '2_5', 1), ('VITIVM', 541,\n 'vitium', 'vice', '', '2_5', 1), ('VT/4', 542, 'ut',\n 'that, to, in order to, so that', '', '2_5', 4), ('PATER', 543, 'pater',\n 'father', '', '2_6', 2), ('DECIPIO', 544, 'decipio', 'to deceive', '',\n '2_6', 1), ('DILIGO/3', 545, 'diligo', 'to love, esteem highly', '',\n '2_6', 1), ('DVO', 546, 'duo', 'two', '', '2_6', 1), ('EXERCITVS/1', \n 547, 'exercitus', 'army', '', '2_6', 1), ('FIDELIS/2', 548, 'fidelis',\n 
'faithful, loyal', '', '2_6', 1), ('HERES', 549, 'heres', 'heir', '',\n '2_6', 1), ('IMPERIVM', 550, 'imperium', 'rule, empire, power', '',\n '2_6', 1), ('INOPIA', 551, 'inopia', 'helplessness, want', '', '2_6', 1\n ), ('LAVDO', 552, 'laudo', 'to praise', '', '2_6', 1), ('NECESSEEST', \n 553, 'necesse',\n 'it is necessary for someone (dat.) to do something (inf.)', '', '2_6',\n 1), ('NEMO', 554, 'nemo', 'no one', '', '2_6', 1), ('PAVLO', 555,\n 'paulo', 'a little bit, to a small extent', '', '2_6', 1), ('QVAM/1', \n 556, 'quam', 'than (w/ comp. words)', '', '2_6', 2), ('QVANTVM/3', 557,\n 'quantum', 'to what extent, how much', '', '2_6', 1), ('RESTITVO', 558,\n 'restituo', 'to restore', '', '2_6', 1), ('SATIS/2', 559, 'satis',\n 'enough, sufficiently', '', '2_6', 1), ('SECVNDVS/1', 560, 'secundus',\n 'second', '', '2_6', 1), ('TERTIVS', 561, 'tertius', 'third', '', '2_6',\n 1), ('TRES', 562, 'tres', 'three', '', '2_6', 1), ('TRISTIS', 563,\n 'tristis', 'sad', '', '2_6', 1), ('VEHEMENTER', 564, 'vehementer',\n 'strongly, vehemently', '', '2_6', 1), ('NOLO', 565, 'nolo',\n 'not to want, to be unwilling', '', '2_7', 2), ('AETAS', 566, 'aetas',\n 'age', '', '2_7', 1), ('FIDES/2', 567, 'fides', 'faith', '', '2_7', 1),\n ('FVNDO/2', 568, 'fundo', 'to pour', '', '2_7', 1), ('GLORIA', 569,\n 'gloria', 'glory', '', '2_7', 1), ('LIBERTAS', 570, 'libertas',\n 'freedom', '', '2_7', 1), ('LVMEN', 571, 'lumen', 'light', '', '2_7', 1\n ), ('MALO', 572, 'malo', 'to prefer', '', '2_7', 1), ('ORNATVS/1', 573,\n 'ornatus', 'adorned, ornate, elaborate', '', '2_7', 1), ('OTIVM', 574,\n 'otium', 'leisure, free time', '', '2_7', 1), ('POTENS', 575, 'potens',\n 'powerful', '', '2_7', 1), ('PVBLICVS/2', 576, 'publicus', 'common', '',\n '2_7', 1), ('QVALIS/1', 577, 'qualis', 'what sort of? (inter. 
adj.)',\n '', '2_7', 1), ('RESPVBLICA', 578, 'res', 'state', '', '2_7', 1), (\n 'STVDIOSVS', 579, 'studiosus', 'fond of (w/ gen.)', '', '2_7', 1), (\n 'TAMQVAM/1', 580, 'tamquam', 'as', '', '2_7', 1), ('TOT', 581, 'tot',\n 'so many', '', '2_7', 1), ('TRAHO', 582, 'traho', 'to drag, draw', '',\n '2_7', 1), ('VBI/1', 583, 'ubi', 'where? (inter. adv)', '', '2_7', 1),\n ('VIX', 584, 'vix', 'hardly', '', '2_7', 1), ('VNVS', 585, 'unus',\n 'one', '', '2_7', 1), ('VOLO/3', 586, 'volo', 'to want', '', '2_7', 1),\n ('VTILIS', 587, 'utilis', 'useful', '', '2_7', 1), ('ADHVC', 588,\n 'adhuc', 'still, up to this time', '', '2_8', 1), ('ANTIQVVS', 589,\n 'antiquus', 'ancient', '', '2_8', 1), ('ARS', 590, 'ars',\n 'science, art, skill', '', '2_8', 1), ('DOMINOR', 591, 'dominor',\n 'to dominate, rule', '', '2_8', 1), ('HORTOR', 592, 'hortor',\n 'to exhort, urge', '', '2_8', 1), ('LATINE', 593, 'Latine', 'in Latin',\n '', '2_8', 1), ('LATINVS/A', 594, 'Latinus',\n 'Latin, pertaining to Latin', '', '2_8', 1), ('LINGVA', 595, 'lingua',\n 'language; tongue', '', '2_8', 1), ('LOQVOR', 596, 'loquor', 'to speak',\n '', '2_8', 1), ('MAGIS/2', 597, 'magis', 'more', '', '2_8', 1), (\n 'MAIOR', 598, 'maior', 'bigger; greater', '', '2_8', 1), ('MAXIMVS', \n 599, 'maximus', 'greatest', '', '2_8', 1), ('MELIOR', 600, 'melior',\n 'better', '', '2_8', 1), ('MINIMVS', 601, 'minimus', 'smallest', '',\n '2_8', 1), ('MINVS', 602, 'minus', 'less', '', '2_8', 2), ('OPTIMVS', \n 603, 'optimus', 'best', '', '2_8', 1), ('PARTIOR', 604, 'partior',\n 'to divide, distribute', '', '2_8', 1), ('PATIOR', 605, 'patior',\n 'to endure, tolerate, suffer', '', '2_8', 1), ('PEIOR', 606, 'peior',\n 'worse', '', '2_8', 1), ('PESSIMVS', 607, 'pessimus', 'worst', '',\n '2_8', 1), ('PLVRIMVS', 608, 'plurimus', 'most', '', '2_8', 1), ('PLVS',\n 609, 'plus', 'more', '', '2_8', 1), ('SEQVOR', 610, 'sequor',\n 'to follow', '', '2_8', 1), ('VEREOR', 611, 'vereor',\n 'to fear, respect', '', '2_8', 1), ('ADDO', 612, 'addo', 'to 
add', '',\n '2_9', 1), ('AVRIS', 613, 'auris', 'ear', '', '2_9', 1), ('CONOR', 614,\n 'conor', 'to try', '', '2_9', 1), ('DEMITTO', 615, 'demitto',\n 'to send down', '', '2_9', 1), ('DISSIMILIS', 616, 'dissimilis',\n 'dissimilar ', '', '2_9', 1), ('FACILIS', 617, 'facilis', 'easy', '',\n '2_9', 1), ('FERO', 618, 'fero', 'to carry, bear', '', '2_9', 1), (\n 'FIO', 619, 'fio', 'to be made, become; (impersonally) to happen', '',\n '2_9', 1), ('FRIGVS', 620, 'frigus', 'cold', '', '2_9', 1), ('GENVS/1',\n 621, 'genus', 'kind', '', '2_9', 1), ('GLACIES', 622, 'glacies', 'ice',\n '', '2_9', 1), ('GRACILIS', 623, 'gracilis', 'slender', '', '2_9', 1),\n ('HVMILIS', 624, 'humilis', 'low, humble', '', '2_9', 1), ('ITER', 625,\n 'iter', 'road, trip ', '', '2_9', 1), ('LABOR/2', 626, 'labor',\n 'to slide, slip, glide down', '', '2_9', 1), ('MODEROR', 627, 'moderor',\n 'to manage, direct, guide', '', '2_9', 1), ('NIX', 628, 'nix', 'snow',\n '', '2_9', 1), ('ONVS', 629, 'onus', 'weight, burden', '', '2_9', 1), (\n 'PERVENIO', 630, 'pervenio', 'to arrive', '', '2_9', 1), ('PROGREDIOR',\n 631, 'progredior', 'to go forward, proceed', '', '2_9', 1), (\n 'QVOTIENS/2', 632, 'quotiens', 'as often as', '', '2_9', 1), (\n 'SIMVLAC/2', 633, 'simulac', 'as soon as', '', '2_9', 1), ('SVVS', 634,\n 'suus', 'his, her, its, their', '', '2_10', 2), ('AEDES', 635, 'aedes',\n 'temple; pl. 
dwelling, house', '', '2_10', 1), ('EO/1', 636, 'eo',\n 'to go ', '', '2_10', 1), ('IVCVNDVS', 637, 'iucundus',\n 'pleasant, nice', '', '2_10', 1), ('LABOR/1', 638, 'labor',\n 'labor, toil', '', '2_10', 1), ('LAEDO', 639, 'laedo', 'to harm', '',\n '2_10', 1), ('LIBER/2', 640, 'liber', 'free', '', '2_10', 1), ('LVCRVM',\n 641, 'lucrum', 'profit, gain', '', '2_10', 1), ('MARITIMVS', 642,\n 'maritimus', 'maritime', '', '2_10', 1), ('MODVS', 643, 'modus',\n 'way, method, manner', '', '2_10', 1), ('PAVLISPER', 644, 'paulisper',\n 'for a little while', '', '2_10', 1), ('PECVNIA', 645, 'pecunia',\n 'money', '', '2_10', 1), ('PLACIDVS', 646, 'placidus', 'peaceful, calm',\n '', '2_10', 1), ('POTIVS', 647, 'potius', 'rather', '', '2_10', 1), (\n 'PROSPER', 648, 'prosper', 'fortunate, prosperous', '', '2_10', 1), (\n 'REDDO', 649, 'reddo', 'to give back', '', '2_10', 1), ('SARCINA', 650,\n 'sarcina', 'burden, baggage', '', '2_10', 1), ('SCELESTVS', 651,\n 'scelestus', 'wicked', '', '2_10', 1), ('SEMEL', 652, 'semel', 'once',\n '', '2_10', 1), ('SERENVS', 653, 'serenus', 'calm, clear', '', '2_10', \n 1), ('PARVM/2', 654, 'parum', 'too little', '', '2_11', 2), ('ALTER', \n 655, 'alter', 'the other (of two)', '', '2_11', 1), ('GEMMA', 656,\n 'gemma', 'gem, precious stone', '', '2_11', 1), ('LEGATVS', 657,\n 'legatus', 'ambassador', '', '2_11', 1), ('MAGNIHABEO', 658, 'magni',\n 'to esteem a lot', '', '2_11', 1), ('MINVS', 659, 'minus', 'less', '',\n '2_11', 2), ('NESCIO', 660, 'nescio', 'not to know', '', '2_11', 1), (\n 'NEVTER', 661, 'neuter', 'neither, none (of two)', '', '2_11', 1), (\n 'NVLLVS', 662, 'nullus', 'none', '', '2_11', 1), ('OPERAEPRETIVMEST', \n 663, 'operae', 'it is worthwhile', '', '2_11', 1), ('POPVLVS/1', 664,\n 'populus', 'a people, populace', '', '2_11', 1), ('QVOMODO/1', 665,\n 'quomodo', 'how', '', '2_11', 1), ('SALVTO', 666, 'saluto', 'to greet ',\n '', '2_11', 1), ('SERVVS/1', 667, 'servus', 'slave, servant', '',\n '2_11', 1), ('SOLVS', 668, 'solus', 
'alone, only', '', '2_11', 1), (\n 'SPECTO', 669, 'specto', 'to watch', '', '2_11', 1), ('TACEO', 670,\n 'taceo', 'to be silent, keep quiet', '', '2_11', 1), ('TOTVS', 671,\n 'totus', 'whole, entire', '', '2_11', 1), ('TVRPIS', 672, 'turpis',\n 'shameful, disgraceful', '', '2_11', 1), ('VTER/4', 673, 'uter',\n 'who, which (of two)?', '', '2_11', 1), ('VTOR', 674, 'utor',\n 'to use (w/ abl.)', '', '2_11', 1), ('CVM/3', 675, 'cum', 'when, after',\n '', '2_12', 2), ('INQVIO', 676, 'inquam',\n 'to say (used with direct speech)', '', '2_12', 3), ('TAMEN', 677,\n 'tamen', 'however', '', '2_12', 2), ('CARVS', 678, 'carus', 'dear', '',\n '2_12', 1), ('INSVLA', 679, 'insula', 'island', '', '2_12', 2), (\n 'MORIOR', 680, 'morior', 'to die', '', '2_12', 1), ('NIMIS', 681,\n 'nimis', 'too much', '', '2_12', 1), ('NISI', 682, 'nisi',\n 'if not, unless', '', '2_12', 1), ('OFFICIVM', 683, 'officium', 'duty',\n '', '2_12', 1), ('ORBIS', 684, 'orbis', 'circle', '', '2_12', 1), (\n 'ORBISTERRARVM', 685, 'orbis', 'the earth, the world', '', '2_12', 1),\n ('PROBO', 686, 'probo', 'to approve ', '', '2_12', 1), ('QVAMQVAM/2', \n 687, 'quamquam', 'although', '', '2_12', 1), ('QVAMVIS/1', 688,\n 'quamvis', 'although', '', '2_12', 1), ('QVIA', 689, 'quia', 'because',\n '', '2_12', 1), ('QVIDEM', 690, 'quidem', 'indeed', '', '2_12', 1), (\n 'QVOD/1', 691, 'quod', 'because', '', '2_12', 1), ('SENTENTIA', 692,\n 'sententia', 'opinion, point of view', '', '2_12', 1), ('SORS', 693,\n 'sors', 'lot', '', '2_12', 1), ('SPERO', 694, 'spero', 'to hope', '',\n '2_12', 1), ('SPES', 695, 'spes', 'hope', '', '2_12', 1), ('ATQVE/1', \n 696, 'atque', 'as', '', '2_13', 2), ('ABSENS', 697, 'absens',\n 'away, absent', '', '2_13', 1), ('ABSVM/1', 698, 'absum', 'to be away',\n '', '2_13', 1), ('BENEVOLENTIA', 699, 'benevolentia', 'good will', '',\n '2_13', 1), ('DECLARO', 700, 'declaro',\n 'to demonstrate, show, make known, reveal', '', '2_13', 1), ('IDEM', \n 701, 'idem', 'the same', '', '2_13', 1), ('IPSE', 
702, 'ipse', 'self',\n '', '2_13', 1), ('IRASCOR', 703, 'irascor', 'to be angry at (w/ dat.)',\n '', '2_13', 1), ('ISTE', 704, 'iste', 'that (of yours)', '', '2_13', 1),\n ('MIROR', 705, 'miror', 'to marvel, be surprised at', '', '2_13', 1), (\n 'MVLTITVDO', 706, 'multitudo', 'crowd, throng', '', '2_13', 1), ('NEGO',\n 707, 'nego', 'to deny ', '', '2_13', 1), ('NVMERO/1', 708, 'numero',\n 'to number, count among', '', '2_13', 1), ('OFFENDO', 709, 'offendo',\n ']to happen upon, offend', '', '2_13', 1), ('REDEO/1', 710, 'redeo',\n 'to go back, return', '', '2_13', 1), ('REFERO', 711, 'refero',\n 'to carry back, report', '', '2_13', 1), ('SOCIVS/1', 712, 'socius',\n 'associate, partner, ally', '', '2_13', 1), ('TALIS', 713, 'talis',\n 'such a', '', '2_13', 1), ('TVRRIS', 714, 'turris', 'tower', '', '2_13',\n 1), ('VENIA', 715, 'venia', 'pardon, indulgence, forgiveness', '',\n '2_13', 1), ('VERSOR', 716, 'versor',\n 'to be situated in, be occupied in ', '', '2_13', 1), ('VIRGA', 717,\n 'virga', 'twig, stick', '', '2_13', 1), ('VOLVNTAS', 718, 'voluntas',\n 'will', '', '2_13', 1), ('AFFIRMO', 719, 'affirmo',\n 'to assert, maintain', '', '2_14', 1), ('CIRCVMEO/1', 720, 'circumeo',\n 'to go around', '', '2_14', 1), ('CONTINEO', 721, 'contineo',\n 'to hold, keep together, contain', '', '2_14', 1), ('COTIDIANVS', 722,\n 'cottidianus', 'of every day, daily', '', '2_14', 1), ('ELEMENTVM', 723,\n 'elementum', 'element', '', '2_14', 1), ('ERGO/2', 724, 'ergo',\n 'therefore', '', '2_14', 1), ('GRAVITAS', 725, 'gravitas',\n 'weight, gravity', '', '2_14', 1), ('IMMENSVS', 726, 'immensus',\n 'immeasurable, immense, endless', '', '2_14', 1), ('INFINITVS', 727,\n 'infinitus', 'boundless, unlimited', '', '2_14', 1), ('MAXIME', 728,\n 'maxime', 'most', '', '2_14', 1), ('MEDIVS', 729, 'medius', 'middle',\n '', '2_14', 1), ('MOTVS', 730, 'motus', 'motion, movement', '', '2_14',\n 1), ('MVLTO/2', 731, 'multo', 'by much', '', '2_14', 1), ('NATVRA', 732,\n 'natura', 'nature', '', '2_14', 
1), ('NECESSARIO', 733, 'necessario',\n 'necessarily', '', '2_14', 1), ('PERPERAM', 734, 'perperam',\n 'wrongly, incorrectly', '', '2_14', 1), ('PONDVS', 735, 'pondus',\n 'weight', '', '2_14', 1), ('PRAESERTIM', 736, 'praesertim',\n 'especially', '', '2_14', 1), ('QVIES', 737, 'quies', 'rest, repose',\n '', '2_14', 1), ('VNDIQVE', 738, 'undique',\n 'from all parts, from everywhere', '', '2_14', 1), ('VOLVO', 739,\n 'volvo', 'to turn round', '', '2_14', 1), ('VT/4', 740, 'ut',\n 'that, to, in order to, so that', '', '2_14', 4), ('ANIMADVERTO', 741,\n 'animadverto', 'to notice', '', '2_15', 1), ('APPROPINQVO', 742,\n 'appropinquo', 'to approach (w/ dat or ad + acc.)', '', '2_15', 1), (\n 'CERNO', 743, 'cerno', 'to see, distinguish with the eyes', '', '2_15',\n 1), ('CIRCA/2', 744, 'circa', 'around (w/ acc.)', '', '2_15', 1), (\n 'CLAMO', 745, 'clamo', 'to shout, scream', '', '2_15', 1), ('FINGO', \n 746, 'fingo', 'to imagine, form in the mind', '', '2_15', 1), (\n 'IMPINGO', 747, 'impingo', 'to push, strike, inflict', '', '2_15', 1),\n ('INFLIGO', 748, 'infligo', 'to strike on or against, inflict', '',\n '2_15', 1), ('ITERVM', 749, 'iterum', 'again', '', '2_15', 1), (\n 'OPPIDVM', 750, 'oppidum', 'town', '', '2_15', 1), ('PERCVTIO', 751,\n 'percutio', 'to strike through ', '', '2_15', 1), ('PRAEDITVS', 752,\n 'praeditus', 'endowed with, possessed of (w/ abl.)', '', '2_15', 1), (\n 'REPELLO', 753, 'repello', 'to push back, thrust back', '', '2_15', 1),\n ('RIDEO', 754, 'rideo', 'to laugh', '', '2_15', 1), ('RVMPO', 755,\n 'rumpo', 'to break, tear', '', '2_15', 1), ('SEDES', 756, 'sedes',\n 'seat, abode', '', '2_15', 1), ('SIC', 757, 'sic', 'in such a way', '',\n '2_15', 1), ('SIDVS', 758, 'sidus', 'constellation', '', '2_15', 1), (\n 'TELVM', 759, 'telum', 'spear, javelin', '', '2_15', 1), ('VEHO', 760,\n 'veho', 'to drive, carry', '', '2_15', 1)]\nsection_list = {'1.1': 'start', '1.2': '1.1', '1.3': '1.2', '1.4': '1.3',\n '1.5': '1.4', '1.6': '1.5', '1.7': '1.6', 
'1.8': '1.7', '1.9': '1.8',\n '1.10': '1.9', '1.11': '1.10', '1.12': '1.11', '1.13': '1.12', '1.14':\n '1.13', '1.15': '1.14', '1.16': '1.15', '1.17': '1.16', '1.18': '1.17',\n '1.19': '1.18', '1.20': '1.19', '1.21': '1.20', '2.1': '1.21', '2.2':\n '2.1', '2.3': '2.2', '2.4': '2.3', '2.5': '2.4', '2.6': '2.5', '2.7':\n '2.6', '2.8': '2.7', '2.9': '2.8', '2.10': '2.9', '2.11': '2.10',\n '2.12': '2.11', '2.13': '2.12', '2.14': '2.13', '2.15': '2.14', 'end':\n '2.15', 'start': 'start'}\ntitle = 'Latin for the New Millennium Vols 1 and 2 (Tunberg-Minkova)'\nsection_level = 2\nlanguage = 'Latin'\nbook = text.Text(title, section_words, the_text, section_list,\n section_level, language, True, False)\n",
"step-4": "import text\nnan=\"\"\nsection_words = {'start': -1, '1.1': 17, '1.2': 38, '1.3': 55, '1.4': 76, '1.5': 95, '1.6': 114, '1.7': 133, '1.8': 151, '1.9': 170, '1.10': 190, '1.11': 209, '1.12': 233, '1.13': 257, '1.14': 277, '1.15': 299, '1.16': 320, '1.17': 341, '1.18': 364, '1.19': 385, '1.20': 405, '1.21': 428, '2.1': 451, '2.2': 474, '2.3': 495, '2.4': 521, '2.5': 542, '2.6': 564, '2.7': 587, '2.8': 611, '2.9': 633, '2.10': 653, '2.11': 674, '2.12': 695, '2.13': 718, '2.14': 740, '2.15': 760, 'end': -2}\nthe_text = [('AGRICOLA', 0, 'agricola', 'farmer', '', '1_1', 1), ('AMBVLO', 1, 'ambulo', 'to walk', '', '1_1', 2), ('AMO', 2, 'amo', 'to love', '', '1_1', 2), ('AQVA', 3, 'aqua', 'water', '', '1_1', 1), ('ATHLETA', 4, 'athleta', 'athlete', '', '1_1', 1), ('BENE', 5, 'bene', 'well', '', '1_1', 1), ('CVRO', 6, 'curo', 'to take care for/of', '', '1_1', 2), ('ET/2', 7, 'et', 'and', '', '1_1', 1), ('FILIA', 8, 'filia', 'daughter', '', '1_1', 1), ('ITAQVE', 9, 'itaque', 'and so', '', '1_1', 1), ('LVPA', 10, 'lupa', 'she–wolf', '', '1_1', 1), ('NAVTA', 11, 'nauta', 'sailor', '', '1_1', 1), ('POETA', 12, 'poeta', 'poet', '', '1_1', 1), ('POSTEA', 13, 'postea', 'afterwards', '', '1_1', 1), ('PVELLA', 14, 'puella', 'girl', '', '1_1', 1), ('ROMA/N', 15, 'Roma', 'Rome', '', '1_1', 1), ('SVM/1', 16, 'sum', 'to be', '', '1_1', 3), ('TERRA', 17, 'terra', 'land', '', '1_1', 1), ('AMBVLO', 18, 'ambulo', 'to walk', '', '1_2', 2), ('AMO', 19, 'amo', 'to love', '', '1_2', 2), ('CVRO', 20, 'curo', 'to take care for/of', '', '1_2', 2), ('SVM/1', 21, 'sum', 'to be', '', '1_2', 3), ('DEBEO', 22, 'debeo', 'ought, must, should; to owe', '', '1_2', 1), ('DIV', 23, 'diu', 'for a long time', '', '1_2', 1), ('EGO', 24, 'ego', 'I', '', '1_2', 3), ('EXSPECTO', 25, 'exspecto', 'to wait for, await, expect', '', '1_2', 1), ('FABVLA/1', 26, 'fabula', 'story', '', '1_2', 1), ('FORMA', 27, 'forma', 'form,appearance', '', '1_2', 1), ('HABEO', 28, 'habeo', 'to have', '', '1_2', 1), ('HABITO', 
29, 'habito', 'to live, dwell', '', '1_2', 1), ('NARRO', 30, 'narro', 'to tell', '', '1_2', 1), ('NON', 31, 'non', 'not', '', '1_2', 1), ('NVNC', 32, 'nunc', 'now', '', '1_2', 1), ('PARO/2', 33, 'paro', 'to prepare, get ready, design', '', '1_2', 2), ('PATRIA', 34, 'patria', 'fatherland', '', '1_2', 1), ('TENEO', 35, 'teneo', 'to hold', '', '1_2', 1), ('TV', 36, 'tu', 'you', '', '1_2', 3), ('VIDEO', 37, 'video', 'to see', '', '1_2', 1), ('VOCO', 38, 'voco', 'to call', '', '1_2', 1), ('EGO', 39, 'ego', 'I', '', '1_3', 3), ('TV', 40, 'tu', 'you', '', '1_3', 3), ('AGER', 41, 'ager', 'field', '', '1_3', 1), ('AMICVS/1', 42, 'amicus', 'friend', '', '1_3', 1), ('ANIMVS', 43, 'animus', 'spirit, soul, mind', '', '1_3', 1), ('CASA', 44, 'casa', 'little house, cottage', '', '1_3', 1), ('CVM/2', 45, 'cum', 'with (w/ abl.)', '', '1_3', 1), ('DEINDE', 46, 'deinde', 'then', '', '1_3', 1), ('DOMVS', 47, 'domus', 'home', '', '1_3', 2), ('FILIVS', 48, 'filius', 'son', '', '1_3', 1), ('IN', 49, 'in', 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_3', 2), ('PVER', 50, 'puer', 'boy', '', '1_3', 1), ('RIVVS', 51, 'rivus', 'brook, stream', '', '1_3', 1), ('TIMEO', 52, 'timeo', 'to fear, to be afraid', '', '1_3', 1), ('VALDE', 53, 'valde', 'very, exceedingly', '', '1_3', 1), ('VIA', 54, 'via', 'road', '', '1_3', 1), ('VIR', 55, 'vir', 'man', '', '1_3', 1), ('IN', 56, 'in', 'in, on (w/ abl.); into, to, against (w/ acc.)', '', '1_4', 2), ('AD/2', 57, 'ad', 'into, towards, to (w/ acc.)', '', '1_4', 1), ('ARMATVS/2', 58, 'armatus', 'armed', '', '1_4', 1), ('AVTEM', 59, 'autem', 'however', '', '1_4', 1), ('BELLVM', 60, 'bellum', 'war', '', '1_4', 1), ('BONVS', 61, 'bonus', 'good', '', '1_4', 1), ('CASTRA/2', 62, 'castra', 'camp', '', '1_4', 1), ('DO', 63, 'do', 'to give', '', '1_4', 1), ('DOLVS', 64, 'dolus', 'trickery, deception', '', '1_4', 1), ('EX', 65, 'e', 'from, out of (w/ abl.)', '', '1_4', 1), ('INTRO/2', 66, 'intro', 'to enter', '', '1_4', 1), ('IVBEO', 67, 'iubeo', 'to 
order somebody (acc.) to do something (inf.)', '', '1_4', 1), ('IVSTVS', 68, 'iustus', 'legitimate, open, just', '', '1_4', 1), ('MAGNVS', 69, 'magnus', 'large, great, important', '', '1_4', 1), ('MALVS/3', 70, 'malus', 'bad', '', '1_4', 1), ('PRAECLARVS', 71, 'praeclarus', 'famous, distinguished', '', '1_4', 1), ('PRAEMIVM', 72, 'praemium', 'reward', '', '1_4', 1), ('ROMANVS/A', 73, 'Romanus', 'Roman; the Romans (pl.)', '', '1_4', 1), ('SED', 74, 'sed', 'but', '', '1_4', 1), ('VENENVM', 75, 'venenum', 'poison', '', '1_4', 1), ('VINCVLVM', 76, 'vinculum', 'chain, fetter', '', '1_4', 1), ('PARO/2', 77, 'paro', 'to prepare, get ready, design', '', '1_5', 2), ('AB', 78, 'a', 'by, from (w/ abl.)', '', '1_5', 1), ('AVXILIVM', 79, 'auxilium', 'help', '', '1_5', 1), ('COGITO', 80, 'cogito', 'to think', '', '1_5', 1), ('CONSILIVM', 81, 'consilium', 'plan, advice', '', '1_5', 2), ('DE', 82, 'de', 'about, concerning, down from (w/ abl.)', '', '1_5', 1), ('DOLEO', 83, 'doleo', 'to feel pain, to be hurt', '', '1_5', 1), ('EPISTOLA', 84, 'epistula', 'letter', '', '1_5', 1), ('FAMILIA', 85, 'familia', 'family, household', '', '1_5', 1), ('GAVDIVM', 86, 'gaudium', 'joy', '', '1_5', 1), ('LACRIMA', 87, 'lacrima', 'tear', '', '1_5', 1), ('LONGE', 88, 'longe', 'far', '', '1_5', 1), ('LONGVS', 89, 'longus', 'long', '', '1_5', 1), ('MISER', 90, 'miser', 'wretched, sad, miserable', '', '1_5', 1), ('NAM', 91, 'nam', 'for, in fact', '', '1_5', 1), ('NONSOLVM', 92, 'non', 'not only…, but also…', '', '1_5', 1), ('PVLCHER', 93, 'pulcher', 'beautiful, nice', '', '1_5', 1), ('SEMPER', 94, 'semper', 'always', '', '1_5', 1), ('TAMEN', 95, 'tamen', 'however', '', '1_5', 2), ('SVM/1', 96, 'sum', 'to be', '', '1_6', 3), ('DOCEO', 97, 'doceo', 'to teach', '', '1_6', 1), ('DVM/2', 98, 'dum', 'while', '', '1_6', 1), ('EXEMPLVM', 99, 'exemplum', 'example', '', '1_6', 1), ('FIRMO', 100, 'firmo', 'to strengthen', '', '1_6', 1), ('IACEO', 101, 'iaceo', 'to lie down, to be inert', '', '1_6', 1), 
('IVDICO', 102, 'iudico', 'to judge', '', '1_6', 1), ('LIBER/1', 103, 'liber', 'book', '', '1_6', 1), ('LITTERA', 104, 'littera', 'letter of the alphabet (sing.); literature, letter (pl.)', '', '1_6', 1), ('MANEO', 105, 'maneo', 'to remain', '', '1_6', 1), ('MEMORIA', 106, 'memoria', 'memory', '', '1_6', 1), ('MVLTVS', 107, 'multus', 'much, many', '', '1_6', 1), ('POSSVM/1', 108, 'possum', 'to be able, can', '', '1_6', 1), ('PROPTER/2', 109, 'propter', 'because of, on account of (w/ acc.)', '', '1_6', 1), ('SAEPE', 110, 'saepe', 'often', '', '1_6', 1), ('SERVO', 111, 'servo', 'to save, preserve', '', '1_6', 1), ('SOLEO', 112, 'soleo', 'to be accustomed (w/ inf.)', '', '1_6', 1), ('TENEBRAE', 113, 'tenebrae', 'shadows, darkness (pl.)', '', '1_6', 1), ('VITA', 114, 'vita', 'life', '', '1_6', 1), ('AESTIMO', 115, 'aestimo', 'to regard, esteem', '', '1_7', 1), ('AESTIMOVNIVSASSIS', 116, 'aestimo', 'I do not care a bit ', '', '1_7', 1), ('AMOR', 117, 'amor', 'love', '', '1_7', 1), ('DELICIA/1', 118, 'delicia', 'delight, pet', '', '1_7', 1), ('DIGITVS', 119, 'digitus', 'finger', '', '1_7', 1), ('DOMINA', 120, 'domina', 'mistress', '', '1_7', 1), ('GREMIVM', 121, 'gremium', 'lap', '', '1_7', 1), ('INVIDEO', 122, 'invideo', 'to envy', '', '1_7', 1), ('MEVS', 123, 'meus', 'my', '', '1_7', 1), ('OCVLVS', 124, 'oculus', 'eye', '', '1_7', 1), ('PASSER', 125, 'passer', 'sparrow', '', '1_7', 1), ('PAX', 126, 'pax', 'peace; favor', '', '1_7', 1), ('PVTO', 127, 'puto', 'to think', '', '1_7', 1), ('SENEX/1', 128, 'senex', 'old man', '', '1_7', 1), ('SESESE', 129, 'se', 'him/her/itself', '', '1_7', 1), ('SEVERVS', 130, 'severus', 'serious', '', '1_7', 1), ('SOROR', 131, 'soror', 'sister', '', '1_7', 1), ('SVI/1', 132, 'sui', 'him–/her–/itself', '', '1_7', 1), ('VERBVM', 133, 'verbum', 'word', '', '1_7', 1), ('CONTRA/2', 134, 'contra', 'against (w/ acc.)', '', '1_8', 1), ('DECERNO', 135, 'decerno', 'to decide, determine (often w/ inf.)', '', '1_8', 1), ('DICO/2', 136, 'dico', 'to 
say', '', '1_8', 1), ('DVX', 137, 'dux', 'leader, general', '', '1_8', 1), ('FORTITVDO', 138, 'fortitudo', 'courage', '', '1_8', 1), ('HOMO', 139, 'homo', 'man, human being; people (pl.)', '', '1_8', 1), ('INTELLIGO', 140, 'intellego', 'to understand', '', '1_8', 1), ('LIBERO', 141, 'libero', 'to free someone (acc.) from something (abl.)', '', '1_8', 1), ('MILES', 142, 'miles', 'soldier', '', '1_8', 1), ('NAVIGO', 143, 'navigo', 'to sail, navigate', '', '1_8', 1), ('ORACVLVM', 144, 'oraculum', 'oracle, prophecy', '', '1_8', 1), ('PETO', 145, 'peto', 'to seek', '', '1_8', 1), ('REX', 146, 'rex', 'king', '', '1_8', 1), ('TANDEM', 147, 'tandem', 'finally', '', '1_8', 1), ('TEMPLVM', 148, 'templum', 'temple', '', '1_8', 1), ('TIMOR', 149, 'timor', 'fear', '', '1_8', 1), ('TVM', 150, 'tum', 'then, at that time', '', '1_8', 2), ('VINCO', 151, 'vinco', 'to conquer', '', '1_8', 1), ('ANIMAL', 152, 'animal', 'animal', '', '1_9', 1), ('ARMA', 153, 'arma', 'weapons (pl.)', '', '1_9', 1), ('AVDIO', 154, 'audio', 'to hear, listen', '', '1_9', 1), ('CAPVT', 155, 'caput', 'head', '', '1_9', 1), ('CIVIS', 156, 'civis', 'citizen', '', '1_9', 1), ('CONSVL', 157, 'consul', 'consul', '', '1_9', 1), ('CORPVS', 158, 'corpus', 'body', '', '1_9', 1), ('CREDO', 159, 'credo', 'to believe somebody (w/ dat.)', '', '1_9', 1), ('EXEMPLAR', 160, 'exemplar', 'example', '', '1_9', 1), ('GERO', 161, 'gero', 'to carry; to behave (w/ se)', '', '1_9', 2), ('MARE', 162, 'mare', 'sea', '', '1_9', 1), ('MORS', 163, 'mors', 'death', '', '1_9', 1), ('MVLIER', 164, 'mulier', 'woman', '', '1_9', 1), ('ORATIO', 165, 'oratio', 'oration, speech', '', '1_9', 1), ('SCIO', 166, 'scio', 'to know', '', '1_9', 1), ('SENTIO', 167, 'sentio', 'to perceive', '', '1_9', 1), ('TEMPVS/1', 168, 'tempus', 'time', '', '1_9', 1), ('VENIO', 169, 'venio', 'to come', '', '1_9', 1), ('VRBS', 170, 'urbs', 'city', '', '1_9', 1), ('ACER/2', 171, 'acer', 'keen, fierce', '', '1_10', 1), ('AEDIFICO', 172, 'aedifico', 'to build', '', 
'1_10', 1), ('CAPIO/2', 173, 'capio', 'to take, adopt, capture', '', '1_10', 1), ('CELEBER', 174, 'celeber', 'renowned, well–known, crowded', '', '1_10', 1), ('CVPIO', 175, 'cupio', 'to desire, want', '', '1_10', 1), ('DELEO', 176, 'deleo', 'to destroy', '', '1_10', 1), ('DEVS', 177, 'deus', 'god', '', '1_10', 1), ('DONVM', 178, 'donum', 'gift', '', '1_10', 1), ('EQVVS', 179, 'equus', 'horse', '', '1_10', 1), ('FELIX', 180, 'felix', 'fortunate, happy', '', '1_10', 1), ('FLAMMA', 181, 'flamma', 'flame', '', '1_10', 1), ('FORTIS', 182, 'fortis', 'brave, strong', '', '1_10', 1), ('FVGIO', 183, 'fugio', 'to flee, run away', '', '1_10', 1), ('HOSTIS', 184, 'hostis', 'enemy', '', '1_10', 1), ('MOVEO', 185, 'moveo', 'to move', '', '1_10', 1), ('NEC/2', 186, 'nec', 'nor; and not', '', '1_10', 2), ('NOX', 187, 'nox', 'night', '', '1_10', 1), ('PAVCI', 188, 'paucus', 'few', '', '1_10', 1), ('PERICVLVM', 189, 'periculum', 'danger', '', '1_10', 1), ('PVGNO', 190, 'pugno', 'to fight', '', '1_10', 1), ('AGO', 191, 'ago', 'to drive, lead, do, behave', '', '1_11', 1), ('ARDEO', 192, 'ardeo', 'to burn, be on fire', '', '1_11', 1), ('CONSPICIO', 193, 'conspicio', 'to look at, observe', '', '1_11', 1), ('CRVDELIS', 194, 'crudelis', 'cruel', '', '1_11', 1), ('DOLOR', 195, 'dolor', 'grief, pain', '', '1_11', 1), ('ITA', 196, 'ita', 'yes', '', '1_11', 2), ('MINIME', 197, 'minime', 'No', '', '1_11', 1), ('MITTO', 198, 'mitto', 'to send', '', '1_11', 1), ('NE/2', 199, 'ne', '(added to the first word of a question', '', '1_11', 1), ('NOVVS', 200, 'novus', 'new', '', '1_11', 1), ('PARVM/2', 201, 'parum', 'too little', '', '1_11', 2), ('QVE', 202, 'que', 'and', '', '1_11', 1), ('QVOQVE', 203, 'quoque', 'also', '', '1_11', 1), ('REGINA', 204, 'regina', 'queen', '', '1_11', 1), ('RELINQVO', 205, 'relinquo', 'to abandon', '', '1_11', 1), ('SILVA', 206, 'silva', 'forest', '', '1_11', 1), ('SPELVNCA', 207, 'spelunca', 'cave', '', '1_11', 1), ('TEMPESTAS', 208, 'tempestas', 'season', '', '1_11', 
1), ('VNA', 209, 'una', 'together', '', '1_11', 1), ('BELLVMGERO', 210, 'bellum', 'to wage war', '', '1_12', 1), ('CONSVMO', 211, 'consumo', 'to consume', '', '1_12', 1), ('DEXTERA', 212, 'dextera', 'right hand', '', '1_12', 1), ('FACIO', 213, 'facio', 'to do, make', '', '1_12', 1), ('IBI', 214, 'ibi', 'there', '', '1_12', 1), ('IGNIS', 215, 'ignis', 'fire', '', '1_12', 1), ('INQVIO', 216, 'inquam', 'to say (used with direct speech)', '', '1_12', 3), ('IRA', 217, 'ira', 'anger', '', '1_12', 1), ('IS', 218, 'is', 's/he/it, this, that', '', '1_12', 1), ('NOMEN', 219, 'nomen', 'name', '', '1_12', 1), ('NOS', 220, 'nos', 'we; us', '', '1_12', 1), ('NOSTER', 221, 'noster', 'our, ours', '', '1_12', 1), ('OCCIDO/2', 222, 'occido', 'to strike down, knock down', '', '1_12', 1), ('OSTENDO', 223, 'ostendo', 'to show', '', '1_12', 1), ('PONO', 224, 'pono', 'to place', '', '1_12', 1), ('PROPE/2', 225, 'prope', 'near', '', '1_12', 2), ('PROVIRIBVS', 226, 'pro', 'with all one’s might', '', '1_12', 1), ('SIMILIS', 227, 'similis', 'similar', '', '1_12', 1), ('STATIM', 228, 'statim', 'immediately', '', '1_12', 1), ('TANTVS', 229, 'tantus', 'so much', '', '1_12', 1), ('TVVS', 230, 'tuus', 'your', '', '1_12', 1), ('VESTER', 231, 'vester', 'your', '', '1_12', 1), ('VIS', 232, 'vis', 'force', '', '1_12', 1), ('VOS', 233, 'vos', 'you', '', '1_12', 1), ('EGO', 234, 'ego', 'I', '', '1_13', 3), ('TV', 235, 'tu', 'you', '', '1_13', 3), ('TVM', 236, 'tum', 'then, at that time', '', '1_13', 2), ('ALIVS', 237, 'alius', 'another, other', '', '1_13', 1), ('APVD', 238, 'apud', 'at the house of (w/ acc.)', '', '1_13', 1), ('ATQVE/1', 239, 'atque', 'as', '', '1_13', 2), ('DISCEDO/1', 240, 'discedo', 'to leave, withdraw, go away', '', '1_13', 1), ('DIVES', 241, 'dives', 'rich', '', '1_13', 1), ('DOCTVS', 242, 'doctus', 'learned', '', '1_13', 1), ('DVCO', 243, 'duco', 'to lead, take', '', '1_13', 2), ('ENIM/2', 244, 'enim', 'for, in fact', '', '1_13', 1), ('IVDEX', 245, 'iudex', 'judge', '', '1_13', 
1), ('LICET/1', 246, 'licet', 'it is allowed, permitted (for someone)(to do something)(w/ dat. and inf.) ', '', '1_13', 1), ('NIHIL', 247, 'nihil', 'nothing', '', '1_13', 1), ('NOLO', 248, 'nolo', 'not to want, to be unwilling', '', '1_13', 2), ('OMNIS', 249, 'omnis', 'each, every, all', '', '1_13', 1), ('PRO/1', 250, 'pro', 'for, on behalf of (w/ abl.)', '', '1_13', 1), ('QVID', 251, 'quid', 'what; why', '', '1_13', 1), ('RESPONDEO', 252, 'respondeo', 'to answer', '', '1_13', 1), ('ROGO', 253, 'rogo', 'to ask', '', '1_13', 1), ('SVVS', 254, 'suus', 'his, her, its, their', '', '1_13', 2), ('TANTVM/2', 255, 'tantum', 'only', '', '1_13', 1), ('VALE', 256, 'vale', 'to greetings! farewell!', '', '1_13', 1), ('VALEO', 257, 'valeo', 'to be able (w/ inf.); to be in good health', '', '1_13', 3), ('ALBVS', 258, 'albus', 'white', '', '1_14', 1), ('ARBOR', 259, 'arbor', 'tree', '', '1_14', 1), ('CADO', 260, 'cado', 'to fall', '', '1_14', 1), ('COMEDO/2', 261, 'comedo', 'to eat', '', '1_14', 1), ('CONVENIO', 262, 'convenio', 'to meet', '', '1_14', 1), ('FLVO', 263, 'fluo', 'to flow', '', '1_14', 1), ('GLADIVS', 264, 'gladius', 'sword', '', '1_14', 1), ('IAM', 265, 'iam', 'already, yet', '', '1_14', 1), ('MOX', 266, 'mox', 'soon', '', '1_14', 1), ('ODIVM', 267, 'odium', 'hatred', '', '1_14', 2), ('OS/1', 268, 'os', 'mouth', '', '1_14', 1), ('PARENS/1', 269, 'parens', 'parent', '', '1_14', 1), ('PECTVS', 270, 'pectus', 'chest', '', '1_14', 1), ('PER', 271, 'per', 'through (w/ acc.)', '', '1_14', 1), ('PRIMVS', 272, 'primus', 'first', '', '1_14', 1), ('QVI/1', 273, 'qui', 'who, which (rel. pronoun); what? which? (inter. adj.) ', '', '1_14', 2), ('RVBER', 274, 'ruber', 'red', '', '1_14', 1), ('SANGVIS', 275, 'sanguis', 'blood', '', '1_14', 1), ('SEPARO/2', 276, 'separo', 'to separate, divide', '', '1_14', 1), ('TANGO', 277, 'tango', 'to touch', '', '1_14', 1), ('INQVIO', 278, 'inquam', 'to say (used with direct speech)', '', '1_15', 3), ('QVI/1', 279, 'qui', 'who, which (rel. 
pronoun); what? which? (inter. adj.) ', '', '1_15', 2), ('ANTE/2', 280, 'ante', 'in front of (w/ acc.)', '', '1_15', 1), ('ARGVMENTVM', 281, 'argumentum', 'proof, indication, argument', '', '1_15', 1), ('CVR/1', 282, 'cur', 'why', '', '1_15', 1), ('DIFFICILIS', 283, 'difficilis', 'difficult', '', '1_15', 1), ('ECCE', 284, 'ecce', 'look here!', '', '1_15', 1), ('ETIAM', 285, 'etiam', 'even, also', '', '1_15', 1), ('FORSITAN', 286, 'forsan', 'perhaps', '', '1_15', 1), ('NEGLIGO', 287, 'neglego', 'to neglect', '', '1_15', 1), ('PARVVS/2', 288, 'parvus', 'small', '', '1_15', 1), ('QVIS/1', 289, 'quis', 'who? which? (inter. pronoun)', '', '1_15', 1), ('RVSTICVS/2', 290, 'rusticus', 'rural, rustic', '', '1_15', 1), ('SAXVM', 291, 'saxum', 'stone, rock', '', '1_15', 1), ('SENECTVS/1', 292, 'senectus', 'old age', '', '1_15', 1), ('SICVT/1', 293, 'sicut', 'just as', '', '1_15', 1), ('STO', 294, 'sto', 'stand', '', '1_15', 1), ('VBIQVE', 295, 'ubique', 'everywhere', '', '1_15', 1), ('VERVS', 296, 'verus', 'real, true', '', '1_15', 1), ('VETVSTVS', 297, 'vetustus', 'old', '', '1_15', 1), ('VILLA', 298, 'villa', 'estate', '', '1_15', 1), ('VMQVAM', 299, 'umquam', 'ever', '', '1_15', 1), ('AVVNCVLVS', 300, 'avunculus', 'uncle', '', '1_16', 1), ('CAELVM/1', 301, 'caelum', 'sky, heaven, weather', '', '1_16', 1), ('CAVSA', 302, 'causa', 'cause, reason', '', '1_16', 1), ('CINIS', 303, 'cinis', 'ash', '', '1_16', 1), ('CLADES', 304, 'clades', 'disaster', '', '1_16', 1), ('CLASSIS', 305, 'classis', 'fleet, class (of people)', '', '1_16', 1), ('FEMINA', 306, 'femina', 'woman', '', '1_16', 1), ('FVMVS', 307, 'fumus', 'smoke', '', '1_16', 1), ('FVNESTVS', 308, 'funestus', 'deadly', '', '1_16', 1), ('IGITVR', 309, 'igitur', 'therefore', '', '1_16', 1), ('INCENDIVM', 310, 'incendium', 'conflagration, eruption', '', '1_16', 1), ('LEGO/2', 311, 'lego', 'to read, choose', '', '1_16', 1), ('LITVS/2', 312, 'litus', 'shore', '', '1_16', 1), ('MATER', 313, 'mater', 'mother', '', '1_16', 1), 
('MONS', 314, 'mons', 'mountain', '', '1_16', 1), ('NAVIS', 315, 'navis', 'ship', '', '1_16', 1), ('NVBES', 316, 'nubes', 'cloud', '', '1_16', 1), ('NVMQVAM', 317, 'numquam', 'never', '', '1_16', 1), ('OPPRIMO', 318, 'opprimo', 'to overwhelm, suppress', '', '1_16', 1), ('PARS', 319, 'pars', 'part', '', '1_16', 1), ('STVDEO', 320, 'studeo', 'to study, be eager for, be interested in (w/ dat.)', '', '1_16', 1), ('DOMVS', 321, 'domus', 'home', '', '1_17', 2), ('ALO', 322, 'alo', 'to feed, nourish', '', '1_17', 1), ('AMITTO', 323, 'amitto', 'to lose', '', '1_17', 1), ('CORNV', 324, 'cornu', 'horn', '', '1_17', 1), ('CORRIPIO', 325, 'corripio', 'to seize, occupy, engulf', '', '1_17', 1), ('CVRRO', 326, 'curro', 'to run', '', '1_17', 1), ('DEVASTO', 327, 'devasto', 'to lay waste', '', '1_17', 1), ('EXSTINGVO', 328, 'exstinguo', 'to extinguish', '', '1_17', 1), ('FACILE', 329, 'facile', 'easliy', '', '1_17', 1), ('IACIO', 330, 'iacio', 'to throw', '', '1_17', 1), ('IMPERATOR', 331, 'imperator', 'general, emperor', '', '1_17', 1), ('IMPETVS', 332, 'impetus', 'impetus, force, attack', '', '1_17', 1), ('INITIVM', 333, 'initium', 'beginning', '', '1_17', 1), ('IVSSVS', 334, 'iussus', 'order', '', '1_17', 1), ('LOCVS', 335, 'locus', 'place (sing.); passages of a book (m. pl.); geographical places(n. 
pl.)', '', '1_17', 1), ('MANVS/1', 336, 'manus', 'hand', '', '1_17', 1), ('MVRVS', 337, 'murus', 'wall', '', '1_17', 1), ('SINE', 338, 'sine', 'without (w/ abl.)', '', '1_17', 1), ('TEMPTO', 339, 'tempto', 'to try', '', '1_17', 1), ('TVMVLTVS', 340, 'tumultus', 'confusion', '', '1_17', 1), ('VENTVS', 341, 'ventus', 'wind', '', '1_17', 1), ('DVCO', 342, 'duco', 'to lead, take', '', '1_18', 2), ('ITA', 343, 'ita', 'yes', '', '1_18', 2), ('COLO/2', 344, 'colo', 'to worship, cultivate', '', '1_18', 1), ('CVM/3', 345, 'cum', 'when, after', '', '1_18', 2), ('DEA', 346, 'dea', 'goddess', '', '1_18', 1), ('DIES', 347, 'dies', 'day', '', '1_18', 1), ('DORMIO', 348, 'dormio', 'to sleep', '', '1_18', 1), ('EXCITO/1', 349, 'excito', 'to awaken, rouse, stir up', '', '1_18', 1), ('EXCLAMO', 350, 'exclamo', 'to exclaim', '', '1_18', 1), ('FACIES', 351, 'facies', 'face', '', '1_18', 1), ('FATVM', 352, 'fatum', 'fate, destiny', '', '1_18', 1), ('MARITVS/1', 353, 'maritus', 'husband', '', '1_18', 1), ('MERIDIES', 354, 'meridies', 'midday', '', '1_18', 2), ('MVLTVM/2', 355, 'multum', 'much', '', '1_18', 1), ('OCCVLTO', 356, 'occulto', 'to hide', '', '1_18', 1), ('PATER', 357, 'pater', 'father', '', '1_18', 2), ('POST/2', 358, 'post', 'after (w/ acc.)', '', '1_18', 1), ('QVAERO', 359, 'quaero', 'to look for, search', '', '1_18', 1), ('RES', 360, 'res', 'thing, matter', '', '1_18', 1), ('SI/2', 361, 'si', 'if', '', '1_18', 1), ('SOMNVS', 362, 'somnus', 'sleep', '', '1_18', 1), ('TAM', 363, 'tam', 'so ', '', '1_18', 1), ('VXOR', 364, 'uxor', 'wife', '', '1_18', 2), ('BARBA', 365, 'barba', 'beard', '', '1_19', 1), ('CARO/1', 366, 'caro', 'meat, flesh', '', '1_19', 1), ('CELERITER', 367, 'celeriter', 'swiftly', '', '1_19', 1), ('COQVO', 368, 'coquo', 'to cook', '', '1_19', 1), ('CRESCO', 369, 'cresco', 'to grow', '', '1_19', 1), ('FEROX', 370, 'ferox', 'fierce, ferocious', '', '1_19', 1), ('FORIS/2', 371, 'foris', 'outside, in the open', '', '1_19', 1), ('HERBA', 372, 'herba', 'plant, 
vegetation', '', '1_19', 1), ('HIC/1', 373, 'hic', 'this', '', '1_19', 1), ('INTER', 374, 'inter', 'between, among (w/ acc.)', '', '1_19', 1), ('PELLIS', 375, 'pellis', 'skin, hide', '', '1_19', 1), ('POSTQVAM', 376, 'postquam', 'after', '', '1_19', 1), ('PROELIVM', 377, 'proelium', 'battle, combat', '', '1_19', 1), ('SANO', 378, 'sano', 'to heal', '', '1_19', 1), ('SEDEO', 379, 'sedeo', 'to sit', '', '1_19', 1), ('TERO', 380, 'tero', 'to wear out, rub', '', '1_19', 1), ('TERRIBILIS', 381, 'terribilis', 'terrifying', '', '1_19', 1), ('VESTIMENTVM', 382, 'vestimentum', 'garment, clothes (pl.)', '', '1_19', 1), ('VIVO', 383, 'vivo', 'to live', '', '1_19', 1), ('VVLNERO', 384, 'vulnero', 'to wound', '', '1_19', 1), ('VVLNVS', 385, 'vulnus', 'wound', '', '1_19', 1), ('ABVNDO', 386, 'abundo', 'to abound with (w/ abl.)', '', '1_20', 1), ('ADOLESCENS/2', 387, 'adulescens', 'young man, young lady', '', '1_20', 1), ('AEQVVS', 388, 'aequus', 'even', '', '1_20', 1), ('COR', 389, 'cor', 'heart', '', '1_20', 1), ('DELECTO', 390, 'delecto', 'to delight, please', '', '1_20', 1), ('DIVINVS/2', 391, 'divinus', 'divine', '', '1_20', 1), ('EGEO', 392, 'egeo', 'to lack something (abl.)', '', '1_20', 1), ('FVR', 393, 'fur', 'thief', '', '1_20', 1), ('FVRTVM', 394, 'furtum', 'theft', '', '1_20', 1), ('HVMANVS', 395, 'humanus', 'human', '', '1_20', 1), ('ILLE', 396, 'ille', 'that', '', '1_20', 1), ('INIQVITAS', 397, 'iniquitas', 'injustice', '', '1_20', 1), ('LEX', 398, 'lex', 'law', '', '1_20', 1), ('LVDO', 399, 'ludo', 'to play', '', '1_20', 1), ('NOCTV', 400, 'noctu', 'during the night', '', '1_20', 1), ('PAENE', 401, 'paene', 'almost', '', '1_20', 1), ('PAVPER', 402, 'pauper', 'poor', '', '1_20', 1), ('PLENVS', 403, 'plenus', 'full of (w/ gen. 
or abl.)', '', '1_20', 1), ('POMVM', 404, 'pomum', 'fruit', '', '1_20', 1), ('PVNIO', 405, 'punio', 'to punish', '', '1_20', 1), ('ACCIPIO', 406, 'accipio', 'to accept, receive', '', '1_21', 1), ('ACCVSO', 407, 'accuso', 'to accuse someone (acc.) of something (gen.)', '', '1_21', 1), ('ALIENVS/2', 408, 'alienus', 'foreign to, inconsistent with (w/ a/ab and abl.)', '', '1_21', 1), ('AXIS', 409, 'axis', 'axle, axis', '', '1_21', 1), ('CIRCVM/2', 410, 'circum', 'around (w/ acc.)', '', '1_21', 1), ('CONSTANTIA', 411, 'constantia', 'constancy', '', '1_21', 1), ('DESCENDO', 412, 'descendo', 'to descend', '', '1_21', 1), ('DIVITIAE', 413, 'divitia', 'wealth, riches (pl.)', '', '1_21', 1), ('ERIPIO', 414, 'eripio', 'to snatch away', '', '1_21', 1), ('ERRO/2', 415, 'erro', 'to wander, make a mistake', '', '1_21', 1), ('EXTERNVS', 416, 'externus', 'outward, external', '', '1_21', 1), ('FORTVNA', 417, 'fortuna', 'fortune, the goddess Fortune', '', '1_21', 1), ('FVTVRVS', 418, 'futurus', 'about to be (from sum)', '', '1_21', 1), ('HONOR', 419, 'honor', 'honor, public office or distinction', '', '1_21', 1), ('MVTO/2', 420, 'muto', 'to change', '', '1_21', 1), ('POSSIDEO', 421, 'possideo', 'to possess', '', '1_21', 1), ('PROCERTO', 422, 'pro', 'for certain, for sure', '', '1_21', 1), ('RECIPIO', 423, 'recipio', 'to take back', '', '1_21', 2), ('REPREHENDO', 424, 'reprehendo', 'to blame, rebuke', '', '1_21', 1), ('ROTA', 425, 'rota', 'wheel', '', '1_21', 1), ('TOLLO', 426, 'tollo', 'to lift up, raise; to destroy', '', '1_21', 1), ('VERSO', 427, 'verso', 'to turn', '', '1_21', 1), ('VLLVS', 428, 'ullus', 'any', '', '1_21', 1), ('CONSILIVM', 429, 'consilium', 'plan, advice', '', '2_1', 2), ('MERIDIES', 430, 'meridies', 'midday', '', '2_1', 2), ('PROPE/2', 431, 'prope', 'near', '', '2_1', 2), ('ASPICIO', 432, 'aspicio', 'to look at, catch a glimpse of', '', '2_1', 1), ('ETET', 433, 'et', 'both…and…', '', '2_1', 1), ('GENS', 434, 'gens', 'tribe, population', '', '2_1', 1), ('GIGNO', 
435, 'gigno', 'to give birth, produce', '', '2_1', 1), ('HODIE', 436, 'hodie', 'today', '', '2_1', 1), ('INCOLA', 437, 'incola', 'inhabitant', '', '2_1', 1), ('INSVLA', 438, 'insula', 'island', '', '2_1', 2), ('INVENIO', 439, 'invenio', 'to come upon, find', '', '2_1', 1), ('MOS', 440, 'mos', 'custom, habit; morals (pl.)', '', '2_1', 1), ('MVNDVS/1', 441, 'mundus', 'world', '', '2_1', 1), ('NE/4', 442, 'ne', 'that not, not to, lest ', '', '2_1', 3), ('OCCVPO/2', 443, 'occupo', 'to occupy', '', '2_1', 1), ('ORTVS', 444, 'ortus', 'origin, beginning, raising', '', '2_1', 1), ('PISCIS', 445, 'piscis', 'a fish', '', '2_1', 1), ('PROCVL', 446, 'procul', 'far, far away', '', '2_1', 1), ('PROMITTO', 447, 'promitto', 'to promise', '', '2_1', 1), ('SEPTENTRIONALIS', 448, 'septentrionalis', 'northern', '', '2_1', 1), ('SITVS/2', 449, 'situs', 'located, situated', '', '2_1', 1), ('SOL', 450, 'sol', 'sun', '', '2_1', 1), ('VTINAM', 451, 'utinam', 'if only', '', '2_1', 2), ('GERO', 452, 'gero', 'to carry; to behave (w/ se)', '', '2_2', 2), ('ODIVM', 453, 'odium', 'hatred', '', '2_2', 2), ('VALEO', 454, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_2', 3), ('ALTVS', 455, 'altus', 'tall, deep', '', '2_2', 1), ('ANNVS', 456, 'annus', 'year', '', '2_2', 1), ('ARGENTVM', 457, 'argentum', 'silver', '', '2_2', 1), ('AVRVM', 458, 'aurum', 'gold', '', '2_2', 1), ('BREVIS', 459, 'brevis', 'short', '', '2_2', 1), ('CLARVS', 460, 'clarus', 'clear, distinguished', '', '2_2', 1), ('CVSTOS', 461, 'custos', 'guard', '', '2_2', 1), ('EQVES', 462, 'eques', 'horseman', '', '2_2', 1), ('FINIS', 463, 'finis', 'end', '', '2_2', 1), ('GRAVIS', 464, 'gravis', 'serious, heavy', '', '2_2', 1), ('INTERDVM', 465, 'interdum', 'sometimes', '', '2_2', 1), ('LIS', 466, 'lis', 'dispute, quarrel', '', '2_2', 1), ('MANE/2', 467, 'mane', 'in the morning', '', '2_2', 1), ('ODIOHABEO', 468, 'odio', 'to hate somebody', '', '2_2', 1), ('SINO', 469, 'sino', 'to allow somebody (acc.) 
to do something (inf.)', '', '2_2', 1), ('VEL/1', 470, 'vel', 'or', '', '2_2', 1), ('VESTIS', 471, 'vestis', 'clothes, attire', '', '2_2', 1), ('VOX', 472, 'vox', 'voice', '', '2_2', 1), ('VT/4', 473, 'ut', 'that, to, in order to, so that', '', '2_2', 4), ('VVLTVS', 474, 'vultus', 'face', '', '2_2', 1), ('VXOR', 475, 'uxor', 'wife', '', '2_3', 2), ('AT/2', 476, 'at', 'but', '', '2_3', 1), ('CONIVX', 477, 'coniunx', 'spouse', '', '2_3', 1), ('DISCIPVLA', 478, 'discipula', 'student', '', '2_3', 1), ('DISCO', 479, 'disco', 'to learn', '', '2_3', 1), ('DOMINVS', 480, 'dominus', 'master, lord', '', '2_3', 1), ('FAMA', 481, 'fama', 'fame, name, reputation', '', '2_3', 1), ('FRATER', 482, 'frater', 'brother', '', '2_3', 1), ('IMPROBVS', 483, 'improbus', 'wicked, bad', '', '2_3', 1), ('IVNGO', 484, 'iungo', 'to join', '', '2_3', 1), ('MAGISTER', 485, 'magister', 'teacher', '', '2_3', 1), ('MATRIMONIVM', 486, 'matrimonium', 'marriage', '', '2_3', 1), ('NE/4', 487, 'ne', 'that not, not to, lest ', '', '2_3', 3), ('NVSQVAM', 488, 'nusquam', 'nowhere', '', '2_3', 1), ('PARIO/2', 489, 'pario', 'to give birth to', '', '2_3', 1), ('PERDO', 490, 'perdo', 'to lose, waste', '', '2_3', 1), ('SALVS', 491, 'salus', 'health, welfare', '', '2_3', 1), ('SALVTEMDICERE', 492, 'salutem', 'to greet (customary opening to letter) ', '', '2_3', 1), ('SCRIBO', 493, 'scribo', 'to write', '', '2_3', 1), ('VT/4', 494, 'ut', 'that, to, in order to, so that', '', '2_3', 4), ('VXOREMDEDVCERE', 495, 'uxorem', 'to marry a woman, to take as a wife', '', '2_3', 1), ('NEC/2', 496, 'nec', 'nor; and not', '', '2_4', 2), ('RECIPIO', 497, 'recipio', 'to take back', '', '2_4', 2), ('AGMEN', 498, 'agmen', 'marching column', '', '2_4', 1), ('APERIO', 499, 'aperio', 'to open', '', '2_4', 1), ('COEPIO', 500, 'coepi', 'to begin (w/ inf.)', '', '2_4', 1), ('DEFENDO', 501, 'defendo', 'to defend', '', '2_4', 1), ('EDO/1', 502, 'edo', 'to produce, give forth', '', '2_4', 1), ('EXTRA/2', 503, 'extra', 'outside of (w/ 
acc.)', '', '2_4', 1), ('FVRO', 504, 'furo', 'to rage, be insane', '', '2_4', 1), ('INGENS', 505, 'ingens', 'huge', '', '2_4', 1), ('INVADO/2', 506, 'invado', 'to burst in', '', '2_4', 1), ('LIGNEVS', 507, 'ligneus', 'made of wood', '', '2_4', 1), ('NEQVENEC', 508, 'neque', 'neither..nor…', '', '2_4', 1), ('PARCO', 509, 'parco', 'to spare somebody/thing (w/ dat.)', '', '2_4', 1), ('PONS', 510, 'pons', 'bridge', '', '2_4', 1), ('PORTA', 511, 'porta', 'gate', '', '2_4', 1), ('PRIMO', 512, 'primo', 'at first', '', '2_4', 1), ('QVAM/1', 513, 'quam', 'than (w/ comp. words)', '', '2_4', 2), ('QVANTVS/1', 514, 'quantus', 'how great, how much (inter. or rel. adj.)', '', '2_4', 1), ('RESISTO', 515, 'resisto', 'to resist (w/ dat.)', '', '2_4', 1), ('SIMVL/1', 516, 'simul', 'at the same time', '', '2_4', 1), ('TVTVS', 517, 'tutus', 'safe', '', '2_4', 1), ('VACVVS', 518, 'vacuus', 'empty of (w/ abl.)', '', '2_4', 1), ('VALEO', 519, 'valeo', 'to be able (w/ inf.); to be in good health', '', '2_4', 3), ('VICTOR', 520, 'victor', 'victor', '', '2_4', 1), ('VTINAM', 521, 'utinam', 'if only', '', '2_4', 2), ('BIBO/2', 522, 'bibo', 'to drink', '', '2_5', 1), ('CARMEN/1', 523, 'carmen', 'song, poem', '', '2_5', 1), ('CIBVS', 524, 'cibus', 'food', '', '2_5', 1), ('DVLCIS', 525, 'dulcis', 'sweet', '', '2_5', 1), ('FLVMEN', 526, 'flumen', 'river', '', '2_5', 1), ('IMMEMOR', 527, 'immemor', 'forgetful of (w/ gen.)', '', '2_5', 1), ('IOCVS', 528, 'iocus', 'joke', '', '2_5', 1), ('IVVENTVS', 529, 'iuventus', 'youth', '', '2_5', 1), ('LEVIS/1', 530, 'levis', 'light', '', '2_5', 1), ('MENS', 531, 'mens', 'mind, spirit', '', '2_5', 1), ('NE/4', 532, 'ne', 'that not, not to, lest ', '', '2_5', 3), ('ORO', 533, 'oro', 'to ask, entreat', '', '2_5', 1), ('PLACEO', 534, 'placeo', 'to please, be agreeable to somebody', '', '2_5', 1), ('PROXIMVS/2', 535, 'proximus', 'nearest', '', '2_5', 1), ('TAMQVAM/2', 536, 'tam', 'so…as…', '', '2_5', 1), ('VEHEMENS', 537, 'vehemens', 'violent, vehement', '', 
'2_5', 1), ('VETVS', 538, 'vetus', 'old', '', '2_5', 1), ('VINVM', 539, 'vinum', 'wine', '', '2_5', 1), ('VIRTVS', 540, 'virtus', 'courage, virtue', '', '2_5', 1), ('VITIVM', 541, 'vitium', 'vice', '', '2_5', 1), ('VT/4', 542, 'ut', 'that, to, in order to, so that', '', '2_5', 4), ('PATER', 543, 'pater', 'father', '', '2_6', 2), ('DECIPIO', 544, 'decipio', 'to deceive', '', '2_6', 1), ('DILIGO/3', 545, 'diligo', 'to love, esteem highly', '', '2_6', 1), ('DVO', 546, 'duo', 'two', '', '2_6', 1), ('EXERCITVS/1', 547, 'exercitus', 'army', '', '2_6', 1), ('FIDELIS/2', 548, 'fidelis', 'faithful, loyal', '', '2_6', 1), ('HERES', 549, 'heres', 'heir', '', '2_6', 1), ('IMPERIVM', 550, 'imperium', 'rule, empire, power', '', '2_6', 1), ('INOPIA', 551, 'inopia', 'helplessness, want', '', '2_6', 1), ('LAVDO', 552, 'laudo', 'to praise', '', '2_6', 1), ('NECESSEEST', 553, 'necesse', 'it is necessary for someone (dat.) to do something (inf.)', '', '2_6', 1), ('NEMO', 554, 'nemo', 'no one', '', '2_6', 1), ('PAVLO', 555, 'paulo', 'a little bit, to a small extent', '', '2_6', 1), ('QVAM/1', 556, 'quam', 'than (w/ comp. 
words)', '', '2_6', 2), ('QVANTVM/3', 557, 'quantum', 'to what extent, how much', '', '2_6', 1), ('RESTITVO', 558, 'restituo', 'to restore', '', '2_6', 1), ('SATIS/2', 559, 'satis', 'enough, sufficiently', '', '2_6', 1), ('SECVNDVS/1', 560, 'secundus', 'second', '', '2_6', 1), ('TERTIVS', 561, 'tertius', 'third', '', '2_6', 1), ('TRES', 562, 'tres', 'three', '', '2_6', 1), ('TRISTIS', 563, 'tristis', 'sad', '', '2_6', 1), ('VEHEMENTER', 564, 'vehementer', 'strongly, vehemently', '', '2_6', 1), ('NOLO', 565, 'nolo', 'not to want, to be unwilling', '', '2_7', 2), ('AETAS', 566, 'aetas', 'age', '', '2_7', 1), ('FIDES/2', 567, 'fides', 'faith', '', '2_7', 1), ('FVNDO/2', 568, 'fundo', 'to pour', '', '2_7', 1), ('GLORIA', 569, 'gloria', 'glory', '', '2_7', 1), ('LIBERTAS', 570, 'libertas', 'freedom', '', '2_7', 1), ('LVMEN', 571, 'lumen', 'light', '', '2_7', 1), ('MALO', 572, 'malo', 'to prefer', '', '2_7', 1), ('ORNATVS/1', 573, 'ornatus', 'adorned, ornate, elaborate', '', '2_7', 1), ('OTIVM', 574, 'otium', 'leisure, free time', '', '2_7', 1), ('POTENS', 575, 'potens', 'powerful', '', '2_7', 1), ('PVBLICVS/2', 576, 'publicus', 'common', '', '2_7', 1), ('QVALIS/1', 577, 'qualis', 'what sort of? (inter. adj.)', '', '2_7', 1), ('RESPVBLICA', 578, 'res', 'state', '', '2_7', 1), ('STVDIOSVS', 579, 'studiosus', 'fond of (w/ gen.)', '', '2_7', 1), ('TAMQVAM/1', 580, 'tamquam', 'as', '', '2_7', 1), ('TOT', 581, 'tot', 'so many', '', '2_7', 1), ('TRAHO', 582, 'traho', 'to drag, draw', '', '2_7', 1), ('VBI/1', 583, 'ubi', 'where? (inter. 
adv)', '', '2_7', 1), ('VIX', 584, 'vix', 'hardly', '', '2_7', 1), ('VNVS', 585, 'unus', 'one', '', '2_7', 1), ('VOLO/3', 586, 'volo', 'to want', '', '2_7', 1), ('VTILIS', 587, 'utilis', 'useful', '', '2_7', 1), ('ADHVC', 588, 'adhuc', 'still, up to this time', '', '2_8', 1), ('ANTIQVVS', 589, 'antiquus', 'ancient', '', '2_8', 1), ('ARS', 590, 'ars', 'science, art, skill', '', '2_8', 1), ('DOMINOR', 591, 'dominor', 'to dominate, rule', '', '2_8', 1), ('HORTOR', 592, 'hortor', 'to exhort, urge', '', '2_8', 1), ('LATINE', 593, 'Latine', 'in Latin', '', '2_8', 1), ('LATINVS/A', 594, 'Latinus', 'Latin, pertaining to Latin', '', '2_8', 1), ('LINGVA', 595, 'lingua', 'language; tongue', '', '2_8', 1), ('LOQVOR', 596, 'loquor', 'to speak', '', '2_8', 1), ('MAGIS/2', 597, 'magis', 'more', '', '2_8', 1), ('MAIOR', 598, 'maior', 'bigger; greater', '', '2_8', 1), ('MAXIMVS', 599, 'maximus', 'greatest', '', '2_8', 1), ('MELIOR', 600, 'melior', 'better', '', '2_8', 1), ('MINIMVS', 601, 'minimus', 'smallest', '', '2_8', 1), ('MINVS', 602, 'minus', 'less', '', '2_8', 2), ('OPTIMVS', 603, 'optimus', 'best', '', '2_8', 1), ('PARTIOR', 604, 'partior', 'to divide, distribute', '', '2_8', 1), ('PATIOR', 605, 'patior', 'to endure, tolerate, suffer', '', '2_8', 1), ('PEIOR', 606, 'peior', 'worse', '', '2_8', 1), ('PESSIMVS', 607, 'pessimus', 'worst', '', '2_8', 1), ('PLVRIMVS', 608, 'plurimus', 'most', '', '2_8', 1), ('PLVS', 609, 'plus', 'more', '', '2_8', 1), ('SEQVOR', 610, 'sequor', 'to follow', '', '2_8', 1), ('VEREOR', 611, 'vereor', 'to fear, respect', '', '2_8', 1), ('ADDO', 612, 'addo', 'to add', '', '2_9', 1), ('AVRIS', 613, 'auris', 'ear', '', '2_9', 1), ('CONOR', 614, 'conor', 'to try', '', '2_9', 1), ('DEMITTO', 615, 'demitto', 'to send down', '', '2_9', 1), ('DISSIMILIS', 616, 'dissimilis', 'dissimilar ', '', '2_9', 1), ('FACILIS', 617, 'facilis', 'easy', '', '2_9', 1), ('FERO', 618, 'fero', 'to carry, bear', '', '2_9', 1), ('FIO', 619, 'fio', 'to be made, become; 
(impersonally) to happen', '', '2_9', 1), ('FRIGVS', 620, 'frigus', 'cold', '', '2_9', 1), ('GENVS/1', 621, 'genus', 'kind', '', '2_9', 1), ('GLACIES', 622, 'glacies', 'ice', '', '2_9', 1), ('GRACILIS', 623, 'gracilis', 'slender', '', '2_9', 1), ('HVMILIS', 624, 'humilis', 'low, humble', '', '2_9', 1), ('ITER', 625, 'iter', 'road, trip ', '', '2_9', 1), ('LABOR/2', 626, 'labor', 'to slide, slip, glide down', '', '2_9', 1), ('MODEROR', 627, 'moderor', 'to manage, direct, guide', '', '2_9', 1), ('NIX', 628, 'nix', 'snow', '', '2_9', 1), ('ONVS', 629, 'onus', 'weight, burden', '', '2_9', 1), ('PERVENIO', 630, 'pervenio', 'to arrive', '', '2_9', 1), ('PROGREDIOR', 631, 'progredior', 'to go forward, proceed', '', '2_9', 1), ('QVOTIENS/2', 632, 'quotiens', 'as often as', '', '2_9', 1), ('SIMVLAC/2', 633, 'simulac', 'as soon as', '', '2_9', 1), ('SVVS', 634, 'suus', 'his, her, its, their', '', '2_10', 2), ('AEDES', 635, 'aedes', 'temple; pl. dwelling, house', '', '2_10', 1), ('EO/1', 636, 'eo', 'to go ', '', '2_10', 1), ('IVCVNDVS', 637, 'iucundus', 'pleasant, nice', '', '2_10', 1), ('LABOR/1', 638, 'labor', 'labor, toil', '', '2_10', 1), ('LAEDO', 639, 'laedo', 'to harm', '', '2_10', 1), ('LIBER/2', 640, 'liber', 'free', '', '2_10', 1), ('LVCRVM', 641, 'lucrum', 'profit, gain', '', '2_10', 1), ('MARITIMVS', 642, 'maritimus', 'maritime', '', '2_10', 1), ('MODVS', 643, 'modus', 'way, method, manner', '', '2_10', 1), ('PAVLISPER', 644, 'paulisper', 'for a little while', '', '2_10', 1), ('PECVNIA', 645, 'pecunia', 'money', '', '2_10', 1), ('PLACIDVS', 646, 'placidus', 'peaceful, calm', '', '2_10', 1), ('POTIVS', 647, 'potius', 'rather', '', '2_10', 1), ('PROSPER', 648, 'prosper', 'fortunate, prosperous', '', '2_10', 1), ('REDDO', 649, 'reddo', 'to give back', '', '2_10', 1), ('SARCINA', 650, 'sarcina', 'burden, baggage', '', '2_10', 1), ('SCELESTVS', 651, 'scelestus', 'wicked', '', '2_10', 1), ('SEMEL', 652, 'semel', 'once', '', '2_10', 1), ('SERENVS', 653, 'serenus', 'calm, 
clear', '', '2_10', 1), ('PARVM/2', 654, 'parum', 'too little', '', '2_11', 2), ('ALTER', 655, 'alter', 'the other (of two)', '', '2_11', 1), ('GEMMA', 656, 'gemma', 'gem, precious stone', '', '2_11', 1), ('LEGATVS', 657, 'legatus', 'ambassador', '', '2_11', 1), ('MAGNIHABEO', 658, 'magni', 'to esteem a lot', '', '2_11', 1), ('MINVS', 659, 'minus', 'less', '', '2_11', 2), ('NESCIO', 660, 'nescio', 'not to know', '', '2_11', 1), ('NEVTER', 661, 'neuter', 'neither, none (of two)', '', '2_11', 1), ('NVLLVS', 662, 'nullus', 'none', '', '2_11', 1), ('OPERAEPRETIVMEST', 663, 'operae', 'it is worthwhile', '', '2_11', 1), ('POPVLVS/1', 664, 'populus', 'a people, populace', '', '2_11', 1), ('QVOMODO/1', 665, 'quomodo', 'how', '', '2_11', 1), ('SALVTO', 666, 'saluto', 'to greet ', '', '2_11', 1), ('SERVVS/1', 667, 'servus', 'slave, servant', '', '2_11', 1), ('SOLVS', 668, 'solus', 'alone, only', '', '2_11', 1), ('SPECTO', 669, 'specto', 'to watch', '', '2_11', 1), ('TACEO', 670, 'taceo', 'to be silent, keep quiet', '', '2_11', 1), ('TOTVS', 671, 'totus', 'whole, entire', '', '2_11', 1), ('TVRPIS', 672, 'turpis', 'shameful, disgraceful', '', '2_11', 1), ('VTER/4', 673, 'uter', 'who, which (of two)?', '', '2_11', 1), ('VTOR', 674, 'utor', 'to use (w/ abl.)', '', '2_11', 1), ('CVM/3', 675, 'cum', 'when, after', '', '2_12', 2), ('INQVIO', 676, 'inquam', 'to say (used with direct speech)', '', '2_12', 3), ('TAMEN', 677, 'tamen', 'however', '', '2_12', 2), ('CARVS', 678, 'carus', 'dear', '', '2_12', 1), ('INSVLA', 679, 'insula', 'island', '', '2_12', 2), ('MORIOR', 680, 'morior', 'to die', '', '2_12', 1), ('NIMIS', 681, 'nimis', 'too much', '', '2_12', 1), ('NISI', 682, 'nisi', 'if not, unless', '', '2_12', 1), ('OFFICIVM', 683, 'officium', 'duty', '', '2_12', 1), ('ORBIS', 684, 'orbis', 'circle', '', '2_12', 1), ('ORBISTERRARVM', 685, 'orbis', 'the earth, the world', '', '2_12', 1), ('PROBO', 686, 'probo', 'to approve ', '', '2_12', 1), ('QVAMQVAM/2', 687, 'quamquam', 'although', 
'', '2_12', 1), ('QVAMVIS/1', 688, 'quamvis', 'although', '', '2_12', 1), ('QVIA', 689, 'quia', 'because', '', '2_12', 1), ('QVIDEM', 690, 'quidem', 'indeed', '', '2_12', 1), ('QVOD/1', 691, 'quod', 'because', '', '2_12', 1), ('SENTENTIA', 692, 'sententia', 'opinion, point of view', '', '2_12', 1), ('SORS', 693, 'sors', 'lot', '', '2_12', 1), ('SPERO', 694, 'spero', 'to hope', '', '2_12', 1), ('SPES', 695, 'spes', 'hope', '', '2_12', 1), ('ATQVE/1', 696, 'atque', 'as', '', '2_13', 2), ('ABSENS', 697, 'absens', 'away, absent', '', '2_13', 1), ('ABSVM/1', 698, 'absum', 'to be away', '', '2_13', 1), ('BENEVOLENTIA', 699, 'benevolentia', 'good will', '', '2_13', 1), ('DECLARO', 700, 'declaro', 'to demonstrate, show, make known, reveal', '', '2_13', 1), ('IDEM', 701, 'idem', 'the same', '', '2_13', 1), ('IPSE', 702, 'ipse', 'self', '', '2_13', 1), ('IRASCOR', 703, 'irascor', 'to be angry at (w/ dat.)', '', '2_13', 1), ('ISTE', 704, 'iste', 'that (of yours)', '', '2_13', 1), ('MIROR', 705, 'miror', 'to marvel, be surprised at', '', '2_13', 1), ('MVLTITVDO', 706, 'multitudo', 'crowd, throng', '', '2_13', 1), ('NEGO', 707, 'nego', 'to deny ', '', '2_13', 1), ('NVMERO/1', 708, 'numero', 'to number, count among', '', '2_13', 1), ('OFFENDO', 709, 'offendo', ']to happen upon, offend', '', '2_13', 1), ('REDEO/1', 710, 'redeo', 'to go back, return', '', '2_13', 1), ('REFERO', 711, 'refero', 'to carry back, report', '', '2_13', 1), ('SOCIVS/1', 712, 'socius', 'associate, partner, ally', '', '2_13', 1), ('TALIS', 713, 'talis', 'such a', '', '2_13', 1), ('TVRRIS', 714, 'turris', 'tower', '', '2_13', 1), ('VENIA', 715, 'venia', 'pardon, indulgence, forgiveness', '', '2_13', 1), ('VERSOR', 716, 'versor', 'to be situated in, be occupied in ', '', '2_13', 1), ('VIRGA', 717, 'virga', 'twig, stick', '', '2_13', 1), ('VOLVNTAS', 718, 'voluntas', 'will', '', '2_13', 1), ('AFFIRMO', 719, 'affirmo', 'to assert, maintain', '', '2_14', 1), ('CIRCVMEO/1', 720, 'circumeo', 'to go around', '', 
'2_14', 1), ('CONTINEO', 721, 'contineo', 'to hold, keep together, contain', '', '2_14', 1), ('COTIDIANVS', 722, 'cottidianus', 'of every day, daily', '', '2_14', 1), ('ELEMENTVM', 723, 'elementum', 'element', '', '2_14', 1), ('ERGO/2', 724, 'ergo', 'therefore', '', '2_14', 1), ('GRAVITAS', 725, 'gravitas', 'weight, gravity', '', '2_14', 1), ('IMMENSVS', 726, 'immensus', 'immeasurable, immense, endless', '', '2_14', 1), ('INFINITVS', 727, 'infinitus', 'boundless, unlimited', '', '2_14', 1), ('MAXIME', 728, 'maxime', 'most', '', '2_14', 1), ('MEDIVS', 729, 'medius', 'middle', '', '2_14', 1), ('MOTVS', 730, 'motus', 'motion, movement', '', '2_14', 1), ('MVLTO/2', 731, 'multo', 'by much', '', '2_14', 1), ('NATVRA', 732, 'natura', 'nature', '', '2_14', 1), ('NECESSARIO', 733, 'necessario', 'necessarily', '', '2_14', 1), ('PERPERAM', 734, 'perperam', 'wrongly, incorrectly', '', '2_14', 1), ('PONDVS', 735, 'pondus', 'weight', '', '2_14', 1), ('PRAESERTIM', 736, 'praesertim', 'especially', '', '2_14', 1), ('QVIES', 737, 'quies', 'rest, repose', '', '2_14', 1), ('VNDIQVE', 738, 'undique', 'from all parts, from everywhere', '', '2_14', 1), ('VOLVO', 739, 'volvo', 'to turn round', '', '2_14', 1), ('VT/4', 740, 'ut', 'that, to, in order to, so that', '', '2_14', 4), ('ANIMADVERTO', 741, 'animadverto', 'to notice', '', '2_15', 1), ('APPROPINQVO', 742, 'appropinquo', 'to approach (w/ dat or ad + acc.)', '', '2_15', 1), ('CERNO', 743, 'cerno', 'to see, distinguish with the eyes', '', '2_15', 1), ('CIRCA/2', 744, 'circa', 'around (w/ acc.)', '', '2_15', 1), ('CLAMO', 745, 'clamo', 'to shout, scream', '', '2_15', 1), ('FINGO', 746, 'fingo', 'to imagine, form in the mind', '', '2_15', 1), ('IMPINGO', 747, 'impingo', 'to push, strike, inflict', '', '2_15', 1), ('INFLIGO', 748, 'infligo', 'to strike on or against, inflict', '', '2_15', 1), ('ITERVM', 749, 'iterum', 'again', '', '2_15', 1), ('OPPIDVM', 750, 'oppidum', 'town', '', '2_15', 1), ('PERCVTIO', 751, 'percutio', 'to strike 
through ', '', '2_15', 1), ('PRAEDITVS', 752, 'praeditus', 'endowed with, possessed of (w/ abl.)', '', '2_15', 1), ('REPELLO', 753, 'repello', 'to push back, thrust back', '', '2_15', 1), ('RIDEO', 754, 'rideo', 'to laugh', '', '2_15', 1), ('RVMPO', 755, 'rumpo', 'to break, tear', '', '2_15', 1), ('SEDES', 756, 'sedes', 'seat, abode', '', '2_15', 1), ('SIC', 757, 'sic', 'in such a way', '', '2_15', 1), ('SIDVS', 758, 'sidus', 'constellation', '', '2_15', 1), ('TELVM', 759, 'telum', 'spear, javelin', '', '2_15', 1), ('VEHO', 760, 'veho', 'to drive, carry', '', '2_15', 1)]\nsection_list ={'1.1': 'start', '1.2': '1.1', '1.3': '1.2', '1.4': '1.3', '1.5': '1.4', '1.6': '1.5', '1.7': '1.6', '1.8': '1.7', '1.9': '1.8', '1.10': '1.9', '1.11': '1.10', '1.12': '1.11', '1.13': '1.12', '1.14': '1.13', '1.15': '1.14', '1.16': '1.15', '1.17': '1.16', '1.18': '1.17', '1.19': '1.18', '1.20': '1.19', '1.21': '1.20', '2.1': '1.21', '2.2': '2.1', '2.3': '2.2', '2.4': '2.3', '2.5': '2.4', '2.6': '2.5', '2.7': '2.6', '2.8': '2.7', '2.9': '2.8', '2.10': '2.9', '2.11': '2.10', '2.12': '2.11', '2.13': '2.12', '2.14': '2.13', '2.15': '2.14', 'end': '2.15', 'start': 'start'}\ntitle = \"Latin for the New Millennium Vols 1 and 2 (Tunberg-Minkova)\"\nsection_level = 2\nlanguage = \"Latin\"\nbook = text.Text(title, section_words, the_text, section_list, section_level, language, True, False)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('monitoring', '0002_auto_20201021_0027')]
operations = [migrations.AlterField(model_name='endpoint', name=
'frequency_in_minutes', field=models.FloatField(default=30)),
migrations.AlterField(model_name='endpoint', name='last_check',
field=models.DateTimeField(blank=True, default=None, null=True)),
migrations.AlterField(model_name='endpoint', name='response_text',
field=models.TextField(blank=True, default=None, null=True)),
migrations.AlterField(model_name='endpoint', name='status_code',
field=models.FloatField(default=200)), migrations.AlterField(
model_name='endpoint', name='test_pattern', field=models.CharField(
blank=True, default=None, help_text=
'If left blank sys will only ping', max_length=100, null=True))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('monitoring', '0002_auto_20201021_0027')]
operations = [migrations.AlterField(model_name='endpoint', name=
'frequency_in_minutes', field=models.FloatField(default=30)),
migrations.AlterField(model_name='endpoint', name='last_check',
field=models.DateTimeField(blank=True, default=None, null=True)),
migrations.AlterField(model_name='endpoint', name='response_text',
field=models.TextField(blank=True, default=None, null=True)),
migrations.AlterField(model_name='endpoint', name='status_code',
field=models.FloatField(default=200)), migrations.AlterField(
model_name='endpoint', name='test_pattern', field=models.CharField(
blank=True, default=None, help_text=
'If left blank sys will only ping', max_length=100, null=True))]
<|reserved_special_token_1|>
# Generated by Django 3.1.2 on 2020-10-21 21:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitoring', '0002_auto_20201021_0027'),
]
operations = [
migrations.AlterField(
model_name='endpoint',
name='frequency_in_minutes',
field=models.FloatField(default=30),
),
migrations.AlterField(
model_name='endpoint',
name='last_check',
field=models.DateTimeField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='endpoint',
name='response_text',
field=models.TextField(blank=True, default=None, null=True),
),
migrations.AlterField(
model_name='endpoint',
name='status_code',
field=models.FloatField(default=200),
),
migrations.AlterField(
model_name='endpoint',
name='test_pattern',
field=models.CharField(blank=True,
default=None,
help_text='If left blank sys will only ping',
max_length=100,
null=True),
),
]
|
flexible
|
{
"blob_id": "20f56ff484321a7d623cead4315e5a6b3b0653a7",
"index": 2720,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('monitoring', '0002_auto_20201021_0027')]\n operations = [migrations.AlterField(model_name='endpoint', name=\n 'frequency_in_minutes', field=models.FloatField(default=30)),\n migrations.AlterField(model_name='endpoint', name='last_check',\n field=models.DateTimeField(blank=True, default=None, null=True)),\n migrations.AlterField(model_name='endpoint', name='response_text',\n field=models.TextField(blank=True, default=None, null=True)),\n migrations.AlterField(model_name='endpoint', name='status_code',\n field=models.FloatField(default=200)), migrations.AlterField(\n model_name='endpoint', name='test_pattern', field=models.CharField(\n blank=True, default=None, help_text=\n 'If left blank sys will only ping', max_length=100, null=True))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('monitoring', '0002_auto_20201021_0027')]\n operations = [migrations.AlterField(model_name='endpoint', name=\n 'frequency_in_minutes', field=models.FloatField(default=30)),\n migrations.AlterField(model_name='endpoint', name='last_check',\n field=models.DateTimeField(blank=True, default=None, null=True)),\n migrations.AlterField(model_name='endpoint', name='response_text',\n field=models.TextField(blank=True, default=None, null=True)),\n migrations.AlterField(model_name='endpoint', name='status_code',\n field=models.FloatField(default=200)), migrations.AlterField(\n model_name='endpoint', name='test_pattern', field=models.CharField(\n blank=True, default=None, help_text=\n 'If left blank sys will only ping', max_length=100, null=True))]\n",
"step-5": "# Generated by Django 3.1.2 on 2020-10-21 21:00\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('monitoring', '0002_auto_20201021_0027'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='endpoint',\n name='frequency_in_minutes',\n field=models.FloatField(default=30),\n ),\n migrations.AlterField(\n model_name='endpoint',\n name='last_check',\n field=models.DateTimeField(blank=True, default=None, null=True),\n ),\n migrations.AlterField(\n model_name='endpoint',\n name='response_text',\n field=models.TextField(blank=True, default=None, null=True),\n ),\n migrations.AlterField(\n model_name='endpoint',\n name='status_code',\n field=models.FloatField(default=200),\n ),\n migrations.AlterField(\n model_name='endpoint',\n name='test_pattern',\n field=models.CharField(blank=True,\n default=None,\n help_text='If left blank sys will only ping',\n max_length=100,\n null=True),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def backend_login(title, use_auth_info, use_login_data, settings=None):
api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
nex_token = api.get_nex_token(title.GAME_SERVER_ID)
auth_info = None
login_data = None
if use_auth_info:
auth_info = authentication.AuthenticationInfo()
auth_info.token = nex_token.token
auth_info.server_version = title.SERVER_VERSION
if use_login_data:
login_data = authentication.NintendoLoginData()
login_data.token = nex_token.token
client = backend.BackEndClient(settings)
clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)
client.connect(nex_token.host, nex_token.port)
client.login(nex_token.username, nex_token.password, auth_info, login_data)
return client
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO)
<|reserved_special_token_0|>
def backend_login(title, use_auth_info, use_login_data, settings=None):
api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
nex_token = api.get_nex_token(title.GAME_SERVER_ID)
auth_info = None
login_data = None
if use_auth_info:
auth_info = authentication.AuthenticationInfo()
auth_info.token = nex_token.token
auth_info.server_version = title.SERVER_VERSION
if use_login_data:
login_data = authentication.NintendoLoginData()
login_data.token = nex_token.token
client = backend.BackEndClient(settings)
clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)
client.connect(nex_token.host, nex_token.port)
client.login(nex_token.username, nex_token.password, auth_info, login_data)
return client
<|reserved_special_token_0|>
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.login(USERNAME, PASSWORD)
<|reserved_special_token_0|>
friends_client.update_presence(presence)
input('Press enter to disconnect and exit\n')
<|reserved_special_token_0|>
friends_client.update_presence(presence)
game_backend.close()
friends_backend.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
logging.basicConfig(level=logging.INFO)
DEVICE_ID = 12345678
SERIAL_NUMBER = '...'
SYSTEM_VERSION = 544
REGION = 4
COUNTRY = 'NL'
USERNAME = '...'
PASSWORD = '...'
def backend_login(title, use_auth_info, use_login_data, settings=None):
api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
nex_token = api.get_nex_token(title.GAME_SERVER_ID)
auth_info = None
login_data = None
if use_auth_info:
auth_info = authentication.AuthenticationInfo()
auth_info.token = nex_token.token
auth_info.server_version = title.SERVER_VERSION
if use_login_data:
login_data = authentication.NintendoLoginData()
login_data.token = nex_token.token
client = backend.BackEndClient(settings)
clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)
client.connect(nex_token.host, nex_token.port)
client.login(nex_token.username, nex_token.password, auth_info, login_data)
return client
api = AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.login(USERNAME, PASSWORD)
friends_backend = backend_login(Friends, False, True, 'friends.cfg')
game_backend = backend_login(MK8, True, False)
pid = game_backend.get_pid()
friends_client = friends.FriendsClient(friends_backend.secure_client)
matchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)
matchmake_session = matchmaking.MatchmakeSession()
matchmake_session.player_min = 2
matchmake_session.player_max = 12
matchmake_session.participation_policy = 98
matchmake_session.game_mode = 3
matchmake_session.attribs[4] = 1027
matchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS
session_id = matchmaker.create_matchmake_session(matchmake_session, '', 1).gid
application_data = (b'\x00\x00 \x03\x00\x00\x00\x00\x00\x00\x00\x00\x18' +
struct.pack('<I', pid) + b'\x00\x00\x00')
presence = friends.NintendoPresenceV2()
presence.flags = 494
presence.is_online = True
presence.game_key.title_id = MK8.TITLE_ID_EUR
presence.game_key.title_version = MK8.LATEST_VERSION
presence.message = "I'm a Python client"
presence.unk2 = 2
presence.unk3 = 2
presence.game_server_id = MK8.GAME_SERVER_ID
presence.unk4 = 3
presence.pid = pid
presence.gathering_id = session_id
presence.application_data = application_data
friends_client.update_presence(presence)
input('Press enter to disconnect and exit\n')
presence = friends.NintendoPresenceV2()
friends_client.update_presence(presence)
game_backend.close()
friends_backend.close()
<|reserved_special_token_1|>
from nintendo.nex import backend, authentication, friends, matchmaking, common
from nintendo.account import AccountAPI
from nintendo.games import MK8, Friends
import struct
import logging
logging.basicConfig(level=logging.INFO)
DEVICE_ID = 12345678
SERIAL_NUMBER = '...'
SYSTEM_VERSION = 544
REGION = 4
COUNTRY = 'NL'
USERNAME = '...'
PASSWORD = '...'
def backend_login(title, use_auth_info, use_login_data, settings=None):
api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
nex_token = api.get_nex_token(title.GAME_SERVER_ID)
auth_info = None
login_data = None
if use_auth_info:
auth_info = authentication.AuthenticationInfo()
auth_info.token = nex_token.token
auth_info.server_version = title.SERVER_VERSION
if use_login_data:
login_data = authentication.NintendoLoginData()
login_data.token = nex_token.token
client = backend.BackEndClient(settings)
clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)
client.connect(nex_token.host, nex_token.port)
client.login(nex_token.username, nex_token.password, auth_info, login_data)
return client
api = AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.login(USERNAME, PASSWORD)
friends_backend = backend_login(Friends, False, True, 'friends.cfg')
game_backend = backend_login(MK8, True, False)
pid = game_backend.get_pid()
friends_client = friends.FriendsClient(friends_backend.secure_client)
matchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)
matchmake_session = matchmaking.MatchmakeSession()
matchmake_session.player_min = 2
matchmake_session.player_max = 12
matchmake_session.participation_policy = 98
matchmake_session.game_mode = 3
matchmake_session.attribs[4] = 1027
matchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS
session_id = matchmaker.create_matchmake_session(matchmake_session, '', 1).gid
application_data = (b'\x00\x00 \x03\x00\x00\x00\x00\x00\x00\x00\x00\x18' +
struct.pack('<I', pid) + b'\x00\x00\x00')
presence = friends.NintendoPresenceV2()
presence.flags = 494
presence.is_online = True
presence.game_key.title_id = MK8.TITLE_ID_EUR
presence.game_key.title_version = MK8.LATEST_VERSION
presence.message = "I'm a Python client"
presence.unk2 = 2
presence.unk3 = 2
presence.game_server_id = MK8.GAME_SERVER_ID
presence.unk4 = 3
presence.pid = pid
presence.gathering_id = session_id
presence.application_data = application_data
friends_client.update_presence(presence)
input('Press enter to disconnect and exit\n')
presence = friends.NintendoPresenceV2()
friends_client.update_presence(presence)
game_backend.close()
friends_backend.close()
<|reserved_special_token_1|>
from nintendo.nex import backend, authentication, friends, matchmaking, common
from nintendo.account import AccountAPI
from nintendo.games import MK8, Friends
import struct
import logging
logging.basicConfig(level=logging.INFO)
#Device id can be retrieved with a call to MCP_GetDeviceId on the Wii U
#Serial number can be found on the back of the Wii U
DEVICE_ID = 12345678
SERIAL_NUMBER = "..."
SYSTEM_VERSION = 0x220
REGION = 4 #EUR
COUNTRY = "NL"
USERNAME = "..." #Nintendo network id
PASSWORD = "..." #Nintendo network password
#This function logs in on a game server
def backend_login(title, use_auth_info, use_login_data, settings=None):
api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)
nex_token = api.get_nex_token(title.GAME_SERVER_ID)
auth_info = None
login_data = None
if use_auth_info:
auth_info = authentication.AuthenticationInfo()
auth_info.token = nex_token.token
auth_info.server_version = title.SERVER_VERSION
if use_login_data:
login_data = authentication.NintendoLoginData()
login_data.token = nex_token.token
client = backend.BackEndClient(settings)
clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)
client.connect(nex_token.host, nex_token.port)
client.login(
nex_token.username, nex_token.password, auth_info, login_data
)
return client
api = AccountAPI()
api.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)
api.login(USERNAME, PASSWORD)
#Connect to both the Mario Kart 8 server and the Wii U friends server
friends_backend = backend_login(
    Friends, False, True, "friends.cfg"
)
game_backend = backend_login(MK8, True, False)
pid = game_backend.get_pid()
# Service clients bound to each server's secure connection.
friends_client = friends.FriendsClient(friends_backend.secure_client)
matchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)
#Create a matchmake session
matchmake_session = matchmaking.MatchmakeSession()
matchmake_session.player_min = 2
matchmake_session.player_max = 12
# NOTE(review): 98 and 3 are magic protocol values -- their meaning is not
# derivable from this file; confirm against the nintendo library docs.
matchmake_session.participation_policy = 98
matchmake_session.game_mode = 3
matchmake_session.attribs[4] = 0x403 #DLCs enabled
matchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS
# The gid of the newly created gathering identifies the room we just opened.
session_id = matchmaker.create_matchmake_session(
    matchmake_session, "", 1
).gid
#Tell friends we're playing MK8 and have created a room
# Opaque presence blob; embeds our pid as a little-endian uint32.
application_data = b"\0\0\x20\x03\0\0\0\0\0\0\0\0\x18" + struct.pack("<I", pid) + b"\0\0\0"
presence = friends.NintendoPresenceV2()
presence.flags = 0x1EE  # presumably a bitmask of presence fields set below -- confirm
presence.is_online = True
presence.game_key.title_id = MK8.TITLE_ID_EUR
presence.game_key.title_version = MK8.LATEST_VERSION
presence.message = "I'm a Python client"
# unk2/unk3/unk4 are unnamed protocol fields -- values copied as-is.
presence.unk2 = 2
presence.unk3 = 2
presence.game_server_id = MK8.GAME_SERVER_ID
presence.unk4 = 3
presence.pid = pid
presence.gathering_id = session_id
presence.application_data = application_data
friends_client.update_presence(presence)
input("Press enter to disconnect and exit\n")
#Tell friends we've gone offline
presence = friends.NintendoPresenceV2()
friends_client.update_presence(presence)
#Disconnect from servers
game_backend.close()
friends_backend.close()
|
flexible
|
{
"blob_id": "43315abf9e096cdca89ed7f4de976d2706ff9c20",
"index": 9234,
"step-1": "<mask token>\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO)\n<mask token>\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\n<mask token>\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\n<mask token>\nfriends_client.update_presence(presence)\ninput('Press enter to disconnect and exit\\n')\n<mask token>\nfriends_client.update_presence(presence)\ngame_backend.close()\nfriends_backend.close()\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO)\nDEVICE_ID = 12345678\nSERIAL_NUMBER = '...'\nSYSTEM_VERSION = 544\nREGION = 4\nCOUNTRY = 'NL'\nUSERNAME = '...'\nPASSWORD = '...'\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\napi = AccountAPI()\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\nfriends_backend = backend_login(Friends, False, True, 'friends.cfg')\ngame_backend = backend_login(MK8, True, False)\npid = game_backend.get_pid()\nfriends_client = friends.FriendsClient(friends_backend.secure_client)\nmatchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)\nmatchmake_session = matchmaking.MatchmakeSession()\nmatchmake_session.player_min = 2\nmatchmake_session.player_max = 12\nmatchmake_session.participation_policy = 98\nmatchmake_session.game_mode = 3\nmatchmake_session.attribs[4] = 1027\nmatchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS\nsession_id = matchmaker.create_matchmake_session(matchmake_session, '', 1).gid\napplication_data = (b'\\x00\\x00 \\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x18' +\n struct.pack('<I', pid) + b'\\x00\\x00\\x00')\npresence = friends.NintendoPresenceV2()\npresence.flags = 494\npresence.is_online = 
True\npresence.game_key.title_id = MK8.TITLE_ID_EUR\npresence.game_key.title_version = MK8.LATEST_VERSION\npresence.message = \"I'm a Python client\"\npresence.unk2 = 2\npresence.unk3 = 2\npresence.game_server_id = MK8.GAME_SERVER_ID\npresence.unk4 = 3\npresence.pid = pid\npresence.gathering_id = session_id\npresence.application_data = application_data\nfriends_client.update_presence(presence)\ninput('Press enter to disconnect and exit\\n')\npresence = friends.NintendoPresenceV2()\nfriends_client.update_presence(presence)\ngame_backend.close()\nfriends_backend.close()\n",
"step-4": "from nintendo.nex import backend, authentication, friends, matchmaking, common\nfrom nintendo.account import AccountAPI\nfrom nintendo.games import MK8, Friends\nimport struct\nimport logging\nlogging.basicConfig(level=logging.INFO)\nDEVICE_ID = 12345678\nSERIAL_NUMBER = '...'\nSYSTEM_VERSION = 544\nREGION = 4\nCOUNTRY = 'NL'\nUSERNAME = '...'\nPASSWORD = '...'\n\n\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n api.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n nex_token = api.get_nex_token(title.GAME_SERVER_ID)\n auth_info = None\n login_data = None\n if use_auth_info:\n auth_info = authentication.AuthenticationInfo()\n auth_info.token = nex_token.token\n auth_info.server_version = title.SERVER_VERSION\n if use_login_data:\n login_data = authentication.NintendoLoginData()\n login_data.token = nex_token.token\n client = backend.BackEndClient(settings)\n clietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n client.connect(nex_token.host, nex_token.port)\n client.login(nex_token.username, nex_token.password, auth_info, login_data)\n return client\n\n\napi = AccountAPI()\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\nfriends_backend = backend_login(Friends, False, True, 'friends.cfg')\ngame_backend = backend_login(MK8, True, False)\npid = game_backend.get_pid()\nfriends_client = friends.FriendsClient(friends_backend.secure_client)\nmatchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)\nmatchmake_session = matchmaking.MatchmakeSession()\nmatchmake_session.player_min = 2\nmatchmake_session.player_max = 12\nmatchmake_session.participation_policy = 98\nmatchmake_session.game_mode = 3\nmatchmake_session.attribs[4] = 1027\nmatchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS\nsession_id = matchmaker.create_matchmake_session(matchmake_session, '', 1).gid\napplication_data = (b'\\x00\\x00 
\\x03\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x18' +\n struct.pack('<I', pid) + b'\\x00\\x00\\x00')\npresence = friends.NintendoPresenceV2()\npresence.flags = 494\npresence.is_online = True\npresence.game_key.title_id = MK8.TITLE_ID_EUR\npresence.game_key.title_version = MK8.LATEST_VERSION\npresence.message = \"I'm a Python client\"\npresence.unk2 = 2\npresence.unk3 = 2\npresence.game_server_id = MK8.GAME_SERVER_ID\npresence.unk4 = 3\npresence.pid = pid\npresence.gathering_id = session_id\npresence.application_data = application_data\nfriends_client.update_presence(presence)\ninput('Press enter to disconnect and exit\\n')\npresence = friends.NintendoPresenceV2()\nfriends_client.update_presence(presence)\ngame_backend.close()\nfriends_backend.close()\n",
"step-5": "\nfrom nintendo.nex import backend, authentication, friends, matchmaking, common\nfrom nintendo.account import AccountAPI\nfrom nintendo.games import MK8, Friends\nimport struct\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\n\n#Device id can be retrieved with a call to MCP_GetDeviceId on the Wii U\n#Serial number can be found on the back of the Wii U\nDEVICE_ID = 12345678\nSERIAL_NUMBER = \"...\"\nSYSTEM_VERSION = 0x220\nREGION = 4 #EUR\nCOUNTRY = \"NL\"\n\nUSERNAME = \"...\" #Nintendo network id\nPASSWORD = \"...\" #Nintendo network password\n\n\n#This function logs in on a game server\ndef backend_login(title, use_auth_info, use_login_data, settings=None):\n\tapi.set_title(title.TITLE_ID_EUR, title.LATEST_VERSION)\n\tnex_token = api.get_nex_token(title.GAME_SERVER_ID)\n\n\tauth_info = None\n\tlogin_data = None\n\tif use_auth_info:\n\t\tauth_info = authentication.AuthenticationInfo()\n\t\tauth_info.token = nex_token.token\n\t\tauth_info.server_version = title.SERVER_VERSION\n\tif use_login_data:\n\t\tlogin_data = authentication.NintendoLoginData()\n\t\tlogin_data.token = nex_token.token\n\t\n\tclient = backend.BackEndClient(settings)\n\tclietn.configure(title.ACCESS_KEY, title.NEX_VERSION)\n\tclient.connect(nex_token.host, nex_token.port)\n\tclient.login(\n\t\tnex_token.username, nex_token.password, auth_info, login_data\n\t)\n\treturn client\n\n\napi = AccountAPI()\napi.set_device(DEVICE_ID, SERIAL_NUMBER, SYSTEM_VERSION, REGION, COUNTRY)\napi.login(USERNAME, PASSWORD)\n\n#Connect to both the Mario Kart 8 server and the Wii U friends server\nfriends_backend = backend_login(\n\tFriends, False, True, \"friends.cfg\"\n)\ngame_backend = backend_login(MK8, True, False)\n\npid = game_backend.get_pid()\n\nfriends_client = friends.FriendsClient(friends_backend.secure_client)\nmatchmaker = matchmaking.MatchmakeExtensionClient(game_backend.secure_client)\n\n#Create a matchmake session\nmatchmake_session = 
matchmaking.MatchmakeSession()\nmatchmake_session.player_min = 2\nmatchmake_session.player_max = 12\nmatchmake_session.participation_policy = 98\nmatchmake_session.game_mode = 3\nmatchmake_session.attribs[4] = 0x403 #DLCs enabled\nmatchmake_session.matchmake_system = matchmaking.MatchmakeSystem.FRIENDS\n\nsession_id = matchmaker.create_matchmake_session(\n\tmatchmake_session, \"\", 1\n).gid\n\n#Tell friends we're playing MK8 and have created a room\napplication_data = b\"\\0\\0\\x20\\x03\\0\\0\\0\\0\\0\\0\\0\\0\\x18\" + struct.pack(\"<I\", pid) + b\"\\0\\0\\0\"\n\npresence = friends.NintendoPresenceV2()\npresence.flags = 0x1EE\npresence.is_online = True\npresence.game_key.title_id = MK8.TITLE_ID_EUR\npresence.game_key.title_version = MK8.LATEST_VERSION\npresence.message = \"I'm a Python client\"\npresence.unk2 = 2\npresence.unk3 = 2\npresence.game_server_id = MK8.GAME_SERVER_ID\npresence.unk4 = 3\npresence.pid = pid\npresence.gathering_id = session_id\npresence.application_data = application_data\n\nfriends_client.update_presence(presence)\n\ninput(\"Press enter to disconnect and exit\\n\")\n\n#Tell friends we've gone offline\npresence = friends.NintendoPresenceV2()\nfriends_client.update_presence(presence)\n\n#Disconnect from servers\ngame_backend.close()\nfriends_backend.close()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/python
class Bob(object):
    """A lackadaisical teenager whose reply depends only on how he's addressed."""

    def __init__(self):
        # Canned replies, selected by hey().
        self.question_response = "Sure."
        self.yell_response = "Woah, chill out!"
        self.silent_response = "Fine. Be that way!"
        self.whatever = "Whatever."

    def hey(self, question):
        """Return Bob's reply: silence, yelling, a question, or anything else.

        Yelling is checked before the question mark, so a shouted question
        still gets the yell response.
        """
        if not question or not question.strip():
            return self.silent_response
        if question.isupper():
            return self.yell_response
        if question.endswith("?"):
            return self.question_response
        return self.whatever
|
normal
|
{
"blob_id": "7ff7da216bdda5c30bf7c973c82886035b31247c",
"index": 4093,
"step-1": "<mask token>\n",
"step-2": "class Bob(object):\n <mask token>\n <mask token>\n",
"step-3": "class Bob(object):\n <mask token>\n\n def hey(self, question):\n if not question or question.strip() == '':\n return self.silent_response\n if question.isupper():\n return self.yell_response\n elif question.endswith('?'):\n return self.question_response\n return self.whatever\n",
"step-4": "class Bob(object):\n\n def __init__(self):\n self.question_response = 'Sure.'\n self.yell_response = 'Woah, chill out!'\n self.silent_response = 'Fine. Be that way!'\n self.whatever = 'Whatever.'\n\n def hey(self, question):\n if not question or question.strip() == '':\n return self.silent_response\n if question.isupper():\n return self.yell_response\n elif question.endswith('?'):\n return self.question_response\n return self.whatever\n",
"step-5": "#!/usr/bin/python\n\nclass Bob(object):\n def __init__(self):\n self.question_response = \"Sure.\"\n self.yell_response = \"Woah, chill out!\"\n self.silent_response = \"Fine. Be that way!\"\n self.whatever = \"Whatever.\"\n\n def hey(self, question):\n if not(question) or question.strip()=='':\n return self.silent_response\n if question.isupper():\n return self.yell_response\n elif question.endswith(\"?\"):\n return self.question_response\n return self.whatever\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from sys import argv
from pyspark import SparkContext
import json
import re
import math
from _datetime import datetime
# Wall-clock timer for the whole job; duration is printed at the end.
start_time = datetime.now()
# CLI arguments: review JSON input, output model path, stopword list path.
input_file = argv[1]
model_file = argv[2]
stopwords = argv[3]
sc = SparkContext(appName='inf553')
# One JSON review object per input line.
lines = sc.textFile(input_file).map(lambda x: json.loads(x))
# Rebind: pair each stopword with a dummy value so subtractByKey can use it.
stopwords = sc.textFile(stopwords).map(lambda x: (x, 1))
def tf_idf(words, total_docs=None, doc_freq=None):
    """Return the top-200 (word, TF-IDF score) pairs for one document.

    words: flat token list of a single document (may contain duplicates).
    total_docs: corpus size; defaults to the module-level N.
    doc_freq: mapping word -> number of documents containing it; defaults
        to the module-level n_dict.

    TF is the term count normalised by the document's most frequent term;
    IDF is log2(total_docs / doc_freq[w]). Pairs are sorted by score,
    descending.
    """
    if total_docs is None:
        total_docs = N  # module-level corpus size
    if doc_freq is None:
        doc_freq = n_dict  # module-level document-frequency table
    counts = {}
    for w in words:
        counts[w] = counts.get(w, 0) + 1
    max_freq = max(counts.values())
    # BUG FIX: the original looped over the raw token list here, re-applying
    # the TF-IDF transform once per duplicate token and corrupting the score
    # of every repeated word. Transform each distinct word exactly once.
    word_dict = {w: (c / max_freq) * math.log(total_docs / doc_freq[w], 2)
                 for w, c in counts.items()}
    a = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)
    return a[:200]
# Group all review texts per business, flatten to one string, strip quote
# artefacts, punctuation and digits, lower-case, then tokenise on whitespace.
b_text = lines.map(lambda x: (x['business_id'], x['text']))\
    .groupByKey().map(lambda x: (x[0], list(x[1])))\
    .map(lambda x: (x[0], str(x[1]).replace('!\'', '')))\
    .map(lambda x: (x[0], x[1].replace('.\'', ''))) \
    .map(lambda x: (x[0], x[1].replace(', \'', ''))) \
    .map(lambda x: (x[0], x[1].replace('\\n',''))) \
    .map(lambda x: (x[0], x[1].replace('\\\'',"'")))\
    .map(lambda x: (x[0], re.sub('[{}+=~*%#$@(\-/[,.!?&:;\]0-9)"]', ' ', str(x[1]).lower()))) \
    .mapValues(lambda x: x.split())
# Corpus-wide token count, used to define which words count as "rare".
total_words_num = b_text.flatMap(lambda x: x[1]).count()
# Words whose total count is below 0.0001% of all tokens are treated as noise.
rare_words = b_text.flatMap(lambda x: x[1])\
    .map(lambda x: (x, 1))\
    .reduceByKey(lambda x, y: x+y)\
    .filter(lambda x: x[1] < total_words_num * 0.000001)\
    .map(lambda x: (x[0], 1))
# (word, business_id) pairs with rare words and stopwords removed.
b_unset_words = b_text.flatMap(lambda x: [(word, x[0]) for word in x[1]])\
    .subtractByKey(rare_words)\
    .subtractByKey(stopwords)
# Document frequency: number of distinct businesses containing each word.
n = b_unset_words.groupByKey()\
    .map(lambda x: (x[0], len(set(x[1]))))

# n_dict and N are read by tf_idf() at call time.
n_dict = dict(n.collect())
# Total number of businesses (documents).
# NOTE(review): this chained assignment also rebinds b_text to an int.
# b_text is not used again afterwards, but the extra target looks accidental.
N = b_text = lines.map(lambda x: (x['business_id'])).distinct().count()

# Per-business profile: top-200 TF-IDF words, keeping only the word strings.
b_profile = b_unset_words.map(lambda x: (x[1], x[0]))\
    .groupByKey().map(lambda x: (x[0], list(x[1])))\
    .map(lambda x: (x[0], tf_idf(x[1]))) \
    .map(lambda x: (x[0], [word[0] for word in x[1]]))

# Assign every profile word a stable integer index.
words_list = b_profile.flatMap(lambda x: x[1]).distinct().collect()
words = dict([(word, ind) for ind, word in enumerate(words_list)])

# Re-encode each business profile as a list of word indices.
b_profile2 = b_profile.map(lambda x: (x[0], [words[word_ind] for word_ind in x[1]]))
b_profile_dict = dict(b_profile2.collect())
def user_prof(b_list, profiles=None):
    """Return the union of word-index profiles of the businesses in *b_list*.

    b_list: business ids the user has reviewed (duplicates allowed).
    profiles: mapping business_id -> list of word indices; defaults to the
        module-level b_profile_dict built earlier in the file.

    The result is a deduplicated list (order unspecified, as before).
    """
    if profiles is None:
        profiles = b_profile_dict  # module-level mapping built earlier
    u_profile_words = []
    for b in b_list:
        u_profile_words.extend(profiles[b])
    return list(set(u_profile_words))
# User profile = union of the profiles of every business the user reviewed.
user_profile = lines.map(lambda x: (x['user_id'], x['business_id']))\
    .groupByKey().map(lambda x: (x[0], list(x[1])))\
    .map(lambda x: (x[0], user_prof(x[1])))
# NOTE(review): the handle is never closed; a `with open(...)` block would
# guarantee the buffered output is flushed.
f = open(model_file, "w")
# One JSON object per line: all user vectors first, then business vectors.
for user, u_vector in dict(user_profile.collect()).items():
    f.write(json.dumps({"id": user, "type": "user", "vector": u_vector}))
    f.write('\n')
for business, b_vector in b_profile_dict.items():
    f.write(json.dumps({"id": business, "type": "business", "vector": b_vector}))
    f.write('\n')

end_time = datetime.now()
duration = end_time - start_time
print("Duration:", duration)
|
normal
|
{
"blob_id": "e877f16e604682488d85142174ce4f3f6cee3f18",
"index": 7882,
"step-1": "<mask token>\n\n\ndef tf_idf(words):\n word_dict = {}\n for w in words:\n if w in word_dict.keys():\n word_dict[w] += 1\n else:\n word_dict[w] = 1\n max_freq = max(word_dict.values())\n for w in words:\n word_dict[w] = word_dict[w] / max_freq * math.log(N / n_dict[w], 2)\n a = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)\n return a[:200]\n\n\n<mask token>\n\n\ndef user_prof(b_list):\n u_profile_words = []\n for b in b_list:\n u_profile_words.extend(b_profile_dict[b])\n return list(set(u_profile_words))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef tf_idf(words):\n word_dict = {}\n for w in words:\n if w in word_dict.keys():\n word_dict[w] += 1\n else:\n word_dict[w] = 1\n max_freq = max(word_dict.values())\n for w in words:\n word_dict[w] = word_dict[w] / max_freq * math.log(N / n_dict[w], 2)\n a = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)\n return a[:200]\n\n\n<mask token>\n\n\ndef user_prof(b_list):\n u_profile_words = []\n for b in b_list:\n u_profile_words.extend(b_profile_dict[b])\n return list(set(u_profile_words))\n\n\n<mask token>\nfor user, u_vector in dict(user_profile.collect()).items():\n f.write(json.dumps({'id': user, 'type': 'user', 'vector': u_vector}))\n f.write('\\n')\nfor business, b_vector in b_profile_dict.items():\n f.write(json.dumps({'id': business, 'type': 'business', 'vector':\n b_vector}))\n f.write('\\n')\n<mask token>\nprint('Duration:', duration)\n",
"step-3": "<mask token>\nstart_time = datetime.now()\ninput_file = argv[1]\nmodel_file = argv[2]\nstopwords = argv[3]\nsc = SparkContext(appName='inf553')\nlines = sc.textFile(input_file).map(lambda x: json.loads(x))\nstopwords = sc.textFile(stopwords).map(lambda x: (x, 1))\n\n\ndef tf_idf(words):\n word_dict = {}\n for w in words:\n if w in word_dict.keys():\n word_dict[w] += 1\n else:\n word_dict[w] = 1\n max_freq = max(word_dict.values())\n for w in words:\n word_dict[w] = word_dict[w] / max_freq * math.log(N / n_dict[w], 2)\n a = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)\n return a[:200]\n\n\nb_text = lines.map(lambda x: (x['business_id'], x['text'])).groupByKey().map(\n lambda x: (x[0], list(x[1]))).map(lambda x: (x[0], str(x[1]).replace(\n \"!'\", ''))).map(lambda x: (x[0], x[1].replace(\".'\", ''))).map(lambda x:\n (x[0], x[1].replace(\", '\", ''))).map(lambda x: (x[0], x[1].replace(\n '\\\\n', ''))).map(lambda x: (x[0], x[1].replace(\"\\\\'\", \"'\"))).map(lambda\n x: (x[0], re.sub('[{}+=~*%#$@(\\\\-/[,.!?&:;\\\\]0-9)\"]', ' ', str(x[1]).\n lower()))).mapValues(lambda x: x.split())\ntotal_words_num = b_text.flatMap(lambda x: x[1]).count()\nrare_words = b_text.flatMap(lambda x: x[1]).map(lambda x: (x, 1)).reduceByKey(\n lambda x, y: x + y).filter(lambda x: x[1] < total_words_num * 1e-06).map(\n lambda x: (x[0], 1))\nb_unset_words = b_text.flatMap(lambda x: [(word, x[0]) for word in x[1]]\n ).subtractByKey(rare_words).subtractByKey(stopwords)\nn = b_unset_words.groupByKey().map(lambda x: (x[0], len(set(x[1]))))\nn_dict = dict(n.collect())\nN = b_text = lines.map(lambda x: x['business_id']).distinct().count()\nb_profile = b_unset_words.map(lambda x: (x[1], x[0])).groupByKey().map(lambda\n x: (x[0], list(x[1]))).map(lambda x: (x[0], tf_idf(x[1]))).map(lambda x:\n (x[0], [word[0] for word in x[1]]))\nwords_list = b_profile.flatMap(lambda x: x[1]).distinct().collect()\nwords = dict([(word, ind) for ind, word in 
enumerate(words_list)])\nb_profile2 = b_profile.map(lambda x: (x[0], [words[word_ind] for word_ind in\n x[1]]))\nb_profile_dict = dict(b_profile2.collect())\n\n\ndef user_prof(b_list):\n u_profile_words = []\n for b in b_list:\n u_profile_words.extend(b_profile_dict[b])\n return list(set(u_profile_words))\n\n\nuser_profile = lines.map(lambda x: (x['user_id'], x['business_id'])\n ).groupByKey().map(lambda x: (x[0], list(x[1]))).map(lambda x: (x[0],\n user_prof(x[1])))\nf = open(model_file, 'w')\nfor user, u_vector in dict(user_profile.collect()).items():\n f.write(json.dumps({'id': user, 'type': 'user', 'vector': u_vector}))\n f.write('\\n')\nfor business, b_vector in b_profile_dict.items():\n f.write(json.dumps({'id': business, 'type': 'business', 'vector':\n b_vector}))\n f.write('\\n')\nend_time = datetime.now()\nduration = end_time - start_time\nprint('Duration:', duration)\n",
"step-4": "from sys import argv\nfrom pyspark import SparkContext\nimport json\nimport re\nimport math\nfrom _datetime import datetime\nstart_time = datetime.now()\ninput_file = argv[1]\nmodel_file = argv[2]\nstopwords = argv[3]\nsc = SparkContext(appName='inf553')\nlines = sc.textFile(input_file).map(lambda x: json.loads(x))\nstopwords = sc.textFile(stopwords).map(lambda x: (x, 1))\n\n\ndef tf_idf(words):\n word_dict = {}\n for w in words:\n if w in word_dict.keys():\n word_dict[w] += 1\n else:\n word_dict[w] = 1\n max_freq = max(word_dict.values())\n for w in words:\n word_dict[w] = word_dict[w] / max_freq * math.log(N / n_dict[w], 2)\n a = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)\n return a[:200]\n\n\nb_text = lines.map(lambda x: (x['business_id'], x['text'])).groupByKey().map(\n lambda x: (x[0], list(x[1]))).map(lambda x: (x[0], str(x[1]).replace(\n \"!'\", ''))).map(lambda x: (x[0], x[1].replace(\".'\", ''))).map(lambda x:\n (x[0], x[1].replace(\", '\", ''))).map(lambda x: (x[0], x[1].replace(\n '\\\\n', ''))).map(lambda x: (x[0], x[1].replace(\"\\\\'\", \"'\"))).map(lambda\n x: (x[0], re.sub('[{}+=~*%#$@(\\\\-/[,.!?&:;\\\\]0-9)\"]', ' ', str(x[1]).\n lower()))).mapValues(lambda x: x.split())\ntotal_words_num = b_text.flatMap(lambda x: x[1]).count()\nrare_words = b_text.flatMap(lambda x: x[1]).map(lambda x: (x, 1)).reduceByKey(\n lambda x, y: x + y).filter(lambda x: x[1] < total_words_num * 1e-06).map(\n lambda x: (x[0], 1))\nb_unset_words = b_text.flatMap(lambda x: [(word, x[0]) for word in x[1]]\n ).subtractByKey(rare_words).subtractByKey(stopwords)\nn = b_unset_words.groupByKey().map(lambda x: (x[0], len(set(x[1]))))\nn_dict = dict(n.collect())\nN = b_text = lines.map(lambda x: x['business_id']).distinct().count()\nb_profile = b_unset_words.map(lambda x: (x[1], x[0])).groupByKey().map(lambda\n x: (x[0], list(x[1]))).map(lambda x: (x[0], tf_idf(x[1]))).map(lambda x:\n (x[0], [word[0] for word in x[1]]))\nwords_list = 
b_profile.flatMap(lambda x: x[1]).distinct().collect()\nwords = dict([(word, ind) for ind, word in enumerate(words_list)])\nb_profile2 = b_profile.map(lambda x: (x[0], [words[word_ind] for word_ind in\n x[1]]))\nb_profile_dict = dict(b_profile2.collect())\n\n\ndef user_prof(b_list):\n u_profile_words = []\n for b in b_list:\n u_profile_words.extend(b_profile_dict[b])\n return list(set(u_profile_words))\n\n\nuser_profile = lines.map(lambda x: (x['user_id'], x['business_id'])\n ).groupByKey().map(lambda x: (x[0], list(x[1]))).map(lambda x: (x[0],\n user_prof(x[1])))\nf = open(model_file, 'w')\nfor user, u_vector in dict(user_profile.collect()).items():\n f.write(json.dumps({'id': user, 'type': 'user', 'vector': u_vector}))\n f.write('\\n')\nfor business, b_vector in b_profile_dict.items():\n f.write(json.dumps({'id': business, 'type': 'business', 'vector':\n b_vector}))\n f.write('\\n')\nend_time = datetime.now()\nduration = end_time - start_time\nprint('Duration:', duration)\n",
"step-5": "from sys import argv\nfrom pyspark import SparkContext\nimport json\nimport re\nimport math\nfrom _datetime import datetime\nstart_time = datetime.now()\ninput_file = argv[1]\nmodel_file = argv[2]\nstopwords = argv[3]\nsc = SparkContext(appName='inf553')\nlines = sc.textFile(input_file).map(lambda x: json.loads(x))\nstopwords = sc.textFile(stopwords).map(lambda x: (x, 1))\n\n\ndef tf_idf(words):\n word_dict = {}\n for w in words:\n if w in word_dict.keys():\n word_dict[w] += 1\n else:\n word_dict[w] = 1\n max_freq = max(word_dict.values())\n for w in words:\n word_dict[w] = (word_dict[w] / max_freq) * math.log((N / n_dict[w]), 2)\n a = sorted(word_dict.items(), key=lambda x: x[1], reverse=True)\n return a[:200]\n\n\nb_text = lines.map(lambda x: (x['business_id'], x['text']))\\\n .groupByKey().map(lambda x: (x[0], list(x[1])))\\\n .map(lambda x: (x[0], str(x[1]).replace('!\\'', '')))\\\n .map(lambda x: (x[0], x[1].replace('.\\'', ''))) \\\n .map(lambda x: (x[0], x[1].replace(', \\'', ''))) \\\n .map(lambda x: (x[0], x[1].replace('\\\\n',''))) \\\n .map(lambda x: (x[0], x[1].replace('\\\\\\'',\"'\")))\\\n .map(lambda x: (x[0], re.sub('[{}+=~*%#$@(\\-/[,.!?&:;\\]0-9)\"]', ' ', str(x[1]).lower()))) \\\n .mapValues(lambda x: x.split())\n\ntotal_words_num = b_text.flatMap(lambda x: x[1]).count()\nrare_words = b_text.flatMap(lambda x: x[1])\\\n .map(lambda x: (x, 1))\\\n .reduceByKey(lambda x, y: x+y)\\\n .filter(lambda x: x[1] < total_words_num * 0.000001)\\\n .map(lambda x: (x[0], 1))\nb_unset_words = b_text.flatMap(lambda x: [(word, x[0]) for word in x[1]])\\\n .subtractByKey(rare_words)\\\n .subtractByKey(stopwords)\nn = b_unset_words.groupByKey()\\\n .map(lambda x: (x[0], len(set(x[1]))))\n\nn_dict = dict(n.collect())\nN = b_text = lines.map(lambda x: (x['business_id'])).distinct().count()\n\nb_profile = b_unset_words.map(lambda x: (x[1], x[0]))\\\n .groupByKey().map(lambda x: (x[0], list(x[1])))\\\n .map(lambda x: (x[0], tf_idf(x[1]))) \\\n .map(lambda x: 
(x[0], [word[0] for word in x[1]]))\n\nwords_list = b_profile.flatMap(lambda x: x[1]).distinct().collect()\nwords = dict([(word, ind) for ind, word in enumerate(words_list)])\n\nb_profile2 = b_profile.map(lambda x: (x[0], [words[word_ind] for word_ind in x[1]]))\nb_profile_dict = dict(b_profile2.collect())\n\n\ndef user_prof(b_list):\n u_profile_words =[]\n for b in b_list:\n u_profile_words.extend(b_profile_dict[b])\n return list(set(u_profile_words))\n\n\nuser_profile = lines.map(lambda x: (x['user_id'], x['business_id']))\\\n .groupByKey().map(lambda x: (x[0], list(x[1])))\\\n .map(lambda x: (x[0], user_prof(x[1])))\nf = open(model_file, \"w\")\nfor user, u_vector in dict(user_profile.collect()).items():\n f.write(json.dumps({\"id\": user, \"type\": \"user\", \"vector\": u_vector}))\n f.write('\\n')\nfor business, b_vector in b_profile_dict.items():\n f.write(json.dumps({\"id\": business, \"type\": \"business\", \"vector\": b_vector}))\n f.write('\\n')\n\nend_time = datetime.now()\nduration = end_time - start_time\nprint(\"Duration:\", duration)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def filter(txt):
    """Return the bracket characters of *txt* -- '(', ')', '[', ']' -- in order."""
    # Intentionally shadows the builtin filter(); the rest of the file calls
    # this name.
    return [ch for ch in txt if ch in ('(', ')', '[', ']')]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def filter(txt):
    """Keep only the parenthesis/bracket characters of *txt*, in order."""
    brackets = ('(', ')', '[', ']')
    # NOTE: shadows the builtin filter(); callers in this file use the name.
    return [symbol for symbol in txt if symbol in brackets]
<|reserved_special_token_0|>
# NOTE(review): `result` (the list of per-line verdicts) is defined earlier
# in the file; it is appended to below and printed once input ends.
while True:
    raw_input = input()  # NOTE: shadows the Python 2 builtin name; a plain str
    # Keep only the bracket characters of the line.
    line = filter(raw_input)
    if raw_input != '.':
        stack = []
        err = False
        for l in line:
            try:
                if l == '(' or l == '[':
                    # Opening bracket: push and wait for its partner.
                    stack.append(l)
                elif l == ']':
                    if stack[len(stack) - 1] == '[':
                        stack.pop()
                    else:
                        err = True
                        break
                elif l == ')':
                    if stack[len(stack) - 1] == '(':
                        stack.pop()
                    else:
                        err = True
                        break
            # The bare except catches the IndexError from indexing an empty
            # stack (a closer with no opener). NOTE(review): it also hides
            # unrelated errors; `except IndexError:` would be safer.
            except:
                err = True
                break
        # Balanced only if no mismatch occurred and nothing is left open.
        if err == True or len(stack) != 0:
            result.append('no')
        else:
            result.append('yes')
    else:
        # A lone '.' line terminates the input.
        break
for r in result:
    print(r)
<|reserved_special_token_1|>
def filter(txt):
    """Extract '(', ')', '[' and ']' from *txt*, preserving their order."""
    kept = []
    for symbol in txt:
        # Everything that is not a bracket is irrelevant to balance checking.
        if symbol in ('(', ')', '[', ']'):
            kept.append(symbol)
    return kept
# One 'yes'/'no' verdict per input line, printed after all input is consumed.
result = []
while True:
    raw_input = input()  # NOTE: shadows the Python 2 builtin name; a plain str
    # Keep only the bracket characters of the line.
    line = filter(raw_input)
    if raw_input != '.':
        stack = []
        err = False
        for l in line:
            try:
                if l == '(' or l == '[':
                    # Opening bracket: push and wait for its partner.
                    stack.append(l)
                elif l == ']':
                    if stack[len(stack) - 1] == '[':
                        stack.pop()
                    else:
                        err = True
                        break
                elif l == ')':
                    if stack[len(stack) - 1] == '(':
                        stack.pop()
                    else:
                        err = True
                        break
            # The bare except catches the IndexError from indexing an empty
            # stack (a closer with no opener). NOTE(review): it also hides
            # unrelated errors; `except IndexError:` would be safer.
            except:
                err = True
                break
        # Balanced only if no mismatch occurred and nothing is left open.
        if err == True or len(stack) != 0:
            result.append('no')
        else:
            result.append('yes')
    else:
        # A lone '.' line terminates the input.
        break
for r in result:
    print(r)
<|reserved_special_token_1|>
def filter(txt): # can be improved using regular expression
output = []
for t in txt:
if t == "(" or t == ")" or t == "[" or t == "]":
output.append(t)
return output
result = []
while True:
raw_input = input()
line = filter(raw_input)
if raw_input != ".":
stack = []
err = False
for l in line:
try:
if l == "(" or l == "[":
stack.append(l)
elif l == "]":
if stack[len(stack) - 1] == "[":
stack.pop()
else:
err = True
break
elif l == ")":
if stack[len(stack) - 1] == "(":
stack.pop()
else:
err = True
break
except:
err = True
break
if err == True or len(stack) != 0:
result.append("no")
else:
result.append("yes")
else:
break
for r in result:
print(r)
|
flexible
|
{
"blob_id": "9ca769ae8bbabee20b5dd4d75ab91d3c30e8d1bf",
"index": 8387,
"step-1": "<mask token>\n",
"step-2": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\n<mask token>\n",
"step-3": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\n<mask token>\nwhile True:\n raw_input = input()\n line = filter(raw_input)\n if raw_input != '.':\n stack = []\n err = False\n for l in line:\n try:\n if l == '(' or l == '[':\n stack.append(l)\n elif l == ']':\n if stack[len(stack) - 1] == '[':\n stack.pop()\n else:\n err = True\n break\n elif l == ')':\n if stack[len(stack) - 1] == '(':\n stack.pop()\n else:\n err = True\n break\n except:\n err = True\n break\n if err == True or len(stack) != 0:\n result.append('no')\n else:\n result.append('yes')\n else:\n break\nfor r in result:\n print(r)\n",
"step-4": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\nresult = []\nwhile True:\n raw_input = input()\n line = filter(raw_input)\n if raw_input != '.':\n stack = []\n err = False\n for l in line:\n try:\n if l == '(' or l == '[':\n stack.append(l)\n elif l == ']':\n if stack[len(stack) - 1] == '[':\n stack.pop()\n else:\n err = True\n break\n elif l == ')':\n if stack[len(stack) - 1] == '(':\n stack.pop()\n else:\n err = True\n break\n except:\n err = True\n break\n if err == True or len(stack) != 0:\n result.append('no')\n else:\n result.append('yes')\n else:\n break\nfor r in result:\n print(r)\n",
"step-5": "def filter(txt): # can be improved using regular expression\n\toutput = []\n\tfor t in txt:\n\t\tif t == \"(\" or t == \")\" or t == \"[\" or t == \"]\":\n\t\t\toutput.append(t)\n\treturn output\n\nresult = []\nwhile True:\n\traw_input = input()\n\tline = filter(raw_input)\n\t\n\tif raw_input != \".\":\n\t\tstack = []\n\t\terr = False\n\t\t\n\t\tfor l in line:\n\t\t\ttry:\n\t\t\t\tif l == \"(\" or l == \"[\":\n\t\t\t\t\tstack.append(l)\n\t\t\t\telif l == \"]\":\n\t\t\t\t\tif stack[len(stack) - 1] == \"[\":\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\terr = True\n\t\t\t\t\t\tbreak\n\t\t\t\telif l == \")\":\n\t\t\t\t\tif stack[len(stack) - 1] == \"(\":\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\terr = True\n\t\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\terr = True\n\t\t\t\tbreak\n\t\tif err == True or len(stack) != 0:\n\t\t\tresult.append(\"no\")\n\t\telse:\n\t\t\tresult.append(\"yes\")\n\telse:\n\t\tbreak\n\nfor r in result:\n\tprint(r)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ax.plot(times, result.expect[0])
ax.plot(times, result.expect[1])
ax.set_xlabel('Time')
ax.set_ylabel('Expectation values')
ax.legend(('Sigma-Z', 'Sigma-Y'))
plt.show()
<|reserved_special_token_1|>
times = np.linspace(0.0, 10.0, 100)
result = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(),
sigmay()])
fig, ax = plt.subplots()
ax.plot(times, result.expect[0])
ax.plot(times, result.expect[1])
ax.set_xlabel('Time')
ax.set_ylabel('Expectation values')
ax.legend(('Sigma-Z', 'Sigma-Y'))
plt.show()
<|reserved_special_token_1|>
times = np.linspace(0.0, 10.0, 100)
result = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])
fig, ax = plt.subplots()
ax.plot(times, result.expect[0]) # doctest: +SKIP
ax.plot(times, result.expect[1]) # doctest: +SKIP
ax.set_xlabel('Time') # doctest: +SKIP
ax.set_ylabel('Expectation values') # doctest: +SKIP
ax.legend(("Sigma-Z", "Sigma-Y")) # doctest: +SKIP
plt.show() # doctest: +SKIP
|
flexible
|
{
"blob_id": "8474205d49aef2d18755fc1a25a82718962f4120",
"index": 6912,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nax.plot(times, result.expect[0])\nax.plot(times, result.expect[1])\nax.set_xlabel('Time')\nax.set_ylabel('Expectation values')\nax.legend(('Sigma-Z', 'Sigma-Y'))\nplt.show()\n",
"step-3": "times = np.linspace(0.0, 10.0, 100)\nresult = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(),\n sigmay()])\nfig, ax = plt.subplots()\nax.plot(times, result.expect[0])\nax.plot(times, result.expect[1])\nax.set_xlabel('Time')\nax.set_ylabel('Expectation values')\nax.legend(('Sigma-Z', 'Sigma-Y'))\nplt.show()\n",
"step-4": "times = np.linspace(0.0, 10.0, 100)\nresult = mesolve(H, psi0, times, [np.sqrt(0.05) * sigmax()], [sigmaz(), sigmay()])\nfig, ax = plt.subplots()\nax.plot(times, result.expect[0]) # doctest: +SKIP\nax.plot(times, result.expect[1]) # doctest: +SKIP\nax.set_xlabel('Time') # doctest: +SKIP\nax.set_ylabel('Expectation values') # doctest: +SKIP\nax.legend((\"Sigma-Z\", \"Sigma-Y\")) # doctest: +SKIP\nplt.show() # doctest: +SKIP\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('account', '0001_initial')]
operations = [migrations.AlterField(model_name='account', name=
'phone_number', field=models.CharField(max_length=15, verbose_name=
'phone number'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('account', '0001_initial')]
operations = [migrations.AlterField(model_name='account', name=
'phone_number', field=models.CharField(max_length=15, verbose_name=
'phone number'))]
<|reserved_special_token_1|>
# Generated by Django 2.2.2 on 2019-10-19 14:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='account',
name='phone_number',
field=models.CharField(max_length=15, verbose_name='phone number'),
),
]
|
flexible
|
{
"blob_id": "7d25a8eb61b6fb9069616745c2b68fd3ceeca9fb",
"index": 6600,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('account', '0001_initial')]\n operations = [migrations.AlterField(model_name='account', name=\n 'phone_number', field=models.CharField(max_length=15, verbose_name=\n 'phone number'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('account', '0001_initial')]\n operations = [migrations.AlterField(model_name='account', name=\n 'phone_number', field=models.CharField(max_length=15, verbose_name=\n 'phone number'))]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-10-19 14:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('account', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='account',\n name='phone_number',\n field=models.CharField(max_length=15, verbose_name='phone number'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class RedisHandler(Handler):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
async def emit(self, record: ExtendedLogRecord) ->None:
await self.redis_client.rpush(self.key, self.format(record))
async def close(self) ->None:
self.redis_client.close()
await self.redis_client.wait_closed()
@staticmethod
def format(record: ExtendedLogRecord):
o = {'msg': record.get_message(), 'logged_at': record.created,
'line_number': record.lineno, 'file': record.pathname,
'function': record.funcName, 'level': record.levelname,
'module': record.module, 'kwargs': record.args, **record.extra}
return json.dumps(o, ensure_ascii=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RedisHandler(Handler):
def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,
**kwargs):
super().__init__(*args, **kwargs)
self.key = key
self.redis_client: Redis = redis_client
<|reserved_special_token_0|>
async def emit(self, record: ExtendedLogRecord) ->None:
await self.redis_client.rpush(self.key, self.format(record))
async def close(self) ->None:
self.redis_client.close()
await self.redis_client.wait_closed()
@staticmethod
def format(record: ExtendedLogRecord):
o = {'msg': record.get_message(), 'logged_at': record.created,
'line_number': record.lineno, 'file': record.pathname,
'function': record.funcName, 'level': record.levelname,
'module': record.module, 'kwargs': record.args, **record.extra}
return json.dumps(o, ensure_ascii=False)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RedisHandler(Handler):
def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,
**kwargs):
super().__init__(*args, **kwargs)
self.key = key
self.redis_client: Redis = redis_client
@property
def initialized(self):
return not self.redis_client.closed
async def emit(self, record: ExtendedLogRecord) ->None:
await self.redis_client.rpush(self.key, self.format(record))
async def close(self) ->None:
self.redis_client.close()
await self.redis_client.wait_closed()
@staticmethod
def format(record: ExtendedLogRecord):
o = {'msg': record.get_message(), 'logged_at': record.created,
'line_number': record.lineno, 'file': record.pathname,
'function': record.funcName, 'level': record.levelname,
'module': record.module, 'kwargs': record.args, **record.extra}
return json.dumps(o, ensure_ascii=False)
<|reserved_special_token_1|>
import json
from aioredis import Redis
from aiologger.loggers.json import ExtendedLogRecord
from aiologger.handlers.base import Handler
from app.core import config
class RedisHandler(Handler):
def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,
**kwargs):
super().__init__(*args, **kwargs)
self.key = key
self.redis_client: Redis = redis_client
@property
def initialized(self):
return not self.redis_client.closed
async def emit(self, record: ExtendedLogRecord) ->None:
await self.redis_client.rpush(self.key, self.format(record))
async def close(self) ->None:
self.redis_client.close()
await self.redis_client.wait_closed()
@staticmethod
def format(record: ExtendedLogRecord):
o = {'msg': record.get_message(), 'logged_at': record.created,
'line_number': record.lineno, 'file': record.pathname,
'function': record.funcName, 'level': record.levelname,
'module': record.module, 'kwargs': record.args, **record.extra}
return json.dumps(o, ensure_ascii=False)
<|reserved_special_token_1|>
import json
from aioredis import Redis
from aiologger.loggers.json import ExtendedLogRecord
from aiologger.handlers.base import Handler
from app.core import config
class RedisHandler(Handler):
def __init__(
self,
redis_client,
key=f"{config.APP_NAME}-log",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.key = key
self.redis_client: Redis = redis_client
@property
def initialized(self):
return not self.redis_client.closed
async def emit(self, record: ExtendedLogRecord) -> None:
await self.redis_client.rpush(self.key, self.format(record))
async def close(self) -> None:
self.redis_client.close()
await self.redis_client.wait_closed()
@staticmethod
def format(record: ExtendedLogRecord):
o = {
"msg": record.get_message(),
"logged_at": record.created,
"line_number": record.lineno,
"file": record.pathname,
"function": record.funcName,
"level": record.levelname,
"module": record.module,
"kwargs": record.args,
**record.extra,
}
return json.dumps(o, ensure_ascii=False)
|
flexible
|
{
"blob_id": "fe581ca8176fed01309f0d852f72564863aa0895",
"index": 8413,
"step-1": "<mask token>\n\n\nclass RedisHandler(Handler):\n <mask token>\n <mask token>\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-2": "<mask token>\n\n\nclass RedisHandler(Handler):\n\n def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n <mask token>\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-3": "<mask token>\n\n\nclass RedisHandler(Handler):\n\n def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n\n @property\n def initialized(self):\n return not self.redis_client.closed\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-4": "import json\nfrom aioredis import Redis\nfrom aiologger.loggers.json import ExtendedLogRecord\nfrom aiologger.handlers.base import Handler\nfrom app.core import config\n\n\nclass RedisHandler(Handler):\n\n def __init__(self, redis_client, key=f'{config.APP_NAME}-log', *args,\n **kwargs):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n\n @property\n def initialized(self):\n return not self.redis_client.closed\n\n async def emit(self, record: ExtendedLogRecord) ->None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) ->None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {'msg': record.get_message(), 'logged_at': record.created,\n 'line_number': record.lineno, 'file': record.pathname,\n 'function': record.funcName, 'level': record.levelname,\n 'module': record.module, 'kwargs': record.args, **record.extra}\n return json.dumps(o, ensure_ascii=False)\n",
"step-5": "import json\n\nfrom aioredis import Redis\nfrom aiologger.loggers.json import ExtendedLogRecord\nfrom aiologger.handlers.base import Handler\n\nfrom app.core import config\n\n\nclass RedisHandler(Handler):\n def __init__(\n self,\n redis_client,\n key=f\"{config.APP_NAME}-log\",\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.key = key\n self.redis_client: Redis = redis_client\n\n @property\n def initialized(self):\n return not self.redis_client.closed\n\n async def emit(self, record: ExtendedLogRecord) -> None:\n await self.redis_client.rpush(self.key, self.format(record))\n\n async def close(self) -> None:\n self.redis_client.close()\n await self.redis_client.wait_closed()\n\n @staticmethod\n def format(record: ExtendedLogRecord):\n o = {\n \"msg\": record.get_message(),\n \"logged_at\": record.created,\n \"line_number\": record.lineno,\n \"file\": record.pathname,\n \"function\": record.funcName,\n \"level\": record.levelname,\n \"module\": record.module,\n \"kwargs\": record.args,\n **record.extra,\n }\n return json.dumps(o, ensure_ascii=False)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def create_app(**config_overrides):
app = Flask(__name__)
app.config.from_pyfile('settings.py')
app.config.update(config_overrides)
db.init_app(app)
from user.views import user_app
app.register_blueprint(user_app)
from workflow.views import workflow_app
app.register_blueprint(workflow_app)
return app
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db = MongoEngine()
def create_app(**config_overrides):
app = Flask(__name__)
app.config.from_pyfile('settings.py')
app.config.update(config_overrides)
db.init_app(app)
from user.views import user_app
app.register_blueprint(user_app)
from workflow.views import workflow_app
app.register_blueprint(workflow_app)
return app
<|reserved_special_token_1|>
from flask import Flask
from flask_mongoengine import MongoEngine
db = MongoEngine()
def create_app(**config_overrides):
app = Flask(__name__)
app.config.from_pyfile('settings.py')
app.config.update(config_overrides)
db.init_app(app)
from user.views import user_app
app.register_blueprint(user_app)
from workflow.views import workflow_app
app.register_blueprint(workflow_app)
return app
|
flexible
|
{
"blob_id": "8b7fb0789d197e50d7bdde2791b6fac964782469",
"index": 4001,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(**config_overrides):\n app = Flask(__name__)\n app.config.from_pyfile('settings.py')\n app.config.update(config_overrides)\n db.init_app(app)\n from user.views import user_app\n app.register_blueprint(user_app)\n from workflow.views import workflow_app\n app.register_blueprint(workflow_app)\n return app\n",
"step-3": "<mask token>\ndb = MongoEngine()\n\n\ndef create_app(**config_overrides):\n app = Flask(__name__)\n app.config.from_pyfile('settings.py')\n app.config.update(config_overrides)\n db.init_app(app)\n from user.views import user_app\n app.register_blueprint(user_app)\n from workflow.views import workflow_app\n app.register_blueprint(workflow_app)\n return app\n",
"step-4": "from flask import Flask\nfrom flask_mongoengine import MongoEngine\ndb = MongoEngine()\n\n\ndef create_app(**config_overrides):\n app = Flask(__name__)\n app.config.from_pyfile('settings.py')\n app.config.update(config_overrides)\n db.init_app(app)\n from user.views import user_app\n app.register_blueprint(user_app)\n from workflow.views import workflow_app\n app.register_blueprint(workflow_app)\n return app\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Exercise 3: Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message.
# If the score is between 0.0 and 1.0, print a grade using the following table:
# Score Grade
# >= 0.9 A
# >= 0.8 B
# >= 0.7 C
# >= 0.6 D
# < 0.6 F
# Vinayak Nayak
# 27th December 2018
# 12:30 pm
try:
i = float(input("Enter the score : "))
if(i > 1 or i < 0):
print("Entered score isn't valid.")
else:
if (i < 0.6):
print("Grade: F")
elif (i < 0.7):
print("Grade: D")
elif (i < 0.8):
print("Grade: C")
elif (i < 0.9):
print("Grade: B")
elif (i <= 1.0):
print("Grade: A")
except Exception as e:
print(str(e))
|
normal
|
{
"blob_id": "6f253da5dc1caa504a3a8aadae7bce6537b5c8c6",
"index": 6237,
"step-1": "<mask token>\n",
"step-2": "try:\n i = float(input('Enter the score : '))\n if i > 1 or i < 0:\n print(\"Entered score isn't valid.\")\n elif i < 0.6:\n print('Grade: F')\n elif i < 0.7:\n print('Grade: D')\n elif i < 0.8:\n print('Grade: C')\n elif i < 0.9:\n print('Grade: B')\n elif i <= 1.0:\n print('Grade: A')\nexcept Exception as e:\n print(str(e))\n",
"step-3": "# Exercise 3: Write a program to prompt for a score between 0.0 and 1.0. If the score is out of range, print an error message.\n# If the score is between 0.0 and 1.0, print a grade using the following table:\n# Score Grade\n# >= 0.9 A\n# >= 0.8 B\n# >= 0.7 C\n# >= 0.6 D\n# < 0.6 F\n\n# Vinayak Nayak\n# 27th December 2018\n# 12:30 pm\n\ntry:\n i = float(input(\"Enter the score : \"))\n\n if(i > 1 or i < 0):\n print(\"Entered score isn't valid.\")\n else:\n if (i < 0.6):\n print(\"Grade: F\")\n\n elif (i < 0.7):\n print(\"Grade: D\")\n\n elif (i < 0.8):\n print(\"Grade: C\")\n\n elif (i < 0.9):\n print(\"Grade: B\")\n\n elif (i <= 1.0):\n print(\"Grade: A\")\n\nexcept Exception as e:\n print(str(e))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
headline = 'Hello world from a variable!'
return render_template('index.html', headline=headline)
@app.route('/bye/')
def bye():
headline = 'Goodbye!'
return render_template('index.html', headline=headline)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/')
def index():
headline = 'Hello world from a variable!'
return render_template('index.html', headline=headline)
@app.route('/bye/')
def bye():
headline = 'Goodbye!'
return render_template('index.html', headline=headline)
<|reserved_special_token_1|>
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
headline = 'Hello world from a variable!'
return render_template('index.html', headline=headline)
@app.route('/bye/')
def bye():
headline = 'Goodbye!'
return render_template('index.html', headline=headline)
<|reserved_special_token_1|>
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def index():
headline = "Hello world from a variable!"
# headline de la izq es el nombre de la variable en la vista
# headline de la der es el nombre de la variable en el server
return render_template("index.html", headline=headline)
# Ahora usamos el mismo idex.html pero con un contenido distinto!
@app.route("/bye/")
def bye():
headline = "Goodbye!"
return render_template("index.html", headline=headline)
|
flexible
|
{
"blob_id": "83bbb6433d1577be869bf840bdd42aa86e415da6",
"index": 9328,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n headline = 'Hello world from a variable!'\n return render_template('index.html', headline=headline)\n\n\n@app.route('/bye/')\ndef bye():\n headline = 'Goodbye!'\n return render_template('index.html', headline=headline)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n headline = 'Hello world from a variable!'\n return render_template('index.html', headline=headline)\n\n\n@app.route('/bye/')\ndef bye():\n headline = 'Goodbye!'\n return render_template('index.html', headline=headline)\n",
"step-4": "from flask import Flask, render_template\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n headline = 'Hello world from a variable!'\n return render_template('index.html', headline=headline)\n\n\n@app.route('/bye/')\ndef bye():\n headline = 'Goodbye!'\n return render_template('index.html', headline=headline)\n",
"step-5": "from flask import Flask, render_template\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef index():\n headline = \"Hello world from a variable!\"\n # headline de la izq es el nombre de la variable en la vista\n # headline de la der es el nombre de la variable en el server\n return render_template(\"index.html\", headline=headline)\n\n# Ahora usamos el mismo idex.html pero con un contenido distinto!\n@app.route(\"/bye/\")\ndef bye():\n headline = \"Goodbye!\"\n return render_template(\"index.html\", headline=headline)",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from django.contrib.staticfiles.storage import CachedFilesMixin
from storages.backends.s3boto3 import S3Boto3Storage
class CachedS3Storage(CachedFilesMixin, S3Boto3Storage):
pass
StaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')
MediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')
|
normal
|
{
"blob_id": "e99ff1c75d5108efc8d587d4533c34eeb15c6978",
"index": 9425,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CachedS3Storage(CachedFilesMixin, S3Boto3Storage):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CachedS3Storage(CachedFilesMixin, S3Boto3Storage):\n pass\n\n\nStaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')\nMediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')\n",
"step-4": "from django.contrib.staticfiles.storage import CachedFilesMixin\nfrom storages.backends.s3boto3 import S3Boto3Storage\n\n\nclass CachedS3Storage(CachedFilesMixin, S3Boto3Storage):\n pass\n\n\nStaticRootS3BotoStorage = lambda : CachedS3Storage(location='static')\nMediaRootS3BotoStorage = lambda : S3Boto3Storage(location='media')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def calculate_psf_tilts():
"""
Calculate the tilt of the psf at the center of each column
using all binned pixels in the given wavelength calibration file
for both orders and save to file
"""
for order in [1, 2]:
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
subarray = 'SUBSTRIP256'
X = range(2048)
Y = range(256)
wave_map = utils.wave_solutions(subarray, order).astype(float)
coeffs = trace_polynomials(subarray=subarray, order=order)
trace = np.polyval(coeffs, X)
wave = interp2d(X, Y, wave_map)
trace_wave = []
for x, y in zip(X, trace):
trace_wave.append(wave(x, y)[0])
angles = []
for n, x in enumerate(X):
w = trace_wave[x]
try:
w0 = trace_wave[x - 1]
except IndexError:
w0 = 0
try:
w1 = trace_wave[x + 1]
except IndexError:
w1 = 10
dw0 = np.mean([w0, w])
dw1 = np.mean([w1, w])
yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))
if len(xx) >= 1:
angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
else:
angle = 0
angle = angle % 180
angles.append(angle)
np.save(psf_file, np.array(angles))
print('Angles saved to', psf_file)
def nuke_psfs(tilts=True, raw=True, final=True):
"""Generate all the psf cubes from scratch"""
if tilts:
calculate_psf_tilts()
for filt in ['CLEAR', 'F277W']:
if raw:
generate_SOSS_psfs(filt)
if final:
SOSS_psf_cube(filt=filt, generate=True)
<|reserved_special_token_0|>
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False
):
"""
Retrieve the SOSS psf for the given wavelength,
scale the total flux to 1, and set pixels below
cutoff value to zero
Parameters
----------
wavelength: float
The wavelength to retrieve [um]
filt: str
The filter to use, ['CLEAR', 'F277W']
psfs: numpy.interp1d object (optional)
The interpolator
plot: bool
Plot the psf
Returns
-------
np.ndarray
The 2D psf for the input wavelength
"""
if psfs is None:
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.
format(filt))
cube = fits.getdata(file).swapaxes(-1, -2)
wave = fits.getdata(file, ext=1)
psfs = interp1d(wave, cube, axis=0, kind=3)
if wavelength < psfs.x[0]:
wavelength = psfs.x[0]
if wavelength > psfs.x[-1]:
wavelength = psfs.x[-1]
psf = psfs(wavelength)
psf *= 1.0 / np.sum(psf)
if plot:
fig = figure()
fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
show(fig)
else:
return psf
<|reserved_special_token_0|>
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
"""
Generate a lightcurve for a (76, 76) psf of a given wavelength
Parameters
----------
psf: sequencs
The flux-scaled psf for the given wavelength
ld_coeffs: sequence
The limb darkening coefficients to use
rp: float
The planet radius
time: sequence
The time axis for the TSO
tmodel: batman.transitmodel.TransitModel
The transit model of the planet
plot: bool
Plot the lightcurve
Returns
-------
sequence
A 1D array of the lightcurve with the same length as *t*
Example 1
---------
# No planet
import numpy as np
from awesimsoss.make_trace import psf_lightcurve
psf = np.ones((76, 76))
time = np.linspace(-0.2, 0.2, 200)
lc = psf_lightcurve(psf, None, None, time, None, plot=True)
Example 2
---------
# With a planet
import batman
import numpy as np
import astropy.units as q
from awesimsoss.make_trace import psf_lightcurve
params = batman.TransitParams()
params.t0 = 0. # time of inferior conjunction
params.per = 5.7214742 # orbital period (days)
params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)
params.inc = 89.8 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
params.teff = 3500 # effective temperature of the host star
params.logg = 5 # log surface gravity of the host star
params.feh = 0 # metallicity of the host star
params.limb_dark = 'quadratic' # limb darkening profile to use
params.u = [1, 1] # limb darkening coefficients
tmodel = batman.TransitModel(params, time)
lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)
"""
flux = np.tile(psf, (len(time), 1, 1))
if ld_coeffs is not None and rp is not None and str(type(tmodel)
) == "<class 'batman.transitmodel.TransitModel'>":
tmodel.u = ld_coeffs
tmodel.rp = rp
lightcurve = tmodel.light_curve(tmodel)
flux *= lightcurve[:, None, None]
return flux
def psf_tilts(order):
"""
Get the psf tilts for the given order
Parameters
----------
order: int
The order to use, [1, 2]
Returns
-------
np.ndarray
The angle from the vertical of the psf in each of the 2048 columns
"""
if order not in [1, 2]:
raise ValueError('Only orders 1 and 2 are supported.')
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
if not os.path.exists(psf_file):
calculate_psf_tilts()
return np.load(psf_file)
def put_psf_on_subarray(psf, y, frame_height=256):
"""Make a 2D SOSS trace from a sequence of psfs and trace center locations
Parameters
----------
psf: sequence
The 2D psf
y: float
The grid y value to place the center of the psf
grid: sequence
The [x, y] grid ranges
Returns
-------
np.ndarray
The 2D frame with the interpolated psf
"""
dim = psf.shape[0]
mid = (dim - 1.0) / 2.0
arr = np.arange(dim, dtype=np.float)
spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)
yg, xg = np.indices((frame_height, dim), dtype=np.float64)
yg += mid - y
frame = spline.ev(xg, yg)
extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim -
0.5))
frame[extrapol] = 0
return frame
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False
):
"""
Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
are scaled to unity and rotated to reproduce the trace tilt at each
wavelength then placed on the desired subarray.
Parameters
----------
filt: str
The filter to use, ['CLEAR', 'F277W']
order: int
The trace order
subarray: str
The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
generate: bool
Generate a new cube
Returns
-------
np.ndarray
An array of the SOSS psf at 2048 wavelengths for each order
"""
if generate:
print('Coffee time! This takes about 5 minutes.')
wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if
filt == 'CLEAR' else 1]
coeffs = trace_polynomials(subarray)
psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
psf_file = resource_filename('awesimsoss', psf_path)
cube = fits.getdata(psf_file).swapaxes(-1, -2)
wave = fits.getdata(psf_file, ext=1)
psfs = interp1d(wave, cube, axis=0, kind=3)
trace_cols = np.arange(2048)
for n, wavelength in enumerate(wavelengths):
trace_centers = np.polyval(coeffs[n], trace_cols)
if n == 1 and filt.lower() == 'f277w' or n == 2:
pass
else:
print('Calculating order {} SOSS psfs for {} filter...'.
format(n + 1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
raw_psfs = np.array(pool.map(func, wavelength))
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time() - start))
angles = psf_tilts(order)
print('Rotating order {} SOSS psfs for {} filter...'.format
(n + 1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
func = partial(rotate, reshape=False)
rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,
angles)))
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time() - start))
rotated_psfs = np.abs(rotated_psfs)
scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
rotated_psfs = rotated_psfs / scale
chunks = rotated_psfs.reshape(4, 512, 76, 76)
for N, chunk in enumerate(chunks):
idx0 = N * 512
idx1 = idx0 + 512
centers = trace_centers[idx0:idx1]
print(
'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'
.format(N + 1, n + 1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
data = zip(chunk, centers)
subarray_psfs = pool.starmap(put_psf_on_subarray, data)
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time() - start)
)
filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,
n + 1, N + 1)
file = resource_filename('awesimsoss', filename)
if os.path.isfile(file):
os.system('rm {}'.format(file))
np.save(file, np.array(subarray_psfs))
print('Data saved to', file)
else:
full_data = []
for chunk in [1, 2, 3, 4]:
path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk
)
file = resource_filename('awesimsoss', path)
full_data.append(np.load(file))
return np.concatenate(full_data, axis=0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculate_psf_tilts():
"""
Calculate the tilt of the psf at the center of each column
using all binned pixels in the given wavelength calibration file
for both orders and save to file
"""
for order in [1, 2]:
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
subarray = 'SUBSTRIP256'
X = range(2048)
Y = range(256)
wave_map = utils.wave_solutions(subarray, order).astype(float)
coeffs = trace_polynomials(subarray=subarray, order=order)
trace = np.polyval(coeffs, X)
wave = interp2d(X, Y, wave_map)
trace_wave = []
for x, y in zip(X, trace):
trace_wave.append(wave(x, y)[0])
angles = []
for n, x in enumerate(X):
w = trace_wave[x]
try:
w0 = trace_wave[x - 1]
except IndexError:
w0 = 0
try:
w1 = trace_wave[x + 1]
except IndexError:
w1 = 10
dw0 = np.mean([w0, w])
dw1 = np.mean([w1, w])
yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))
if len(xx) >= 1:
angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
else:
angle = 0
angle = angle % 180
angles.append(angle)
np.save(psf_file, np.array(angles))
print('Angles saved to', psf_file)
def nuke_psfs(tilts=True, raw=True, final=True):
"""Generate all the psf cubes from scratch"""
if tilts:
calculate_psf_tilts()
for filt in ['CLEAR', 'F277W']:
if raw:
generate_SOSS_psfs(filt)
if final:
SOSS_psf_cube(filt=filt, generate=True)
def generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='',
subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):
"""
Generate a lookup table of limb darkening coefficients for full
SOSS wavelength range
Parameters
----------
wavelengths: sequence
The wavelengths at which to calculate the LDCs
ld_profile: str
A limb darkening profile name supported by
`ExoCTK.ldc.ldcfit.ld_profile()`
grid_point: dict, sequence
The stellar parameters [Teff, logg, FeH] or stellar model
dictionary from `ExoCTK.modelgrid.ModelGrid.get()`
n_bins: int
The number of bins to break up the grism into
save: str
The path to save to file to
Example
-------
from awesimsoss.sim2D import awesim
lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])
"""
try:
from exoctk import modelgrid
from exoctk.limb_darkening import limb_darkening_fit as lf
except ImportError:
return
if not isinstance(model_grid, modelgrid.ModelGrid):
model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],
resolution=700)
model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],
resolution=700, wave_rng=(0.6, 2.8))
if isinstance(grid_point, (list, tuple, np.ndarray)):
grid_point = model_grid.get(*grid_point)
if not isinstance(grid_point, dict):
print(
'Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).'
)
return
bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)
ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],
bandpass=bandpass, grid_point=grid_point.copy(), mu_min=0.08,
verbose=False)
coeff_table = ldc_results[ld_profile]['coeffs']
coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]
coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table
[c]) for c in coeff_cols]
return np.array(coeffs).T
def generate_SOSS_psfs(filt):
"""
Gnerate a cube of the psf at 100 wavelengths from the min to the max wavelength
Parameters
----------
filt: str
The filter to use, ['CLEAR', 'F277W']
"""
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(
filt))
ns = webbpsf.NIRISS()
ns.filter = filt
ns.pupil_mask = 'GR700XD'
wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()
wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1000000.0, np.min(
wavelengths[wavelengths > 0])])
wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1000000.0, np.max(
wavelengths[wavelengths > 0])])
W = np.linspace(wave_min, wave_max, 100) * 1e-06
print('Generating SOSS psfs. This takes about 8 minutes...')
start = time.time()
PSF = ns.calc_datacube(W, oversample=1)[0].data
print('Finished in', time.time() - start)
psfhdu = fits.PrimaryHDU(data=PSF)
wavhdu = fits.ImageHDU(data=W * 1000000.0, name='WAV')
hdulist = fits.HDUList([psfhdu, wavhdu])
hdulist.writeto(file, overwrite=True)
hdulist.close()
<|reserved_special_token_0|>
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False
):
"""
Retrieve the SOSS psf for the given wavelength,
scale the total flux to 1, and set pixels below
cutoff value to zero
Parameters
----------
wavelength: float
The wavelength to retrieve [um]
filt: str
The filter to use, ['CLEAR', 'F277W']
psfs: numpy.interp1d object (optional)
The interpolator
plot: bool
Plot the psf
Returns
-------
np.ndarray
The 2D psf for the input wavelength
"""
if psfs is None:
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.
format(filt))
cube = fits.getdata(file).swapaxes(-1, -2)
wave = fits.getdata(file, ext=1)
psfs = interp1d(wave, cube, axis=0, kind=3)
if wavelength < psfs.x[0]:
wavelength = psfs.x[0]
if wavelength > psfs.x[-1]:
wavelength = psfs.x[-1]
psf = psfs(wavelength)
psf *= 1.0 / np.sum(psf)
if plot:
fig = figure()
fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
show(fig)
else:
return psf
def make_frame(psfs):
"""
Generate a frame from an array of psfs
Parameters
----------
psfs: sequence
An array of psfs of shape (2048, 76, 76)
Returns
-------
np.ndarray
An array of the SOSS psf at 2048 wavelengths for each order
"""
frame = np.zeros((256, 2124))
for n, psf in enumerate(psfs):
frame[:, n:n + 76] += psf
return frame[:, 38:-38]
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
"""
Generate a lightcurve for a (76, 76) psf of a given wavelength
Parameters
----------
psf: sequencs
The flux-scaled psf for the given wavelength
ld_coeffs: sequence
The limb darkening coefficients to use
rp: float
The planet radius
time: sequence
The time axis for the TSO
tmodel: batman.transitmodel.TransitModel
The transit model of the planet
plot: bool
Plot the lightcurve
Returns
-------
sequence
A 1D array of the lightcurve with the same length as *t*
Example 1
---------
# No planet
import numpy as np
from awesimsoss.make_trace import psf_lightcurve
psf = np.ones((76, 76))
time = np.linspace(-0.2, 0.2, 200)
lc = psf_lightcurve(psf, None, None, time, None, plot=True)
Example 2
---------
# With a planet
import batman
import numpy as np
import astropy.units as q
from awesimsoss.make_trace import psf_lightcurve
params = batman.TransitParams()
params.t0 = 0. # time of inferior conjunction
params.per = 5.7214742 # orbital period (days)
params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)
params.inc = 89.8 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
params.teff = 3500 # effective temperature of the host star
params.logg = 5 # log surface gravity of the host star
params.feh = 0 # metallicity of the host star
params.limb_dark = 'quadratic' # limb darkening profile to use
params.u = [1, 1] # limb darkening coefficients
tmodel = batman.TransitModel(params, time)
lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)
"""
flux = np.tile(psf, (len(time), 1, 1))
if ld_coeffs is not None and rp is not None and str(type(tmodel)
) == "<class 'batman.transitmodel.TransitModel'>":
tmodel.u = ld_coeffs
tmodel.rp = rp
lightcurve = tmodel.light_curve(tmodel)
flux *= lightcurve[:, None, None]
return flux
def psf_tilts(order):
"""
Get the psf tilts for the given order
Parameters
----------
order: int
The order to use, [1, 2]
Returns
-------
np.ndarray
The angle from the vertical of the psf in each of the 2048 columns
"""
if order not in [1, 2]:
raise ValueError('Only orders 1 and 2 are supported.')
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
if not os.path.exists(psf_file):
calculate_psf_tilts()
return np.load(psf_file)
def put_psf_on_subarray(psf, y, frame_height=256):
    """Place a single 2D psf onto a full-height subarray column strip.

    The psf is resampled with a bicubic spline so that its center lands
    on the (possibly fractional) row position *y* of the output frame.

    Parameters
    ----------
    psf: sequence
        The 2D psf, assumed square of shape (dim, dim)
    y: float
        The frame row at which to place the center of the psf
    frame_height: int
        The number of rows in the output frame (default 256)

    Returns
    -------
    np.ndarray
        The (frame_height, dim) frame containing the interpolated psf,
        zeroed outside the psf's valid interpolation domain
    """
    dim = psf.shape[0]

    # Row/column index of the psf center in its own coordinates
    mid = (dim - 1.0) / 2.0

    # `np.float` was removed in NumPy 1.24+; use the builtin float instead
    arr = np.arange(dim, dtype=float)

    # Bicubic interpolator over the (transposed) psf
    spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)

    # Evaluation grid: shift the rows so the psf center maps to row y
    yg, xg = np.indices((frame_height, dim), dtype=np.float64)
    yg += mid - y
    frame = spline.ev(xg, yg)

    # Zero out points outside the psf's pixel domain, where the spline
    # would otherwise extrapolate
    extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim - 0.5))
    frame[extrapol] = 0

    return frame
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False
):
"""
Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
are scaled to unity and rotated to reproduce the trace tilt at each
wavelength then placed on the desired subarray.
Parameters
----------
filt: str
The filter to use, ['CLEAR', 'F277W']
order: int
The trace order
subarray: str
The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
generate: bool
Generate a new cube
Returns
-------
np.ndarray
An array of the SOSS psf at 2048 wavelengths for each order
"""
if generate:
print('Coffee time! This takes about 5 minutes.')
wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if
filt == 'CLEAR' else 1]
coeffs = trace_polynomials(subarray)
psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
psf_file = resource_filename('awesimsoss', psf_path)
cube = fits.getdata(psf_file).swapaxes(-1, -2)
wave = fits.getdata(psf_file, ext=1)
psfs = interp1d(wave, cube, axis=0, kind=3)
trace_cols = np.arange(2048)
for n, wavelength in enumerate(wavelengths):
trace_centers = np.polyval(coeffs[n], trace_cols)
if n == 1 and filt.lower() == 'f277w' or n == 2:
pass
else:
print('Calculating order {} SOSS psfs for {} filter...'.
format(n + 1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
raw_psfs = np.array(pool.map(func, wavelength))
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time() - start))
angles = psf_tilts(order)
print('Rotating order {} SOSS psfs for {} filter...'.format
(n + 1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
func = partial(rotate, reshape=False)
rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,
angles)))
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time() - start))
rotated_psfs = np.abs(rotated_psfs)
scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
rotated_psfs = rotated_psfs / scale
chunks = rotated_psfs.reshape(4, 512, 76, 76)
for N, chunk in enumerate(chunks):
idx0 = N * 512
idx1 = idx0 + 512
centers = trace_centers[idx0:idx1]
print(
'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'
.format(N + 1, n + 1, filt))
start = time.time()
pool = multiprocessing.Pool(8)
data = zip(chunk, centers)
subarray_psfs = pool.starmap(put_psf_on_subarray, data)
pool.close()
pool.join()
del pool
print('Finished in {} seconds.'.format(time.time() - start)
)
filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,
n + 1, N + 1)
file = resource_filename('awesimsoss', filename)
if os.path.isfile(file):
os.system('rm {}'.format(file))
np.save(file, np.array(subarray_psfs))
print('Data saved to', file)
else:
full_data = []
for chunk in [1, 2, 3, 4]:
path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk
)
file = resource_filename('awesimsoss', path)
full_data.append(np.load(file))
return np.concatenate(full_data, axis=0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def calculate_psf_tilts():
"""
Calculate the tilt of the psf at the center of each column
using all binned pixels in the given wavelength calibration file
for both orders and save to file
"""
for order in [1, 2]:
path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
psf_file = resource_filename('awesimsoss', path)
subarray = 'SUBSTRIP256'
X = range(2048)
Y = range(256)
wave_map = utils.wave_solutions(subarray, order).astype(float)
coeffs = trace_polynomials(subarray=subarray, order=order)
trace = np.polyval(coeffs, X)
wave = interp2d(X, Y, wave_map)
trace_wave = []
for x, y in zip(X, trace):
trace_wave.append(wave(x, y)[0])
angles = []
for n, x in enumerate(X):
w = trace_wave[x]
try:
w0 = trace_wave[x - 1]
except IndexError:
w0 = 0
try:
w1 = trace_wave[x + 1]
except IndexError:
w1 = 10
dw0 = np.mean([w0, w])
dw1 = np.mean([w1, w])
yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))
if len(xx) >= 1:
angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
else:
angle = 0
angle = angle % 180
angles.append(angle)
np.save(psf_file, np.array(angles))
print('Angles saved to', psf_file)
def nuke_psfs(tilts=True, raw=True, final=True):
"""Generate all the psf cubes from scratch"""
if tilts:
calculate_psf_tilts()
for filt in ['CLEAR', 'F277W']:
if raw:
generate_SOSS_psfs(filt)
if final:
SOSS_psf_cube(filt=filt, generate=True)
def generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='',
subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):
"""
Generate a lookup table of limb darkening coefficients for full
SOSS wavelength range
Parameters
----------
wavelengths: sequence
The wavelengths at which to calculate the LDCs
ld_profile: str
A limb darkening profile name supported by
`ExoCTK.ldc.ldcfit.ld_profile()`
grid_point: dict, sequence
The stellar parameters [Teff, logg, FeH] or stellar model
dictionary from `ExoCTK.modelgrid.ModelGrid.get()`
n_bins: int
The number of bins to break up the grism into
save: str
The path to save to file to
Example
-------
from awesimsoss.sim2D import awesim
lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])
"""
try:
from exoctk import modelgrid
from exoctk.limb_darkening import limb_darkening_fit as lf
except ImportError:
return
if not isinstance(model_grid, modelgrid.ModelGrid):
model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],
resolution=700)
model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],
resolution=700, wave_rng=(0.6, 2.8))
if isinstance(grid_point, (list, tuple, np.ndarray)):
grid_point = model_grid.get(*grid_point)
if not isinstance(grid_point, dict):
print(
'Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).'
)
return
bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)
ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],
bandpass=bandpass, grid_point=grid_point.copy(), mu_min=0.08,
verbose=False)
coeff_table = ldc_results[ld_profile]['coeffs']
coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]
coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table
[c]) for c in coeff_cols]
return np.array(coeffs).T
def generate_SOSS_psfs(filt):
"""
Gnerate a cube of the psf at 100 wavelengths from the min to the max wavelength
Parameters
----------
filt: str
The filter to use, ['CLEAR', 'F277W']
"""
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(
filt))
ns = webbpsf.NIRISS()
ns.filter = filt
ns.pupil_mask = 'GR700XD'
wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()
wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1000000.0, np.min(
wavelengths[wavelengths > 0])])
wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1000000.0, np.max(
wavelengths[wavelengths > 0])])
W = np.linspace(wave_min, wave_max, 100) * 1e-06
print('Generating SOSS psfs. This takes about 8 minutes...')
start = time.time()
PSF = ns.calc_datacube(W, oversample=1)[0].data
print('Finished in', time.time() - start)
psfhdu = fits.PrimaryHDU(data=PSF)
wavhdu = fits.ImageHDU(data=W * 1000000.0, name='WAV')
hdulist = fits.HDUList([psfhdu, wavhdu])
hdulist.writeto(file, overwrite=True)
hdulist.close()
def get_angle(pf, p0=None, pi=None):
    """Compute the signed angle (in degrees) of the pf-p0-pi corner.

    Parameters
    ----------
    pf: sequence
        The coordinates of a point on the rotated vector
    p0: sequence
        The coordinates of the pivot (default [0, 0])
    pi: sequence
        The coordinates of the fixed vector (default one unit along +y
        from the pivot)

    Returns
    -------
    float
        The signed angle in degrees
    """
    # Avoid a mutable (ndarray) default argument; the default pivot is
    # the origin
    p0 = np.array([0, 0]) if p0 is None else np.array(p0)

    # Default reference vector points one unit along +y from the pivot
    if pi is None:
        pi = p0 + np.array([0, 1])

    v0 = np.array(pf) - np.array(p0)
    v1 = np.array(pi) - np.array(p0)

    # Signed angle between v0 and v1 via atan2(det, dot).
    # `np.math` was removed in NumPy 2.0, so use np.arctan2 instead.
    angle = np.arctan2(np.linalg.det([v0, v1]), np.dot(v0, v1))

    return np.degrees(angle)
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False
):
"""
Retrieve the SOSS psf for the given wavelength,
scale the total flux to 1, and set pixels below
cutoff value to zero
Parameters
----------
wavelength: float
The wavelength to retrieve [um]
filt: str
The filter to use, ['CLEAR', 'F277W']
psfs: numpy.interp1d object (optional)
The interpolator
plot: bool
Plot the psf
Returns
-------
np.ndarray
The 2D psf for the input wavelength
"""
if psfs is None:
file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.
format(filt))
cube = fits.getdata(file).swapaxes(-1, -2)
wave = fits.getdata(file, ext=1)
psfs = interp1d(wave, cube, axis=0, kind=3)
if wavelength < psfs.x[0]:
wavelength = psfs.x[0]
if wavelength > psfs.x[-1]:
wavelength = psfs.x[-1]
psf = psfs(wavelength)
psf *= 1.0 / np.sum(psf)
if plot:
fig = figure()
fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
show(fig)
else:
return psf
def make_frame(psfs):
"""
Generate a frame from an array of psfs
Parameters
----------
psfs: sequence
An array of psfs of shape (2048, 76, 76)
Returns
-------
np.ndarray
An array of the SOSS psf at 2048 wavelengths for each order
"""
frame = np.zeros((256, 2124))
for n, psf in enumerate(psfs):
frame[:, n:n + 76] += psf
return frame[:, 38:-38]
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
"""
Generate a lightcurve for a (76, 76) psf of a given wavelength
Parameters
----------
psf: sequencs
The flux-scaled psf for the given wavelength
ld_coeffs: sequence
The limb darkening coefficients to use
rp: float
The planet radius
time: sequence
The time axis for the TSO
tmodel: batman.transitmodel.TransitModel
The transit model of the planet
plot: bool
Plot the lightcurve
Returns
-------
sequence
A 1D array of the lightcurve with the same length as *t*
Example 1
---------
# No planet
import numpy as np
from awesimsoss.make_trace import psf_lightcurve
psf = np.ones((76, 76))
time = np.linspace(-0.2, 0.2, 200)
lc = psf_lightcurve(psf, None, None, time, None, plot=True)
Example 2
---------
# With a planet
import batman
import numpy as np
import astropy.units as q
from awesimsoss.make_trace import psf_lightcurve
params = batman.TransitParams()
params.t0 = 0. # time of inferior conjunction
params.per = 5.7214742 # orbital period (days)
params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)
params.inc = 89.8 # orbital inclination (in degrees)
params.ecc = 0. # eccentricity
params.w = 90. # longitude of periastron (in degrees)
params.teff = 3500 # effective temperature of the host star
params.logg = 5 # log surface gravity of the host star
params.feh = 0 # metallicity of the host star
params.limb_dark = 'quadratic' # limb darkening profile to use
params.u = [1, 1] # limb darkening coefficients
tmodel = batman.TransitModel(params, time)
lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)
"""
flux = np.tile(psf, (len(time), 1, 1))
if ld_coeffs is not None and rp is not None and str(type(tmodel)
) == "<class 'batman.transitmodel.TransitModel'>":
tmodel.u = ld_coeffs
tmodel.rp = rp
lightcurve = tmodel.light_curve(tmodel)
flux *= lightcurve[:, None, None]
return flux
def psf_tilts(order):
    """
    Get the psf tilt angles for the given order, computing and caching
    them on first use

    Parameters
    ----------
    order: int
        The order to use, [1, 2]

    Returns
    -------
    np.ndarray
        The angle from the vertical of the psf in each of the 2048 columns

    Raises
    ------
    ValueError
        If the order is not 1 or 2
    """
    if order not in [1, 2]:
        raise ValueError('Only orders 1 and 2 are supported.')

    # Cached tilt file shipped with (or generated into) the package
    tilt_path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
    tilt_file = resource_filename('awesimsoss', tilt_path)

    # Generate the tilts on first use
    if not os.path.exists(tilt_file):
        calculate_psf_tilts()

    return np.load(tilt_file)
def put_psf_on_subarray(psf, y, frame_height=256):
    """Place a single 2D psf onto a full-height subarray column strip.

    The psf is resampled with a bicubic spline so that its center lands
    on the (possibly fractional) row position *y* of the output frame.

    Parameters
    ----------
    psf: sequence
        The 2D psf, assumed square of shape (dim, dim)
    y: float
        The frame row at which to place the center of the psf
    frame_height: int
        The number of rows in the output frame (default 256)

    Returns
    -------
    np.ndarray
        The (frame_height, dim) frame containing the interpolated psf,
        zeroed outside the psf's valid interpolation domain
    """
    dim = psf.shape[0]

    # Row/column index of the psf center in its own coordinates
    mid = (dim - 1.0) / 2.0

    # `np.float` was removed in NumPy 1.24+; use the builtin float instead
    arr = np.arange(dim, dtype=float)

    # Bicubic interpolator over the (transposed) psf
    spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)

    # Evaluation grid: shift the rows so the psf center maps to row y
    yg, xg = np.indices((frame_height, dim), dtype=np.float64)
    yg += mid - y
    frame = spline.ev(xg, yg)

    # Zero out points outside the psf's pixel domain, where the spline
    # would otherwise extrapolate
    extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim - 0.5))
    frame[extrapol] = 0

    return frame
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False):
    """
    Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
    76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
    are scaled to unity and rotated to reproduce the trace tilt at each
    wavelength then placed on the desired subarray.

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    order: int
        The trace order to retrieve (when generating, all orders of the
        filter are processed)
    subarray: str
        The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
    generate: bool
        Generate a new cube instead of loading the cached one

    Returns
    -------
    np.ndarray
        An array of the SOSS psf at 2048 wavelengths for each order
        (None when generate=True; results are written to disk instead)
    """
    if generate:
        print('Coffee time! This takes about 5 minutes.')

        # Mean wavelength of each column; CLEAR has two orders, F277W one
        wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if filt == 'CLEAR' else 1]
        coeffs = trace_polynomials(subarray)

        # Build a cubic wavelength -> psf interpolator from the raw cube
        psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
        psf_file = resource_filename('awesimsoss', psf_path)
        cube = fits.getdata(psf_file).swapaxes(-1, -2)
        wave = fits.getdata(psf_file, ext=1)
        psfs = interp1d(wave, cube, axis=0, kind=3)

        trace_cols = np.arange(2048)
        for n, wavelength in enumerate(wavelengths):

            # Trace center row for each column of this order
            trace_centers = np.polyval(coeffs[n], trace_cols)

            # F277W has no order 2 signal; order 3 is never generated
            if n == 1 and filt.lower() == 'f277w' or n == 2:
                pass
            else:
                # Evaluate the psf at every column's wavelength in parallel
                print('Calculating order {} SOSS psfs for {} filter...'.format(n + 1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
                raw_psfs = np.array(pool.map(func, wavelength))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time() - start))

                # Rotate each psf to the local trace tilt.
                # NOTE(review): this uses the *order* argument rather than
                # n + 1, so every generated order is rotated with the same
                # tilt set -- confirm whether that is intended.
                angles = psf_tilts(order)
                print('Rotating order {} SOSS psfs for {} filter...'.format(n + 1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(rotate, reshape=False)
                rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs, angles)))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time() - start))

                # Rotation can introduce negative values; take the absolute
                # value and rescale so each psf sums to unity
                rotated_psfs = np.abs(rotated_psfs)
                scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
                rotated_psfs = rotated_psfs / scale

                # Interpolate onto the subarray in four chunks of 512 columns
                chunks = rotated_psfs.reshape(4, 512, 76, 76)
                for N, chunk in enumerate(chunks):
                    idx0 = N * 512
                    idx1 = idx0 + 512
                    centers = trace_centers[idx0:idx1]
                    print('Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'.format(N + 1, n + 1, filt))
                    start = time.time()
                    pool = multiprocessing.Pool(8)
                    data = zip(chunk, centers)
                    subarray_psfs = pool.starmap(put_psf_on_subarray, data)
                    pool.close()
                    pool.join()
                    del pool
                    print('Finished in {} seconds.'.format(time.time() - start))

                    # Write the chunk, replacing any existing file. Use
                    # os.remove instead of shelling out to `rm`, which was
                    # non-portable and unsafe for paths with spaces.
                    filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, n + 1, N + 1)
                    file = resource_filename('awesimsoss', filename)
                    if os.path.isfile(file):
                        os.remove(file)
                    np.save(file, np.array(subarray_psfs))
                    print('Data saved to', file)

    else:
        # Load and stitch the four cached 512-column chunks for the order
        full_data = []
        for chunk in [1, 2, 3, 4]:
            path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk)
            file = resource_filename('awesimsoss', path)
            full_data.append(np.load(file))

        return np.concatenate(full_data, axis=0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
from pkg_resources import resource_filename
import multiprocessing
import time
from functools import partial
import warnings
import numpy as np
from astropy.io import fits
from bokeh.plotting import figure, show
from hotsoss import utils
from svo_filters import svo
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import rotate
from scipy.interpolate import interp2d, RectBivariateSpline
try:
import webbpsf
except ImportError:
print('Could not import `webbpsf` package. Functionality limited.')
warnings.simplefilter('ignore')
def calculate_psf_tilts():
    """
    Calculate the tilt of the psf at the center of each column using all
    binned pixels in the given wavelength calibration file for both
    orders, and save the angles to the packaged .npy files
    """
    for order in [1, 2]:
        # Output file for this order's tilt angles
        path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
        psf_file = resource_filename('awesimsoss', path)

        # Dimensions of the SUBSTRIP256 subarray
        subarray = 'SUBSTRIP256'
        X = range(2048)
        Y = range(256)

        # Wavelength map and trace center polynomial for this order
        wave_map = utils.wave_solutions(subarray, order).astype(float)
        coeffs = trace_polynomials(subarray=subarray, order=order)
        trace = np.polyval(coeffs, X)
        wave = interp2d(X, Y, wave_map)

        # Wavelength at the trace center of each column
        trace_wave = []
        for x, y in zip(X, trace):
            trace_wave.append(wave(x, y)[0])

        angles = []
        for x in X:
            w = trace_wave[x]

            # Wavelengths of the neighboring columns, with sentinel values
            # at the edges. BUGFIX: the original code indexed
            # trace_wave[x - 1] inside a try/except IndexError, but for
            # x == 0 a negative index silently wraps to the LAST column
            # instead of raising, so the left-edge sentinel was never used.
            w0 = trace_wave[x - 1] if x > 0 else 0
            try:
                w1 = trace_wave[x + 1]
            except IndexError:
                w1 = 10

            # Half-way wavelengths bounding this column's bin
            dw0 = np.mean([w0, w])
            dw1 = np.mean([w1, w])

            # All pixels whose wavelength falls inside the bin
            yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))

            # Angle between the last binned pixel and the trace center
            if len(xx) >= 1:
                angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
            else:
                angle = 0

            # Fold into [0, 180): the psf is symmetric about its center
            angle = angle % 180
            angles.append(angle)

        np.save(psf_file, np.array(angles))
        print('Angles saved to', psf_file)
def nuke_psfs(tilts=True, raw=True, final=True):
    """Regenerate all psf data products from scratch.

    Parameters
    ----------
    tilts: bool
        Recompute the per-column psf tilt angles
    raw: bool
        Regenerate the raw webbpsf psf cube for each filter
    final: bool
        Regenerate the final subarray psf cube for each filter
    """
    if tilts:
        calculate_psf_tilts()

    for filter_name in ('CLEAR', 'F277W'):
        if raw:
            generate_SOSS_psfs(filter_name)
        if final:
            SOSS_psf_cube(filt=filter_name, generate=True)
def generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='',
    subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):
    """
    Generate a lookup table of limb darkening coefficients for the full
    SOSS wavelength range

    Parameters
    ----------
    wavelengths: sequence
        The wavelengths at which to calculate the LDCs
    ld_profile: str
        A limb darkening profile name supported by
        `ExoCTK.ldc.ldcfit.ld_profile()`
    grid_point: dict, sequence
        The stellar parameters [Teff, logg, FeH] or stellar model
        dictionary from `ExoCTK.modelgrid.ModelGrid.get()`
    model_grid: ExoCTK.modelgrid.ModelGrid (optional)
        A pre-loaded model grid; a default one is built when not given
    n_bins: int
        The number of bins to break up the grism into
    save: str
        The path to save to file to (currently unused -- TODO confirm)

    Returns
    -------
    np.ndarray or None
        The coefficients, shape (len(wavelengths), n_coefficients), or
        None when exoctk is unavailable or the grid point is invalid

    Example
    -------
    from awesimsoss.sim2D import awesim
    lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])
    """
    # exoctk is an optional dependency; bail out quietly if unavailable
    try:
        from exoctk import modelgrid
        from exoctk.limb_darkening import limb_darkening_fit as lf
    except ImportError:
        return

    # Build a default model grid restricted to the SOSS wavelength range
    # only when the caller did not supply one. BUGFIX: the original code
    # rebuilt the grid unconditionally after this check, clobbering any
    # user-provided model_grid and making the guarded assignment dead code.
    if not isinstance(model_grid, modelgrid.ModelGrid):
        model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],
            resolution=700, wave_rng=(0.6, 2.8))

    # Allow [Teff, logg, FeH] shorthand for the grid point
    if isinstance(grid_point, (list, tuple, np.ndarray)):
        grid_point = model_grid.get(*grid_point)
    if not isinstance(grid_point, dict):
        print(
            'Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).'
            )
        return

    # Break the GR700XD throughput into wavelength bins
    bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)

    # Fit the limb darkening coefficients in each bin
    ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],
        bandpass=bandpass, grid_point=grid_point.copy(), mu_min=0.08,
        verbose=False)

    # Interpolate each coefficient onto the requested wavelengths
    coeff_table = ldc_results[ld_profile]['coeffs']
    coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]
    coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table[c])
              for c in coeff_cols]

    return np.array(coeffs).T
def generate_SOSS_psfs(filt):
    """
    Generate a cube of the psf at 100 wavelengths spanning the valid
    range of the instrument and wavelength solution, and write it to the
    packaged FITS file for the given filter

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    """
    # Destination FITS file shipped with the package
    outfile = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))

    # Configure the NIRISS instrument model for SOSS
    niriss = webbpsf.NIRISS()
    niriss.filter = filt
    niriss.pupil_mask = 'GR700XD'

    # Positive wavelengths [um] from the wavelength solution
    soln = utils.wave_solutions('SUBSTRIP256').flatten()
    valid = soln[soln > 0]

    # Clip to the instrument's supported wavelength range [um]
    wave_min = np.max([niriss.SHORT_WAVELENGTH_MIN * 1000000.0, np.min(valid)])
    wave_max = np.min([niriss.LONG_WAVELENGTH_MAX * 1000000.0, np.max(valid)])

    # 100 sample wavelengths, converted to meters for webbpsf
    W = np.linspace(wave_min, wave_max, 100) * 1e-06

    print('Generating SOSS psfs. This takes about 8 minutes...')
    start = time.time()
    PSF = niriss.calc_datacube(W, oversample=1)[0].data
    print('Finished in', time.time() - start)

    # Save the psf cube plus its wavelength axis [um]
    wavhdu = fits.ImageHDU(data=W * 1000000.0, name='WAV')
    hdulist = fits.HDUList([fits.PrimaryHDU(data=PSF), wavhdu])
    hdulist.writeto(outfile, overwrite=True)
    hdulist.close()
def get_angle(pf, p0=None, pi=None):
    """Compute the signed angle (in degrees) of the pf-p0-pi corner.

    Parameters
    ----------
    pf: sequence
        The coordinates of a point on the rotated vector
    p0: sequence
        The coordinates of the pivot (default [0, 0])
    pi: sequence
        The coordinates of the fixed vector (default one unit along +y
        from the pivot)

    Returns
    -------
    float
        The signed angle in degrees
    """
    # Avoid a mutable (ndarray) default argument; the default pivot is
    # the origin
    p0 = np.array([0, 0]) if p0 is None else np.array(p0)

    # Default reference vector points one unit along +y from the pivot
    if pi is None:
        pi = p0 + np.array([0, 1])

    v0 = np.array(pf) - np.array(p0)
    v1 = np.array(pi) - np.array(p0)

    # Signed angle between v0 and v1 via atan2(det, dot).
    # `np.math` was removed in NumPy 2.0, so use np.arctan2 instead.
    angle = np.arctan2(np.linalg.det([v0, v1]), np.dot(v0, v1))

    return np.degrees(angle)
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False):
    """
    Retrieve the SOSS psf for the given wavelength and scale its total
    flux to 1

    Parameters
    ----------
    wavelength: float
        The wavelength to retrieve [um]; clamped to the interpolator's
        wavelength range
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    psfs: scipy.interpolate.interp1d (optional)
        A precomputed wavelength -> psf interpolator; loaded from the
        packaged FITS file when not provided
    cutoff: float
        Unused by this implementation -- kept for interface
        compatibility; TODO confirm
    plot: bool
        Plot the psf instead of returning it

    Returns
    -------
    np.ndarray
        The 2D psf for the input wavelength (None when plot=True)
    """
    # Lazily build the interpolator from the packaged psf cube
    if psfs is None:
        psf_fits = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))
        data = fits.getdata(psf_fits).swapaxes(-1, -2)
        grid = fits.getdata(psf_fits, ext=1)
        psfs = interp1d(grid, data, axis=0, kind=3)

    # Clamp the requested wavelength to the interpolator's domain
    wavelength = min(max(wavelength, psfs.x[0]), psfs.x[-1])

    # Interpolate and normalize to unit total flux
    psf = psfs(wavelength)
    psf *= 1.0 / np.sum(psf)

    if plot:
        fig = figure()
        fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
        show(fig)
    else:
        return psf
def make_frame(psfs):
    """
    Assemble a single 2D frame by co-adding a sequence of psfs, each
    shifted one column to the right of the previous one

    Parameters
    ----------
    psfs: sequence
        The per-column psfs; each entry spans the full frame height and
        76 columns -- presumably the output of put_psf_on_subarray for
        each trace column; TODO confirm expected shape

    Returns
    -------
    np.ndarray
        The co-added (256, 2048) frame, trimmed of the 38-column padding
        on each side
    """
    # Padded canvas: 38 extra columns on each side so the first and last
    # psfs fit without clipping
    canvas = np.zeros((256, 2124))
    for col, psf in enumerate(psfs):
        canvas[:, col:col + 76] += psf

    # Trim the padding
    return canvas[:, 38:-38]
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
    """Tile a single-wavelength psf along the time axis, applying a transit.

    Parameters
    ----------
    psf: sequence
        The flux-scaled psf for the given wavelength
    ld_coeffs: sequence
        The limb darkening coefficients to use
    rp: float
        The planet radius
    time: sequence
        The time axis for the TSO
    tmodel: batman.transitmodel.TransitModel
        The transit model of the planet
    plot: bool
        Plot the lightcurve

    Returns
    -------
    sequence
        An array of shape (len(time), *psf.shape), scaled by the lightcurve

    Example
    -------
    import numpy as np
    from awesimsoss.make_trace import psf_lightcurve
    lc = psf_lightcurve(np.ones((76, 76)), None, None, np.linspace(-0.2, 0.2, 200), None)
    """
    # One copy of the psf per time step
    flux = np.tile(psf, (len(time), 1, 1))

    # Apply a transit only when a full set of planet inputs is supplied.
    # The model type is compared by name so batman is not a hard dependency.
    has_model = str(type(tmodel)) == "<class 'batman.transitmodel.TransitModel'>"
    if ld_coeffs is not None and rp is not None and has_model:
        # Wavelength-dependent orbital parameters
        tmodel.u = ld_coeffs
        tmodel.rp = rp

        # Scale each frame by the lightcurve value at its time step
        lightcurve = tmodel.light_curve(tmodel)
        flux *= lightcurve[:, None, None]

    return flux
def psf_tilts(order):
    """Return the psf tilt angle for each of the 2048 columns of an order.

    Parameters
    ----------
    order: int
        The order to use, [1, 2]

    Returns
    -------
    np.ndarray
        The angle from the vertical of the psf in each of the 2048 columns
    """
    if order not in [1, 2]:
        raise ValueError('Only orders 1 and 2 are supported.')

    # Locate the cached tilt file for this order
    tilt_path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
    psf_file = resource_filename('awesimsoss', tilt_path)

    # Compute and cache the tilts on first use
    if not os.path.exists(psf_file):
        calculate_psf_tilts()

    return np.load(psf_file)
def put_psf_on_subarray(psf, y, frame_height=256):
    """Resample a 2D psf onto a subarray column, centered on a given row.

    Parameters
    ----------
    psf: sequence
        The 2D psf
    y: float
        The (fractional) row on which to center the psf
    frame_height: int
        The number of rows in the target subarray

    Returns
    -------
    np.ndarray
        The (frame_height, psf-width) frame with the interpolated psf
    """
    # Build a bicubic spline over the psf. Use the builtin `float`:
    # the `np.float` alias was removed in NumPy 1.24.
    dim = psf.shape[0]
    mid = (dim - 1.0) / 2.0
    arr = np.arange(dim, dtype=float)
    spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)

    # Output grid, shifted so the psf center lands on row y
    yg, xg = np.indices((frame_height, dim), dtype=np.float64)
    yg += mid - y

    # Resample the psf onto the shifted grid
    frame = spline.ev(xg, yg)

    # Zero out points that fell outside the original psf (extrapolated)
    extrapol = (xg < -0.5) | (xg >= dim - 0.5) | (yg < -0.5) | (yg >= dim - 0.5)
    frame[extrapol] = 0

    return frame
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False
    ):
    """
    Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
    76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
    are scaled to unity and rotated to reproduce the trace tilt at each
    wavelength then placed on the desired subarray.

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    order: int
        The trace order
    subarray: str
        The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
    generate: bool
        Generate a new cube

    Returns
    -------
    np.ndarray
        An array of the SOSS psf at 2048 wavelengths for each order
    """
    if generate:
        print('Coffee time! This takes about 5 minutes.')
        # Mean wavelength per column; orders 1-2 for CLEAR, order 1 only for F277W
        wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if
            filt == 'CLEAR' else 1]
        # Trace-center polynomial coefficients for each order
        coeffs = trace_polynomials(subarray)
        # Load the monochromatic psf cube and its wavelength axis from disk
        psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
        psf_file = resource_filename('awesimsoss', psf_path)
        cube = fits.getdata(psf_file).swapaxes(-1, -2)
        wave = fits.getdata(psf_file, ext=1)
        # Initialize the wavelength -> psf interpolator
        psfs = interp1d(wave, cube, axis=0, kind=3)
        trace_cols = np.arange(2048)
        for n, wavelength in enumerate(wavelengths):
            # y-position of the trace center in each column for this order
            trace_centers = np.polyval(coeffs[n], trace_cols)
            # Skip order 2 for F277W, and order 3 entirely
            if n == 1 and filt.lower() == 'f277w' or n == 2:
                pass
            else:
                # Interpolate a psf for every column wavelength (in parallel)
                print('Calculating order {} SOSS psfs for {} filter...'.
                    format(n + 1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
                raw_psfs = np.array(pool.map(func, wavelength))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time() - start))
                # Per-column tilt angles.
                # NOTE(review): this uses the `order` argument, not n + 1, so
                # every order is rotated with the same tilts — confirm intended
                angles = psf_tilts(order)
                # Rotate each psf to the local trace tilt (in parallel)
                print('Rotating order {} SOSS psfs for {} filter...'.format
                    (n + 1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(rotate, reshape=False)
                rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,
                    angles)))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time() - start))
                # Renormalize each rotated psf to unit total flux
                rotated_psfs = np.abs(rotated_psfs)
                scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
                rotated_psfs = rotated_psfs / scale
                # Split into 4 chunks to keep each output file small
                chunks = rotated_psfs.reshape(4, 512, 76, 76)
                for N, chunk in enumerate(chunks):
                    idx0 = N * 512
                    idx1 = idx0 + 512
                    centers = trace_centers[idx0:idx1]
                    # Resample each psf onto the subarray at its trace center
                    print(
                        'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'
                        .format(N + 1, n + 1, filt))
                    start = time.time()
                    pool = multiprocessing.Pool(8)
                    data = zip(chunk, centers)
                    subarray_psfs = pool.starmap(put_psf_on_subarray, data)
                    pool.close()
                    pool.join()
                    del pool
                    print('Finished in {} seconds.'.format(time.time() - start)
                        )
                    # Remove any existing chunk file, then write the new one
                    filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,
                        n + 1, N + 1)
                    file = resource_filename('awesimsoss', filename)
                    if os.path.isfile(file):
                        os.system('rm {}'.format(file))
                    np.save(file, np.array(subarray_psfs))
                    print('Data saved to', file)
    else:
        # Read the 4 cached chunk files and reassemble the full cube
        full_data = []
        for chunk in [1, 2, 3, 4]:
            path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk
                )
            file = resource_filename('awesimsoss', path)
            full_data.append(np.load(file))
        return np.concatenate(full_data, axis=0)
<|reserved_special_token_1|>
"""
A module to generate simulated 2D time-series SOSS data
Authors: Joe Filippazzo
"""
import os
from pkg_resources import resource_filename
import multiprocessing
import time
from functools import partial
import warnings
import numpy as np
from astropy.io import fits
from bokeh.plotting import figure, show
from hotsoss import utils
from svo_filters import svo
from scipy.interpolate import interp1d
from scipy.ndimage.interpolation import rotate
from scipy.interpolate import interp2d, RectBivariateSpline
try:
import webbpsf
except ImportError:
print("Could not import `webbpsf` package. Functionality limited.")
warnings.simplefilter('ignore')
def calculate_psf_tilts():
    """
    Calculate the tilt of the psf at the center of each column
    using all binned pixels in the given wavelength calibration file
    for both orders and save to file
    """
    for order in [1, 2]:
        # Output file for this order
        path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
        psf_file = resource_filename('awesimsoss', path)

        # Dimensions
        subarray = 'SUBSTRIP256'
        X = range(2048)
        Y = range(256)

        # Get the wave map
        wave_map = utils.wave_solutions(subarray, order).astype(float)

        # Get the y-coordinate of the trace polynomial in this column
        # (center of the trace)
        coeffs = trace_polynomials(subarray=subarray, order=order)
        trace = np.polyval(coeffs, X)

        # Interpolate to get the wavelength value at the center.
        # NOTE(review): scipy.interpolate.interp2d is deprecated and was
        # removed in SciPy 1.14 — migrate to RegularGridInterpolator.
        wave = interp2d(X, Y, wave_map)

        # Get the wavelength of the trace center in each column
        trace_wave = []
        for x, y in zip(X, trace):
            trace_wave.append(wave(x, y)[0])

        # For each column wavelength (defined by the wavelength at
        # the trace center) define an isowavelength contour
        angles = []
        for n, x in enumerate(X):
            w = trace_wave[x]

            # Edge cases: guard explicitly. The previous try/except on
            # trace_wave[x - 1] never fired at x == 0 because the negative
            # index silently wrapped to the last element.
            w0 = trace_wave[x - 1] if x > 0 else 0
            w1 = trace_wave[x + 1] if x < len(trace_wave) - 1 else 10

            # Define the width of the wavelength bin as half-way
            # between neighboring points
            dw0 = np.mean([w0, w])
            dw1 = np.mean([w1, w])

            # Get the coordinates of all the pixels in that range
            yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))

            # Find the angle between the vertical and the tilted wavelength bin
            if len(xx) >= 1:
                angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])
            else:
                angle = 0

            # Don't flip them upside down
            angle = angle % 180

            # Add to the array
            angles.append(angle)

        # Save the file
        np.save(psf_file, np.array(angles))
        print('Angles saved to', psf_file)
def nuke_psfs(tilts=True, raw=True, final=True):
    """Regenerate all the psf reference files from scratch."""
    # Recompute the per-column psf tilt angles
    if tilts:
        calculate_psf_tilts()

    for filt in ('CLEAR', 'F277W'):
        # Recompute the raw psfs from WebbPSF
        if raw:
            generate_SOSS_psfs(filt)

        # Rebuild the rotated/interpolated psf cubes for trace assembly
        if final:
            SOSS_psf_cube(filt=filt, generate=True)
def generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='', subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):
    """
    Generate a lookup table of limb darkening coefficients for full
    SOSS wavelength range

    Parameters
    ----------
    wavelengths: sequence
        The wavelengths at which to calculate the LDCs
    ld_profile: str
        A limb darkening profile name supported by
        `ExoCTK.ldc.ldcfit.ld_profile()`
    grid_point: dict, sequence
        The stellar parameters [Teff, logg, FeH] or stellar model
        dictionary from `ExoCTK.modelgrid.ModelGrid.get()`
    model_grid: ExoCTK.modelgrid.ModelGrid (optional)
        A pre-loaded model grid; loaded from MODELGRID_DIR when absent
    subarray: str
        Unused (kept for backward compatibility)
    n_bins: int
        The number of bins to break up the grism into
    plot: bool
        Unused (kept for backward compatibility)
    save: str
        The path to save to file to (currently unused)

    Returns
    -------
    np.ndarray
        An array of shape (len(wavelengths), n_coeffs) of the LDCs

    Example
    -------
    from awesimsoss.sim2D import awesim
    lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])
    """
    # ExoCTK is an optional dependency
    try:
        from exoctk import modelgrid
        from exoctk.limb_darkening import limb_darkening_fit as lf
    except ImportError:
        return

    # Load the model grid only when one wasn't supplied. (Previously a
    # second, unconditional load discarded any caller-provided grid.)
    if not isinstance(model_grid, modelgrid.ModelGrid):
        model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],
                                         resolution=700, wave_rng=(0.6, 2.8))

    # Resolve [Teff, logg, FeH] into a stellar model dictionary
    if isinstance(grid_point, (list, tuple, np.ndarray)):
        grid_point = model_grid.get(*grid_point)

    # Abort if no stellar dict
    if not isinstance(grid_point, dict):
        print('Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).')
        return

    # Break the bandpass up into n_bins pieces
    bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)

    # Calculate the LDCs
    ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],
                         bandpass=bandpass, grid_point=grid_point.copy(),
                         mu_min=0.08, verbose=False)

    # Interpolate the LDCs to the desired wavelengths
    coeff_table = ldc_results[ld_profile]['coeffs']
    coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]
    coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table[c]) for c in coeff_cols]

    return np.array(coeffs).T
def generate_SOSS_psfs(filt):
    """
    Generate a cube of the psf at 100 wavelengths from the min to the max wavelength

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    """
    # Output file for this filter
    file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))

    # Configure the webbpsf NIRISS instrument for SOSS mode
    ns = webbpsf.NIRISS()
    ns.filter = filt
    ns.pupil_mask = 'GR700XD'

    # Overlap of the instrument wavelength limits with the
    # SUBSTRIP256 wavelength solution (ignoring unlit pixels)
    wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()
    valid = wavelengths[wavelengths > 0]
    wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1E6, np.min(valid)])
    wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1E6, np.max(valid)])

    # webbpsf.calc_datacube can only handle 100 but that's sufficient
    W = np.linspace(wave_min, wave_max, 100) * 1E-6

    # Calculate the psfs
    print("Generating SOSS psfs. This takes about 8 minutes...")
    start = time.time()
    PSF = ns.calc_datacube(W, oversample=1)[0].data
    print("Finished in", time.time() - start)

    # Write the psf cube and its wavelength axis [um] to a FITS file
    psfhdu = fits.PrimaryHDU(data=PSF)
    wavhdu = fits.ImageHDU(data=W * 1E6, name='WAV')
    hdulist = fits.HDUList([psfhdu, wavhdu])
    hdulist.writeto(file, overwrite=True)
    hdulist.close()
def get_angle(pf, p0=np.array([0, 0]), pi=None):
    """Compute the signed angle (in degrees) of the pf-p0-pi corner.

    Parameters
    ----------
    pf: sequence
        The coordinates of a point on the rotated vector
    p0: sequence
        The coordinates of the pivot
    pi: sequence
        The coordinates of a point on the fixed vector

    Returns
    -------
    float
        The angle in degrees
    """
    # Default fixed vector points straight "up" from the pivot
    if pi is None:
        pi = p0 + np.array([0, 1])

    # Vectors from the pivot to each endpoint
    v0 = np.array(pf) - np.array(p0)
    v1 = np.array(pi) - np.array(p0)

    # atan2(det, dot) gives the signed angle between the two vectors.
    # np.arctan2 replaces np.math.atan2, which was removed in NumPy 2.0.
    angle = np.arctan2(np.linalg.det([v0, v1]), np.dot(v0, v1))
    angle = np.degrees(angle)
    return angle
def get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False):
    """Retrieve the 2D SOSS psf for a wavelength, normalized to unit total flux.

    Parameters
    ----------
    wavelength: float
        The wavelength to retrieve [um]
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    psfs: scipy.interpolate.interp1d (optional)
        A pre-built wavelength interpolator over the psf cube
    cutoff: float
        Unused (kept for backward compatibility)
    plot: bool
        Plot the psf instead of returning it

    Returns
    -------
    np.ndarray
        The 2D psf for the input wavelength (None when plot=True)
    """
    # Build the interpolator from the on-disk psf cube when not supplied
    if psfs is None:
        file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))
        cube = fits.getdata(file).swapaxes(-1, -2)
        wave = fits.getdata(file, ext=1)
        psfs = interp1d(wave, cube, axis=0, kind=3)

    # Clamp the requested wavelength to the interpolator's domain
    wavelength = min(max(wavelength, psfs.x[0]), psfs.x[-1])

    # Interpolate, then rescale so the total flux is exactly 1
    psf = psfs(wavelength)
    psf = psf / np.sum(psf)

    if plot:
        fig = figure()
        fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])
        show(fig)
    else:
        return psf
def make_frame(psfs):
    """Coadd a sequence of per-column psfs into a single 2D frame.

    Parameters
    ----------
    psfs: sequence
        The psfs to stack, one per trace column (each 76 columns wide)

    Returns
    -------
    np.ndarray
        The (256, 2048) frame with all psfs coadded
    """
    # Oversized canvas so psfs near the edges can spill over
    canvas = np.zeros((256, 2124))

    # Drop each psf onto the canvas, shifted one column at a time
    for col, psf in enumerate(psfs):
        canvas[:, col:col + 76] += psf

    # Trim the 38-pixel padding from both sides
    return canvas[:, 38:-38]
def psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):
    """Tile a single-wavelength psf along the time axis, applying a transit.

    Parameters
    ----------
    psf: sequence
        The flux-scaled psf for the given wavelength
    ld_coeffs: sequence
        The limb darkening coefficients to use
    rp: float
        The planet radius
    time: sequence
        The time axis for the TSO
    tmodel: batman.transitmodel.TransitModel
        The transit model of the planet
    plot: bool
        Plot the lightcurve

    Returns
    -------
    sequence
        An array of shape (len(time), *psf.shape), scaled by the lightcurve

    Example
    -------
    import numpy as np
    from awesimsoss.make_trace import psf_lightcurve
    lc = psf_lightcurve(np.ones((76, 76)), None, None, np.linspace(-0.2, 0.2, 200), None)
    """
    # One copy of the psf per time step
    flux = np.tile(psf, (len(time), 1, 1))

    # Apply a transit only when a full set of planet inputs is supplied.
    # The model type is compared by name so batman is not a hard dependency.
    has_model = str(type(tmodel)) == "<class 'batman.transitmodel.TransitModel'>"
    if ld_coeffs is not None and rp is not None and has_model:
        # Wavelength-dependent orbital parameters
        tmodel.u = ld_coeffs
        tmodel.rp = rp

        # Scale each frame by the lightcurve value at its time step
        lightcurve = tmodel.light_curve(tmodel)
        flux *= lightcurve[:, None, None]

    return flux
def psf_tilts(order):
    """Return the psf tilt angle for each of the 2048 columns of an order.

    Parameters
    ----------
    order: int
        The order to use, [1, 2]

    Returns
    -------
    np.ndarray
        The angle from the vertical of the psf in each of the 2048 columns
    """
    if order not in [1, 2]:
        raise ValueError('Only orders 1 and 2 are supported.')

    # Locate the cached tilt file for this order
    tilt_path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)
    psf_file = resource_filename('awesimsoss', tilt_path)

    # Compute and cache the tilts on first use
    if not os.path.exists(psf_file):
        calculate_psf_tilts()

    return np.load(psf_file)
def put_psf_on_subarray(psf, y, frame_height=256):
    """Resample a 2D psf onto a subarray column, centered on a given row.

    Parameters
    ----------
    psf: sequence
        The 2D psf
    y: float
        The (fractional) row on which to center the psf
    frame_height: int
        The number of rows in the target subarray

    Returns
    -------
    np.ndarray
        The (frame_height, psf-width) frame with the interpolated psf
    """
    # Build a bicubic spline over the psf. Use the builtin `float`:
    # the `np.float` alias was removed in NumPy 1.24.
    dim = psf.shape[0]
    mid = (dim - 1.0) / 2.0
    arr = np.arange(dim, dtype=float)
    spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)

    # Output grid, shifted so the psf center lands on row y
    yg, xg = np.indices((frame_height, dim), dtype=np.float64)
    yg += mid - y

    # Resample the psf onto the shifted grid
    frame = spline.ev(xg, yg)

    # Zero out points that fell outside the original psf (extrapolated)
    extrapol = (xg < -0.5) | (xg >= dim - 0.5) | (yg < -0.5) | (yg >= dim - 0.5)
    frame[extrapol] = 0

    return frame
def SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False):
    """
    Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a
    76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs
    are scaled to unity and rotated to reproduce the trace tilt at each
    wavelength then placed on the desired subarray.

    Parameters
    ----------
    filt: str
        The filter to use, ['CLEAR', 'F277W']
    order: int
        The trace order
    subarray: str
        The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']
    generate: bool
        Generate a new cube

    Returns
    -------
    np.ndarray
        An array of the SOSS psf at 2048 wavelengths for each order
    """
    if generate:
        print('Coffee time! This takes about 5 minutes.')
        # Mean wavelength per column; orders 1-2 for CLEAR, order 1 only for F277W
        wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if filt == 'CLEAR' else 1]
        coeffs = trace_polynomials(subarray)
        # Get the file
        psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)
        psf_file = resource_filename('awesimsoss', psf_path)
        # Load the SOSS psf cube and its wavelength axis
        cube = fits.getdata(psf_file).swapaxes(-1, -2)
        wave = fits.getdata(psf_file, ext=1)
        # Initialize the wavelength -> psf interpolator
        psfs = interp1d(wave, cube, axis=0, kind=3)
        trace_cols = np.arange(2048)
        # Run datacube
        for n, wavelength in enumerate(wavelengths):
            # Evaluate the trace polynomial in each column to get the y-position of the trace center
            trace_centers = np.polyval(coeffs[n], trace_cols)
            # Don't calculate order2 for F277W or order 3 for either
            if (n == 1 and filt.lower() == 'f277w') or n == 2:
                pass
            else:
                # Get the psf for each column (in parallel)
                print('Calculating order {} SOSS psfs for {} filter...'.format(n+1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(get_SOSS_psf, filt=filt, psfs=psfs)
                raw_psfs = np.array(pool.map(func, wavelength))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time()-start))
                # Get the PSF tilt at each column.
                # NOTE(review): this uses the `order` argument, not n + 1, so
                # every order is rotated with the same tilts — confirm intended
                angles = psf_tilts(order)
                # Rotate the psfs to the local trace tilt (in parallel)
                print('Rotating order {} SOSS psfs for {} filter...'.format(n+1, filt))
                start = time.time()
                pool = multiprocessing.Pool(8)
                func = partial(rotate, reshape=False)
                rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs, angles)))
                pool.close()
                pool.join()
                del pool
                print('Finished in {} seconds.'.format(time.time()-start))
                # Renormalize each rotated psf to unit total flux
                rotated_psfs = np.abs(rotated_psfs)
                scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]
                rotated_psfs = rotated_psfs/scale
                # Split it into 4 chunks to be below Github file size limit
                chunks = rotated_psfs.reshape(4, 512, 76, 76)
                for N, chunk in enumerate(chunks):
                    idx0 = N*512
                    idx1 = idx0+512
                    centers = trace_centers[idx0:idx1]
                    # Interpolate the psfs onto the subarray at their trace centers
                    print('Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'.format(N+1, n+1, filt))
                    start = time.time()
                    pool = multiprocessing.Pool(8)
                    data = zip(chunk, centers)
                    subarray_psfs = pool.starmap(put_psf_on_subarray, data)
                    pool.close()
                    pool.join()
                    del pool
                    print('Finished in {} seconds.'.format(time.time()-start))
                    # Get the filepath for this chunk
                    filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, n+1, N+1)
                    file = resource_filename('awesimsoss', filename)
                    # Delete the file if it exists
                    if os.path.isfile(file):
                        os.system('rm {}'.format(file))
                    # Write the data
                    np.save(file, np.array(subarray_psfs))
                    print('Data saved to', file)
    else:
        # Get the 4 cached chunk files and concatenate into the full cube
        full_data = []
        for chunk in [1, 2, 3, 4]:
            path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk)
            file = resource_filename('awesimsoss', path)
            full_data.append(np.load(file))
        return np.concatenate(full_data, axis=0)
|
flexible
|
{
"blob_id": "9f478df4ff19cfe6c6559b6489c874d49377b90e",
"index": 4949,
"step-1": "<mask token>\n\n\ndef calculate_psf_tilts():\n \"\"\"\n Calculate the tilt of the psf at the center of each column\n using all binned pixels in the given wavelength calibration file\n for both orders and save to file\n \"\"\"\n for order in [1, 2]:\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n wave = interp2d(X, Y, wave_map)\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n angles = []\n for n, x in enumerate(X):\n w = trace_wave[x]\n try:\n w0 = trace_wave[x - 1]\n except IndexError:\n w0 = 0\n try:\n w1 = trace_wave[x + 1]\n except IndexError:\n w1 = 10\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n angle = angle % 180\n angles.append(angle)\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)\n\n\ndef nuke_psfs(tilts=True, raw=True, final=True):\n \"\"\"Generate all the psf cubes from scratch\"\"\"\n if tilts:\n calculate_psf_tilts()\n for filt in ['CLEAR', 'F277W']:\n if raw:\n generate_SOSS_psfs(filt)\n if final:\n SOSS_psf_cube(filt=filt, generate=True)\n\n\n<mask token>\n\n\ndef get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False\n ):\n \"\"\"\n Retrieve the SOSS psf for the given wavelength,\n scale the total flux to 1, and set pixels below\n cutoff value to zero\n\n Parameters\n ----------\n wavelength: float\n The wavelength to retrieve [um]\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n psfs: numpy.interp1d object (optional)\n The interpolator\n plot: bool\n Plot the psf\n\n Returns\n -------\n np.ndarray\n The 2D psf 
for the input wavelength\n \"\"\"\n if psfs is None:\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.\n format(filt))\n cube = fits.getdata(file).swapaxes(-1, -2)\n wave = fits.getdata(file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n if wavelength < psfs.x[0]:\n wavelength = psfs.x[0]\n if wavelength > psfs.x[-1]:\n wavelength = psfs.x[-1]\n psf = psfs(wavelength)\n psf *= 1.0 / np.sum(psf)\n if plot:\n fig = figure()\n fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])\n show(fig)\n else:\n return psf\n\n\n<mask token>\n\n\ndef psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):\n \"\"\"\n Generate a lightcurve for a (76, 76) psf of a given wavelength\n\n Parameters\n ----------\n psf: sequencs\n The flux-scaled psf for the given wavelength\n ld_coeffs: sequence\n The limb darkening coefficients to use\n rp: float\n The planet radius\n time: sequence\n The time axis for the TSO\n tmodel: batman.transitmodel.TransitModel\n The transit model of the planet\n plot: bool\n Plot the lightcurve\n\n Returns\n -------\n sequence\n A 1D array of the lightcurve with the same length as *t*\n\n Example 1\n ---------\n # No planet\n import numpy as np\n from awesimsoss.make_trace import psf_lightcurve\n psf = np.ones((76, 76))\n time = np.linspace(-0.2, 0.2, 200)\n lc = psf_lightcurve(psf, None, None, time, None, plot=True)\n\n Example 2\n ---------\n # With a planet\n import batman\n import numpy as np\n import astropy.units as q\n from awesimsoss.make_trace import psf_lightcurve\n params = batman.TransitParams()\n params.t0 = 0. # time of inferior conjunction\n params.per = 5.7214742 # orbital period (days)\n params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)\n params.inc = 89.8 # orbital inclination (in degrees)\n params.ecc = 0. # eccentricity\n params.w = 90. 
# longitude of periastron (in degrees)\n params.teff = 3500 # effective temperature of the host star\n params.logg = 5 # log surface gravity of the host star\n params.feh = 0 # metallicity of the host star\n params.limb_dark = 'quadratic' # limb darkening profile to use\n params.u = [1, 1] # limb darkening coefficients\n tmodel = batman.TransitModel(params, time)\n lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)\n \"\"\"\n flux = np.tile(psf, (len(time), 1, 1))\n if ld_coeffs is not None and rp is not None and str(type(tmodel)\n ) == \"<class 'batman.transitmodel.TransitModel'>\":\n tmodel.u = ld_coeffs\n tmodel.rp = rp\n lightcurve = tmodel.light_curve(tmodel)\n flux *= lightcurve[:, None, None]\n return flux\n\n\ndef psf_tilts(order):\n \"\"\"\n Get the psf tilts for the given order\n\n Parameters\n ----------\n order: int\n The order to use, [1, 2]\n\n Returns\n -------\n np.ndarray\n The angle from the vertical of the psf in each of the 2048 columns\n \"\"\"\n if order not in [1, 2]:\n raise ValueError('Only orders 1 and 2 are supported.')\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n if not os.path.exists(psf_file):\n calculate_psf_tilts()\n return np.load(psf_file)\n\n\ndef put_psf_on_subarray(psf, y, frame_height=256):\n \"\"\"Make a 2D SOSS trace from a sequence of psfs and trace center locations\n\n Parameters\n ----------\n psf: sequence\n The 2D psf\n y: float\n The grid y value to place the center of the psf\n grid: sequence\n The [x, y] grid ranges\n\n Returns\n -------\n np.ndarray\n The 2D frame with the interpolated psf\n \"\"\"\n dim = psf.shape[0]\n mid = (dim - 1.0) / 2.0\n arr = np.arange(dim, dtype=np.float)\n spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)\n yg, xg = np.indices((frame_height, dim), dtype=np.float64)\n yg += mid - y\n frame = spline.ev(xg, yg)\n extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim -\n 0.5))\n 
frame[extrapol] = 0\n return frame\n\n\ndef SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False\n ):\n \"\"\"\n Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a\n 76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs\n are scaled to unity and rotated to reproduce the trace tilt at each\n wavelength then placed on the desired subarray.\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n order: int\n The trace order\n subarray: str\n The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']\n generate: bool\n Generate a new cube\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n if generate:\n print('Coffee time! This takes about 5 minutes.')\n wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if\n filt == 'CLEAR' else 1]\n coeffs = trace_polynomials(subarray)\n psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)\n psf_file = resource_filename('awesimsoss', psf_path)\n cube = fits.getdata(psf_file).swapaxes(-1, -2)\n wave = fits.getdata(psf_file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n trace_cols = np.arange(2048)\n for n, wavelength in enumerate(wavelengths):\n trace_centers = np.polyval(coeffs[n], trace_cols)\n if n == 1 and filt.lower() == 'f277w' or n == 2:\n pass\n else:\n print('Calculating order {} SOSS psfs for {} filter...'.\n format(n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(get_SOSS_psf, filt=filt, psfs=psfs)\n raw_psfs = np.array(pool.map(func, wavelength))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n angles = psf_tilts(order)\n print('Rotating order {} SOSS psfs for {} filter...'.format\n (n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(rotate, reshape=False)\n rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,\n angles)))\n pool.close()\n 
pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n rotated_psfs = np.abs(rotated_psfs)\n scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]\n rotated_psfs = rotated_psfs / scale\n chunks = rotated_psfs.reshape(4, 512, 76, 76)\n for N, chunk in enumerate(chunks):\n idx0 = N * 512\n idx1 = idx0 + 512\n centers = trace_centers[idx0:idx1]\n print(\n 'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'\n .format(N + 1, n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n data = zip(chunk, centers)\n subarray_psfs = pool.starmap(put_psf_on_subarray, data)\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start)\n )\n filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,\n n + 1, N + 1)\n file = resource_filename('awesimsoss', filename)\n if os.path.isfile(file):\n os.system('rm {}'.format(file))\n np.save(file, np.array(subarray_psfs))\n print('Data saved to', file)\n else:\n full_data = []\n for chunk in [1, 2, 3, 4]:\n path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk\n )\n file = resource_filename('awesimsoss', path)\n full_data.append(np.load(file))\n return np.concatenate(full_data, axis=0)\n",
"step-2": "<mask token>\n\n\ndef calculate_psf_tilts():\n \"\"\"\n Calculate the tilt of the psf at the center of each column\n using all binned pixels in the given wavelength calibration file\n for both orders and save to file\n \"\"\"\n for order in [1, 2]:\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n wave = interp2d(X, Y, wave_map)\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n angles = []\n for n, x in enumerate(X):\n w = trace_wave[x]\n try:\n w0 = trace_wave[x - 1]\n except IndexError:\n w0 = 0\n try:\n w1 = trace_wave[x + 1]\n except IndexError:\n w1 = 10\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n angle = angle % 180\n angles.append(angle)\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)\n\n\ndef nuke_psfs(tilts=True, raw=True, final=True):\n \"\"\"Generate all the psf cubes from scratch\"\"\"\n if tilts:\n calculate_psf_tilts()\n for filt in ['CLEAR', 'F277W']:\n if raw:\n generate_SOSS_psfs(filt)\n if final:\n SOSS_psf_cube(filt=filt, generate=True)\n\n\ndef generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='',\n subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):\n \"\"\"\n Generate a lookup table of limb darkening coefficients for full\n SOSS wavelength range\n\n Parameters\n ----------\n wavelengths: sequence\n The wavelengths at which to calculate the LDCs\n ld_profile: str\n A limb darkening profile name supported by\n `ExoCTK.ldc.ldcfit.ld_profile()`\n grid_point: dict, sequence\n The stellar parameters [Teff, logg, 
FeH] or stellar model\n dictionary from `ExoCTK.modelgrid.ModelGrid.get()`\n n_bins: int\n The number of bins to break up the grism into\n save: str\n The path to save to file to\n\n Example\n -------\n from awesimsoss.sim2D import awesim\n lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])\n \"\"\"\n try:\n from exoctk import modelgrid\n from exoctk.limb_darkening import limb_darkening_fit as lf\n except ImportError:\n return\n if not isinstance(model_grid, modelgrid.ModelGrid):\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],\n resolution=700)\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],\n resolution=700, wave_rng=(0.6, 2.8))\n if isinstance(grid_point, (list, tuple, np.ndarray)):\n grid_point = model_grid.get(*grid_point)\n if not isinstance(grid_point, dict):\n print(\n 'Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).'\n )\n return\n bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)\n ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],\n bandpass=bandpass, grid_point=grid_point.copy(), mu_min=0.08,\n verbose=False)\n coeff_table = ldc_results[ld_profile]['coeffs']\n coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]\n coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table\n [c]) for c in coeff_cols]\n return np.array(coeffs).T\n\n\ndef generate_SOSS_psfs(filt):\n \"\"\"\n Gnerate a cube of the psf at 100 wavelengths from the min to the max wavelength\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n \"\"\"\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(\n filt))\n ns = webbpsf.NIRISS()\n ns.filter = filt\n ns.pupil_mask = 'GR700XD'\n wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()\n wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1000000.0, np.min(\n wavelengths[wavelengths > 0])])\n wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 
1000000.0, np.max(\n wavelengths[wavelengths > 0])])\n W = np.linspace(wave_min, wave_max, 100) * 1e-06\n print('Generating SOSS psfs. This takes about 8 minutes...')\n start = time.time()\n PSF = ns.calc_datacube(W, oversample=1)[0].data\n print('Finished in', time.time() - start)\n psfhdu = fits.PrimaryHDU(data=PSF)\n wavhdu = fits.ImageHDU(data=W * 1000000.0, name='WAV')\n hdulist = fits.HDUList([psfhdu, wavhdu])\n hdulist.writeto(file, overwrite=True)\n hdulist.close()\n\n\n<mask token>\n\n\ndef get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False\n ):\n \"\"\"\n Retrieve the SOSS psf for the given wavelength,\n scale the total flux to 1, and set pixels below\n cutoff value to zero\n\n Parameters\n ----------\n wavelength: float\n The wavelength to retrieve [um]\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n psfs: numpy.interp1d object (optional)\n The interpolator\n plot: bool\n Plot the psf\n\n Returns\n -------\n np.ndarray\n The 2D psf for the input wavelength\n \"\"\"\n if psfs is None:\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.\n format(filt))\n cube = fits.getdata(file).swapaxes(-1, -2)\n wave = fits.getdata(file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n if wavelength < psfs.x[0]:\n wavelength = psfs.x[0]\n if wavelength > psfs.x[-1]:\n wavelength = psfs.x[-1]\n psf = psfs(wavelength)\n psf *= 1.0 / np.sum(psf)\n if plot:\n fig = figure()\n fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])\n show(fig)\n else:\n return psf\n\n\ndef make_frame(psfs):\n \"\"\"\n Generate a frame from an array of psfs\n\n Parameters\n ----------\n psfs: sequence\n An array of psfs of shape (2048, 76, 76)\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n frame = np.zeros((256, 2124))\n for n, psf in enumerate(psfs):\n frame[:, n:n + 76] += psf\n return frame[:, 38:-38]\n\n\ndef psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):\n 
\"\"\"\n Generate a lightcurve for a (76, 76) psf of a given wavelength\n\n Parameters\n ----------\n psf: sequencs\n The flux-scaled psf for the given wavelength\n ld_coeffs: sequence\n The limb darkening coefficients to use\n rp: float\n The planet radius\n time: sequence\n The time axis for the TSO\n tmodel: batman.transitmodel.TransitModel\n The transit model of the planet\n plot: bool\n Plot the lightcurve\n\n Returns\n -------\n sequence\n A 1D array of the lightcurve with the same length as *t*\n\n Example 1\n ---------\n # No planet\n import numpy as np\n from awesimsoss.make_trace import psf_lightcurve\n psf = np.ones((76, 76))\n time = np.linspace(-0.2, 0.2, 200)\n lc = psf_lightcurve(psf, None, None, time, None, plot=True)\n\n Example 2\n ---------\n # With a planet\n import batman\n import numpy as np\n import astropy.units as q\n from awesimsoss.make_trace import psf_lightcurve\n params = batman.TransitParams()\n params.t0 = 0. # time of inferior conjunction\n params.per = 5.7214742 # orbital period (days)\n params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)\n params.inc = 89.8 # orbital inclination (in degrees)\n params.ecc = 0. # eccentricity\n params.w = 90. 
# longitude of periastron (in degrees)\n params.teff = 3500 # effective temperature of the host star\n params.logg = 5 # log surface gravity of the host star\n params.feh = 0 # metallicity of the host star\n params.limb_dark = 'quadratic' # limb darkening profile to use\n params.u = [1, 1] # limb darkening coefficients\n tmodel = batman.TransitModel(params, time)\n lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)\n \"\"\"\n flux = np.tile(psf, (len(time), 1, 1))\n if ld_coeffs is not None and rp is not None and str(type(tmodel)\n ) == \"<class 'batman.transitmodel.TransitModel'>\":\n tmodel.u = ld_coeffs\n tmodel.rp = rp\n lightcurve = tmodel.light_curve(tmodel)\n flux *= lightcurve[:, None, None]\n return flux\n\n\ndef psf_tilts(order):\n \"\"\"\n Get the psf tilts for the given order\n\n Parameters\n ----------\n order: int\n The order to use, [1, 2]\n\n Returns\n -------\n np.ndarray\n The angle from the vertical of the psf in each of the 2048 columns\n \"\"\"\n if order not in [1, 2]:\n raise ValueError('Only orders 1 and 2 are supported.')\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n if not os.path.exists(psf_file):\n calculate_psf_tilts()\n return np.load(psf_file)\n\n\ndef put_psf_on_subarray(psf, y, frame_height=256):\n \"\"\"Make a 2D SOSS trace from a sequence of psfs and trace center locations\n\n Parameters\n ----------\n psf: sequence\n The 2D psf\n y: float\n The grid y value to place the center of the psf\n grid: sequence\n The [x, y] grid ranges\n\n Returns\n -------\n np.ndarray\n The 2D frame with the interpolated psf\n \"\"\"\n dim = psf.shape[0]\n mid = (dim - 1.0) / 2.0\n arr = np.arange(dim, dtype=np.float)\n spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)\n yg, xg = np.indices((frame_height, dim), dtype=np.float64)\n yg += mid - y\n frame = spline.ev(xg, yg)\n extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim -\n 0.5))\n 
frame[extrapol] = 0\n return frame\n\n\ndef SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False\n ):\n \"\"\"\n Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a\n 76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs\n are scaled to unity and rotated to reproduce the trace tilt at each\n wavelength then placed on the desired subarray.\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n order: int\n The trace order\n subarray: str\n The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']\n generate: bool\n Generate a new cube\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n if generate:\n print('Coffee time! This takes about 5 minutes.')\n wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if\n filt == 'CLEAR' else 1]\n coeffs = trace_polynomials(subarray)\n psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)\n psf_file = resource_filename('awesimsoss', psf_path)\n cube = fits.getdata(psf_file).swapaxes(-1, -2)\n wave = fits.getdata(psf_file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n trace_cols = np.arange(2048)\n for n, wavelength in enumerate(wavelengths):\n trace_centers = np.polyval(coeffs[n], trace_cols)\n if n == 1 and filt.lower() == 'f277w' or n == 2:\n pass\n else:\n print('Calculating order {} SOSS psfs for {} filter...'.\n format(n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(get_SOSS_psf, filt=filt, psfs=psfs)\n raw_psfs = np.array(pool.map(func, wavelength))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n angles = psf_tilts(order)\n print('Rotating order {} SOSS psfs for {} filter...'.format\n (n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(rotate, reshape=False)\n rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,\n angles)))\n pool.close()\n 
pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n rotated_psfs = np.abs(rotated_psfs)\n scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]\n rotated_psfs = rotated_psfs / scale\n chunks = rotated_psfs.reshape(4, 512, 76, 76)\n for N, chunk in enumerate(chunks):\n idx0 = N * 512\n idx1 = idx0 + 512\n centers = trace_centers[idx0:idx1]\n print(\n 'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'\n .format(N + 1, n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n data = zip(chunk, centers)\n subarray_psfs = pool.starmap(put_psf_on_subarray, data)\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start)\n )\n filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,\n n + 1, N + 1)\n file = resource_filename('awesimsoss', filename)\n if os.path.isfile(file):\n os.system('rm {}'.format(file))\n np.save(file, np.array(subarray_psfs))\n print('Data saved to', file)\n else:\n full_data = []\n for chunk in [1, 2, 3, 4]:\n path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk\n )\n file = resource_filename('awesimsoss', path)\n full_data.append(np.load(file))\n return np.concatenate(full_data, axis=0)\n",
"step-3": "<mask token>\n\n\ndef calculate_psf_tilts():\n \"\"\"\n Calculate the tilt of the psf at the center of each column\n using all binned pixels in the given wavelength calibration file\n for both orders and save to file\n \"\"\"\n for order in [1, 2]:\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n wave = interp2d(X, Y, wave_map)\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n angles = []\n for n, x in enumerate(X):\n w = trace_wave[x]\n try:\n w0 = trace_wave[x - 1]\n except IndexError:\n w0 = 0\n try:\n w1 = trace_wave[x + 1]\n except IndexError:\n w1 = 10\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n angle = angle % 180\n angles.append(angle)\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)\n\n\ndef nuke_psfs(tilts=True, raw=True, final=True):\n \"\"\"Generate all the psf cubes from scratch\"\"\"\n if tilts:\n calculate_psf_tilts()\n for filt in ['CLEAR', 'F277W']:\n if raw:\n generate_SOSS_psfs(filt)\n if final:\n SOSS_psf_cube(filt=filt, generate=True)\n\n\ndef generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='',\n subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):\n \"\"\"\n Generate a lookup table of limb darkening coefficients for full\n SOSS wavelength range\n\n Parameters\n ----------\n wavelengths: sequence\n The wavelengths at which to calculate the LDCs\n ld_profile: str\n A limb darkening profile name supported by\n `ExoCTK.ldc.ldcfit.ld_profile()`\n grid_point: dict, sequence\n The stellar parameters [Teff, logg, 
FeH] or stellar model\n dictionary from `ExoCTK.modelgrid.ModelGrid.get()`\n n_bins: int\n The number of bins to break up the grism into\n save: str\n The path to save to file to\n\n Example\n -------\n from awesimsoss.sim2D import awesim\n lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])\n \"\"\"\n try:\n from exoctk import modelgrid\n from exoctk.limb_darkening import limb_darkening_fit as lf\n except ImportError:\n return\n if not isinstance(model_grid, modelgrid.ModelGrid):\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],\n resolution=700)\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],\n resolution=700, wave_rng=(0.6, 2.8))\n if isinstance(grid_point, (list, tuple, np.ndarray)):\n grid_point = model_grid.get(*grid_point)\n if not isinstance(grid_point, dict):\n print(\n 'Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).'\n )\n return\n bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)\n ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],\n bandpass=bandpass, grid_point=grid_point.copy(), mu_min=0.08,\n verbose=False)\n coeff_table = ldc_results[ld_profile]['coeffs']\n coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]\n coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table\n [c]) for c in coeff_cols]\n return np.array(coeffs).T\n\n\ndef generate_SOSS_psfs(filt):\n \"\"\"\n Gnerate a cube of the psf at 100 wavelengths from the min to the max wavelength\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n \"\"\"\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(\n filt))\n ns = webbpsf.NIRISS()\n ns.filter = filt\n ns.pupil_mask = 'GR700XD'\n wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()\n wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1000000.0, np.min(\n wavelengths[wavelengths > 0])])\n wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 
1000000.0, np.max(\n wavelengths[wavelengths > 0])])\n W = np.linspace(wave_min, wave_max, 100) * 1e-06\n print('Generating SOSS psfs. This takes about 8 minutes...')\n start = time.time()\n PSF = ns.calc_datacube(W, oversample=1)[0].data\n print('Finished in', time.time() - start)\n psfhdu = fits.PrimaryHDU(data=PSF)\n wavhdu = fits.ImageHDU(data=W * 1000000.0, name='WAV')\n hdulist = fits.HDUList([psfhdu, wavhdu])\n hdulist.writeto(file, overwrite=True)\n hdulist.close()\n\n\ndef get_angle(pf, p0=np.array([0, 0]), pi=None):\n \"\"\"Compute angle (in degrees) for pf-p0-pi corner\n\n Parameters\n ----------\n pf: sequence\n The coordinates of a point on the rotated vector\n p0: sequence\n The coordinates of the pivot\n pi: sequence\n The coordinates of the fixed vector\n\n Returns\n -------\n float\n The angle in degrees\n \"\"\"\n if pi is None:\n pi = p0 + np.array([0, 1])\n v0 = np.array(pf) - np.array(p0)\n v1 = np.array(pi) - np.array(p0)\n angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))\n angle = np.degrees(angle)\n return angle\n\n\ndef get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False\n ):\n \"\"\"\n Retrieve the SOSS psf for the given wavelength,\n scale the total flux to 1, and set pixels below\n cutoff value to zero\n\n Parameters\n ----------\n wavelength: float\n The wavelength to retrieve [um]\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n psfs: numpy.interp1d object (optional)\n The interpolator\n plot: bool\n Plot the psf\n\n Returns\n -------\n np.ndarray\n The 2D psf for the input wavelength\n \"\"\"\n if psfs is None:\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.\n format(filt))\n cube = fits.getdata(file).swapaxes(-1, -2)\n wave = fits.getdata(file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n if wavelength < psfs.x[0]:\n wavelength = psfs.x[0]\n if wavelength > psfs.x[-1]:\n wavelength = psfs.x[-1]\n psf = psfs(wavelength)\n psf *= 1.0 / np.sum(psf)\n if plot:\n fig = 
figure()\n fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])\n show(fig)\n else:\n return psf\n\n\ndef make_frame(psfs):\n \"\"\"\n Generate a frame from an array of psfs\n\n Parameters\n ----------\n psfs: sequence\n An array of psfs of shape (2048, 76, 76)\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n frame = np.zeros((256, 2124))\n for n, psf in enumerate(psfs):\n frame[:, n:n + 76] += psf\n return frame[:, 38:-38]\n\n\ndef psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):\n \"\"\"\n Generate a lightcurve for a (76, 76) psf of a given wavelength\n\n Parameters\n ----------\n psf: sequencs\n The flux-scaled psf for the given wavelength\n ld_coeffs: sequence\n The limb darkening coefficients to use\n rp: float\n The planet radius\n time: sequence\n The time axis for the TSO\n tmodel: batman.transitmodel.TransitModel\n The transit model of the planet\n plot: bool\n Plot the lightcurve\n\n Returns\n -------\n sequence\n A 1D array of the lightcurve with the same length as *t*\n\n Example 1\n ---------\n # No planet\n import numpy as np\n from awesimsoss.make_trace import psf_lightcurve\n psf = np.ones((76, 76))\n time = np.linspace(-0.2, 0.2, 200)\n lc = psf_lightcurve(psf, None, None, time, None, plot=True)\n\n Example 2\n ---------\n # With a planet\n import batman\n import numpy as np\n import astropy.units as q\n from awesimsoss.make_trace import psf_lightcurve\n params = batman.TransitParams()\n params.t0 = 0. # time of inferior conjunction\n params.per = 5.7214742 # orbital period (days)\n params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)\n params.inc = 89.8 # orbital inclination (in degrees)\n params.ecc = 0. # eccentricity\n params.w = 90. 
# longitude of periastron (in degrees)\n params.teff = 3500 # effective temperature of the host star\n params.logg = 5 # log surface gravity of the host star\n params.feh = 0 # metallicity of the host star\n params.limb_dark = 'quadratic' # limb darkening profile to use\n params.u = [1, 1] # limb darkening coefficients\n tmodel = batman.TransitModel(params, time)\n lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)\n \"\"\"\n flux = np.tile(psf, (len(time), 1, 1))\n if ld_coeffs is not None and rp is not None and str(type(tmodel)\n ) == \"<class 'batman.transitmodel.TransitModel'>\":\n tmodel.u = ld_coeffs\n tmodel.rp = rp\n lightcurve = tmodel.light_curve(tmodel)\n flux *= lightcurve[:, None, None]\n return flux\n\n\ndef psf_tilts(order):\n \"\"\"\n Get the psf tilts for the given order\n\n Parameters\n ----------\n order: int\n The order to use, [1, 2]\n\n Returns\n -------\n np.ndarray\n The angle from the vertical of the psf in each of the 2048 columns\n \"\"\"\n if order not in [1, 2]:\n raise ValueError('Only orders 1 and 2 are supported.')\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n if not os.path.exists(psf_file):\n calculate_psf_tilts()\n return np.load(psf_file)\n\n\ndef put_psf_on_subarray(psf, y, frame_height=256):\n \"\"\"Make a 2D SOSS trace from a sequence of psfs and trace center locations\n\n Parameters\n ----------\n psf: sequence\n The 2D psf\n y: float\n The grid y value to place the center of the psf\n grid: sequence\n The [x, y] grid ranges\n\n Returns\n -------\n np.ndarray\n The 2D frame with the interpolated psf\n \"\"\"\n dim = psf.shape[0]\n mid = (dim - 1.0) / 2.0\n arr = np.arange(dim, dtype=np.float)\n spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)\n yg, xg = np.indices((frame_height, dim), dtype=np.float64)\n yg += mid - y\n frame = spline.ev(xg, yg)\n extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim -\n 0.5))\n 
frame[extrapol] = 0\n return frame\n\n\ndef SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False\n ):\n \"\"\"\n Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a\n 76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs\n are scaled to unity and rotated to reproduce the trace tilt at each\n wavelength then placed on the desired subarray.\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n order: int\n The trace order\n subarray: str\n The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']\n generate: bool\n Generate a new cube\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n if generate:\n print('Coffee time! This takes about 5 minutes.')\n wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if\n filt == 'CLEAR' else 1]\n coeffs = trace_polynomials(subarray)\n psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)\n psf_file = resource_filename('awesimsoss', psf_path)\n cube = fits.getdata(psf_file).swapaxes(-1, -2)\n wave = fits.getdata(psf_file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n trace_cols = np.arange(2048)\n for n, wavelength in enumerate(wavelengths):\n trace_centers = np.polyval(coeffs[n], trace_cols)\n if n == 1 and filt.lower() == 'f277w' or n == 2:\n pass\n else:\n print('Calculating order {} SOSS psfs for {} filter...'.\n format(n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(get_SOSS_psf, filt=filt, psfs=psfs)\n raw_psfs = np.array(pool.map(func, wavelength))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n angles = psf_tilts(order)\n print('Rotating order {} SOSS psfs for {} filter...'.format\n (n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(rotate, reshape=False)\n rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,\n angles)))\n pool.close()\n 
pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n rotated_psfs = np.abs(rotated_psfs)\n scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]\n rotated_psfs = rotated_psfs / scale\n chunks = rotated_psfs.reshape(4, 512, 76, 76)\n for N, chunk in enumerate(chunks):\n idx0 = N * 512\n idx1 = idx0 + 512\n centers = trace_centers[idx0:idx1]\n print(\n 'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'\n .format(N + 1, n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n data = zip(chunk, centers)\n subarray_psfs = pool.starmap(put_psf_on_subarray, data)\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start)\n )\n filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,\n n + 1, N + 1)\n file = resource_filename('awesimsoss', filename)\n if os.path.isfile(file):\n os.system('rm {}'.format(file))\n np.save(file, np.array(subarray_psfs))\n print('Data saved to', file)\n else:\n full_data = []\n for chunk in [1, 2, 3, 4]:\n path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk\n )\n file = resource_filename('awesimsoss', path)\n full_data.append(np.load(file))\n return np.concatenate(full_data, axis=0)\n",
"step-4": "<mask token>\nimport os\nfrom pkg_resources import resource_filename\nimport multiprocessing\nimport time\nfrom functools import partial\nimport warnings\nimport numpy as np\nfrom astropy.io import fits\nfrom bokeh.plotting import figure, show\nfrom hotsoss import utils\nfrom svo_filters import svo\nfrom scipy.interpolate import interp1d\nfrom scipy.ndimage.interpolation import rotate\nfrom scipy.interpolate import interp2d, RectBivariateSpline\ntry:\n import webbpsf\nexcept ImportError:\n print('Could not import `webbpsf` package. Functionality limited.')\nwarnings.simplefilter('ignore')\n\n\ndef calculate_psf_tilts():\n \"\"\"\n Calculate the tilt of the psf at the center of each column\n using all binned pixels in the given wavelength calibration file\n for both orders and save to file\n \"\"\"\n for order in [1, 2]:\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n wave = interp2d(X, Y, wave_map)\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n angles = []\n for n, x in enumerate(X):\n w = trace_wave[x]\n try:\n w0 = trace_wave[x - 1]\n except IndexError:\n w0 = 0\n try:\n w1 = trace_wave[x + 1]\n except IndexError:\n w1 = 10\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n angle = angle % 180\n angles.append(angle)\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)\n\n\ndef nuke_psfs(tilts=True, raw=True, final=True):\n \"\"\"Generate all the psf cubes from scratch\"\"\"\n if tilts:\n calculate_psf_tilts()\n for filt in ['CLEAR', 'F277W']:\n if raw:\n 
generate_SOSS_psfs(filt)\n if final:\n SOSS_psf_cube(filt=filt, generate=True)\n\n\ndef generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='',\n subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):\n \"\"\"\n Generate a lookup table of limb darkening coefficients for full\n SOSS wavelength range\n\n Parameters\n ----------\n wavelengths: sequence\n The wavelengths at which to calculate the LDCs\n ld_profile: str\n A limb darkening profile name supported by\n `ExoCTK.ldc.ldcfit.ld_profile()`\n grid_point: dict, sequence\n The stellar parameters [Teff, logg, FeH] or stellar model\n dictionary from `ExoCTK.modelgrid.ModelGrid.get()`\n n_bins: int\n The number of bins to break up the grism into\n save: str\n The path to save to file to\n\n Example\n -------\n from awesimsoss.sim2D import awesim\n lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])\n \"\"\"\n try:\n from exoctk import modelgrid\n from exoctk.limb_darkening import limb_darkening_fit as lf\n except ImportError:\n return\n if not isinstance(model_grid, modelgrid.ModelGrid):\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],\n resolution=700)\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'],\n resolution=700, wave_rng=(0.6, 2.8))\n if isinstance(grid_point, (list, tuple, np.ndarray)):\n grid_point = model_grid.get(*grid_point)\n if not isinstance(grid_point, dict):\n print(\n 'Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).'\n )\n return\n bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)\n ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],\n bandpass=bandpass, grid_point=grid_point.copy(), mu_min=0.08,\n verbose=False)\n coeff_table = ldc_results[ld_profile]['coeffs']\n coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]\n coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table\n [c]) for c in coeff_cols]\n return 
np.array(coeffs).T\n\n\ndef generate_SOSS_psfs(filt):\n \"\"\"\n Gnerate a cube of the psf at 100 wavelengths from the min to the max wavelength\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n \"\"\"\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(\n filt))\n ns = webbpsf.NIRISS()\n ns.filter = filt\n ns.pupil_mask = 'GR700XD'\n wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()\n wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1000000.0, np.min(\n wavelengths[wavelengths > 0])])\n wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1000000.0, np.max(\n wavelengths[wavelengths > 0])])\n W = np.linspace(wave_min, wave_max, 100) * 1e-06\n print('Generating SOSS psfs. This takes about 8 minutes...')\n start = time.time()\n PSF = ns.calc_datacube(W, oversample=1)[0].data\n print('Finished in', time.time() - start)\n psfhdu = fits.PrimaryHDU(data=PSF)\n wavhdu = fits.ImageHDU(data=W * 1000000.0, name='WAV')\n hdulist = fits.HDUList([psfhdu, wavhdu])\n hdulist.writeto(file, overwrite=True)\n hdulist.close()\n\n\ndef get_angle(pf, p0=np.array([0, 0]), pi=None):\n \"\"\"Compute angle (in degrees) for pf-p0-pi corner\n\n Parameters\n ----------\n pf: sequence\n The coordinates of a point on the rotated vector\n p0: sequence\n The coordinates of the pivot\n pi: sequence\n The coordinates of the fixed vector\n\n Returns\n -------\n float\n The angle in degrees\n \"\"\"\n if pi is None:\n pi = p0 + np.array([0, 1])\n v0 = np.array(pf) - np.array(p0)\n v1 = np.array(pi) - np.array(p0)\n angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))\n angle = np.degrees(angle)\n return angle\n\n\ndef get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False\n ):\n \"\"\"\n Retrieve the SOSS psf for the given wavelength,\n scale the total flux to 1, and set pixels below\n cutoff value to zero\n\n Parameters\n ----------\n wavelength: float\n The wavelength to retrieve [um]\n filt: str\n The filter to use, 
['CLEAR', 'F277W']\n psfs: numpy.interp1d object (optional)\n The interpolator\n plot: bool\n Plot the psf\n\n Returns\n -------\n np.ndarray\n The 2D psf for the input wavelength\n \"\"\"\n if psfs is None:\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.\n format(filt))\n cube = fits.getdata(file).swapaxes(-1, -2)\n wave = fits.getdata(file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n if wavelength < psfs.x[0]:\n wavelength = psfs.x[0]\n if wavelength > psfs.x[-1]:\n wavelength = psfs.x[-1]\n psf = psfs(wavelength)\n psf *= 1.0 / np.sum(psf)\n if plot:\n fig = figure()\n fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])\n show(fig)\n else:\n return psf\n\n\ndef make_frame(psfs):\n \"\"\"\n Generate a frame from an array of psfs\n\n Parameters\n ----------\n psfs: sequence\n An array of psfs of shape (2048, 76, 76)\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n frame = np.zeros((256, 2124))\n for n, psf in enumerate(psfs):\n frame[:, n:n + 76] += psf\n return frame[:, 38:-38]\n\n\ndef psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):\n \"\"\"\n Generate a lightcurve for a (76, 76) psf of a given wavelength\n\n Parameters\n ----------\n psf: sequencs\n The flux-scaled psf for the given wavelength\n ld_coeffs: sequence\n The limb darkening coefficients to use\n rp: float\n The planet radius\n time: sequence\n The time axis for the TSO\n tmodel: batman.transitmodel.TransitModel\n The transit model of the planet\n plot: bool\n Plot the lightcurve\n\n Returns\n -------\n sequence\n A 1D array of the lightcurve with the same length as *t*\n\n Example 1\n ---------\n # No planet\n import numpy as np\n from awesimsoss.make_trace import psf_lightcurve\n psf = np.ones((76, 76))\n time = np.linspace(-0.2, 0.2, 200)\n lc = psf_lightcurve(psf, None, None, time, None, plot=True)\n\n Example 2\n ---------\n # With a planet\n import batman\n import numpy as np\n import 
astropy.units as q\n from awesimsoss.make_trace import psf_lightcurve\n params = batman.TransitParams()\n params.t0 = 0. # time of inferior conjunction\n params.per = 5.7214742 # orbital period (days)\n params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)\n params.inc = 89.8 # orbital inclination (in degrees)\n params.ecc = 0. # eccentricity\n params.w = 90. # longitude of periastron (in degrees)\n params.teff = 3500 # effective temperature of the host star\n params.logg = 5 # log surface gravity of the host star\n params.feh = 0 # metallicity of the host star\n params.limb_dark = 'quadratic' # limb darkening profile to use\n params.u = [1, 1] # limb darkening coefficients\n tmodel = batman.TransitModel(params, time)\n lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)\n \"\"\"\n flux = np.tile(psf, (len(time), 1, 1))\n if ld_coeffs is not None and rp is not None and str(type(tmodel)\n ) == \"<class 'batman.transitmodel.TransitModel'>\":\n tmodel.u = ld_coeffs\n tmodel.rp = rp\n lightcurve = tmodel.light_curve(tmodel)\n flux *= lightcurve[:, None, None]\n return flux\n\n\ndef psf_tilts(order):\n \"\"\"\n Get the psf tilts for the given order\n\n Parameters\n ----------\n order: int\n The order to use, [1, 2]\n\n Returns\n -------\n np.ndarray\n The angle from the vertical of the psf in each of the 2048 columns\n \"\"\"\n if order not in [1, 2]:\n raise ValueError('Only orders 1 and 2 are supported.')\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n if not os.path.exists(psf_file):\n calculate_psf_tilts()\n return np.load(psf_file)\n\n\ndef put_psf_on_subarray(psf, y, frame_height=256):\n \"\"\"Make a 2D SOSS trace from a sequence of psfs and trace center locations\n\n Parameters\n ----------\n psf: sequence\n The 2D psf\n y: float\n The grid y value to place the center of the psf\n grid: sequence\n The [x, y] grid ranges\n\n Returns\n -------\n 
np.ndarray\n The 2D frame with the interpolated psf\n \"\"\"\n dim = psf.shape[0]\n mid = (dim - 1.0) / 2.0\n arr = np.arange(dim, dtype=np.float)\n spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)\n yg, xg = np.indices((frame_height, dim), dtype=np.float64)\n yg += mid - y\n frame = spline.ev(xg, yg)\n extrapol = (xg < -0.5) | (xg >= dim - 0.5) | ((yg < -0.5) | (yg >= dim -\n 0.5))\n frame[extrapol] = 0\n return frame\n\n\ndef SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False\n ):\n \"\"\"\n Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a\n 76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs\n are scaled to unity and rotated to reproduce the trace tilt at each\n wavelength then placed on the desired subarray.\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n order: int\n The trace order\n subarray: str\n The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']\n generate: bool\n Generate a new cube\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n if generate:\n print('Coffee time! 
This takes about 5 minutes.')\n wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if\n filt == 'CLEAR' else 1]\n coeffs = trace_polynomials(subarray)\n psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)\n psf_file = resource_filename('awesimsoss', psf_path)\n cube = fits.getdata(psf_file).swapaxes(-1, -2)\n wave = fits.getdata(psf_file, ext=1)\n psfs = interp1d(wave, cube, axis=0, kind=3)\n trace_cols = np.arange(2048)\n for n, wavelength in enumerate(wavelengths):\n trace_centers = np.polyval(coeffs[n], trace_cols)\n if n == 1 and filt.lower() == 'f277w' or n == 2:\n pass\n else:\n print('Calculating order {} SOSS psfs for {} filter...'.\n format(n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(get_SOSS_psf, filt=filt, psfs=psfs)\n raw_psfs = np.array(pool.map(func, wavelength))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n angles = psf_tilts(order)\n print('Rotating order {} SOSS psfs for {} filter...'.format\n (n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(rotate, reshape=False)\n rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs,\n angles)))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start))\n rotated_psfs = np.abs(rotated_psfs)\n scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]\n rotated_psfs = rotated_psfs / scale\n chunks = rotated_psfs.reshape(4, 512, 76, 76)\n for N, chunk in enumerate(chunks):\n idx0 = N * 512\n idx1 = idx0 + 512\n centers = trace_centers[idx0:idx1]\n print(\n 'Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'\n .format(N + 1, n + 1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n data = zip(chunk, centers)\n subarray_psfs = pool.starmap(put_psf_on_subarray, data)\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time() - start)\n )\n filename 
= 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt,\n n + 1, N + 1)\n file = resource_filename('awesimsoss', filename)\n if os.path.isfile(file):\n os.system('rm {}'.format(file))\n np.save(file, np.array(subarray_psfs))\n print('Data saved to', file)\n else:\n full_data = []\n for chunk in [1, 2, 3, 4]:\n path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk\n )\n file = resource_filename('awesimsoss', path)\n full_data.append(np.load(file))\n return np.concatenate(full_data, axis=0)\n",
"step-5": "\"\"\"\nA module to generate simulated 2D time-series SOSS data\n\nAuthors: Joe Filippazzo\n\"\"\"\n\nimport os\nfrom pkg_resources import resource_filename\nimport multiprocessing\nimport time\nfrom functools import partial\nimport warnings\n\nimport numpy as np\nfrom astropy.io import fits\nfrom bokeh.plotting import figure, show\nfrom hotsoss import utils\nfrom svo_filters import svo\nfrom scipy.interpolate import interp1d\nfrom scipy.ndimage.interpolation import rotate\nfrom scipy.interpolate import interp2d, RectBivariateSpline\n\ntry:\n import webbpsf\nexcept ImportError:\n print(\"Could not import `webbpsf` package. Functionality limited.\")\n\nwarnings.simplefilter('ignore')\n\n\ndef calculate_psf_tilts():\n \"\"\"\n Calculate the tilt of the psf at the center of each column\n using all binned pixels in the given wavelength calibration file\n for both orders and save to file\n \"\"\"\n for order in [1, 2]:\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n # Dimensions\n subarray = 'SUBSTRIP256'\n X = range(2048)\n Y = range(256)\n\n # Get the wave map\n wave_map = utils.wave_solutions(subarray, order).astype(float)\n\n # Get the y-coordinate of the trace polynomial in this column\n # (center of the trace)\n coeffs = trace_polynomials(subarray=subarray, order=order)\n trace = np.polyval(coeffs, X)\n\n # Interpolate to get the wavelength value at the center\n wave = interp2d(X, Y, wave_map)\n\n # Get the wavelength of the trace center in each column\n trace_wave = []\n for x, y in zip(X, trace):\n trace_wave.append(wave(x, y)[0])\n\n # For each column wavelength (defined by the wavelength at\n # the trace center) define an isowavelength contour\n angles = []\n for n, x in enumerate(X):\n\n w = trace_wave[x]\n\n # Edge cases\n try:\n w0 = trace_wave[x-1]\n except IndexError:\n w0 = 0\n\n try:\n w1 = trace_wave[x+1]\n except IndexError:\n w1 = 10\n\n # Define the width 
of the wavelength bin as half-way\n # between neighboring points\n dw0 = np.mean([w0, w])\n dw1 = np.mean([w1, w])\n\n # Get the coordinates of all the pixels in that range\n yy, xx = np.where(np.logical_and(wave_map >= dw0, wave_map < dw1))\n\n # Find the angle between the vertical and the tilted wavelength bin\n if len(xx) >= 1:\n angle = get_angle([xx[-1], yy[-1]], [x, trace[x]])\n else:\n angle = 0\n\n # Don't flip them upside down\n angle = angle % 180\n\n # Add to the array\n angles.append(angle)\n\n # Save the file\n np.save(psf_file, np.array(angles))\n print('Angles saved to', psf_file)\n\n\ndef nuke_psfs(tilts=True, raw=True, final=True):\n \"\"\"Generate all the psf cubes from scratch\"\"\"\n # Calculate the psf tilts\n if tilts:\n calculate_psf_tilts()\n\n for filt in ['CLEAR', 'F277W']:\n\n # Calculate the raw psfs from WebbPSF\n if raw:\n generate_SOSS_psfs(filt)\n\n # Generate the rotated and interpolated psfs ready for trace assembly\n if final:\n SOSS_psf_cube(filt=filt, generate=True)\n\n\ndef generate_SOSS_ldcs(wavelengths, ld_profile, grid_point, model_grid='', subarray='SUBSTRIP256', n_bins=100, plot=False, save=''):\n \"\"\"\n Generate a lookup table of limb darkening coefficients for full\n SOSS wavelength range\n\n Parameters\n ----------\n wavelengths: sequence\n The wavelengths at which to calculate the LDCs\n ld_profile: str\n A limb darkening profile name supported by\n `ExoCTK.ldc.ldcfit.ld_profile()`\n grid_point: dict, sequence\n The stellar parameters [Teff, logg, FeH] or stellar model\n dictionary from `ExoCTK.modelgrid.ModelGrid.get()`\n n_bins: int\n The number of bins to break up the grism into\n save: str\n The path to save to file to\n\n Example\n -------\n from awesimsoss.sim2D import awesim\n lookup = awesim.soss_ldc('quadratic', [3300, 4.5, 0])\n \"\"\"\n try:\n from exoctk import modelgrid\n from exoctk.limb_darkening import limb_darkening_fit as lf\n except ImportError:\n return\n\n # Get the model grid\n if not 
isinstance(model_grid, modelgrid.ModelGrid):\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'], resolution=700)\n\n # Load the model grid\n model_grid = modelgrid.ModelGrid(os.environ['MODELGRID_DIR'], resolution=700, wave_rng=(0.6, 2.8))\n\n # Get the grid point\n if isinstance(grid_point, (list, tuple, np.ndarray)):\n grid_point = model_grid.get(*grid_point)\n\n # Abort if no stellar dict\n if not isinstance(grid_point, dict):\n print('Please provide the grid_point argument as [Teff, logg, FeH] or ExoCTK.modelgrid.ModelGrid.get(Teff, logg, FeH).')\n return\n\n # Break the bandpass up into n_bins pieces\n bandpass = svo.Filter('NIRISS.GR700XD', n_bins=n_bins, verbose=False)\n\n # Calculate the LDCs\n ldc_results = lf.ldc(None, None, None, model_grid, [ld_profile],\n bandpass=bandpass, grid_point=grid_point.copy(),\n mu_min=0.08, verbose=False)\n\n # Interpolate the LDCs to the desired wavelengths\n coeff_table = ldc_results[ld_profile]['coeffs']\n coeff_cols = [c for c in coeff_table.colnames if c.startswith('c')]\n coeffs = [np.interp(wavelengths, coeff_table['wavelength'], coeff_table[c]) for c in coeff_cols]\n\n return np.array(coeffs).T\n\n\ndef generate_SOSS_psfs(filt):\n \"\"\"\n Gnerate a cube of the psf at 100 wavelengths from the min to the max wavelength\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n \"\"\"\n # Get the file\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))\n\n # Get the NIRISS class from webbpsf and set the filter\n ns = webbpsf.NIRISS()\n ns.filter = filt\n ns.pupil_mask = 'GR700XD'\n\n # Get the min and max wavelengths\n wavelengths = utils.wave_solutions('SUBSTRIP256').flatten()\n wave_min = np.max([ns.SHORT_WAVELENGTH_MIN * 1E6, np.min(wavelengths[wavelengths > 0])])\n wave_max = np.min([ns.LONG_WAVELENGTH_MAX * 1E6, np.max(wavelengths[wavelengths > 0])])\n\n # webbpsf.calc_datacube can only handle 100 but that's sufficient\n W = np.linspace(wave_min, 
wave_max, 100)*1E-6\n\n # Calculate the psfs\n print(\"Generating SOSS psfs. This takes about 8 minutes...\")\n start = time.time()\n PSF = ns.calc_datacube(W, oversample=1)[0].data\n print(\"Finished in\", time.time()-start)\n\n # Make the HDUList\n psfhdu = fits.PrimaryHDU(data=PSF)\n wavhdu = fits.ImageHDU(data=W*1E6, name='WAV')\n hdulist = fits.HDUList([psfhdu, wavhdu])\n\n # Write the file\n hdulist.writeto(file, overwrite=True)\n hdulist.close()\n\n\ndef get_angle(pf, p0=np.array([0, 0]), pi=None):\n \"\"\"Compute angle (in degrees) for pf-p0-pi corner\n\n Parameters\n ----------\n pf: sequence\n The coordinates of a point on the rotated vector\n p0: sequence\n The coordinates of the pivot\n pi: sequence\n The coordinates of the fixed vector\n\n Returns\n -------\n float\n The angle in degrees\n \"\"\"\n if pi is None:\n pi = p0 + np.array([0, 1])\n v0 = np.array(pf) - np.array(p0)\n v1 = np.array(pi) - np.array(p0)\n\n angle = np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1))\n angle = np.degrees(angle)\n\n return angle\n\n\ndef get_SOSS_psf(wavelength, filt='CLEAR', psfs=None, cutoff=0.005, plot=False):\n \"\"\"\n Retrieve the SOSS psf for the given wavelength,\n scale the total flux to 1, and set pixels below\n cutoff value to zero\n\n Parameters\n ----------\n wavelength: float\n The wavelength to retrieve [um]\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n psfs: numpy.interp1d object (optional)\n The interpolator\n plot: bool\n Plot the psf\n\n Returns\n -------\n np.ndarray\n The 2D psf for the input wavelength\n \"\"\"\n if psfs is None:\n\n # Get the file\n file = resource_filename('awesimsoss', 'files/SOSS_{}_PSF.fits'.format(filt))\n\n # Load the SOSS psf cube\n cube = fits.getdata(file).swapaxes(-1, -2)\n wave = fits.getdata(file, ext=1)\n\n # Initilize interpolator\n psfs = interp1d(wave, cube, axis=0, kind=3)\n\n # Check the wavelength\n if wavelength < psfs.x[0]:\n wavelength = psfs.x[0]\n\n if wavelength > psfs.x[-1]:\n wavelength 
= psfs.x[-1]\n\n # Interpolate and scale psf\n psf = psfs(wavelength)\n psf *= 1./np.sum(psf)\n\n # Remove background\n # psf[psf < cutoff] = 0\n\n if plot:\n\n fig = figure()\n fig.image([psf], x=0, y=0, dw=psf.shape[0], dh=psf.shape[1])\n show(fig)\n\n else:\n return psf\n\n\ndef make_frame(psfs):\n \"\"\"\n Generate a frame from an array of psfs\n\n Parameters\n ----------\n psfs: sequence\n An array of psfs of shape (2048, 76, 76)\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n # Empty frame\n frame = np.zeros((256, 2124))\n\n # Add each psf\n for n, psf in enumerate(psfs):\n frame[:, n:n+76] += psf\n\n return frame[:, 38:-38]\n\n\ndef psf_lightcurve(psf, ld_coeffs, rp, time, tmodel, plot=False):\n \"\"\"\n Generate a lightcurve for a (76, 76) psf of a given wavelength\n\n Parameters\n ----------\n psf: sequencs\n The flux-scaled psf for the given wavelength\n ld_coeffs: sequence\n The limb darkening coefficients to use\n rp: float\n The planet radius\n time: sequence\n The time axis for the TSO\n tmodel: batman.transitmodel.TransitModel\n The transit model of the planet\n plot: bool\n Plot the lightcurve\n\n Returns\n -------\n sequence\n A 1D array of the lightcurve with the same length as *t*\n\n Example 1\n ---------\n # No planet\n import numpy as np\n from awesimsoss.make_trace import psf_lightcurve\n psf = np.ones((76, 76))\n time = np.linspace(-0.2, 0.2, 200)\n lc = psf_lightcurve(psf, None, None, time, None, plot=True)\n\n Example 2\n ---------\n # With a planet\n import batman\n import numpy as np\n import astropy.units as q\n from awesimsoss.make_trace import psf_lightcurve\n params = batman.TransitParams()\n params.t0 = 0. # time of inferior conjunction\n params.per = 5.7214742 # orbital period (days)\n params.a = 0.0558*q.AU.to(q.R_sun)*0.66 # semi-major axis (in units of stellar radii)\n params.inc = 89.8 # orbital inclination (in degrees)\n params.ecc = 0. # eccentricity\n params.w = 90. 
# longitude of periastron (in degrees)\n params.teff = 3500 # effective temperature of the host star\n params.logg = 5 # log surface gravity of the host star\n params.feh = 0 # metallicity of the host star\n params.limb_dark = 'quadratic' # limb darkening profile to use\n params.u = [1, 1] # limb darkening coefficients\n tmodel = batman.TransitModel(params, time)\n lc = psf_lightcurve(psf, [0.1, 0.1], 0.05, time, tmodel, plot=True)\n \"\"\"\n # Expand to shape of time axis\n flux = np.tile(psf, (len(time), 1, 1))\n\n # If there is a transiting planet...\n if ld_coeffs is not None and rp is not None and str(type(tmodel)) == \"<class 'batman.transitmodel.TransitModel'>\":\n\n # Set the wavelength dependent orbital parameters\n tmodel.u = ld_coeffs\n tmodel.rp = rp\n\n # Generate the light curve for this pixel\n lightcurve = tmodel.light_curve(tmodel)\n\n # Scale the flux with the lightcurve\n flux *= lightcurve[:, None, None]\n\n return flux\n\n\ndef psf_tilts(order):\n \"\"\"\n Get the psf tilts for the given order\n\n Parameters\n ----------\n order: int\n The order to use, [1, 2]\n\n Returns\n -------\n np.ndarray\n The angle from the vertical of the psf in each of the 2048 columns\n \"\"\"\n if order not in [1, 2]:\n raise ValueError('Only orders 1 and 2 are supported.')\n\n # Get the file\n path = 'files/SOSS_PSF_tilt_order{}.npy'.format(order)\n psf_file = resource_filename('awesimsoss', path)\n\n if not os.path.exists(psf_file):\n calculate_psf_tilts()\n\n return np.load(psf_file)\n\n\ndef put_psf_on_subarray(psf, y, frame_height=256):\n \"\"\"Make a 2D SOSS trace from a sequence of psfs and trace center locations\n\n Parameters\n ----------\n psf: sequence\n The 2D psf\n y: float\n The grid y value to place the center of the psf\n grid: sequence\n The [x, y] grid ranges\n\n Returns\n -------\n np.ndarray\n The 2D frame with the interpolated psf\n \"\"\"\n # Create spline generator\n dim = psf.shape[0]\n mid = (dim - 1.0) / 2.0\n arr = np.arange(dim, 
dtype=np.float)\n spline = RectBivariateSpline(arr, arr, psf.T, kx=3, ky=3, s=0)\n\n # Create output frame, shifted as necessary\n yg, xg = np.indices((frame_height, dim), dtype=np.float64)\n yg += mid-y\n\n # Resample onto the subarray\n frame = spline.ev(xg, yg)\n\n # Fill resampled points with zeros\n extrapol = (((xg < -0.5) | (xg >= dim - 0.5)) | ((yg < -0.5) | (yg >= dim - 0.5)))\n frame[extrapol] = 0\n\n return frame\n\n\ndef SOSS_psf_cube(filt='CLEAR', order=1, subarray='SUBSTRIP256', generate=False):\n \"\"\"\n Generate/retrieve a data cube of shape (3, 2048, 76, 76) which is a\n 76x76 pixel psf for 2048 wavelengths for each trace order. The PSFs\n are scaled to unity and rotated to reproduce the trace tilt at each\n wavelength then placed on the desired subarray.\n\n Parameters\n ----------\n filt: str\n The filter to use, ['CLEAR', 'F277W']\n order: int\n The trace order\n subarray: str\n The subarray to use, ['SUBSTRIP96', 'SUBSTRIP256', 'FULL']\n generate: bool\n Generate a new cube\n\n Returns\n -------\n np.ndarray\n An array of the SOSS psf at 2048 wavelengths for each order\n \"\"\"\n if generate:\n\n print('Coffee time! 
This takes about 5 minutes.')\n\n # Get the wavelengths\n wavelengths = np.mean(utils.wave_solutions(subarray), axis=1)[:2 if filt == 'CLEAR' else 1]\n coeffs = trace_polynomials(subarray)\n\n # Get the file\n psf_path = 'files/SOSS_{}_PSF.fits'.format(filt)\n psf_file = resource_filename('awesimsoss', psf_path)\n\n # Load the SOSS psf cube\n cube = fits.getdata(psf_file).swapaxes(-1, -2)\n wave = fits.getdata(psf_file, ext=1)\n\n # Initilize interpolator\n psfs = interp1d(wave, cube, axis=0, kind=3)\n trace_cols = np.arange(2048)\n\n # Run datacube\n for n, wavelength in enumerate(wavelengths):\n\n # Evaluate the trace polynomial in each column to get the y-position of the trace center\n trace_centers = np.polyval(coeffs[n], trace_cols)\n\n # Don't calculate order2 for F277W or order 3 for either\n if (n == 1 and filt.lower() == 'f277w') or n == 2:\n pass\n\n else:\n\n # Get the psf for each column\n print('Calculating order {} SOSS psfs for {} filter...'.format(n+1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(get_SOSS_psf, filt=filt, psfs=psfs)\n raw_psfs = np.array(pool.map(func, wavelength))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time()-start))\n\n # Get the PSF tilt at each column\n angles = psf_tilts(order)\n\n # Rotate the psfs\n print('Rotating order {} SOSS psfs for {} filter...'.format(n+1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n func = partial(rotate, reshape=False)\n rotated_psfs = np.array(pool.starmap(func, zip(raw_psfs, angles)))\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time()-start))\n\n # Scale psfs to 1\n rotated_psfs = np.abs(rotated_psfs)\n scale = np.nansum(rotated_psfs, axis=(1, 2))[:, None, None]\n rotated_psfs = rotated_psfs/scale\n\n # Split it into 4 chunks to be below Github file size limit\n chunks = rotated_psfs.reshape(4, 512, 76, 76)\n for N, chunk in enumerate(chunks):\n\n idx0 = 
N*512\n idx1 = idx0+512\n centers = trace_centers[idx0:idx1]\n\n # Interpolate the psfs onto the subarray\n print('Interpolating chunk {}/4 for order {} SOSS psfs for {} filter onto subarray...'.format(N+1, n+1, filt))\n start = time.time()\n pool = multiprocessing.Pool(8)\n data = zip(chunk, centers)\n subarray_psfs = pool.starmap(put_psf_on_subarray, data)\n pool.close()\n pool.join()\n del pool\n print('Finished in {} seconds.'.format(time.time()-start))\n\n # Get the filepath\n filename = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, n+1, N+1)\n file = resource_filename('awesimsoss', filename)\n\n # Delete the file if it exists\n if os.path.isfile(file):\n os.system('rm {}'.format(file))\n\n # Write the data\n np.save(file, np.array(subarray_psfs))\n\n print('Data saved to', file)\n\n else:\n\n # Get the chunked data and concatenate\n full_data = []\n for chunk in [1, 2, 3, 4]:\n path = 'files/SOSS_{}_PSF_order{}_{}.npy'.format(filt, order, chunk)\n file = resource_filename('awesimsoss', path)\n full_data.append(np.load(file))\n\n return np.concatenate(full_data, axis=0)\n",
"step-ids": [
7,
10,
11,
13,
14
]
}
|
[
7,
10,
11,
13,
14
] |
<|reserved_special_token_0|>
class Heiyan2Spider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Heiyan2Spider(scrapy.Spider):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def parse(self, response):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Heiyan2Spider(scrapy.Spider):
name = 'heiyan2'
allowed_domains = ['heiyan.com']
start_urls = ['http://heiyan.com/']
def parse(self, response):
pass
<|reserved_special_token_1|>
import scrapy
class Heiyan2Spider(scrapy.Spider):
name = 'heiyan2'
allowed_domains = ['heiyan.com']
start_urls = ['http://heiyan.com/']
def parse(self, response):
pass
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import scrapy
class Heiyan2Spider(scrapy.Spider):
name = 'heiyan2'
allowed_domains = ['heiyan.com']
start_urls = ['http://heiyan.com/']
def parse(self, response):
pass
|
flexible
|
{
"blob_id": "d13c6d71bb871496b0c6ad2451a2f561484e7c68",
"index": 9634,
"step-1": "<mask token>\n\n\nclass Heiyan2Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Heiyan2Spider(scrapy.Spider):\n <mask token>\n <mask token>\n <mask token>\n\n def parse(self, response):\n pass\n",
"step-3": "<mask token>\n\n\nclass Heiyan2Spider(scrapy.Spider):\n name = 'heiyan2'\n allowed_domains = ['heiyan.com']\n start_urls = ['http://heiyan.com/']\n\n def parse(self, response):\n pass\n",
"step-4": "import scrapy\n\n\nclass Heiyan2Spider(scrapy.Spider):\n name = 'heiyan2'\n allowed_domains = ['heiyan.com']\n start_urls = ['http://heiyan.com/']\n\n def parse(self, response):\n pass\n",
"step-5": "# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass Heiyan2Spider(scrapy.Spider):\n name = 'heiyan2'\n allowed_domains = ['heiyan.com']\n start_urls = ['http://heiyan.com/']\n\n def parse(self, response):\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class GenerateMachineConfig(unittest.TestCase):
def setUp(self):
self.machine_configs = []
for machine_config_path in glob.glob(
f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'
):
with open(machine_config_path) as f:
self.machine_configs.append(yaml.load(f, Loader=yaml.
FullLoader))
def test_kernel_args(self):
"""Assert there are machine configs configuring the kernel args for masters and workers"""
for machine_config in self.machine_configs:
kernel_args = machine_config['spec']['kernelArguments']
self.assertIn('ip=dhcp,dhcp6', kernel_args)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GenerateMachineConfig(unittest.TestCase):
def setUp(self):
self.machine_configs = []
for machine_config_path in glob.glob(
f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'
):
with open(machine_config_path) as f:
self.machine_configs.append(yaml.load(f, Loader=yaml.
FullLoader))
def test_kernel_args(self):
"""Assert there are machine configs configuring the kernel args for masters and workers"""
for machine_config in self.machine_configs:
kernel_args = machine_config['spec']['kernelArguments']
self.assertIn('ip=dhcp,dhcp6', kernel_args)
if __name__ == '__main__':
ASSETS_DIR = sys.argv.pop()
with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False, verbosity=2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ASSETS_DIR = ''
class GenerateMachineConfig(unittest.TestCase):
def setUp(self):
self.machine_configs = []
for machine_config_path in glob.glob(
f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'
):
with open(machine_config_path) as f:
self.machine_configs.append(yaml.load(f, Loader=yaml.
FullLoader))
def test_kernel_args(self):
"""Assert there are machine configs configuring the kernel args for masters and workers"""
for machine_config in self.machine_configs:
kernel_args = machine_config['spec']['kernelArguments']
self.assertIn('ip=dhcp,dhcp6', kernel_args)
if __name__ == '__main__':
ASSETS_DIR = sys.argv.pop()
with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False, verbosity=2)
<|reserved_special_token_1|>
import unittest
import xmlrunner
import os
import sys
import glob
import yaml
ASSETS_DIR = ''
class GenerateMachineConfig(unittest.TestCase):
def setUp(self):
self.machine_configs = []
for machine_config_path in glob.glob(
f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'
):
with open(machine_config_path) as f:
self.machine_configs.append(yaml.load(f, Loader=yaml.
FullLoader))
def test_kernel_args(self):
"""Assert there are machine configs configuring the kernel args for masters and workers"""
for machine_config in self.machine_configs:
kernel_args = machine_config['spec']['kernelArguments']
self.assertIn('ip=dhcp,dhcp6', kernel_args)
if __name__ == '__main__':
ASSETS_DIR = sys.argv.pop()
with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False, verbosity=2)
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import xmlrunner
import os
import sys
import glob
import yaml
ASSETS_DIR = ""
class GenerateMachineConfig(unittest.TestCase):
def setUp(self):
self.machine_configs = []
for machine_config_path in glob.glob(
f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'
):
with open(machine_config_path) as f:
self.machine_configs.append(yaml.load(f, Loader=yaml.FullLoader))
def test_kernel_args(self):
"""Assert there are machine configs configuring the kernel args for masters and workers"""
for machine_config in self.machine_configs:
kernel_args = machine_config["spec"]["kernelArguments"]
self.assertIn("ip=dhcp,dhcp6", kernel_args)
if __name__ == '__main__':
ASSETS_DIR = sys.argv.pop()
with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output), failfast=False, buffer=False, catchbreak=False, verbosity=2)
|
flexible
|
{
"blob_id": "f0c082968e26d414b0dbb679d4e5077056e99979",
"index": 8653,
"step-1": "<mask token>\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False, buffer=False, catchbreak=False, verbosity=2)\n",
"step-3": "<mask token>\nASSETS_DIR = ''\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False, buffer=False, catchbreak=False, verbosity=2)\n",
"step-4": "import unittest\nimport xmlrunner\nimport os\nimport sys\nimport glob\nimport yaml\nASSETS_DIR = ''\n\n\nclass GenerateMachineConfig(unittest.TestCase):\n\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.\n FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config['spec']['kernelArguments']\n self.assertIn('ip=dhcp,dhcp6', kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output),\n failfast=False, buffer=False, catchbreak=False, verbosity=2)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport xmlrunner\n\nimport os\nimport sys\nimport glob\nimport yaml\n\nASSETS_DIR = \"\"\n\nclass GenerateMachineConfig(unittest.TestCase):\n def setUp(self):\n self.machine_configs = []\n for machine_config_path in glob.glob(\n f'{ASSETS_DIR}/openshift/99_openshift-machineconfig_99-dual-stack-*.yaml'\n ):\n with open(machine_config_path) as f:\n self.machine_configs.append(yaml.load(f, Loader=yaml.FullLoader))\n\n def test_kernel_args(self):\n \"\"\"Assert there are machine configs configuring the kernel args for masters and workers\"\"\"\n for machine_config in self.machine_configs:\n kernel_args = machine_config[\"spec\"][\"kernelArguments\"]\n self.assertIn(\"ip=dhcp,dhcp6\", kernel_args)\n\n\nif __name__ == '__main__':\n ASSETS_DIR = sys.argv.pop()\n with open(os.environ.get('JUNIT_FILE', '/dev/null'), 'wb') as output:\n unittest.main(testRunner=xmlrunner.XMLTestRunner(output=output), failfast=False, buffer=False, catchbreak=False, verbosity=2)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TooManyArgsStrategy(CommandStrategy):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TooManyArgsStrategy(CommandStrategy):
def apply(self):
print('Too many arguments.')
<|reserved_special_token_1|>
from guet.commands.strategies.strategy import CommandStrategy
class TooManyArgsStrategy(CommandStrategy):
    """Command strategy whose apply() prints a 'Too many arguments.' error.

    Presumably selected by the command dispatcher when a command is invoked
    with surplus arguments — TODO confirm against the dispatch site.
    """
    def apply(self):
        """Report the error to the user on stdout; no state is read or written."""
        print('Too many arguments.')
|
flexible
|
{
"blob_id": "afd72ce2d9598f92937f3038eb0ef49b740b9977",
"index": 6846,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TooManyArgsStrategy(CommandStrategy):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TooManyArgsStrategy(CommandStrategy):\n\n def apply(self):\n print('Too many arguments.')\n",
"step-4": "from guet.commands.strategies.strategy import CommandStrategy\n\n\nclass TooManyArgsStrategy(CommandStrategy):\n\n def apply(self):\n print('Too many arguments.')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def remove_zeros(given_list):
list_without_zero = []
for element in given_list:
if element != 0:
list_without_zero.append(element)
return list_without_zero
def sort_desc(given_list):
return sorted(given_list, key=None, reverse=True)
def length_check(n, given_list):
return n > len(given_list)
<|reserved_special_token_0|>
def test_substract_one_for_n_elements():
assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]
assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8,
7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6,
4, 4, 2]
assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]
assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]
assert substract_one_for_n_elements(1, [1]) == [0]
def test_hh():
assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False
assert hh([4, 2, 0, 1, 5, 0]) is False
assert hh([3, 1, 2, 3, 1, 0]) is True
assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,
0, 3, 16]) is True
assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,
4, 7, 12]) is True
assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,
12, 3]) is False
assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,
5, 1]) is False
assert hh([2, 2, 0]) is False
assert hh([3, 2, 1]) is False
assert hh([1, 1]) is True
assert hh([1]) is False
assert hh([]) is True
<|reserved_special_token_1|>
def remove_zeros(given_list):
list_without_zero = []
for element in given_list:
if element != 0:
list_without_zero.append(element)
return list_without_zero
def sort_desc(given_list):
return sorted(given_list, key=None, reverse=True)
def length_check(n, given_list):
return n > len(given_list)
<|reserved_special_token_0|>
def test_remove_zeros():
assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2,
7, 2, 5]
assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]
assert remove_zeros([1, 2, 3]) == [1, 2, 3]
assert remove_zeros([0, 0, 0]) == []
assert remove_zeros([]) == []
<|reserved_special_token_0|>
def test_length_check():
assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False
assert length_check(5, [5, 5, 5, 5, 5]) is False
assert length_check(5, [5, 5, 5, 5]) is True
assert length_check(3, [1, 1]) is True
assert length_check(1, []) is True
assert length_check(0, []) is False
def test_substract_one_for_n_elements():
assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]
assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8,
7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6,
4, 4, 2]
assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]
assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]
assert substract_one_for_n_elements(1, [1]) == [0]
def test_hh():
assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False
assert hh([4, 2, 0, 1, 5, 0]) is False
assert hh([3, 1, 2, 3, 1, 0]) is True
assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,
0, 3, 16]) is True
assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,
4, 7, 12]) is True
assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,
12, 3]) is False
assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,
5, 1]) is False
assert hh([2, 2, 0]) is False
assert hh([3, 2, 1]) is False
assert hh([1, 1]) is True
assert hh([1]) is False
assert hh([]) is True
<|reserved_special_token_1|>
def remove_zeros(given_list):
list_without_zero = []
for element in given_list:
if element != 0:
list_without_zero.append(element)
return list_without_zero
def sort_desc(given_list):
return sorted(given_list, key=None, reverse=True)
def length_check(n, given_list):
return n > len(given_list)
<|reserved_special_token_0|>
def hh(given_list):
if given_list == []:
return True
else:
while given_list != []:
given_list = remove_zeros(given_list)
if given_list == []:
return True
break
else:
given_list = sort_desc(given_list)
n = given_list.pop(0)
if length_check(n, given_list):
return False
break
else:
given_list = substract_one_for_n_elements(n, given_list)
def test_remove_zeros():
assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2,
7, 2, 5]
assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]
assert remove_zeros([1, 2, 3]) == [1, 2, 3]
assert remove_zeros([0, 0, 0]) == []
assert remove_zeros([]) == []
<|reserved_special_token_0|>
def test_length_check():
assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False
assert length_check(5, [5, 5, 5, 5, 5]) is False
assert length_check(5, [5, 5, 5, 5]) is True
assert length_check(3, [1, 1]) is True
assert length_check(1, []) is True
assert length_check(0, []) is False
def test_substract_one_for_n_elements():
assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]
assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8,
7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6,
4, 4, 2]
assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]
assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]
assert substract_one_for_n_elements(1, [1]) == [0]
def test_hh():
assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False
assert hh([4, 2, 0, 1, 5, 0]) is False
assert hh([3, 1, 2, 3, 1, 0]) is True
assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,
0, 3, 16]) is True
assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,
4, 7, 12]) is True
assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,
12, 3]) is False
assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,
5, 1]) is False
assert hh([2, 2, 0]) is False
assert hh([3, 2, 1]) is False
assert hh([1, 1]) is True
assert hh([1]) is False
assert hh([]) is True
<|reserved_special_token_1|>
def remove_zeros(given_list):
list_without_zero = []
for element in given_list:
if element != 0:
list_without_zero.append(element)
return list_without_zero
def sort_desc(given_list):
return sorted(given_list, key=None, reverse=True)
def length_check(n, given_list):
return n > len(given_list)
def substract_one_for_n_elements(n, given_list):
minus_one_list = given_list[:]
for i in range(0, n):
minus_one_list[i] -= 1
return minus_one_list
def hh(given_list):
if given_list == []:
return True
else:
while given_list != []:
given_list = remove_zeros(given_list)
if given_list == []:
return True
break
else:
given_list = sort_desc(given_list)
n = given_list.pop(0)
if length_check(n, given_list):
return False
break
else:
given_list = substract_one_for_n_elements(n, given_list)
def test_remove_zeros():
assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2,
7, 2, 5]
assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]
assert remove_zeros([1, 2, 3]) == [1, 2, 3]
assert remove_zeros([0, 0, 0]) == []
assert remove_zeros([]) == []
def test_sort_desc():
assert sort_desc([5, 1, 3, 4, 2]) == [5, 4, 3, 2, 1]
assert sort_desc([0, 0, 0, 4, 0]) == [4, 0, 0, 0, 0]
assert sort_desc([1]) == [1]
assert sort_desc([]) == []
def test_length_check():
assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False
assert length_check(5, [5, 5, 5, 5, 5]) is False
assert length_check(5, [5, 5, 5, 5]) is True
assert length_check(3, [1, 1]) is True
assert length_check(1, []) is True
assert length_check(0, []) is False
def test_substract_one_for_n_elements():
assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]
assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8,
7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6,
4, 4, 2]
assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]
assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]
assert substract_one_for_n_elements(1, [1]) == [0]
def test_hh():
assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False
assert hh([4, 2, 0, 1, 5, 0]) is False
assert hh([3, 1, 2, 3, 1, 0]) is True
assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,
0, 3, 16]) is True
assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,
4, 7, 12]) is True
assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,
12, 3]) is False
assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,
5, 1]) is False
assert hh([2, 2, 0]) is False
assert hh([3, 2, 1]) is False
assert hh([1, 1]) is True
assert hh([1]) is False
assert hh([]) is True
<|reserved_special_token_1|>
# Remove all zeros from a list.
def remove_zeros(given_list):
    """Return a copy of given_list with every zero filtered out, order kept."""
    # Comprehension replaces the original manual append loop (same behavior).
    return [value for value in given_list if value != 0]
# Sort the list from largest to smallest.
def sort_desc(given_list):
    """Return a new list with the elements of given_list in descending order.

    sorted() copies, so the input list is left untouched.  The original
    carried a commented-out hand-rolled selection sort and a redundant
    key=None argument; both are removed here.
    """
    return sorted(given_list, reverse=True)
# True exactly when fewer than n entries remain in the list.
def length_check(n, given_list):
    """Return True if given_list holds fewer than n elements."""
    return len(given_list) < n
# Subtract one from each of the first n entries of the list.
def substract_one_for_n_elements(n, given_list):
    """Return a copy of given_list whose first n elements are decremented by 1.

    Elements past the first n are carried over unchanged.  Unlike the
    original index loop, the slicing form also tolerates n > len(given_list)
    instead of raising IndexError (callers in hh() always pass n <= len).
    """
    return [value - 1 for value in given_list[:n]] + given_list[n:]
# Havel-Hakimi algorithm.
# Returns True if the answers are consistent (i.e. it is possible that
# everyone is telling the truth) and False if they are inconsistent
# (i.e. someone must be lying).
def hh(given_list):
    """Decide whether given_list is a graphical degree sequence (Havel-Hakimi).

    Repeatedly: drop zeros, sort descending, remove the largest degree n,
    and subtract 1 from the next n degrees.  The sequence is graphical
    exactly when this process empties the list.  The original version had
    an unreachable `break` after each `return` and a redundant empty-list
    pre-check duplicating the loop's own termination test; both are
    removed here without changing behavior (hh([]) still returns True).
    """
    while True:
        # 1-2: discard satisfied nodes; an empty list means success.
        given_list = remove_zeros(given_list)
        if given_list == []:
            return True
        # 3-4: take the largest remaining claimed degree.
        given_list = sort_desc(given_list)
        n = given_list.pop(0)
        # 5: n neighbours required but fewer nodes remain -> impossible.
        if length_check(n, given_list):
            return False
        # 6-7: connect the removed node to the n highest-degree nodes.
        given_list = substract_one_for_n_elements(n, given_list)
# *****************************************
# tests
def test_remove_zeros():
    """remove_zeros drops all zeros while keeping the order of the rest."""
    cases = [
        ([5, 3, 0, 2, 6, 2, 0, 7, 2, 5], [5, 3, 2, 6, 2, 7, 2, 5]),
        ([4, 0, 0, 1, 3], [4, 1, 3]),
        ([1, 2, 3], [1, 2, 3]),
        ([0, 0, 0], []),
        ([], []),
    ]
    for given, expected in cases:
        assert remove_zeros(given) == expected
def test_sort_desc():
    """sort_desc orders elements high-to-low, including degenerate inputs."""
    cases = [
        ([5, 1, 3, 4, 2], [5, 4, 3, 2, 1]),
        ([0, 0, 0, 4, 0], [4, 0, 0, 0, 0]),
        ([1], [1]),
        ([], []),
    ]
    for given, expected in cases:
        assert sort_desc(given) == expected
def test_length_check():
    """length_check is True exactly when fewer than n elements remain."""
    cases = [
        ((7, [6, 5, 5, 3, 2, 2, 2]), False),
        ((5, [5, 5, 5, 5, 5]), False),
        ((5, [5, 5, 5, 5]), True),
        ((3, [1, 1]), True),
        ((1, []), True),
        ((0, []), False),
    ]
    for (n, given), expected in cases:
        assert length_check(n, given) is expected
def test_substract_one_for_n_elements():
    """substract_one_for_n_elements decrements only the first n entries."""
    cases = [
        ((4, [5, 4, 3, 2, 1]), [4, 3, 2, 1, 1]),
        ((11, [14, 13, 13, 13, 12, 10, 8, 8, 7, 7, 6, 6, 4, 4, 2]),
         [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, 4, 4, 2]),
        ((1, [10, 10, 10]), [9, 10, 10]),
        ((3, [10, 10, 10]), [9, 9, 9]),
        ((1, [1]), [0]),
    ]
    for (n, given), expected in cases:
        assert substract_one_for_n_elements(n, given) == expected
def test_hh():
    """hh accepts graphical degree sequences and rejects impossible ones."""
    graphical = [
        [3, 1, 2, 3, 1, 0],
        [16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17, 0, 3, 16],
        [14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6, 4, 7, 12],
        [1, 1],
        [],
    ]
    not_graphical = [
        [5, 3, 0, 2, 6, 2, 0, 7, 2, 5],
        [4, 2, 0, 1, 5, 0],
        [15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7, 12, 3],
        [6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15, 5, 1],
        [2, 2, 0],
        [3, 2, 1],
        [1],
    ]
    for seq in graphical:
        assert hh(seq) is True
    for seq in not_graphical:
        assert hh(seq) is False
|
flexible
|
{
"blob_id": "0779e516e35c41acf0529961e11541dfd1320749",
"index": 6501,
"step-1": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\n<mask token>\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-2": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\n<mask token>\n\n\ndef test_remove_zeros():\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, \n 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\n<mask token>\n\n\ndef test_length_check():\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 
1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-3": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\n<mask token>\n\n\ndef hh(given_list):\n if given_list == []:\n return True\n else:\n while given_list != []:\n given_list = remove_zeros(given_list)\n if given_list == []:\n return True\n break\n else:\n given_list = sort_desc(given_list)\n n = given_list.pop(0)\n if length_check(n, given_list):\n return False\n break\n else:\n given_list = substract_one_for_n_elements(n, given_list)\n\n\ndef test_remove_zeros():\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, \n 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\n<mask token>\n\n\ndef test_length_check():\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 
12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-4": "def remove_zeros(given_list):\n list_without_zero = []\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n return list_without_zero\n\n\ndef sort_desc(given_list):\n return sorted(given_list, key=None, reverse=True)\n\n\ndef length_check(n, given_list):\n return n > len(given_list)\n\n\ndef substract_one_for_n_elements(n, given_list):\n minus_one_list = given_list[:]\n for i in range(0, n):\n minus_one_list[i] -= 1\n return minus_one_list\n\n\ndef hh(given_list):\n if given_list == []:\n return True\n else:\n while given_list != []:\n given_list = remove_zeros(given_list)\n if given_list == []:\n return True\n break\n else:\n given_list = sort_desc(given_list)\n n = given_list.pop(0)\n if length_check(n, given_list):\n return False\n break\n else:\n given_list = substract_one_for_n_elements(n, given_list)\n\n\ndef test_remove_zeros():\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, \n 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\ndef test_sort_desc():\n assert sort_desc([5, 1, 3, 4, 2]) == [5, 4, 3, 2, 1]\n assert sort_desc([0, 0, 0, 4, 0]) == [4, 0, 0, 0, 0]\n assert sort_desc([1]) == [1]\n assert sort_desc([]) == []\n\n\ndef test_length_check():\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, \n 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, \n 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n 
assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17,\n 0, 3, 16]) is True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6,\n 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7,\n 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15,\n 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-5": "# funkcja usuwająca zera z listy \n\ndef remove_zeros(given_list):\n\n list_without_zero = []\n\n for element in given_list:\n if element != 0:\n list_without_zero.append(element)\n\n return list_without_zero\n\n# funkcja sortująca listę\n\ndef sort_desc(given_list):\n\n # sorted_list = []\n \n # for i in range(0, len(given_list)):\n # for element in given_list:\n # if element == max(given_list):\n # sorted_list.append(element)\n # given_list.remove(element) \n\n return sorted(given_list, key=None, reverse=True)\n\n# funkcja sprawdzająca czy iilość elementów jest mniejsza od danej wartości\n# zwraca wartość logiczną danego wyrażenia\n\ndef length_check(n, given_list):\n\n return n > len(given_list)\n\n# funkcja odejmująca 1 od pierwszych n-elementów listy\n\ndef substract_one_for_n_elements(n, given_list):\n\n minus_one_list = given_list[:]\n\n for i in range(0, n):\n minus_one_list[i] -= 1\n\n return minus_one_list\n\n# wielki finał i kompletny algorytm Havel-Hakimi.\n# This algorithm will return true if the answers are consistent \n# (i.e. it's possible that everyone is telling the truth) \n# and false if the answers are inconsistent (i.e. 
someone must be lying)\n\ndef hh(given_list):\n\n if given_list == []:\n return True\n \n else:\n # 1\n while given_list != []:\n given_list = remove_zeros(given_list)\n # 2\n if given_list == []:\n return True\n break\n\n else:\n # 3\n given_list = sort_desc(given_list)\n # 4\n n = given_list.pop(0) \n # 5 \n if length_check(n, given_list):\n return False\n break\n # 6, 7\n else:\n given_list = substract_one_for_n_elements(n, given_list) \n\n# *****************************************\n# testy\n\n\ndef test_remove_zeros():\n\n assert remove_zeros([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) == [5, 3, 2, 6, 2, 7, 2, 5]\n assert remove_zeros([4, 0, 0, 1, 3]) == [4, 1, 3]\n assert remove_zeros([1, 2, 3]) == [1, 2, 3]\n assert remove_zeros([0, 0, 0]) == []\n assert remove_zeros([]) == []\n\n\ndef test_sort_desc():\n\n assert sort_desc([5, 1, 3, 4, 2]) == [5, 4, 3, 2, 1]\n assert sort_desc([0, 0, 0, 4, 0]) == [4, 0, 0, 0, 0]\n assert sort_desc([1]) == [1]\n assert sort_desc([]) == []\n\n\ndef test_length_check():\n\n assert length_check(7, [6, 5, 5, 3, 2, 2, 2]) is False\n assert length_check(5, [5, 5, 5, 5, 5]) is False\n assert length_check(5, [5, 5, 5, 5]) is True\n assert length_check(3, [1, 1]) is True\n assert length_check(1, []) is True\n assert length_check(0, []) is False\n\n\ndef test_substract_one_for_n_elements():\n\n assert substract_one_for_n_elements(4, [5, 4, 3, 2, 1]) == [4, 3, 2, 1, 1]\n assert substract_one_for_n_elements(11, [14, 13, 13, 13, 12, 10, 8, 8, 7, 7, 6, 6, 4, 4, 2]) == [13, 12, 12, 12, 11, 9, 7, 7, 6, 6, 5, 6, 4, 4, 2]\n assert substract_one_for_n_elements(1, [10, 10, 10]) == [9, 10, 10]\n assert substract_one_for_n_elements(3, [10, 10, 10]) == [9, 9, 9]\n assert substract_one_for_n_elements(1, [1]) == [0]\n\n\ndef test_hh():\n\n assert hh([5, 3, 0, 2, 6, 2, 0, 7, 2, 5]) is False\n assert hh([4, 2, 0, 1, 5, 0]) is False\n assert hh([3, 1, 2, 3, 1, 0]) is True\n assert hh([16, 9, 9, 15, 9, 7, 9, 11, 17, 11, 4, 9, 12, 14, 14, 12, 17, 0, 3, 16]) is 
True\n assert hh([14, 10, 17, 13, 4, 8, 6, 7, 13, 13, 17, 18, 8, 17, 2, 14, 6, 4, 7, 12]) is True\n assert hh([15, 18, 6, 13, 12, 4, 4, 14, 1, 6, 18, 2, 6, 16, 0, 9, 10, 7, 12, 3]) is False\n assert hh([6, 0, 10, 10, 10, 5, 8, 3, 0, 14, 16, 2, 13, 1, 2, 13, 6, 15, 5, 1]) is False\n assert hh([2, 2, 0]) is False\n assert hh([3, 2, 1]) is False\n assert hh([1, 1]) is True\n assert hh([1]) is False\n assert hh([]) is True\n",
"step-ids": [
5,
7,
8,
10,
11
]
}
|
[
5,
7,
8,
10,
11
] |
def is_armstrong(number):
    """Return True if `number` equals the sum of its digits each raised to the digit count.

    The original loop hard-coded the exponent 3, which only works for
    three-digit Armstrong numbers (e.g. 153) and misclassifies longer
    ones such as 9474; the definition uses the number of digits.  It also
    shadowed the builtin `sum`, which this rewrite avoids.
    """
    digits = str(abs(number))
    power = len(digits)
    return number == sum(int(digit) ** power for digit in digits)


if __name__ == '__main__':
    # Read the candidate and report the verdict
    # (output spelling fixed: "amstrong" -> "armstrong").
    value = int(input('enter a number'))
    if is_armstrong(value):
        print('the given no is armstrong ')
    else:
        print('the given no is not armstrong ')
|
normal
|
{
"blob_id": "a8190c7c8926df18ee9439922ce8e3241e9a6140",
"index": 4550,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile n > 0:\n rev = n % 10\n sum += rev ** 3\n n = n // 10\nif cp == sum:\n print('the given no is amstrong ')\nelse:\n print('the given no is not amstrong ')\n",
"step-3": "n = int(input('enter a number'))\ncp = n\nrev = 0\nsum = 0\nwhile n > 0:\n rev = n % 10\n sum += rev ** 3\n n = n // 10\nif cp == sum:\n print('the given no is amstrong ')\nelse:\n print('the given no is not amstrong ')\n",
"step-4": "n=int(input(\"enter a number\"))\ncp=n\nrev=0\nsum=0\nwhile(n>0):\n\trev=n%10\n\tsum+=rev**3\n\tn=n//10\nif(cp==sum):\n\tprint(\"the given no is amstrong \")\nelse:\n\tprint(\"the given no is not amstrong \")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
### Script to convert matlab structure file (/motiongan/data/style-dataset/style_motion_database.mat')
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import argparse
import math
import numpy as np
from collections import OrderedDict
import scipy.io
import pickle
from core.utils.euler_to_quaternion import quaternion_to_rotation_mat, rotation_mat_to_euler
## Load motion data from .mat file
def load_motion(mat_path, out):
    """Load every motion clip from the MATLAB database and pickle each one.

    Parameters:
        mat_path: path to the .mat file holding the 'motion_database' struct array.
        out: directory where one <clip>.pkl file is written per motion clip.

    Returns:
        Dict mapping each clip's original file name to its motion-data dict
        with keys 'style', 'motion_type', 'joint_nums', 'root_position',
        'joint_quarternions', and 'foot_contact'.
    """
    mat_data = scipy.io.loadmat(mat_path)['motion_database']
    # The struct array is 1 x N; each column is one motion clip.
    file_nums = mat_data.shape[1]
    motion_data_all = {}
    for f_id in range(file_nums):
        motion_data = {}
        # Struct fields 0 and 1 hold the style label and motion content strings.
        motion_data['style'] = mat_data[0,f_id][0][0]
        motion_data['motion_type'] = mat_data[0,f_id][1][0]
        # Field 2, frame 0 carries the original (Windows-style) file path;
        # keep only the basename after the last backslash.
        full_path = mat_data[0,f_id][2][0,0][0][0]
        file_name = full_path.split('\\')[-1]
        # Per-frame joint parameters: root position (frames x 3) and one
        # quaternion per joint (frames x joints x 4).
        frame_nums = mat_data[0,f_id][2].shape[1]
        root_pos = np.zeros((frame_nums,3))
        joint_nums = mat_data[0,f_id][2][0,0][2].shape[0]
        motion_data['joint_nums'] = joint_nums
        joint_quarternions = np.zeros((frame_nums, joint_nums, 4))
        for i in range(frame_nums):
            root_pos[i,:] = mat_data[0,f_id][2][0,i][1]
            joint_quarternions[i,:,:] = mat_data[0,f_id][2][0,i][2]
        motion_data['root_position'] = root_pos
        motion_data['joint_quarternions'] = joint_quarternions
        # Field 3: per-frame foot contact annotation.
        motion_data['foot_contact'] = mat_data[0,f_id][3][0]
        # Cache the parsed clip as <out>/<name>.pkl so later runs can skip parsing.
        with open(os.path.join(out, os.path.splitext(file_name)[0]+'.pkl'), 'wb') as f:
            pickle.dump(motion_data, f)
        motion_data_all[file_name] = motion_data
    return motion_data_all
## Load skeleton data from .mat file
def load_skeleton(mat_path):
    """Read the skeleton definition from a .mat file.

    Returns an OrderedDict mapping bone name -> {'offset', 'parent',
    'children'}.  Leaf bones named 'Site' are disambiguated by prefixing
    the preceding bone's name (e.g. 'LeftFootSite').
    """
    skel = scipy.io.loadmat(mat_path)['skel'][0, 0]

    # First pass: register every bone under a unique name.
    skeleton = OrderedDict()
    raw_names = skel[1].tolist()
    for idx, raw_name in enumerate(raw_names):
        name = raw_name.strip()
        if name == 'Site':
            name = raw_names[idx - 1].strip() + name
        skeleton[name] = {'offset': [], 'parent': [], 'children': []}

    # Second pass: wire up parent/child links and per-bone offsets.
    parent_ids = skel[2][0]
    offsets = skel[3]
    names = list(skeleton.keys())
    for idx, name in enumerate(names):
        if name != 'root':
            parent = names[parent_ids[idx] - 1]
            skeleton[name]['parent'] = parent
            skeleton[parent]['children'].append(name)
        skeleton[name]['offset'] = offsets[idx, :]
    return skeleton
## Construct hierarchy of skeleton for bvh
def construct_hierarchy(skeleton):
    """Render `skeleton` as the HIERARCHY section of a BVH file.

    Side effect: stores each bone's tree depth under skeleton[bone]['level'].
    Returns a list of '\\r\\n'-terminated lines.  The closing-bracket logic
    assumes every leaf bone name ends with 'Site' and that bones are listed
    depth-first (parents before children) — TODO confirm against the .mat
    skeleton ordering.
    """
    hierarchy = ['HIERARCHY\r\n']
    # Pass 1: each bone's depth is its parent's depth + 1.
    # Relies on iteration order visiting parents before their children.
    level = 0
    for i, bone in enumerate(skeleton.keys()):
        if bone == 'root':
            skeleton[bone]['level'] = 0
        else:
            parent = skeleton[bone]['parent']
            skeleton[bone]['level'] = skeleton[parent]['level'] + 1
    # Pass 2: emit one ROOT/JOINT/End Site block per bone.
    for i, bone in enumerate(skeleton.keys()):
        offset = skeleton[bone]['offset']
        if bone == 'root':
            # Root gets 6 channels (translation + rotation); its closing
            # bracket is emitted later, when a leaf unwinds back to level 0.
            hierarchy.append('ROOT root\r\n')
            hierarchy.append('{\r\n')
            hierarchy.append('\tOFFSET {0:.05f} {1:.05f} {2:.05f}\r\n'.format(offset[0],offset[1],offset[2]))
            hierarchy.append('\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\r\n')
        elif bone.endswith('Site'):
            parent = skeleton[bone]['parent']
            level = skeleton[bone]['level']
            tabs = '\t' * level
            hierarchy.append(tabs + 'End Site\r\n')
            hierarchy.append(tabs + '{\r\n')
            hierarchy.append(tabs + '\tOFFSET {0:.05f} {1:.05f} {2:.05f}\r\n'.format(offset[0],offset[1],offset[2]))
            hierarchy.append(tabs + '}\r\n')
            # Put end brackets: a leaf is where the tree unwinds, so close
            # enclosing joints here.
            if i == len(skeleton.keys())-1:
                # Last bone overall: close every open scope down to root.
                while level > 0:
                    level -= 1
                    hierarchy.append('\t' * level + '}\r\n')
            else:
                # Close only as many levels as the next bone is shallower.
                for _ in range(level - skeleton[list(skeleton.keys())[i+1]]['level']):
                    level -= 1
                    hierarchy.append('\t' * level + '}\r\n')
        else:
            # Interior joint: 3 rotation channels, no position channels.
            parent = skeleton[bone]['parent']
            level = skeleton[bone]['level']
            tabs = '\t'*level
            hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\r\n')
            hierarchy.append(tabs + '{\r\n')
            hierarchy.append(tabs + '\tOFFSET {0:.05f} {1:.05f} {2:.05f}\r\n'.format(offset[0],offset[1],offset[2]))
            hierarchy.append(tabs + '\tCHANNELS 3 Zrotation Yrotation Xrotation\r\n')
    # Debug dump kept from the original (disabled):
    #with open('hierarchy_test.txt', 'w') as f:
    #    f.writelines(hierarchy)
    return hierarchy
# Write .bvh file
def write_bvh(skeleton, hierarchy, motion_data_all, out):
    """Write one BVH file per motion clip into the `out` directory.

    Parameters:
        skeleton: OrderedDict from load_skeleton (joint order matches axis 1
            of each clip's quaternion array).
        hierarchy: HIERARCHY lines from construct_hierarchy.
        motion_data_all: dict of file name -> motion-data dict from load_motion.
        out: destination directory for the .bvh files.
    """
    # Hoist the joint-name list: it is loop-invariant, and the original
    # rebuilt list(skeleton.keys()) inside the per-frame/per-joint loops,
    # i.e. O(frames * joints * bones) list constructions per clip.
    bone_names = list(skeleton.keys())
    for file_name, motion_data in motion_data_all.items():
        joint_quarternions = motion_data['joint_quarternions']
        root_pos = motion_data['root_position']
        # Convert each frame to one whitespace-separated line of channel values.
        frames = []
        for i in range(joint_quarternions.shape[0]):
            # Root translation channels first.
            root_pos_i = root_pos[i]
            frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())
            for j in range(joint_quarternions.shape[1]):
                # End Sites carry no channels in the hierarchy, so skip them.
                if bone_names[j].endswith('Site'):
                    continue
                # Quaternion is stored in 'xyzw' order (per the original
                # note), so components are passed reversed to the helper.
                R_ij = quaternion_to_rotation_mat(joint_quarternions[i,j,3], joint_quarternions[i,j,2], joint_quarternions[i,j,1], joint_quarternions[i,j,0])
                euler_ij = rotation_mat_to_euler(R_ij)
                # Radians -> degrees for the BVH MOTION section.
                frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda s: s * (180.0/math.pi), euler_ij.tolist())))
            frame += '\r\n'
            frames.append(frame)
        # NOTE(review): text mode re-translates the embedded '\r\n' on Windows
        # (producing '\r\r\n'); pass newline='' if Windows output must match.
        with open(os.path.join(out, file_name), 'w') as f:
            f.writelines(hierarchy)
            f.write('MOTION\r\n')
            # Prepend the frame-count header to the first motion line.
            frames[0] = 'Frames: {0}\r\nFrame Time: 0.0083333\r\n'.format(joint_quarternions.shape[0]) + frames[0]
            f.writelines(frames)
        print(os.path.join(out, file_name))
def main():
    """Entry point: convert the style .mat database into per-clip .bvh files."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('out', type=str)
    out_dir = arg_parser.parse_args().out

    # Load motions and skeleton, build the hierarchy text, then emit files.
    motions = load_motion('../../motiongan/data/style-dataset/style_motion_database.mat', out_dir)
    skel = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')
    bvh_hierarchy = construct_hierarchy(skel)
    write_bvh(skel, bvh_hierarchy, motions, out_dir)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "f2dac8b454805829cf5dbe2efe3c0de805ae4cb5",
"index": 1727,
"step-1": "<mask token>\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\n<mask token>\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n 
'../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n motion_data['style'] = mat_data[0, f_id][0][0]\n motion_data['motion_type'] = mat_data[0, f_id][1][0]\n full_path = mat_data[0, f_id][2][0, 0][0][0]\n file_name = full_path.split('\\\\')[-1]\n frame_nums = mat_data[0, f_id][2].shape[1]\n root_pos = np.zeros((frame_nums, 3))\n joint_nums = mat_data[0, f_id][2][0, 0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i, :] = mat_data[0, f_id][2][0, i][1]\n joint_quarternions[i, :, :] = mat_data[0, f_id][2][0, i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n motion_data['foot_contact'] = mat_data[0, f_id][3][0]\n with open(os.path.join(out, os.path.splitext(file_name)[0] + '.pkl'\n ), 'wb') as f:\n pickle.dump(motion_data, f)\n motion_data_all[file_name] = motion_data\n return motion_data_all\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = 
skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.\n format(offset[0], offset[1], offset[2]))\n hierarchy.append(\n '\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n'\n )\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n if i == len(skeleton.keys()) - 1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n for _ in range(level - skeleton[list(skeleton.keys())[i + 1\n ]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs +\n '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n return hierarchy\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = 
quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n '../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n<mask token>\n\n\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n motion_data['style'] = mat_data[0, f_id][0][0]\n motion_data['motion_type'] = mat_data[0, f_id][1][0]\n full_path = mat_data[0, f_id][2][0, 0][0][0]\n file_name = full_path.split('\\\\')[-1]\n frame_nums = mat_data[0, f_id][2].shape[1]\n root_pos = np.zeros((frame_nums, 3))\n joint_nums = mat_data[0, f_id][2][0, 0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i, :] = mat_data[0, f_id][2][0, i][1]\n joint_quarternions[i, :, :] = mat_data[0, f_id][2][0, i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n motion_data['foot_contact'] = mat_data[0, f_id][3][0]\n with open(os.path.join(out, os.path.splitext(file_name)[0] + '.pkl'\n ), 'wb') as f:\n pickle.dump(motion_data, f)\n motion_data_all[file_name] = motion_data\n return motion_data_all\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n level = 0\n for i, bone in 
enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.\n format(offset[0], offset[1], offset[2]))\n hierarchy.append(\n '\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n'\n )\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n if i == len(skeleton.keys()) - 1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n for _ in range(level - skeleton[list(skeleton.keys())[i + 1\n ]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs +\n '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n return hierarchy\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in 
range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n '../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport argparse\nimport math\nimport numpy as np\nfrom collections import OrderedDict\nimport scipy.io\nimport pickle\nfrom core.utils.euler_to_quaternion import quaternion_to_rotation_mat, rotation_mat_to_euler\n\n\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n motion_data['style'] = mat_data[0, f_id][0][0]\n motion_data['motion_type'] = mat_data[0, f_id][1][0]\n full_path = mat_data[0, f_id][2][0, 0][0][0]\n file_name = full_path.split('\\\\')[-1]\n frame_nums = mat_data[0, f_id][2].shape[1]\n root_pos = np.zeros((frame_nums, 3))\n joint_nums = mat_data[0, f_id][2][0, 0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i, :] = mat_data[0, f_id][2][0, i][1]\n joint_quarternions[i, :, :] = mat_data[0, f_id][2][0, i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n motion_data['foot_contact'] = mat_data[0, f_id][3][0]\n with open(os.path.join(out, os.path.splitext(file_name)[0] + '.pkl'\n ), 'wb') as f:\n pickle.dump(motion_data, f)\n motion_data_all[file_name] = motion_data\n return motion_data_all\n\n\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0, 0]\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = bone_names[i - 1].strip() + bone\n skeleton[bone] = {'offset': [], 'parent': [], 'children': []}\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root':\n parent = list(skeleton.keys())[parent_ids[i] - 1]\n skeleton[bone]['parent'] = parent\n 
skeleton[parent]['children'].append(bone)\n skeleton[bone]['offset'] = offsets[i, :]\n return skeleton\n\n\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.\n format(offset[0], offset[1], offset[2]))\n hierarchy.append(\n '\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n'\n )\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n if i == len(skeleton.keys()) - 1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n for _ in range(level - skeleton[list(skeleton.keys())[i + 1\n ]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs +\n '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],\n offset[1], offset[2]))\n hierarchy.append(tabs +\n '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n return hierarchy\n\n\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = 
motion_data['root_position']\n frames = []\n for i in range(joint_quarternions.shape[0]):\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist())\n for j in range(joint_quarternions.shape[1]):\n if list(skeleton.keys())[j].endswith('Site'):\n continue\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i, j, \n 3], joint_quarternions[i, j, 2], joint_quarternions[i,\n j, 1], joint_quarternions[i, j, 0])\n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda\n s: s * (180.0 / math.pi), euler_ij.tolist())))\n frame += '\\r\\n'\n frames.append(frame)\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(\n joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n print(os.path.join(out, file_name))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n args = parser.parse_args()\n out = args.out\n motion_data_all = load_motion(\n '../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, hierarchy, motion_data_all, out)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "### Script to convert matlab structure file (/motiongan/data/style-dataset/style_motion_database.mat')\nimport os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\nimport argparse\nimport math\nimport numpy as np\nfrom collections import OrderedDict\nimport scipy.io\nimport pickle\n\nfrom core.utils.euler_to_quaternion import quaternion_to_rotation_mat, rotation_mat_to_euler\n\n## Load motion data from .mat file\ndef load_motion(mat_path, out):\n mat_data = scipy.io.loadmat(mat_path)['motion_database']\n file_nums = mat_data.shape[1]\n motion_data_all = {}\n for f_id in range(file_nums):\n motion_data = {}\n # Get style and motion content \n motion_data['style'] = mat_data[0,f_id][0][0]\n motion_data['motion_type'] = mat_data[0,f_id][1][0] \n\n # Get file name\n full_path = mat_data[0,f_id][2][0,0][0][0]\n file_name = full_path.split('\\\\')[-1]\n\n # Get joint parameters\n frame_nums = mat_data[0,f_id][2].shape[1]\n root_pos = np.zeros((frame_nums,3))\n \n joint_nums = mat_data[0,f_id][2][0,0][2].shape[0]\n motion_data['joint_nums'] = joint_nums\n joint_quarternions = np.zeros((frame_nums, joint_nums, 4))\n for i in range(frame_nums):\n root_pos[i,:] = mat_data[0,f_id][2][0,i][1]\n joint_quarternions[i,:,:] = mat_data[0,f_id][2][0,i][2]\n motion_data['root_position'] = root_pos\n motion_data['joint_quarternions'] = joint_quarternions\n\n # Get foot contact annotation\n motion_data['foot_contact'] = mat_data[0,f_id][3][0]\n\n\n # Save file as pickle\n with open(os.path.join(out, os.path.splitext(file_name)[0]+'.pkl'), 'wb') as f:\n pickle.dump(motion_data, f)\n\n motion_data_all[file_name] = motion_data\n\n return motion_data_all\n\n\n## Load skeleton data from .mat file\ndef load_skeleton(mat_path):\n mat_data = scipy.io.loadmat(mat_path)['skel'][0,0]\n\n # Init skeleton\n skeleton = OrderedDict()\n bone_names = mat_data[1].tolist()\n for i, bone in enumerate(bone_names):\n bone = bone.strip()\n if bone == 'Site':\n bone = 
bone_names[i-1].strip() + bone\n skeleton[bone] = {'offset':[], 'parent':[], 'children':[]}\n \n # Resister bone parent and children, offset\n parent_ids = mat_data[2][0]\n offsets = mat_data[3]\n for i, bone in enumerate(skeleton.keys()):\n if bone != 'root': \n parent = list(skeleton.keys())[parent_ids[i]-1]\n skeleton[bone]['parent'] = parent\n skeleton[parent]['children'].append(bone)\n\n skeleton[bone]['offset'] = offsets[i,:]\n\n return skeleton\n\n\n## Construct hierarchy of skeleton for bvh\ndef construct_hierarchy(skeleton):\n hierarchy = ['HIERARCHY\\r\\n']\n \n # Calc tree level\n level = 0\n for i, bone in enumerate(skeleton.keys()):\n if bone == 'root':\n skeleton[bone]['level'] = 0\n else:\n parent = skeleton[bone]['parent']\n skeleton[bone]['level'] = skeleton[parent]['level'] + 1\n\n # Write hierarchy\n for i, bone in enumerate(skeleton.keys()):\n offset = skeleton[bone]['offset']\n if bone == 'root':\n hierarchy.append('ROOT root\\r\\n')\n hierarchy.append('{\\r\\n')\n hierarchy.append('\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],offset[1],offset[2]))\n hierarchy.append('\\tCHANNELS 6 Xposition Yposition Zposition Zrotation Yrotation Xrotation\\r\\n')\n\n elif bone.endswith('Site'):\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t' * level\n hierarchy.append(tabs + 'End Site\\r\\n')\n hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs + '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],offset[1],offset[2]))\n hierarchy.append(tabs + '}\\r\\n')\n # Put end brancket\n if i == len(skeleton.keys())-1:\n while level > 0:\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n else: \n for _ in range(level - skeleton[list(skeleton.keys())[i+1]]['level']):\n level -= 1\n hierarchy.append('\\t' * level + '}\\r\\n')\n\n else:\n parent = skeleton[bone]['parent']\n level = skeleton[bone]['level']\n tabs = '\\t'*level\n hierarchy.append(tabs + 'JOINT {0}'.format(bone) + '\\r\\n')\n 
hierarchy.append(tabs + '{\\r\\n')\n hierarchy.append(tabs + '\\tOFFSET {0:.05f} {1:.05f} {2:.05f}\\r\\n'.format(offset[0],offset[1],offset[2]))\n hierarchy.append(tabs + '\\tCHANNELS 3 Zrotation Yrotation Xrotation\\r\\n')\n \n #with open('hierarchy_test.txt', 'w') as f:\n # f.writelines(hierarchy)\n return hierarchy\n\n\n# Write .bvh file\ndef write_bvh(skeleton, hierarchy, motion_data_all, out):\n for file_name, motion_data in motion_data_all.items():\n joint_quarternions = motion_data['joint_quarternions']\n root_pos = motion_data['root_position']\n\n # Convert data to list of string\n frames = []\n for i in range(joint_quarternions.shape[0]):\n # Root pos\n root_pos_i = root_pos[i]\n frame = '{0:.05f} {1:.05f} {2:.05f} '.format(*root_pos_i.tolist()) \n\n for j in range(joint_quarternions.shape[1]):\n # If Endsite, skip\n if list(skeleton.keys())[j].endswith('Site'): \n continue\n ## This implementation is modified to quarternion with 'xyzw' order\n R_ij = quaternion_to_rotation_mat(joint_quarternions[i,j,3], joint_quarternions[i,j,2], joint_quarternions[i,j,1], joint_quarternions[i,j,0]) \n euler_ij = rotation_mat_to_euler(R_ij)\n frame += '{0:.05f} {1:.05f} {2:.05f} '.format(*list(map(lambda s: s * (180.0/math.pi), euler_ij.tolist())))\n\n frame += '\\r\\n'\n frames.append(frame)\n \n # Write\n with open(os.path.join(out, file_name), 'w') as f:\n f.writelines(hierarchy)\n\n f.write('MOTION\\r\\n')\n frames[0] = 'Frames: {0}\\r\\nFrame Time: 0.0083333\\r\\n'.format(joint_quarternions.shape[0]) + frames[0]\n f.writelines(frames)\n \n print(os.path.join(out, file_name))\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('out', type=str)\n\n args = parser.parse_args()\n out = args.out\n\n motion_data_all = load_motion('../../motiongan/data/style-dataset/style_motion_database.mat', out)\n skeleton = load_skeleton('../../motiongan/data/style-dataset/skeleton.mat')\n hierarchy = construct_hierarchy(skeleton)\n write_bvh(skeleton, 
hierarchy, motion_data_all, out) \n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def divisible_by(numbers, divisor):
    """Return the elements of *numbers* that are evenly divisible by *divisor*.

    Args:
        numbers: iterable of ints.
        divisor: non-zero int to test divisibility against.

    Returns:
        list of the qualifying elements, in their original order.
    """
    return [n for n in numbers if n % divisor == 0]
|
flexible
|
{
"blob_id": "d7ff5bf5d8f397500fcac30b73f469316c908f15",
"index": 5042,
"step-1": "<mask token>\n",
"step-2": "def divisible_by(numbers, divisor):\n res = []\n for e in numbers:\n if e % divisor == 0:\n res.append(e)\n return res\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def loadDataSet():
"""解析文件
Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]
@author:VPrincekin
"""
dataMat = []
labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat, labelMat
def sigmoid(inX):
"""sigmoid函数
@author:VPrincekin
"""
return 1 / (1 + exp(-inX))
<|reserved_special_token_0|>
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
"""
Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。
Return:weights 回归系数矩阵
@author:VPrincekin
"""
m, n = shape(dataMatrix)
weights = ones(n)
for j in range(numIter):
dataIndex = range(m)
for i in range(m):
alpha = 4 / (1.0 + i + j) + 0.01
randIndex = int(random.uniform(0, len(dataIndex)))
h = sigmoid(sum(dataMatrix[randIndex] * weights))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
return weights
<|reserved_special_token_0|>
def colicTest():
"""测试Logistic回归算法
Args: None
Return: Logistic回归算法错误率
"""
frTrain = open('horseColicTraining.txt')
frTest = open('horseColicTest.txt')
trainingSet = []
trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
errorCount = 0
numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights)) != int(currLine
[21]):
errorCount += 1
errorRate = float(errorCount) / numTestVec
print('the error rata of this test is : %f' % errorRate)
return errorRate
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadDataSet():
"""解析文件
Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]
@author:VPrincekin
"""
dataMat = []
labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat, labelMat
def sigmoid(inX):
"""sigmoid函数
@author:VPrincekin
"""
return 1 / (1 + exp(-inX))
<|reserved_special_token_0|>
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
"""
Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。
Return:weights 回归系数矩阵
@author:VPrincekin
"""
m, n = shape(dataMatrix)
weights = ones(n)
for j in range(numIter):
dataIndex = range(m)
for i in range(m):
alpha = 4 / (1.0 + i + j) + 0.01
randIndex = int(random.uniform(0, len(dataIndex)))
h = sigmoid(sum(dataMatrix[randIndex] * weights))
error = classLabels[randIndex] - h
weights = weights + alpha * error * dataMatrix[randIndex]
return weights
def classifyVector(inX, weights):
"""测试算法
Args: inX 测试样本; weigths 训练算法得到的回归系数
Return: 返回类别,0或1.
@author:VPrincekin
"""
prob = sigmoid(sum(inX * weights))
if prob > 0.5:
return 1.0
else:
return 0.0
def colicTest():
"""测试Logistic回归算法
Args: None
Return: Logistic回归算法错误率
"""
frTrain = open('horseColicTraining.txt')
frTest = open('horseColicTest.txt')
trainingSet = []
trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
errorCount = 0
numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr), trainWeights)) != int(currLine
[21]):
errorCount += 1
errorRate = float(errorCount) / numTestVec
print('the error rata of this test is : %f' % errorRate)
return errorRate
def multiTest():
"""调用colicTest()多次并求结果的平均值。
@author:VPrincekin
"""
numTests = 10
errorSum = 0.0
for k in range(numTests):
errorSum += colicTest()
print('after %d iterations the average error rate is : %f ' % (numTests,
errorSum / float(numTests)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadDataSet():
"""解析文件
Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]
@author:VPrincekin
"""
dataMat = []
labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat, labelMat
def sigmoid(inX):
"""sigmoid函数
@author:VPrincekin
"""
return 1 / (1 + exp(-inX))
def gradAscent(dataMatIn, classLabels):
"""梯度上升算法
Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量
Return: weights 回归系数矩阵
@author:VPrincekin
"""
dataMatrix = mat(dataMatIn)
labelMat = mat(classLabels).transpose()
m, n = shape(dataMatrix)
alpha = 0.001
maxCycles = 500
weights = ones((n, 1))
for k in range(maxCycles):
h = sigmoid(dataMatrix * weights)
error = labelMat - h
weights = weights + alpha * dataMatrix.transpose() * error
return weights
<|reserved_special_token_0|>
def stocGradAscent0(dataMatrix, classLabels):
    """Single-pass stochastic gradient ascent for logistic regression.

    Args:
        dataMatrix: array-like of m feature rows (bias term included).
        classLabels: list of m class labels (0/1).

    Returns:
        1-D numpy array of n regression weights after one pass over the data.
    """
    sample_count, feature_count = shape(dataMatrix)
    learning_rate = 0.01
    w = ones(feature_count)
    # One weight update per training sample, visited in order.
    for idx in range(sample_count):
        prediction = sigmoid(sum(dataMatrix[idx] * w))
        residual = classLabels[idx] - prediction
        w = w + learning_rate * residual * dataMatrix[idx]
    return w
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Improved stochastic gradient ascent for logistic regression.

    The learning rate decays with the iteration count, and within each
    pass every sample is visited exactly once, in random order.

    Args:
        dataMatrix: array-like of m feature rows (bias term included).
        classLabels: list of m class labels (0/1).
        numIter: number of full passes over the data (default 150).

    Returns:
        1-D numpy array of n fitted regression weights.
    """
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        # Bug fix: the original built dataIndex but never consumed it, so
        # samples were drawn with replacement and the list was dead code.
        # Track the remaining indices and remove each one after use
        # (sampling without replacement within a pass).
        dataIndex = list(range(m))
        for i in range(m):
            # Decaying learning rate; the 0.01 floor keeps it from reaching 0.
            alpha = 4 / (1.0 + i + j) + 0.01
            randPos = int(random.uniform(0, len(dataIndex)))
            randIndex = dataIndex[randPos]
            h = sigmoid(sum(dataMatrix[randIndex] * weights))
            error = classLabels[randIndex] - h
            weights = weights + alpha * error * dataMatrix[randIndex]
            del dataIndex[randPos]
    return weights
def classifyVector(inX, weights):
    """Classify one sample with trained logistic-regression weights.

    Args:
        inX: feature vector of the sample (bias term included).
        weights: fitted regression weights.

    Returns:
        1.0 if the predicted probability exceeds 0.5, otherwise 0.0.
    """
    probability = sigmoid(sum(inX * weights))
    return 1.0 if probability > 0.5 else 0.0
def colicTest():
    """Train on the horse-colic training set and evaluate on the test set.

    Reads 'horseColicTraining.txt' and 'horseColicTest.txt' (21
    tab-separated features plus a trailing label per line), fits weights
    with stocGradAscent1, and reports the classification error rate.

    Returns:
        float error rate on the test set.
    """
    trainingSet = []
    trainingLabels = []
    # Bug fix: use with-statements so both file handles are always closed
    # (the original opened them and never closed either).
    with open('horseColicTraining.txt') as frTrain:
        for line in frTrain:
            currLine = line.strip().split('\t')
            lineArr = []
            for i in range(21):
                lineArr.append(float(currLine[i]))
            trainingSet.append(lineArr)
            trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    with open('horseColicTest.txt') as frTest:
        for line in frTest:
            numTestVec += 1.0
            currLine = line.strip().split('\t')
            lineArr = []
            for i in range(21):
                lineArr.append(float(currLine[i]))
            if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
                errorCount += 1
    errorRate = float(errorCount) / numTestVec
    # Bug fix: corrected 'rata' -> 'rate' in the report message.
    print('the error rate of this test is : %f' % errorRate)
    return errorRate
def multiTest():
    """Call colicTest() several times and report the average error rate."""
    numTests = 10
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print('after %d iterations the average error rate is : %f ' % (numTests,
        errorSum / float(numTests)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def loadDataSet():
    """Parse 'testSet.txt' into feature vectors and labels.

    Each line holds two floats (X1, X2) and an int class label.
    Returns:
        dataMat: list of [1.0, x1, x2] rows.
        labelMat: list of int class labels (0 or 1).
    """
    dataMat = []
    labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()
        # prepend the constant 1.0 so weights[0] acts as the intercept
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat
def sigmoid(inX):
    """Logistic (sigmoid) function: maps any real input into (0, 1).

    Args:
        inX: scalar or numpy array of logits.
    Returns:
        1 / (1 + e^-inX), element-wise for array input.
    """
    denom = 1.0 + exp(-inX)
    return 1.0 / denom
def gradAscent(dataMatIn, classLabels):
    """Batch gradient ascent for logistic regression.

    Args:
        dataMatIn: list of samples, each [1.0, x1, x2, ...].
        classLabels: list of 0/1 labels, one per sample.
    Returns:
        (n x 1) numpy matrix of fitted regression coefficients.
    """
    features = mat(dataMatIn)
    labels = mat(classLabels).transpose()
    _, n_feat = shape(features)
    step_size = 0.001      # learning rate
    n_iterations = 500
    weights = ones((n_feat, 1))
    for _ in range(n_iterations):
        prediction = sigmoid(features * weights)
        residual = labels - prediction
        # move along the gradient of the log-likelihood
        weights = weights + step_size * features.transpose() * residual
    return weights
def plotBestFit(weights):
    """Plot the data set and the fitted Logistic-regression boundary.

    Args:
        weights: regression coefficients [w0, w1, w2].
    """
    import matplotlib.pyplot as plt
    # split the samples into the two classes for coloured scatter plots
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []
    ycord1 = []
    xcord2 = []
    ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='green')
    x = arange(-3.0, 3.0, 0.1)
    # boundary is where the sigmoid input is 0: w0*x0 + w1*x1 + w2*x2 = 0
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
def stocGradAscent0(dataMatrix, classLabels):
    """One-pass stochastic gradient ascent for logistic regression.

    Args:
        dataMatrix: sequence of feature vectors [1.0, x1, x2, ...].
        classLabels: sequence of 0/1 labels, one per sample.
    Returns:
        1-D array of regression coefficients.
    """
    n_samples, n_feat = shape(dataMatrix)
    learning_rate = 0.01
    weights = ones(n_feat)
    for idx in range(n_samples):
        sample = dataMatrix[idx]
        # prediction and error for one sample, then a single gradient step
        predicted = sigmoid(sum(sample * weights))
        residual = classLabels[idx] - predicted
        weights = weights + learning_rate * residual * sample
    return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Improved stochastic gradient ascent for logistic regression.

    Improvements over stocGradAscent0:
      1. alpha decays with the iteration counters (but never reaches
         zero), which damps oscillation of the coefficients.
      2. each epoch visits every sample exactly once, in random order
         (sampling without replacement).

    Args:
        dataMatrix: sequence of feature vectors [1.0, x1, x2, ...].
        classLabels: sequence of 0/1 labels, one per sample.
        numIter: number of passes over the data (default 150).
    Returns:
        weights: 1-D array of regression coefficients.
    """
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        # Fix: the original built dataIndex but never used it to pick or
        # remove samples, so indices were drawn *with* replacement from
        # the full range every time. Draw from a shrinking pool instead,
        # so each epoch touches every sample exactly once.
        dataIndex = list(range(m))
        for i in range(m):
            # decaying-but-never-zero step size
            alpha = 4 / (1.0 + i + j) + 0.01
            randPos = int(random.uniform(0, len(dataIndex)))
            sampleIdx = dataIndex[randPos]
            h = sigmoid(sum(dataMatrix[sampleIdx] * weights))
            error = classLabels[sampleIdx] - h
            weights = weights + alpha * error * dataMatrix[sampleIdx]
            del dataIndex[randPos]
    return weights
def classifyVector(inX, weights):
    """Classify one sample with trained regression coefficients.

    Args:
        inX: feature vector of the sample to classify.
        weights: coefficients produced by training.
    Returns:
        1.0 if the predicted probability exceeds 0.5, else 0.0.
    """
    probability = sigmoid(sum(inX * weights))
    return 1.0 if probability > 0.5 else 0.0
def colicTest():
    """Train and test Logistic regression on the horse-colic data files.

    Trains on 'horseColicTraining.txt' (21 tab-separated features plus a
    trailing label per line) and evaluates on 'horseColicTest.txt'.
    Returns:
        The error rate over the test file.
    """
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    # fit the coefficients with 500 passes of stochastic gradient ascent
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine
            [21]):
            errorCount += 1
    errorRate = float(errorCount) / numTestVec
    # NOTE(review): "rata" is a typo for "rate" in the message below.
    print('the error rata of this test is : %f' % errorRate)
    return errorRate
def multiTest():
    """Run colicTest() several times and report the mean error rate."""
    numTests = 10
    errorSum = sum(colicTest() for _ in range(numTests))
    print('after %d iterations the average error rate is : %f ' % (numTests,
        errorSum / float(numTests)))
<|reserved_special_token_1|>
#coding=utf-8
from numpy import *
#代码5-1,Logistic回归梯度上升优化算法。
def loadDataSet():
    """Parse 'testSet.txt' into feature vectors and labels.

    Returns:
        dataMat: list of [1.0, x1, x2] feature rows.
        labelMat: list of int class labels (0 or 1).
    """
    dataMat = []; labelMat= []
    fr = open('testSet.txt')
    # the first two fields of each line are X1 and X2; the third is the label
    for line in fr.readlines():
        # strip() removes surrounding whitespace
        lineArr = line.strip().split()
        # X0 is fixed to 1.0 so weights[0] acts as the intercept term
        dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])
        labelMat.append(int(lineArr[2]))
    return dataMat,labelMat
def sigmoid(inX):
    """Logistic (sigmoid) function: maps any real input into (0, 1).
    @author:VPrincekin
    """
    return 1/(1+exp(-inX))
def gradAscent(dataMatIn,classLabels):
    """Batch gradient ascent.

    Args:
        dataMatIn: 100x3 list of samples [1.0, x1, x2].
        classLabels: 1x100 list of 0/1 class labels.
    Returns:
        weights: (n x 1) matrix of regression coefficients.
    """ 
    # mat() converts to the NumPy matrix type
    dataMatrix = mat(dataMatIn)
    # transpose() turns the label row vector into a column vector
    labelMat = mat(classLabels).transpose()
    # shape() gives the matrix dimensions (rows, columns)
    m,n = shape(dataMatrix)
    # alpha: step size towards the target
    alpha = 0.001
    # maxCycles: number of iterations
    maxCycles = 500
    # start from an n*1 matrix of ones
    weights = ones((n,1))
    # iterate: gradient ascent
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)
        error = (labelMat - h)
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights
######################################################################################
#代码5-2,画出数据集和Logistic回归最佳拟合直线的函数。
def plotBestFit(weights):
    """Plot the data set and the fitted Logistic-regression boundary.

    Args:
        weights: regression coefficients [w0, w1, w2].
    """
    import matplotlib.pyplot as plt
    # parse the file into feature rows and class labels
    dataMat,labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])
        else:
            xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])
    # draw the two classes as coloured scatter plots
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
    ax.scatter(xcord2,ycord2,s=30,c='green')
    x = arange(-3.0,3.0,0.1)
    # boundary is where the sigmoid input is 0: w0*x0 + w1*x1 + w2*x2 = 0
    y = (-weights[0]-weights[1]*x)/weights[2]
    ax.plot(x,y)
    plt.xlabel('X1'); plt.ylabel('X2');
    plt.show()
##############################################################################################
#代码5-3,随即梯度上升算法
def stocGradAscent0(dataMatrix,classLabels):
    """One-pass stochastic gradient ascent.

    Args:
        dataMatrix: sequence of feature vectors [1.0, x1, x2].
        classLabels: sequence of 0/1 labels, one per sample.
    Returns:
        weights: 1-D array of regression coefficients.
    """
    m,n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)
    for i in range(m):
        # evaluate the model on one sample
        h = sigmoid(sum(dataMatrix[i]*weights))
        # prediction error for that sample
        error = classLabels[i]-h
        # move the weights one step along the gradient
        weights = weights + alpha*error*dataMatrix[i]
    return weights
##############################################################################################
#代码5-4,改进的随即梯度上升算法
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    """Improved stochastic gradient ascent.

    Improvements over stocGradAscent0:
      1. alpha decays with the iteration counters (but never reaches
         zero), which damps data-induced oscillation of the weights.
      2. each epoch visits every sample exactly once, in random order
         (sampling without replacement).

    Args:
        dataMatrix: sequence of feature vectors [1.0, x1, x2, ...].
        classLabels: sequence of 0/1 labels, one per sample.
        numIter: number of passes over the data (default 150).
    Returns:
        weights: 1-D array of regression coefficients.
    """
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):
        # Fix: the original comment promised "pick a value at random and
        # delete it from the list", but the code never used dataIndex nor
        # deleted anything, so samples were drawn with replacement. Draw
        # from a shrinking pool so each epoch touches every sample once.
        dataIndex = list(range(m))
        for i in range(m):
            # decaying-but-never-zero step size
            alpha = 4 / (1.0 + i + j) + 0.01
            randPos = int(random.uniform(0, len(dataIndex)))
            sampleIdx = dataIndex[randPos]
            h = sigmoid(sum(dataMatrix[sampleIdx] * weights))
            error = classLabels[sampleIdx] - h
            weights = weights + alpha * error * dataMatrix[sampleIdx]
            del dataIndex[randPos]
    return weights
########################################################################################################
#代码5-5,Logistic回归分类函数
def classifyVector(inX,weights):
    """Classify one sample with the trained regression coefficients.

    Args:
        inX: feature vector of the sample.
        weights: coefficients produced by training.
    Returns:
        1.0 if the predicted probability is above 0.5, else 0.0.
    """
    prob = sigmoid(sum(inX*weights))
    if prob>0.5:
        return 1.0
    else:
        return 0.0
def colicTest():
    """Train and test Logistic regression on the horse-colic data.

    Returns:
        The error rate over the test file.
    """
    # each sample has 21 features and one class label
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []; trainingLabels = []
    # parse the training file and fit the coefficient vector
    # with stocGradAscent1()
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)
    # parse the test file and measure the error rate
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = (float(errorCount)/numTestVec)
    # NOTE(review): "rata" is a typo for "rate" in the message below.
    print('the error rata of this test is : %f' % errorRate)
    return errorRate
def multiTest():
    """Call colicTest() several times and report the average error rate.
    @author:VPrincekin
    """
    numTests = 10; errorSum = 0.0 
    for k in range(numTests):
        errorSum += colicTest()
    print("after %d iterations the average error rate is : %f " %(numTests,errorSum/float(numTests)))
|
flexible
|
{
"blob_id": "d47ea763ac1a4981fc5dee67cd396ad49570f923",
"index": 7821,
"step-1": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\n<mask token>\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\n<mask token>\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of 
this test is : %f' % errorRate)\n return errorRate\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\n<mask token>\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\ndef classifyVector(inX, weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX * weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in 
range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for k in range(numTests):\n errorSum += colicTest()\n print('after %d iterations the average error rate is : %f ' % (numTests,\n errorSum / float(numTests)))\n",
"step-3": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\ndef gradAscent(dataMatIn, classLabels):\n \"\"\"梯度上升算法\n Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n dataMatrix = mat(dataMatIn)\n labelMat = mat(classLabels).transpose()\n m, n = shape(dataMatrix)\n alpha = 0.001\n maxCycles = 500\n weights = ones((n, 1))\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = labelMat - h\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n\n\n<mask token>\n\n\ndef stocGradAscent0(dataMatrix, classLabels):\n \"\"\"\n Args: dataMatrix 文档列表; classLabels 类别标签列表\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n alpha = 0.01\n weights = ones(n)\n for i in range(m):\n h = sigmoid(sum(dataMatrix[i] * weights))\n error = classLabels[i] - h\n weights = weights + alpha * error * dataMatrix[i]\n return weights\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\ndef 
classifyVector(inX, weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX * weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for k in range(numTests):\n errorSum += colicTest()\n print('after %d iterations the average error rate is : %f ' % (numTests,\n errorSum / float(numTests)))\n",
"step-4": "<mask token>\n\n\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []\n labelMat = []\n fr = open('testSet.txt')\n for line in fr.readlines():\n lineArr = line.strip().split()\n dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat, labelMat\n\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1 / (1 + exp(-inX))\n\n\ndef gradAscent(dataMatIn, classLabels):\n \"\"\"梯度上升算法\n Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n dataMatrix = mat(dataMatIn)\n labelMat = mat(classLabels).transpose()\n m, n = shape(dataMatrix)\n alpha = 0.001\n maxCycles = 500\n weights = ones((n, 1))\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = labelMat - h\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n\n\ndef plotBestFit(weights):\n \"\"\"\n Args:weights 回归系数\n @author:VPrincekin\n \"\"\"\n import matplotlib.pyplot as plt\n dataMat, labelMat = loadDataSet()\n dataArr = array(dataMat)\n n = shape(dataArr)[0]\n xcord1 = []\n ycord1 = []\n xcord2 = []\n ycord2 = []\n for i in range(n):\n if int(labelMat[i]) == 1:\n xcord1.append(dataArr[i, 1])\n ycord1.append(dataArr[i, 2])\n else:\n xcord2.append(dataArr[i, 1])\n ycord2.append(dataArr[i, 2])\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')\n ax.scatter(xcord2, ycord2, s=30, c='green')\n x = arange(-3.0, 3.0, 0.1)\n y = (-weights[0] - weights[1] * x) / weights[2]\n ax.plot(x, y)\n plt.xlabel('X1')\n plt.ylabel('X2')\n plt.show()\n\n\ndef stocGradAscent0(dataMatrix, classLabels):\n \"\"\"\n Args: dataMatrix 文档列表; classLabels 类别标签列表\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n alpha = 0.01\n weights = ones(n)\n for i in range(m):\n h = 
sigmoid(sum(dataMatrix[i] * weights))\n error = classLabels[i] - h\n weights = weights + alpha * error * dataMatrix[i]\n return weights\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m, n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter):\n dataIndex = range(m)\n for i in range(m):\n alpha = 4 / (1.0 + i + j) + 0.01\n randIndex = int(random.uniform(0, len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex] * weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n\n\ndef classifyVector(inX, weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX * weights))\n if prob > 0.5:\n return 1.0\n else:\n return 0.0\n\n\ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n frTrain = open('horseColicTraining.txt')\n frTest = open('horseColicTest.txt')\n trainingSet = []\n trainingLabels = []\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)\n errorCount = 0\n numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr), trainWeights)) != int(currLine\n [21]):\n errorCount += 1\n errorRate = float(errorCount) / numTestVec\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10\n errorSum = 0.0\n for 
k in range(numTests):\n errorSum += colicTest()\n print('after %d iterations the average error rate is : %f ' % (numTests,\n errorSum / float(numTests)))\n",
"step-5": "#coding=utf-8\nfrom numpy import *\n\n#代码5-1,Logistic回归梯度上升优化算法。\ndef loadDataSet():\n \"\"\"解析文件\n Return: dataMat 文档列表 [[1,x1,x2]...]; labelMat 类别标签列表[1,0,1...]\n @author:VPrincekin\n \"\"\"\n dataMat = []; labelMat= []\n fr = open('testSet.txt')\n #每行前两个分别是X1和X2,第三个只是数据对应的类别\n for line in fr.readlines():\n #strip()去除空格\n lineArr = line.strip().split()\n #为了方便计算,把X0设置为1。\n dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])\n labelMat.append(int(lineArr[2]))\n return dataMat,labelMat\n\ndef sigmoid(inX):\n \"\"\"sigmoid函数\n @author:VPrincekin\n \"\"\"\n return 1/(1+exp(-inX))\n\ndef gradAscent(dataMatIn,classLabels):\n \"\"\"梯度上升算法\n Args: dataMatIn 文档矩阵 100*3 的矩阵;classLabels 类别标签列表 1*100向量\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\" \n #mat()转换为NumPy矩阵数据类型\n dataMatrix = mat(dataMatIn)\n #transpose()转置矩阵\n labelMat = mat(classLabels).transpose()\n #shape()求出矩阵的维度(行,列)\n m,n = shape(dataMatrix)\n #alpha 向目标移动的步长\n alpha = 0.001\n #maxCyles 迭代次数\n maxCycles = 500\n #创建一个n*1的单位矩阵\n weights = ones((n,1))\n #开始迭代,梯度上升\n for k in range(maxCycles):\n h = sigmoid(dataMatrix * weights)\n error = (labelMat - h)\n weights = weights + alpha * dataMatrix.transpose() * error\n return weights\n \n######################################################################################\n\n#代码5-2,画出数据集和Logistic回归最佳拟合直线的函数。\ndef plotBestFit(weights):\n \"\"\"\n Args:weights 回归系数\n @author:VPrincekin\n \"\"\"\n import matplotlib.pyplot as plt\n #解析文件,生成文档矩阵和类别标签矩阵\n dataMat,labelMat = loadDataSet()\n dataArr = array(dataMat)\n n = shape(dataArr)[0]\n xcord1 = []; ycord1 = []\n xcord2 = []; ycord2 = []\n for i in range(n):\n if int(labelMat[i]) == 1:\n xcord1.append(dataArr[i,1]); ycord1.append(dataArr[i,2])\n else:\n xcord2.append(dataArr[i,1]); ycord2.append(dataArr[i,2])\n #开始画图\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')\n ax.scatter(xcord2,ycord2,s=30,c='green')\n x = arange(-3.0,3.0,0.1)\n 
#此处设置了sigmoid函数为0,0是两个分类的分界处。w0x0+w1x1+w2x2=0\n y = (-weights[0]-weights[1]*x)/weights[2]\n ax.plot(x,y)\n plt.xlabel('X1'); plt.ylabel('X2');\n plt.show()\n \n##############################################################################################\n\n#代码5-3,随即梯度上升算法\ndef stocGradAscent0(dataMatrix,classLabels):\n \"\"\"\n Args: dataMatrix 文档列表; classLabels 类别标签列表\n Return: weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m,n = shape(dataMatrix)\n alpha = 0.01\n weights = ones(n)\n for i in range(m):\n #计算每一个样本的函数值\n h = sigmoid(sum(dataMatrix[i]*weights))\n #计算误差\n error = classLabels[i]-h\n #向梯度方向更新迭代\n weights = weights + alpha*error*dataMatrix[i]\n return weights\n\n##############################################################################################\n\n#代码5-4,改进的随即梯度上升算法\ndef stocGradAscent1(dataMatrix,classLabels,numIter=150):\n \"\"\"\n Args:dataMatrix 文档列表; classLabels 类别标签列表; numIter 迭代次数,如果没有给定,默认迭代150次。\n Return:weights 回归系数矩阵\n @author:VPrincekin\n \"\"\"\n m,n = shape(dataMatrix)\n weights = ones(n)\n for j in range(numIter): \n dataIndex = range(m)\n for i in range(m):\n #第一处改进,alpha在每次迭代的时候都会调整,这会缓解数据波动或者高频波动。\n alpha = 4/(1.0+i+j)+0.01\n #第二处改进,通过随机选取样本来更新回归系数。\n #这种方法将减少周期性波动,每次随即从列表中选出一个值,然后从列表中删掉该值。\n randIndex=int(random.uniform(0,len(dataIndex)))\n h = sigmoid(sum(dataMatrix[randIndex]*weights))\n error = classLabels[randIndex] - h\n weights = weights + alpha * error * dataMatrix[randIndex]\n return weights\n \n########################################################################################################\n \n#代码5-5,Logistic回归分类函数\ndef classifyVector(inX,weights):\n \"\"\"测试算法\n Args: inX 测试样本; weigths 训练算法得到的回归系数\n Return: 返回类别,0或1.\n @author:VPrincekin\n \"\"\"\n prob = sigmoid(sum(inX*weights))\n if prob>0.5:\n return 1.0\n else:\n return 0.0\n \ndef colicTest():\n \"\"\"测试Logistic回归算法\n Args: None\n Return: Logistic回归算法错误率\n \n \"\"\"\n #每个样本有21个特征,一个类别。\n frTrain = open('horseColicTraining.txt')\n frTest = 
open('horseColicTest.txt')\n trainingSet = []; trainingLabels = []\n #开始解析训练文本,通过stocGradAscent1()计算并返回,回归系数向量。\n for line in frTrain.readlines():\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n trainingSet.append(lineArr)\n trainingLabels.append(float(currLine[21]))\n trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)\n #开始解析测试文本,计算算法的错误率。\n errorCount = 0; numTestVec = 0.0\n for line in frTest.readlines():\n numTestVec += 1.0\n currLine = line.strip().split('\\t')\n lineArr = []\n for i in range(21):\n lineArr.append(float(currLine[i]))\n if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):\n errorCount += 1\n errorRate = (float(errorCount)/numTestVec)\n print('the error rata of this test is : %f' % errorRate)\n return errorRate\n\ndef multiTest():\n \"\"\"调用colicTest()多次并求结果的平均值。\n @author:VPrincekin\n \"\"\"\n numTests = 10; errorSum = 0.0 \n for k in range(numTests):\n errorSum += colicTest()\n print(\"after %d iterations the average error rate is : %f \" %(numTests,errorSum/float(numTests)))\n\n",
"step-ids": [
4,
6,
8,
9,
11
]
}
|
[
4,
6,
8,
9,
11
] |
from ROOT import *
import math
import os,sys,time,glob,fnmatch
import argparse
import ROOT
import sys
sys.path.append("utils")
from moments import *
from dirhandle import *
from plothandle import *
from AnalysisGeneratorMT import *
def doAnalysis( blabla):
    # Module-level wrapper handed to multiprocessing.Pool below; runs one
    # worker's full analysis and returns its (histograms, moments) pair.
    return blabla.DoThreatdAnalysis()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--test' , action='store_true', default = False)
parser.add_argument('-v', '--verbose' , action='store_true', default = False)
parser.add_argument("--TreeName" , type=str, default = "truth", help="Tree to read? ")
parser.add_argument("-D", '--inputdir' , type=str, default = "/lustre/hpc/hep/alonso/Top/AnalysisTop-2.4.27/Download")
parser.add_argument("-O", '--outputdir' , type=str, default = "plotGeneratorLevel")
parser.add_argument("-b", '--bjets' , type=int, default = 0)
parser.add_argument('--treads' , type=int, default = 20)
args = parser.parse_args()
verbose = 0
if args.verbose:
verbose = 1
#Just ttbar samples:
pattern = [
"user.aalonso.410037*"
"user.aalonso.410038*",
"user.aalonso.410039*",
"user.aalonso.410040*",
"user.aalonso.410041*",
"user.aalonso.410250*",
"user.aalonso.410251*",
"user.aalonso.410252*",
"user.aalonso.410000*",
"user.aalonso.410001*",
"user.aalonso.410002*",
"user.aalonso.410003*",
"user.aalonso.410004*",
"user.aalonso.410159*",
"user.aalonso.410501*",
"user.aalonso.410009*",
"user.aalonso.410021*",
]
#pattern = ["user.aalonso.[3-4]*"]
treads=args.treads
Dir = args.inputdir
nbjets = args.bjets
TreeName = args.TreeName
outfolder = args.outputdir
outname = "outAll_Reco_MT"
outname += "_MC"
outname = outname + "_" +outfolder+ ".root"
print "########################################################################################################"
print "##"
print "## This is the setup we are going to use:"
print "## Input directory:\t\t\t", Dir
print "## Pattern for root files:\t\t", pattern
print "## Results will be saved in:\t\t", outfolder
print "## And: \t\t\t\t", outname
print "## Number of bjets :\t\t\t", nbjets
print "## Number of treads to run:\t\t", treads
print "##"
print "########################################################################################################"
CreateFolder (outfolder)
fortchain,keys = ScanFolder(Dir, pattern, TreeName)
workers = {}
for i in keys:
analysis = analysisGeneratorMT( fortchain[i], nbjets, i,verbose)
workers [i] = analysis
Histos = {}
MomentsAll = {}
if (treads > 1):
from multiprocessing import Process, Pool
pool = Pool(processes=treads) # start 4 worker processes
jobs = {}
for i in keys:
res = pool.apply_async( doAnalysis, ( workers [i],))
jobs[i] = res
for i in jobs:
histo , mom = jobs[i].get(timeout=100000)
if verbose:
print "Job: ",i
print histo
print mom
Histos[i] = histo
MomentsAll [i] = mom
else:
for i in keys:
histo,mom = workers [i].DoThreatdAnalysis()
Histos[i] = histo
MomentsAll [i] = mom
SaveRootFile( outname, Histos, MomentsAll)
|
normal
|
{
"blob_id": "7db31940aea27c10057e2ce1e02410994bd2039b",
"index": 3328,
"step-1": "from ROOT import *\nimport math\nimport os,sys,time,glob,fnmatch\nimport argparse\nimport ROOT\nimport sys\nsys.path.append(\"utils\")\nfrom moments import *\nfrom dirhandle import *\nfrom plothandle import *\nfrom AnalysisGeneratorMT import *\n\ndef doAnalysis( blabla):\n return blabla.DoThreatdAnalysis() \n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-t', '--test'\t\t, action='store_true', default = False)\n parser.add_argument('-v', '--verbose' \t, action='store_true', default = False)\n parser.add_argument(\"--TreeName\"\t\t, type=str, default = \"truth\", help=\"Tree to read? \")\n parser.add_argument(\"-D\", '--inputdir' \t, type=str, default = \"/lustre/hpc/hep/alonso/Top/AnalysisTop-2.4.27/Download\")\n parser.add_argument(\"-O\", '--outputdir'\t, type=str, default = \"plotGeneratorLevel\")\n parser.add_argument(\"-b\", '--bjets'\t\t, type=int, default = 0)\n parser.add_argument('--treads'\t\t, type=int, default = 20)\n args = parser.parse_args()\n\n verbose = 0\n if args.verbose:\n verbose = 1\n\n\n\t#Just ttbar samples:\n pattern = [\n\t\t\"user.aalonso.410037*\"\n\t\t\"user.aalonso.410038*\",\n\t\t\"user.aalonso.410039*\",\n\t\t\"user.aalonso.410040*\",\n\t\t\"user.aalonso.410041*\",\n\t\t\"user.aalonso.410250*\",\n\t\t\"user.aalonso.410251*\",\n\t\t\"user.aalonso.410252*\",\n\t\t\"user.aalonso.410000*\",\n\t\t\"user.aalonso.410001*\",\n\t\t\"user.aalonso.410002*\",\n\t\t\"user.aalonso.410003*\",\n\t\t\"user.aalonso.410004*\",\n\t\t\"user.aalonso.410159*\",\n\t\t\"user.aalonso.410501*\",\n\t\t\"user.aalonso.410009*\",\n\t\t\"user.aalonso.410021*\",\n\n\n\t]\n #pattern = [\"user.aalonso.[3-4]*\"]\n \n treads=args.treads\n Dir\t\t\t= args.inputdir\n nbjets \t\t= args.bjets\n TreeName \t\t= args.TreeName \n outfolder \t= args.outputdir\n outname \t= \"outAll_Reco_MT\"\n outname += \"_MC\"\n outname = outname + \"_\" +outfolder+ \".root\"\n\n print 
\"########################################################################################################\"\n print \"##\"\n print \"## This is the setup we are going to use:\"\n print \"## Input directory:\\t\\t\\t\", \t\tDir \n print \"## Pattern for root files:\\t\\t\", \t\tpattern\n print \"## Results will be saved in:\\t\\t\",\t\toutfolder\n print \"## And: \\t\\t\\t\\t\", \t\t\t\toutname\n print \"## Number of bjets :\\t\\t\\t\", \t\tnbjets\n print \"## Number of treads to run:\\t\\t\", \t\ttreads \n print \"##\"\n print \"########################################################################################################\"\n\n CreateFolder \t(outfolder)\n fortchain,keys = \tScanFolder(Dir, pattern, TreeName)\n\n workers = {}\n for i in keys:\n analysis = analysisGeneratorMT( fortchain[i], nbjets, i,verbose)\n workers [i] = analysis \n\n\n Histos = {}\n MomentsAll = {}\n\n if (treads > 1):\n from multiprocessing import Process, Pool\n pool = Pool(processes=treads) # start 4 worker processes\n jobs = {}\n for i in keys:\n res = pool.apply_async( doAnalysis, ( workers [i],))\n jobs[i] = res\n for i in jobs:\n histo , mom = jobs[i].get(timeout=100000)\n if verbose:\n print \"Job: \",i\n print histo\n print mom \n Histos[i] = histo\n MomentsAll [i] = mom\n else: \n for i in keys:\n\t histo,mom = workers [i].DoThreatdAnalysis()\n Histos[i] = histo\n MomentsAll [i] = mom\n\n SaveRootFile( \t\toutname, Histos, MomentsAll)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# adventofcode.com
# day19
from collections import defaultdict
INPUTFILE = 'input/input19'
TEST = False
TESTCASE = ('HOH', ['H => HO\n', 'H => OH\n', 'O => HH\n'], ['OHOH', 'HOOH', 'HHHH', 'HOHO'])
def find_idx(string, substring):
""" iterator that returns the index of the next occurence of substring
wrapper around string.find() """
idx = string.find(substring)
while idx != -1:
yield idx
idx = string.find(substring, idx+1)
def replace_in_string(string, length, substring, idx):
""" overwrite only length chars in replacement! """
return string[:idx]+substring+string[idx+length:]
if __name__ == '__main__':
subs = defaultdict(list)
if TEST:
inputstring = TESTCASE[0]
lines = TESTCASE[1]
test = TESTCASE[2]
else:
with open(INPUTFILE, 'r') as f:
lines = f.readlines()
inputstring = lines.pop()
test = False
for line in lines:
if line != '\n':
f, t = line.rstrip('\n').split(' => ')
subs[f].append(t)
solution = []
for key, sublist in subs.items():
for sub in sublist:
for idx in find_idx(inputstring, key):
solution.append(replace_in_string(inputstring, len(key), sub, idx))
print "length : ", len(set(solution))
if test:
assert set(test) == set(solution), 'Testcase failure!'
# part B
# Cheated! #Atoms - 2*(#Rn) - 2*(#Y) - 1
# https://www.reddit.com/r/adventofcode/comments/3xflz8/day_19_solutions/cy4etju
print 'part B'
print sum(map(str.isupper,inputstring)) - 2*inputstring.count('Rn') - 2*inputstring.count('Y') - 1
|
normal
|
{
"blob_id": "e6fa1202d829fb553423998cdbad13684405437c",
"index": 8483,
"step-1": "# adventofcode.com\n# day19\n\nfrom collections import defaultdict\n\nINPUTFILE = 'input/input19'\n\nTEST = False\nTESTCASE = ('HOH', ['H => HO\\n', 'H => OH\\n', 'O => HH\\n'], ['OHOH', 'HOOH', 'HHHH', 'HOHO'])\n\ndef find_idx(string, substring):\n \"\"\" iterator that returns the index of the next occurence of substring\n wrapper around string.find() \"\"\"\n idx = string.find(substring)\n while idx != -1:\n yield idx\n idx = string.find(substring, idx+1)\n\ndef replace_in_string(string, length, substring, idx):\n \"\"\" overwrite only length chars in replacement! \"\"\"\n return string[:idx]+substring+string[idx+length:]\n\nif __name__ == '__main__':\n\n subs = defaultdict(list)\n\n if TEST:\n inputstring = TESTCASE[0]\n lines = TESTCASE[1]\n test = TESTCASE[2]\n else:\n with open(INPUTFILE, 'r') as f:\n lines = f.readlines()\n inputstring = lines.pop()\n test = False\n\n for line in lines:\n if line != '\\n':\n f, t = line.rstrip('\\n').split(' => ')\n subs[f].append(t)\n\n solution = []\n for key, sublist in subs.items():\n for sub in sublist:\n for idx in find_idx(inputstring, key):\n solution.append(replace_in_string(inputstring, len(key), sub, idx))\n\n print \"length : \", len(set(solution))\n\n if test:\n assert set(test) == set(solution), 'Testcase failure!'\n\n # part B\n # Cheated! #Atoms - 2*(#Rn) - 2*(#Y) - 1\n # https://www.reddit.com/r/adventofcode/comments/3xflz8/day_19_solutions/cy4etju\n print 'part B'\n print sum(map(str.isupper,inputstring)) - 2*inputstring.count('Rn') - 2*inputstring.count('Y') - 1\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
keys = {'a', 'e', 'i', 'o', 'u', 'y'}
values = [1]
dictionnaire = {cle: list(values) for cle in keys}
print('dictionnaire : ', dictionnaire)
values.append(2)
for cle in dictionnaire.keys():
dictionnaire.update({cle: values})
print('dictionnaire : ', dictionnaire)
<|reserved_special_token_1|>
import datetime
if __name__ == '__main__':
keys = {'a', 'e', 'i', 'o', 'u', 'y'}
values = [1]
dictionnaire = {cle: list(values) for cle in keys}
print('dictionnaire : ', dictionnaire)
values.append(2)
for cle in dictionnaire.keys():
dictionnaire.update({cle: values})
print('dictionnaire : ', dictionnaire)
<|reserved_special_token_1|>
import datetime
if __name__ == "__main__" :
keys = {'a','e','i', 'o', 'u', 'y'}
values = [1]
dictionnaire = {cle : list(values) for cle in keys}
print("dictionnaire : ", dictionnaire)
values.append(2)
#for cle in keys : dictionnaire.update({cle:values})
#dictionnaire.update({cle2 : list(values) for cle2 in keys})
#dictionnaire = {cle : list(values) for cle in keys}
#for cle in list(dictionnaire) : dictionnaire.update({cle:values})
for cle in dictionnaire.keys() : dictionnaire.update({cle:values})
print("dictionnaire : ", dictionnaire)
|
flexible
|
{
"blob_id": "468c070aebff3124927c5595d68bb94321dd75e5",
"index": 4406,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n keys = {'a', 'e', 'i', 'o', 'u', 'y'}\n values = [1]\n dictionnaire = {cle: list(values) for cle in keys}\n print('dictionnaire : ', dictionnaire)\n values.append(2)\n for cle in dictionnaire.keys():\n dictionnaire.update({cle: values})\n print('dictionnaire : ', dictionnaire)\n",
"step-3": "import datetime\nif __name__ == '__main__':\n keys = {'a', 'e', 'i', 'o', 'u', 'y'}\n values = [1]\n dictionnaire = {cle: list(values) for cle in keys}\n print('dictionnaire : ', dictionnaire)\n values.append(2)\n for cle in dictionnaire.keys():\n dictionnaire.update({cle: values})\n print('dictionnaire : ', dictionnaire)\n",
"step-4": "import datetime\n\nif __name__ == \"__main__\" :\n\n keys = {'a','e','i', 'o', 'u', 'y'}\n values = [1]\n\n dictionnaire = {cle : list(values) for cle in keys}\n print(\"dictionnaire : \", dictionnaire)\n\n values.append(2)\n\n #for cle in keys : dictionnaire.update({cle:values})\n \n #dictionnaire.update({cle2 : list(values) for cle2 in keys})\n \n #dictionnaire = {cle : list(values) for cle in keys}\n \n #for cle in list(dictionnaire) : dictionnaire.update({cle:values})\n \n for cle in dictionnaire.keys() : dictionnaire.update({cle:values})\n\n print(\"dictionnaire : \", dictionnaire)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class ConvLayer(object):
<|reserved_special_token_0|>
def apply(self, h):
if self.activation_fn == False:
if self.normalizer_fn == False:
if self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride,
scope=self.scope, padding=self.padding,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride,
scope=self.scope, padding=self.padding,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope
=self.scope, padding=self.padding, normalizer_fn=self.
normalizer_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=
self.kernel_size, stride=self.stride, scope=self.scope,
padding=self.padding, normalizer_fn=self.normalizer_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.normalizer_fn == False:
if self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope
=self.scope, padding=self.padding, activation_fn=self.
activation_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=
self.kernel_size, stride=self.stride, scope=self.scope,
padding=self.padding, activation_fn=self.activation_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope=
self.scope, padding=self.padding, normalizer_fn=self.
normalizer_fn, activation_fn=self.activation_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=self
.kernel_size, stride=self.stride, scope=self.scope, padding
=self.padding, normalizer_fn=self.normalizer_fn,
activation_fn=self.activation_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
return h_out
class ProjectionAdaptor(object):
def __init__(self, scope, projection_width, num_outputs, dropout=False):
self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,
kernel_size=1, stride=1, padding='SAME', weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.
layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,
scope=scope + '/adapter/dim_reduction')
self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=
1, stride=1, padding='SAME', weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.
layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,
scope=scope + '/adapter/output', normalizer_fn=None,
activation_fn=None)
def apply(self, h):
reduced_space = self.dim_reduction_layer.apply(h)
return self.output_layer.apply(reduced_space)
<|reserved_special_token_0|>
class ResNextAdaptor(object):
"""
The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates
the tensor and restores the depth. Finally adds the identity and ReLu.
"""
def __init__(self, scope, cardinality, output_depth, num_filters,
stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.output_depth = output_depth
self.cardinality = cardinality
self.stride = stride
def apply(self, input_layer):
input_depth = input_layer.get_shape().as_list()[-1]
with tf.variable_scope(self.scope):
bottleneck_out = bottleneck_b(input_layer, stride=self.stride,
bottleneck_depth=self.num_filters, cardinality=self.cardinality
)
restored = slim.conv2d(bottleneck_out, num_outputs=self.
output_depth, kernel_size=1, stride=1, scope=
'restore_num_outputs', padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=
WEIGHT_DECAY))
with tf.variable_scope('shortcut'):
if input_depth != self.output_depth:
padded_input = slim.conv2d(input_layer, num_outputs=
self.output_depth, kernel_size=1, stride=self.
stride, padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.
l2_regularizer(scale=WEIGHT_DECAY))
else:
padded_input = input_layer
residual = tf.add(restored, padded_input, name='residual')
return residual
class ResNextBlock(object):
"""
The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates
the tensor and restores the depth. Finally adds the identity and ReLu.
"""
def __init__(self, scope, cardinality, bottleneck_depth, num_filters,
stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.bottleneck_depth = bottleneck_depth
self.cardinality = cardinality
self.stride = stride
def apply(self, input_layer):
input_depth = input_layer.get_shape().as_list()[-1]
output_depth = 4 * self.num_filters
with tf.variable_scope(self.scope):
bottleneck_out = bottleneck_b(input_layer, stride=self.stride,
bottleneck_depth=self.num_filters, cardinality=self.cardinality
)
restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,
kernel_size=1, stride=1, scope='restore_num_outputs',
padding='SAME', activation_fn=None, weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.
contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
with tf.variable_scope('shortcut'):
if input_depth != output_depth:
padded_input = slim.conv2d(input_layer, num_outputs=
output_depth, kernel_size=1, stride=self.stride,
padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.
l2_regularizer(scale=WEIGHT_DECAY))
else:
padded_input = input_layer
residual = tf.add(restored, padded_input, name='residual')
output = residual
return output
class ResNextGroup(object):
def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,
cardinality, stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.cardinality = cardinality
self.num_blocks = num_blocks
self.bottleneck_depth = bottleneck_depth
self.stride = stride
def apply(self, h):
tensor_stack = [h]
with tf.variable_scope(self.scope):
for i in range(self.num_blocks):
if i == 0:
stride = self.stride
else:
stride = 1
h = ResNextBlock(num_filters=self.num_filters, cardinality=
self.cardinality, bottleneck_depth=self.
bottleneck_depth, stride=stride, dropout=self.dropout,
scope='block%d' % i).apply(tensor_stack[-1])
tensor_stack.append(h)
return tensor_stack[-1]
class AveragePoolLayer(object):
def __init__(self, scope, axis, keep_dims):
self.scope = scope
self.axis = axis
self.keep_dims = keep_dims
def apply(self, h):
with tf.variable_scope(self.scope):
average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self
.keep_dims)
return average_pool
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScaledVarianceRandomNormal(init_ops.Initializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ConvLayer(object):
def __init__(self, scope, num_outputs, kernel_size, padding='SAME',
dropout=False, stride=1, normalizer_fn=False, activation_fn=False,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None):
self.scope = scope
self.dropout = dropout
self.padding = padding
self.num_outputs = num_outputs
self.kernel_size = kernel_size
self.stride = stride
self.normalizer_fn = normalizer_fn
self.activation_fn = activation_fn
self.weights_initializer = weights_initializer
self.weights_regularizer = weights_regularizer
def apply(self, h):
if self.activation_fn == False:
if self.normalizer_fn == False:
if self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride,
scope=self.scope, padding=self.padding,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride,
scope=self.scope, padding=self.padding,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope
=self.scope, padding=self.padding, normalizer_fn=self.
normalizer_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=
self.kernel_size, stride=self.stride, scope=self.scope,
padding=self.padding, normalizer_fn=self.normalizer_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.normalizer_fn == False:
if self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope
=self.scope, padding=self.padding, activation_fn=self.
activation_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=
self.kernel_size, stride=self.stride, scope=self.scope,
padding=self.padding, activation_fn=self.activation_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope=
self.scope, padding=self.padding, normalizer_fn=self.
normalizer_fn, activation_fn=self.activation_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=self
.kernel_size, stride=self.stride, scope=self.scope, padding
=self.padding, normalizer_fn=self.normalizer_fn,
activation_fn=self.activation_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
return h_out
class ProjectionAdaptor(object):
def __init__(self, scope, projection_width, num_outputs, dropout=False):
self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,
kernel_size=1, stride=1, padding='SAME', weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.
layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,
scope=scope + '/adapter/dim_reduction')
self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=
1, stride=1, padding='SAME', weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.
layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,
scope=scope + '/adapter/output', normalizer_fn=None,
activation_fn=None)
def apply(self, h):
reduced_space = self.dim_reduction_layer.apply(h)
return self.output_layer.apply(reduced_space)
<|reserved_special_token_0|>
class ResNextAdaptor(object):
"""
The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates
the tensor and restores the depth. Finally adds the identity and ReLu.
"""
def __init__(self, scope, cardinality, output_depth, num_filters,
stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.output_depth = output_depth
self.cardinality = cardinality
self.stride = stride
def apply(self, input_layer):
input_depth = input_layer.get_shape().as_list()[-1]
with tf.variable_scope(self.scope):
bottleneck_out = bottleneck_b(input_layer, stride=self.stride,
bottleneck_depth=self.num_filters, cardinality=self.cardinality
)
restored = slim.conv2d(bottleneck_out, num_outputs=self.
output_depth, kernel_size=1, stride=1, scope=
'restore_num_outputs', padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=
WEIGHT_DECAY))
with tf.variable_scope('shortcut'):
if input_depth != self.output_depth:
padded_input = slim.conv2d(input_layer, num_outputs=
self.output_depth, kernel_size=1, stride=self.
stride, padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.
l2_regularizer(scale=WEIGHT_DECAY))
else:
padded_input = input_layer
residual = tf.add(restored, padded_input, name='residual')
return residual
class ResNextBlock(object):
"""
The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates
the tensor and restores the depth. Finally adds the identity and ReLu.
"""
def __init__(self, scope, cardinality, bottleneck_depth, num_filters,
stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.bottleneck_depth = bottleneck_depth
self.cardinality = cardinality
self.stride = stride
def apply(self, input_layer):
input_depth = input_layer.get_shape().as_list()[-1]
output_depth = 4 * self.num_filters
with tf.variable_scope(self.scope):
bottleneck_out = bottleneck_b(input_layer, stride=self.stride,
bottleneck_depth=self.num_filters, cardinality=self.cardinality
)
restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,
kernel_size=1, stride=1, scope='restore_num_outputs',
padding='SAME', activation_fn=None, weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.
contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
with tf.variable_scope('shortcut'):
if input_depth != output_depth:
padded_input = slim.conv2d(input_layer, num_outputs=
output_depth, kernel_size=1, stride=self.stride,
padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.
l2_regularizer(scale=WEIGHT_DECAY))
else:
padded_input = input_layer
residual = tf.add(restored, padded_input, name='residual')
output = residual
return output
class ResNextGroup(object):
def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,
cardinality, stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.cardinality = cardinality
self.num_blocks = num_blocks
self.bottleneck_depth = bottleneck_depth
self.stride = stride
def apply(self, h):
tensor_stack = [h]
with tf.variable_scope(self.scope):
for i in range(self.num_blocks):
if i == 0:
stride = self.stride
else:
stride = 1
h = ResNextBlock(num_filters=self.num_filters, cardinality=
self.cardinality, bottleneck_depth=self.
bottleneck_depth, stride=stride, dropout=self.dropout,
scope='block%d' % i).apply(tensor_stack[-1])
tensor_stack.append(h)
return tensor_stack[-1]
class AveragePoolLayer(object):
def __init__(self, scope, axis, keep_dims):
self.scope = scope
self.axis = axis
self.keep_dims = keep_dims
def apply(self, h):
with tf.variable_scope(self.scope):
average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self
.keep_dims)
return average_pool
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ScaledVarianceRandomNormal(init_ops.Initializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
if shape:
n = float(shape[-1])
else:
n = 1.0
for dim in shape[:-2]:
n *= float(dim)
self.stddev = np.sqrt(self.factor * 2.0 / n)
return random_ops.random_normal(shape, self.mean, self.stddev,
dtype, seed=self.seed)
def get_config(self):
return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed,
'dtype': self.dtype.name}
class ConvLayer(object):
def __init__(self, scope, num_outputs, kernel_size, padding='SAME',
dropout=False, stride=1, normalizer_fn=False, activation_fn=False,
weights_initializer=tf.contrib.layers.xavier_initializer(),
weights_regularizer=None):
self.scope = scope
self.dropout = dropout
self.padding = padding
self.num_outputs = num_outputs
self.kernel_size = kernel_size
self.stride = stride
self.normalizer_fn = normalizer_fn
self.activation_fn = activation_fn
self.weights_initializer = weights_initializer
self.weights_regularizer = weights_regularizer
def apply(self, h):
if self.activation_fn == False:
if self.normalizer_fn == False:
if self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride,
scope=self.scope, padding=self.padding,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride,
scope=self.scope, padding=self.padding,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope
=self.scope, padding=self.padding, normalizer_fn=self.
normalizer_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=
self.kernel_size, stride=self.stride, scope=self.scope,
padding=self.padding, normalizer_fn=self.normalizer_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.normalizer_fn == False:
if self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope
=self.scope, padding=self.padding, activation_fn=self.
activation_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=
self.kernel_size, stride=self.stride, scope=self.scope,
padding=self.padding, activation_fn=self.activation_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
elif self.dropout == False:
h_out = slim.conv2d(h, num_outputs=self.num_outputs,
kernel_size=self.kernel_size, stride=self.stride, scope=
self.scope, padding=self.padding, normalizer_fn=self.
normalizer_fn, activation_fn=self.activation_fn,
weights_initializer=self.weights_initializer,
weights_regularizer=self.weights_regularizer)
else:
h_out = slim.conv2d(slim.dropout(h, scope=self.scope +
'-dropout'), num_outputs=self.num_outputs, kernel_size=self
.kernel_size, stride=self.stride, scope=self.scope, padding
=self.padding, normalizer_fn=self.normalizer_fn,
activation_fn=self.activation_fn, weights_initializer=self.
weights_initializer, weights_regularizer=self.
weights_regularizer)
return h_out
class ProjectionAdaptor(object):
def __init__(self, scope, projection_width, num_outputs, dropout=False):
self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,
kernel_size=1, stride=1, padding='SAME', weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.
layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,
scope=scope + '/adapter/dim_reduction')
self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=
1, stride=1, padding='SAME', weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.
layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,
scope=scope + '/adapter/output', normalizer_fn=None,
activation_fn=None)
def apply(self, h):
reduced_space = self.dim_reduction_layer.apply(h)
return self.output_layer.apply(reduced_space)
<|reserved_special_token_0|>
class ResNextAdaptor(object):
"""
The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates
the tensor and restores the depth. Finally adds the identity and ReLu.
"""
def __init__(self, scope, cardinality, output_depth, num_filters,
stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.output_depth = output_depth
self.cardinality = cardinality
self.stride = stride
def apply(self, input_layer):
input_depth = input_layer.get_shape().as_list()[-1]
with tf.variable_scope(self.scope):
bottleneck_out = bottleneck_b(input_layer, stride=self.stride,
bottleneck_depth=self.num_filters, cardinality=self.cardinality
)
restored = slim.conv2d(bottleneck_out, num_outputs=self.
output_depth, kernel_size=1, stride=1, scope=
'restore_num_outputs', padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.l2_regularizer(scale=
WEIGHT_DECAY))
with tf.variable_scope('shortcut'):
if input_depth != self.output_depth:
padded_input = slim.conv2d(input_layer, num_outputs=
self.output_depth, kernel_size=1, stride=self.
stride, padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.
l2_regularizer(scale=WEIGHT_DECAY))
else:
padded_input = input_layer
residual = tf.add(restored, padded_input, name='residual')
return residual
class ResNextBlock(object):
"""
The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates
the tensor and restores the depth. Finally adds the identity and ReLu.
"""
def __init__(self, scope, cardinality, bottleneck_depth, num_filters,
stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.bottleneck_depth = bottleneck_depth
self.cardinality = cardinality
self.stride = stride
def apply(self, input_layer):
input_depth = input_layer.get_shape().as_list()[-1]
output_depth = 4 * self.num_filters
with tf.variable_scope(self.scope):
bottleneck_out = bottleneck_b(input_layer, stride=self.stride,
bottleneck_depth=self.num_filters, cardinality=self.cardinality
)
restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,
kernel_size=1, stride=1, scope='restore_num_outputs',
padding='SAME', activation_fn=None, weights_initializer=
ScaledVarianceRandomNormal(), weights_regularizer=tf.
contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))
with tf.variable_scope('shortcut'):
if input_depth != output_depth:
padded_input = slim.conv2d(input_layer, num_outputs=
output_depth, kernel_size=1, stride=self.stride,
padding='SAME', activation_fn=None,
weights_initializer=ScaledVarianceRandomNormal(),
weights_regularizer=tf.contrib.layers.
l2_regularizer(scale=WEIGHT_DECAY))
else:
padded_input = input_layer
residual = tf.add(restored, padded_input, name='residual')
output = residual
return output
class ResNextGroup(object):
def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,
cardinality, stride, dropout=False):
self.scope = scope
self.dropout = dropout
self.num_filters = num_filters
self.cardinality = cardinality
self.num_blocks = num_blocks
self.bottleneck_depth = bottleneck_depth
self.stride = stride
def apply(self, h):
tensor_stack = [h]
with tf.variable_scope(self.scope):
for i in range(self.num_blocks):
if i == 0:
stride = self.stride
else:
stride = 1
h = ResNextBlock(num_filters=self.num_filters, cardinality=
self.cardinality, bottleneck_depth=self.
bottleneck_depth, stride=stride, dropout=self.dropout,
scope='block%d' % i).apply(tensor_stack[-1])
tensor_stack.append(h)
return tensor_stack[-1]
class AveragePoolLayer(object):
def __init__(self, scope, axis, keep_dims):
self.scope = scope
self.axis = axis
self.keep_dims = keep_dims
def apply(self, h):
with tf.variable_scope(self.scope):
average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self
.keep_dims)
return average_pool
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
WEIGHT_DECAY = 0.0005
class ScaledVarianceUniform(init_ops.Initializer):
"""Initializer that generates tensors with a Uniform distribution scaled as per https://github.com/torch/nn/blob/master/Linear.lua
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
self.factor = factor
self.seed = seed
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
if shape:
n = float(shape[-1])
else:
n = 1.0
self.stddev = np.sqrt(self.factor * 3.0 / n)
return random_ops.random_uniform(shape, minval=-self.stddev, maxval
=self.stddev, dtype=dtype, seed=self.seed)
def get_config(self):
return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed,
'dtype': self.dtype.name}
class ScaledVarianceRandomNormal(init_ops.Initializer):
    """Initializer drawing from a normal distribution whose standard
    deviation is scaled by the fan, per
    https://arxiv.org/pdf/1502.01852.pdf (He et al.).

    Args:
      mean: python scalar. Mean of the generated values.
      factor: python scalar. Multiplier applied to the variance scale.
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):
        self.mean = mean
        self.factor = factor
        self.seed = seed
        self.dtype = dtypes.as_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # n = shape[-1] * prod(shape[:-2]); for a conv kernel
        # [kh, kw, in, out] this is kh * kw * out.
        # NOTE(review): He-style fan_in would use shape[-2] (input
        # channels) instead of shape[-1]; this preserves the original
        # behavior -- confirm it is intentional.
        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)
        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed)

    def get_config(self):
        # BUG FIX: ``self.stddev`` is only defined after the first
        # __call__; the original raised AttributeError when serialized
        # before the initializer was ever used.
        return {'mean': self.mean,
                'stddev': getattr(self, 'stddev', None),
                'seed': self.seed,
                'dtype': self.dtype.name}
class ConvLayer(object):
    """Thin wrapper around ``slim.conv2d`` that stores the layer
    configuration at construction time and builds the op in ``apply``.

    ``normalizer_fn`` / ``activation_fn`` use ``False`` as a sentinel
    meaning "do not pass the argument, use slim's defaults"; ``None``
    explicitly disables the normalizer / activation.  ``dropout`` is a
    boolean flag: any value other than ``False`` inserts a
    ``slim.dropout`` (with slim's default keep probability) before the
    conv.
    """

    def __init__(self, scope, num_outputs, kernel_size, padding='SAME',
                 dropout=False, stride=1, normalizer_fn=False,
                 activation_fn=False,
                 weights_initializer=tf.contrib.layers.xavier_initializer(),
                 weights_regularizer=None):
        self.scope = scope
        self.dropout = dropout
        self.padding = padding
        self.num_outputs = num_outputs
        self.kernel_size = kernel_size
        self.stride = stride
        self.normalizer_fn = normalizer_fn
        self.activation_fn = activation_fn
        self.weights_initializer = weights_initializer
        self.weights_regularizer = weights_regularizer

    def apply(self, h):
        """Build the (optional dropout +) conv op on ``h``.

        REFACTOR: the original enumerated all eight combinations of
        (activation_fn, normalizer_fn, dropout) in copy-pasted branches;
        collecting the optional kwargs once is equivalent and shorter.
        """
        optional = {}
        # ``!= False`` (not ``is not False``) preserves the original
        # sentinel comparison exactly.
        if self.normalizer_fn != False:  # noqa: E712
            optional['normalizer_fn'] = self.normalizer_fn
        if self.activation_fn != False:  # noqa: E712
            optional['activation_fn'] = self.activation_fn
        if self.dropout != False:  # noqa: E712
            h = slim.dropout(h, scope=self.scope + '-dropout')
        return slim.conv2d(h, num_outputs=self.num_outputs,
                           kernel_size=self.kernel_size, stride=self.stride,
                           scope=self.scope, padding=self.padding,
                           weights_initializer=self.weights_initializer,
                           weights_regularizer=self.weights_regularizer,
                           **optional)
class ProjectionAdaptor(object):
    """Two-stage 1x1 adaptor: project the input down to
    ``projection_width`` channels, then expand to ``num_outputs``
    channels with no normalizer or activation on the output stage.
    """

    def __init__(self, scope, projection_width, num_outputs, dropout=False):
        def make_layer(width, suffix, **extra):
            # Shared 1x1 conv configuration; a fresh initializer instance
            # is created per layer, as in the original.
            return ConvLayer(
                num_outputs=width, kernel_size=1, stride=1, padding='SAME',
                weights_initializer=ScaledVarianceRandomNormal(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(
                    scale=WEIGHT_DECAY),
                dropout=dropout, scope=scope + suffix, **extra)

        # Bottleneck projection down to projection_width channels.
        self.dim_reduction_layer = make_layer(
            projection_width, '/adapter/dim_reduction')
        # Linear (no BN / no activation) expansion up to num_outputs.
        self.output_layer = make_layer(
            num_outputs, '/adapter/output',
            normalizer_fn=None, activation_fn=None)

    def apply(self, h):
        return self.output_layer.apply(self.dim_reduction_layer.apply(h))
def split(input_layer, stride, bottleneck_depth):
    """One branch of the ResNeXt split (Figure 3b of the paper).

    Applies a 1x1 conv down to ``bottleneck_depth`` channels followed by
    a 3x3 conv (with ``stride``) at the same depth.

    :param input_layer: 4D tensor [batch, height, width, channels]
    :param stride: int, 1 or 2; 2 halves the spatial size
    :param bottleneck_depth: channel width of this branch
    :return: 4D tensor [batch, out_height, out_width, bottleneck_depth]
    """
    in_channels = input_layer.get_shape().as_list()[-1]
    regularizer = tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY)
    with tf.variable_scope('bneck_%d_1x1_%dd' % (in_channels,
                                                 bottleneck_depth)):
        reduced = slim.conv2d(
            input_layer, num_outputs=bottleneck_depth, kernel_size=1,
            stride=1, padding='SAME',
            weights_initializer=ScaledVarianceRandomNormal(),
            weights_regularizer=regularizer)
    with tf.variable_scope('bneck_%d_3x3_%dd' % (bottleneck_depth,
                                                 bottleneck_depth)):
        transformed = slim.conv2d(
            reduced, num_outputs=bottleneck_depth, kernel_size=3,
            stride=stride, padding='SAME',
            weights_initializer=ScaledVarianceRandomNormal(),
            weights_regularizer=regularizer)
    return transformed
def bottleneck_b(input_layer, stride, cardinality, bottleneck_depth):
    """The bottleneck structure in Figure 3b: run ``cardinality``
    parallel splits and concatenate them along the channel axis.

    CLEANUP: the original computed an unused local ``nInputPlane``;
    removed.

    :param input_layer: 4D tensor [batch, height, width, channels]
    :param stride: int, 1 or 2; 2 halves the spatial size
    :param cardinality: number of parallel split branches
    :param bottleneck_depth: channel width of each branch
    :return: 4D tensor [batch, out_h, out_w, cardinality * bottleneck_depth]
    """
    split_list = []
    for i in range(cardinality):
        # Each branch gets its own variable scope so weights are distinct.
        with tf.variable_scope('split_%i' % i):
            split_list.append(split(input_layer=input_layer, stride=stride,
                                    bottleneck_depth=bottleneck_depth))
    return tf.concat(values=split_list, axis=3, name='concat_splits')
class ResNextAdaptor(object):
    """ResNeXt-style adaptor block (Figure 3b): split/concat bottleneck,
    a linear 1x1 conv back to ``output_depth``, then a residual add with
    an identity or projected shortcut.
    """

    def __init__(self, scope, cardinality, output_depth, num_filters,
                 stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.output_depth = output_depth
        self.cardinality = cardinality
        self.stride = stride

    def apply(self, input_layer):
        in_channels = input_layer.get_shape().as_list()[-1]
        with tf.variable_scope(self.scope):
            merged = bottleneck_b(input_layer, stride=self.stride,
                                  bottleneck_depth=self.num_filters,
                                  cardinality=self.cardinality)
            # Linear 1x1 conv back up to the block's output width.
            restored = slim.conv2d(
                merged, num_outputs=self.output_depth, kernel_size=1,
                stride=1, scope='restore_num_outputs', padding='SAME',
                activation_fn=None,
                weights_initializer=ScaledVarianceRandomNormal(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(
                    scale=WEIGHT_DECAY))
            with tf.variable_scope('shortcut'):
                if in_channels == self.output_depth:
                    shortcut = input_layer
                else:
                    # Project the shortcut when the depth changes.
                    shortcut = slim.conv2d(
                        input_layer, num_outputs=self.output_depth,
                        kernel_size=1, stride=self.stride, padding='SAME',
                        activation_fn=None,
                        weights_initializer=ScaledVarianceRandomNormal(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(
                            scale=WEIGHT_DECAY))
                # tf.add stays inside the 'shortcut' scope, matching the
                # original op naming.
                residual = tf.add(restored, shortcut, name='residual')
        return residual
class ResNextBlock(object):
    """Standard ResNeXt residual block (Figure 3b): split/concat
    bottleneck, a linear 1x1 conv up to ``4 * num_filters`` channels,
    then a residual add with an identity or projected shortcut.

    NOTE(review): ``bottleneck_depth`` is stored but never used -- the
    branches are built with ``num_filters`` instead; confirm intended.
    """

    def __init__(self, scope, cardinality, bottleneck_depth, num_filters,
                 stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.bottleneck_depth = bottleneck_depth
        self.cardinality = cardinality
        self.stride = stride

    def apply(self, input_layer):
        in_channels = input_layer.get_shape().as_list()[-1]
        # Output width is 4 * num_filters, matching line 96 of
        # https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua
        out_channels = 4 * self.num_filters
        with tf.variable_scope(self.scope):
            merged = bottleneck_b(input_layer, stride=self.stride,
                                  bottleneck_depth=self.num_filters,
                                  cardinality=self.cardinality)
            restored = slim.conv2d(
                merged, num_outputs=out_channels, kernel_size=1, stride=1,
                scope='restore_num_outputs', padding='SAME',
                activation_fn=None,
                weights_initializer=ScaledVarianceRandomNormal(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(
                    scale=WEIGHT_DECAY))
            with tf.variable_scope('shortcut'):
                if in_channels == out_channels:
                    shortcut = input_layer
                else:
                    shortcut = slim.conv2d(
                        input_layer, num_outputs=out_channels,
                        kernel_size=1, stride=self.stride, padding='SAME',
                        activation_fn=None,
                        weights_initializer=ScaledVarianceRandomNormal(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(
                            scale=WEIGHT_DECAY))
                residual = tf.add(restored, shortcut, name='residual')
        return residual
class ResNextGroup(object):
    """A sequence of ``num_blocks`` ResNextBlocks under one variable
    scope; only the first block applies ``stride`` (the rest use 1).
    """

    def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,
                 cardinality, stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.cardinality = cardinality
        self.num_blocks = num_blocks
        self.bottleneck_depth = bottleneck_depth
        self.stride = stride

    def apply(self, h):
        out = h
        with tf.variable_scope(self.scope):
            for i in range(self.num_blocks):
                block = ResNextBlock(
                    num_filters=self.num_filters,
                    cardinality=self.cardinality,
                    bottleneck_depth=self.bottleneck_depth,
                    # Downsample only in the first block of the group.
                    stride=self.stride if i == 0 else 1,
                    dropout=self.dropout,
                    scope='block%d' % i)
                out = block.apply(out)
        return out
class AveragePoolLayer(object):
    """Average pooling implemented as ``tf.reduce_mean`` over ``axis``."""

    def __init__(self, scope, axis, keep_dims):
        self.scope = scope
        self.axis = axis
        self.keep_dims = keep_dims

    def apply(self, h):
        # keep_dims (rather than keepdims) matches the TF 1.x API used
        # throughout this file.
        with tf.variable_scope(self.scope):
            return tf.reduce_mean(h, axis=self.axis,
                                  keep_dims=self.keep_dims)
# Layer stack for the ResNeXt classifier: a 3x3 stem conv, three groups of
# three ResNeXt blocks (stride-2 downsampling at the start of groups 2 and
# 3), global average pooling, and a final 1x1 conv producing the 10-class
# logits.
ArchitectureResNext = [ConvLayer(num_outputs=64, kernel_size=3, stride=1,
    scope='input/conv0', activation_fn=None, weights_initializer=
    ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.layers.
    l2_regularizer(scale=WEIGHT_DECAY)), ResNextBlock(num_filters=64,
    cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group1/block1'), ResNextBlock(num_filters=64, cardinality=2,
    bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group1/block2'), ResNextBlock(num_filters=64, cardinality=2,
    bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group1/block3'), ResNextBlock(num_filters=128, cardinality=
    2, bottleneck_depth=64, stride=2, dropout=False, scope=
    'bottleneck_group2/block1'), ResNextBlock(num_filters=128, cardinality=
    2, bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group2/block2'), ResNextBlock(num_filters=128, cardinality=
    2, bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group2/block3'), ResNextBlock(num_filters=256, cardinality=
    2, bottleneck_depth=64, stride=2, dropout=False, scope=
    'bottleneck_group3/block1'), ResNextBlock(num_filters=256, cardinality=
    2, bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group3/block2'), ResNextBlock(num_filters=256, cardinality=
    2, bottleneck_depth=64, stride=1, dropout=False, scope=
    'bottleneck_group3/block3'), AveragePoolLayer(scope='avg_pool', axis=[1,
    2], keep_dims=True), ConvLayer(num_outputs=10, kernel_size=1, stride=1,
    normalizer_fn=None, activation_fn=None, weights_initializer=
    ScaledVarianceUniform(), weights_regularizer=tf.contrib.layers.
    l2_regularizer(scale=WEIGHT_DECAY), scope='logits/fc')]
# Per-layer adaptors aligned with ArchitectureResNext (None = no adaptor
# for that layer).  Output depths (256/512/1024) match 4 * num_filters of
# the corresponding ResNextBlocks.
# NOTE(review): ``64 / 4`` etc. are float divisions under Python 3, so
# num_filters is passed as a float -- confirm this is intended (use //
# for an int).
AdaptorStack = [None, ResNextAdaptor(cardinality=2, output_depth=256,
    num_filters=64 / 4, stride=1, dropout=False, scope=
    'bottleneck_group1/adaptor1'), ResNextAdaptor(cardinality=2,
    output_depth=256, num_filters=64 / 4, stride=1, dropout=False, scope=
    'bottleneck_group1/adaptor2'), ResNextAdaptor(cardinality=2,
    output_depth=256, num_filters=64 / 4, stride=1, dropout=False, scope=
    'bottleneck_group1/adaptor3'), ResNextAdaptor(cardinality=2,
    output_depth=512, num_filters=128 / 4, stride=2, dropout=False, scope=
    'bottleneck_group2/adaptor1'), ResNextAdaptor(cardinality=2,
    output_depth=512, num_filters=128 / 4, stride=1, dropout=False, scope=
    'bottleneck_group2/adaptor2'), ResNextAdaptor(cardinality=2,
    output_depth=512, num_filters=128 / 4, stride=1, dropout=False, scope=
    'bottleneck_group2/adaptor3'), ResNextAdaptor(cardinality=2,
    output_depth=1024, num_filters=256 / 4, stride=2, dropout=False, scope=
    'bottleneck_group3/adaptor1'), ResNextAdaptor(cardinality=2,
    output_depth=1024, num_filters=256 / 4, stride=1, dropout=False, scope=
    'bottleneck_group3/adaptor2'), ResNextAdaptor(cardinality=2,
    output_depth=1024, num_filters=256 / 4, stride=1, dropout=False, scope=
    'bottleneck_group3/adaptor3'), None, ProjectionAdaptor(projection_width
    =1024 / 8, num_outputs=10, dropout=False, scope='logits/adaptor')]
<|reserved_special_token_1|>
import tensorflow.contrib.slim as slim
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import init_ops
import numpy as np
# L2 weight-decay coefficient applied to every conv kernel in this file.
WEIGHT_DECAY = 0.0005
class ScaledVarianceUniform(init_ops.Initializer):
    """Initializer drawing from a uniform distribution whose bounds are
    scaled by the last shape dimension, as in
    https://github.com/torch/nn/blob/master/Linear.lua.

    Args:
      factor: python scalar. Multiplier applied to the variance scale.
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
        self.factor = factor
        self.seed = seed
        self.dtype = dtypes.as_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # Scale by the last dimension; scalar shapes fall back to n = 1.
        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        self.stddev = np.sqrt(self.factor * 3.0 / n)
        return random_ops.random_uniform(shape, minval=-self.stddev,
                                         maxval=self.stddev, dtype=dtype,
                                         seed=self.seed)

    def get_config(self):
        # BUG FIX: the original returned ``self.mean``, which is never set
        # on this class, so get_config() always raised AttributeError.
        # ``self.stddev`` only exists after the first __call__, so report
        # None until then.
        return {'factor': self.factor,
                'stddev': getattr(self, 'stddev', None),
                'seed': self.seed,
                'dtype': self.dtype.name}
class ScaledVarianceRandomNormal(init_ops.Initializer):
    """Initializer drawing from a normal distribution whose standard
    deviation is scaled by the fan, per
    https://arxiv.org/pdf/1502.01852.pdf (He et al.).

    Args:
      mean: python scalar. Mean of the generated values.
      factor: python scalar. Multiplier applied to the variance scale.
      seed: A Python integer. Used to create random seeds. See
        @{tf.set_random_seed} for behavior.
      dtype: The data type. Only floating point types are supported.
    """

    def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):
        self.mean = mean
        self.factor = factor
        self.seed = seed
        self.dtype = dtypes.as_dtype(dtype)

    def __call__(self, shape, dtype=None, partition_info=None):
        if dtype is None:
            dtype = self.dtype
        # n = shape[-1] * prod(shape[:-2]); for a conv kernel
        # [kh, kw, in, out] this is kh * kw * out.
        # NOTE(review): He-style fan_in would use shape[-2] (input
        # channels) instead of shape[-1]; this preserves the original
        # behavior -- confirm it is intentional.
        if shape:
            n = float(shape[-1])
        else:
            n = 1.0
        for dim in shape[:-2]:
            n *= float(dim)
        self.stddev = np.sqrt(self.factor * 2.0 / n)
        return random_ops.random_normal(shape, self.mean, self.stddev,
                                        dtype, seed=self.seed)

    def get_config(self):
        # BUG FIX: ``self.stddev`` is only defined after the first
        # __call__; the original raised AttributeError when serialized
        # before the initializer was ever used.
        return {'mean': self.mean,
                'stddev': getattr(self, 'stddev', None),
                'seed': self.seed,
                'dtype': self.dtype.name}
class ConvLayer(object):
    """Thin wrapper around ``slim.conv2d`` that stores the layer
    configuration at construction time and builds the op in ``apply``.

    ``normalizer_fn`` / ``activation_fn`` use ``False`` as a sentinel
    meaning "do not pass the argument, use slim's defaults"; ``None``
    explicitly disables the normalizer / activation.  ``dropout`` is a
    boolean flag: any value other than ``False`` inserts a
    ``slim.dropout`` (with slim's default keep probability) before the
    conv.
    """

    def __init__(self, scope, num_outputs, kernel_size, padding='SAME',
                 dropout=False, stride=1, normalizer_fn=False,
                 activation_fn=False,
                 weights_initializer=tf.contrib.layers.xavier_initializer(),
                 weights_regularizer=None):
        self.scope = scope
        self.dropout = dropout
        self.padding = padding
        self.num_outputs = num_outputs
        self.kernel_size = kernel_size
        self.stride = stride
        self.normalizer_fn = normalizer_fn
        self.activation_fn = activation_fn
        self.weights_initializer = weights_initializer
        self.weights_regularizer = weights_regularizer

    def apply(self, h):
        """Build the (optional dropout +) conv op on ``h``.

        REFACTOR: the original enumerated all eight combinations of
        (activation_fn, normalizer_fn, dropout) in copy-pasted branches;
        collecting the optional kwargs once is equivalent and shorter.
        """
        optional = {}
        # ``!= False`` (not ``is not False``) preserves the original
        # sentinel comparison exactly.
        if self.normalizer_fn != False:  # noqa: E712
            optional['normalizer_fn'] = self.normalizer_fn
        if self.activation_fn != False:  # noqa: E712
            optional['activation_fn'] = self.activation_fn
        if self.dropout != False:  # noqa: E712
            h = slim.dropout(h, scope=self.scope + '-dropout')
        return slim.conv2d(h, num_outputs=self.num_outputs,
                           kernel_size=self.kernel_size, stride=self.stride,
                           scope=self.scope, padding=self.padding,
                           weights_initializer=self.weights_initializer,
                           weights_regularizer=self.weights_regularizer,
                           **optional)
class ProjectionAdaptor(object):
    """Two-stage 1x1 adaptor: project the input down to
    ``projection_width`` channels, then expand to ``num_outputs``
    channels with no normalizer or activation on the output stage.
    """

    def __init__(self, scope, projection_width, num_outputs, dropout=False):
        def make_layer(width, suffix, **extra):
            # Shared 1x1 conv configuration; a fresh initializer instance
            # is created per layer, as in the original.
            return ConvLayer(
                num_outputs=width, kernel_size=1, stride=1, padding='SAME',
                weights_initializer=ScaledVarianceRandomNormal(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(
                    scale=WEIGHT_DECAY),
                dropout=dropout, scope=scope + suffix, **extra)

        # Bottleneck projection down to projection_width channels.
        self.dim_reduction_layer = make_layer(
            projection_width, '/adapter/dim_reduction')
        # Linear (no BN / no activation) expansion up to num_outputs.
        self.output_layer = make_layer(
            num_outputs, '/adapter/output',
            normalizer_fn=None, activation_fn=None)

    def apply(self, h):
        return self.output_layer.apply(self.dim_reduction_layer.apply(h))
def split(input_layer, stride, bottleneck_depth):
    """One branch of the ResNeXt split (Figure 3b of the paper).

    Applies a 1x1 conv down to ``bottleneck_depth`` channels followed by
    a 3x3 conv (with ``stride``) at the same depth.

    :param input_layer: 4D tensor [batch, height, width, channels]
    :param stride: int, 1 or 2; 2 halves the spatial size
    :param bottleneck_depth: channel width of this branch
    :return: 4D tensor [batch, out_height, out_width, bottleneck_depth]
    """
    in_channels = input_layer.get_shape().as_list()[-1]
    regularizer = tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY)
    with tf.variable_scope('bneck_%d_1x1_%dd' % (in_channels,
                                                 bottleneck_depth)):
        reduced = slim.conv2d(
            input_layer, num_outputs=bottleneck_depth, kernel_size=1,
            stride=1, padding='SAME',
            weights_initializer=ScaledVarianceRandomNormal(),
            weights_regularizer=regularizer)
    with tf.variable_scope('bneck_%d_3x3_%dd' % (bottleneck_depth,
                                                 bottleneck_depth)):
        transformed = slim.conv2d(
            reduced, num_outputs=bottleneck_depth, kernel_size=3,
            stride=stride, padding='SAME',
            weights_initializer=ScaledVarianceRandomNormal(),
            weights_regularizer=regularizer)
    return transformed
def bottleneck_b(input_layer, stride, cardinality, bottleneck_depth):
    """The bottleneck structure in Figure 3b: run ``cardinality``
    parallel splits and concatenate them along the channel axis.

    CLEANUP: the original computed an unused local ``nInputPlane``;
    removed.

    :param input_layer: 4D tensor [batch, height, width, channels]
    :param stride: int, 1 or 2; 2 halves the spatial size
    :param cardinality: number of parallel split branches
    :param bottleneck_depth: channel width of each branch
    :return: 4D tensor [batch, out_h, out_w, cardinality * bottleneck_depth]
    """
    split_list = []
    for i in range(cardinality):
        # Each branch gets its own variable scope so weights are distinct.
        with tf.variable_scope('split_%i' % i):
            split_list.append(split(input_layer=input_layer, stride=stride,
                                    bottleneck_depth=bottleneck_depth))
    # Concatenate splits along the channel axis.
    return tf.concat(values=split_list, axis=3, name='concat_splits')
class ResNextAdaptor(object):
    """ResNeXt-style adaptor block (Figure 3b): split/concat bottleneck,
    a linear 1x1 conv back to ``output_depth``, then a residual add with
    an identity or projected shortcut.
    """

    def __init__(self, scope, cardinality, output_depth, num_filters,
                 stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.output_depth = output_depth
        self.cardinality = cardinality
        self.stride = stride

    def apply(self, input_layer):
        in_channels = input_layer.get_shape().as_list()[-1]
        with tf.variable_scope(self.scope):
            merged = bottleneck_b(input_layer, stride=self.stride,
                                  bottleneck_depth=self.num_filters,
                                  cardinality=self.cardinality)
            # Linear 1x1 conv back up to the block's output width.
            restored = slim.conv2d(
                merged, num_outputs=self.output_depth, kernel_size=1,
                stride=1, scope='restore_num_outputs', padding='SAME',
                activation_fn=None,
                weights_initializer=ScaledVarianceRandomNormal(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(
                    scale=WEIGHT_DECAY))
            with tf.variable_scope('shortcut'):
                if in_channels == self.output_depth:
                    shortcut = input_layer
                else:
                    # Project the shortcut when the depth changes.
                    shortcut = slim.conv2d(
                        input_layer, num_outputs=self.output_depth,
                        kernel_size=1, stride=self.stride, padding='SAME',
                        activation_fn=None,
                        weights_initializer=ScaledVarianceRandomNormal(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(
                            scale=WEIGHT_DECAY))
                # tf.add stays inside the 'shortcut' scope, matching the
                # original op naming.
                residual = tf.add(restored, shortcut, name='residual')
        return residual
class ResNextBlock(object):
    """Standard ResNeXt residual block (Figure 3b): split/concat
    bottleneck, a linear 1x1 conv up to ``4 * num_filters`` channels,
    then a residual add with an identity or projected shortcut.

    NOTE(review): ``bottleneck_depth`` is stored but never used -- the
    branches are built with ``num_filters`` instead; confirm intended.
    """

    def __init__(self, scope, cardinality, bottleneck_depth, num_filters,
                 stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.bottleneck_depth = bottleneck_depth
        self.cardinality = cardinality
        self.stride = stride

    def apply(self, input_layer):
        in_channels = input_layer.get_shape().as_list()[-1]
        # Output width is 4 * num_filters, matching line 96 of
        # https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua
        out_channels = 4 * self.num_filters
        with tf.variable_scope(self.scope):
            merged = bottleneck_b(input_layer, stride=self.stride,
                                  bottleneck_depth=self.num_filters,
                                  cardinality=self.cardinality)
            restored = slim.conv2d(
                merged, num_outputs=out_channels, kernel_size=1, stride=1,
                scope='restore_num_outputs', padding='SAME',
                activation_fn=None,
                weights_initializer=ScaledVarianceRandomNormal(),
                weights_regularizer=tf.contrib.layers.l2_regularizer(
                    scale=WEIGHT_DECAY))
            with tf.variable_scope('shortcut'):
                if in_channels == out_channels:
                    shortcut = input_layer
                else:
                    shortcut = slim.conv2d(
                        input_layer, num_outputs=out_channels,
                        kernel_size=1, stride=self.stride, padding='SAME',
                        activation_fn=None,
                        weights_initializer=ScaledVarianceRandomNormal(),
                        weights_regularizer=tf.contrib.layers.l2_regularizer(
                            scale=WEIGHT_DECAY))
                residual = tf.add(restored, shortcut, name='residual')
        return residual
class ResNextGroup(object):
    """A sequence of ``num_blocks`` ResNextBlocks under one variable
    scope; only the first block applies ``stride`` (the rest use 1).
    """

    def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,
                 cardinality, stride, dropout=False):
        self.scope = scope
        self.dropout = dropout
        self.num_filters = num_filters
        self.cardinality = cardinality
        self.num_blocks = num_blocks
        self.bottleneck_depth = bottleneck_depth
        self.stride = stride

    def apply(self, h):
        out = h
        with tf.variable_scope(self.scope):
            for i in range(self.num_blocks):
                block = ResNextBlock(
                    num_filters=self.num_filters,
                    cardinality=self.cardinality,
                    bottleneck_depth=self.bottleneck_depth,
                    # Downsample only in the first block of the group.
                    stride=self.stride if i == 0 else 1,
                    dropout=self.dropout,
                    scope='block%d' % i)
                out = block.apply(out)
        return out
class AveragePoolLayer(object):
    """Average pooling implemented as ``tf.reduce_mean`` over ``axis``."""

    def __init__(self, scope, axis, keep_dims):
        self.scope = scope
        self.axis = axis
        self.keep_dims = keep_dims

    def apply(self, h):
        # keep_dims (rather than keepdims) matches the TF 1.x API used
        # throughout this file.
        with tf.variable_scope(self.scope):
            return tf.reduce_mean(h, axis=self.axis,
                                  keep_dims=self.keep_dims)
# The ResNext architecture is based on the following code:
# https://github.com/wenxinxu/ResNeXt-in-tensorflow/blob/master/resNeXt.py
# commit 8a00577495fb01cf98bf77562422390b652e1a4e
# ResNeXt. total layers = 1 + 3n + 3n + 3n +1 = 9n + 2
# Layer stack for the ResNeXt classifier: a 3x3 stem conv, three groups of
# three ResNeXt blocks (stride-2 downsampling at the start of groups 2 and
# 3), global average pooling, and a final 1x1 conv producing the 10-class
# logits.
ArchitectureResNext = [
    # Stem: 3x3 conv to 64 channels, linear (no activation).
    ConvLayer(num_outputs=64, kernel_size=3, stride=1, scope='input/conv0',
              activation_fn=None,
              weights_initializer=ScaledVarianceRandomNormal(),
              weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY)),
    # Group 1: num_filters=64 (block output depth 4*64=256), stride 1.
    ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block1'),
    ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block2'),
    ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block3'),
    # Group 2: num_filters=128 (output depth 512), stride 2 in the first block.
    ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=2, dropout=False, scope='bottleneck_group2/block1'),
    ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group2/block2'),
    ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group2/block3'),
    # Group 3: num_filters=256 (output depth 1024), stride 2 in the first block.
    ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=2, dropout=False, scope='bottleneck_group3/block1'),
    ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group3/block2'),
    ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group3/block3'),
    # Global average pool over the spatial axes, keeping rank 4.
    AveragePoolLayer(scope='avg_pool', axis=[1,2], keep_dims=True),
    # 1x1 conv as the fully-connected logits layer (10 classes), linear.
    ConvLayer(num_outputs=10, kernel_size=1, stride=1, normalizer_fn=None, activation_fn=None,
              weights_initializer=ScaledVarianceUniform(),
              weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY), scope='logits/fc'),
    ]
# Per-layer adaptors aligned with ArchitectureResNext (None = no adaptor
# for that layer).  Output depths (256/512/1024) match 4 * num_filters of
# the corresponding ResNextBlocks.
# NOTE(review): ``64/4`` etc. are float divisions under Python 3, so
# num_filters is passed as a float -- confirm this is intended (use //
# for an int).
AdaptorStack = [
    None,
    ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64/4, stride=1, dropout=False, scope='bottleneck_group1/adaptor1'),
    ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64/4, stride=1, dropout=False, scope='bottleneck_group1/adaptor2'),
    ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64/4, stride=1, dropout=False, scope='bottleneck_group1/adaptor3'),
    ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128/4, stride=2, dropout=False, scope='bottleneck_group2/adaptor1'),
    ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128/4, stride=1, dropout=False, scope='bottleneck_group2/adaptor2'),
    ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128/4, stride=1, dropout=False, scope='bottleneck_group2/adaptor3'),
    ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256/4, stride=2, dropout=False, scope='bottleneck_group3/adaptor1'),
    ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256/4, stride=1, dropout=False, scope='bottleneck_group3/adaptor2'),
    ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256/4, stride=1, dropout=False, scope='bottleneck_group3/adaptor3'),
    None,
    ProjectionAdaptor(projection_width=1024/8, num_outputs=10, dropout=False, scope='logits/adaptor')
    ]
|
flexible
|
{
"blob_id": "9da6bfa614d64956a302abbfeeea30c0339e9db3",
"index": 5583,
"step-1": "<mask token>\n\n\nclass ConvLayer(object):\n <mask token>\n\n def apply(self, h):\n if self.activation_fn == False:\n if self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, normalizer_fn=self.normalizer_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, activation_fn=self.\n activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, activation_fn=self.activation_fn,\n 
weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope=\n self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=self\n .kernel_size, stride=self.stride, scope=self.scope, padding\n =self.padding, normalizer_fn=self.normalizer_fn,\n activation_fn=self.activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n return h_out\n\n\nclass ProjectionAdaptor(object):\n\n def __init__(self, scope, projection_width, num_outputs, dropout=False):\n self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,\n kernel_size=1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/dim_reduction')\n self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=\n 1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/output', normalizer_fn=None,\n activation_fn=None)\n\n def apply(self, h):\n reduced_space = self.dim_reduction_layer.apply(h)\n return self.output_layer.apply(reduced_space)\n\n\n<mask token>\n\n\nclass ResNextAdaptor(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. 
Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, output_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.output_depth = output_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=self.\n output_depth, kernel_size=1, stride=1, scope=\n 'restore_num_outputs', padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=\n WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != self.output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n self.output_depth, kernel_size=1, stride=self.\n stride, padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n return residual\n\n\nclass ResNextBlock(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. 
Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, bottleneck_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.bottleneck_depth = bottleneck_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n output_depth = 4 * self.num_filters\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,\n kernel_size=1, stride=1, scope='restore_num_outputs',\n padding='SAME', activation_fn=None, weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.\n contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n output_depth, kernel_size=1, stride=self.stride,\n padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n output = residual\n return output\n\n\nclass ResNextGroup(object):\n\n def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,\n cardinality, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.cardinality = cardinality\n self.num_blocks = num_blocks\n self.bottleneck_depth = bottleneck_depth\n self.stride = stride\n\n def apply(self, h):\n tensor_stack = [h]\n with tf.variable_scope(self.scope):\n for i in range(self.num_blocks):\n if i == 0:\n stride = self.stride\n else:\n stride = 1\n h = ResNextBlock(num_filters=self.num_filters, cardinality=\n 
self.cardinality, bottleneck_depth=self.\n bottleneck_depth, stride=stride, dropout=self.dropout,\n scope='block%d' % i).apply(tensor_stack[-1])\n tensor_stack.append(h)\n return tensor_stack[-1]\n\n\nclass AveragePoolLayer(object):\n\n def __init__(self, scope, axis, keep_dims):\n self.scope = scope\n self.axis = axis\n self.keep_dims = keep_dims\n\n def apply(self, h):\n with tf.variable_scope(self.scope):\n average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self\n .keep_dims)\n return average_pool\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ScaledVarianceRandomNormal(init_ops.Initializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ConvLayer(object):\n\n def __init__(self, scope, num_outputs, kernel_size, padding='SAME',\n dropout=False, stride=1, normalizer_fn=False, activation_fn=False,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n weights_regularizer=None):\n self.scope = scope\n self.dropout = dropout\n self.padding = padding\n self.num_outputs = num_outputs\n self.kernel_size = kernel_size\n self.stride = stride\n self.normalizer_fn = normalizer_fn\n self.activation_fn = activation_fn\n self.weights_initializer = weights_initializer\n self.weights_regularizer = weights_regularizer\n\n def apply(self, h):\n if self.activation_fn == False:\n if self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, normalizer_fn=self.normalizer_fn,\n 
weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, activation_fn=self.\n activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope=\n self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=self\n .kernel_size, stride=self.stride, scope=self.scope, padding\n =self.padding, normalizer_fn=self.normalizer_fn,\n activation_fn=self.activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n return h_out\n\n\nclass ProjectionAdaptor(object):\n\n def __init__(self, scope, projection_width, num_outputs, dropout=False):\n self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,\n kernel_size=1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/dim_reduction')\n self.output_layer = ConvLayer(num_outputs=num_outputs, 
kernel_size=\n 1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/output', normalizer_fn=None,\n activation_fn=None)\n\n def apply(self, h):\n reduced_space = self.dim_reduction_layer.apply(h)\n return self.output_layer.apply(reduced_space)\n\n\n<mask token>\n\n\nclass ResNextAdaptor(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, output_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.output_depth = output_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=self.\n output_depth, kernel_size=1, stride=1, scope=\n 'restore_num_outputs', padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=\n WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != self.output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n self.output_depth, kernel_size=1, stride=self.\n stride, padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n return residual\n\n\nclass ResNextBlock(object):\n \"\"\"\n The block structure in 
Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, bottleneck_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.bottleneck_depth = bottleneck_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n output_depth = 4 * self.num_filters\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,\n kernel_size=1, stride=1, scope='restore_num_outputs',\n padding='SAME', activation_fn=None, weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.\n contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n output_depth, kernel_size=1, stride=self.stride,\n padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n output = residual\n return output\n\n\nclass ResNextGroup(object):\n\n def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,\n cardinality, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.cardinality = cardinality\n self.num_blocks = num_blocks\n self.bottleneck_depth = bottleneck_depth\n self.stride = stride\n\n def apply(self, h):\n tensor_stack = [h]\n with tf.variable_scope(self.scope):\n for i in range(self.num_blocks):\n if i == 0:\n stride = 
self.stride\n else:\n stride = 1\n h = ResNextBlock(num_filters=self.num_filters, cardinality=\n self.cardinality, bottleneck_depth=self.\n bottleneck_depth, stride=stride, dropout=self.dropout,\n scope='block%d' % i).apply(tensor_stack[-1])\n tensor_stack.append(h)\n return tensor_stack[-1]\n\n\nclass AveragePoolLayer(object):\n\n def __init__(self, scope, axis, keep_dims):\n self.scope = scope\n self.axis = axis\n self.keep_dims = keep_dims\n\n def apply(self, h):\n with tf.variable_scope(self.scope):\n average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self\n .keep_dims)\n return average_pool\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ScaledVarianceRandomNormal(init_ops.Initializer):\n <mask token>\n <mask token>\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n if shape:\n n = float(shape[-1])\n else:\n n = 1.0\n for dim in shape[:-2]:\n n *= float(dim)\n self.stddev = np.sqrt(self.factor * 2.0 / n)\n return random_ops.random_normal(shape, self.mean, self.stddev,\n dtype, seed=self.seed)\n\n def get_config(self):\n return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed,\n 'dtype': self.dtype.name}\n\n\nclass ConvLayer(object):\n\n def __init__(self, scope, num_outputs, kernel_size, padding='SAME',\n dropout=False, stride=1, normalizer_fn=False, activation_fn=False,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n weights_regularizer=None):\n self.scope = scope\n self.dropout = dropout\n self.padding = padding\n self.num_outputs = num_outputs\n self.kernel_size = kernel_size\n self.stride = stride\n self.normalizer_fn = normalizer_fn\n self.activation_fn = activation_fn\n self.weights_initializer = weights_initializer\n self.weights_regularizer = weights_regularizer\n\n def apply(self, h):\n if self.activation_fn == False:\n if self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, 
padding=self.padding, normalizer_fn=self.\n normalizer_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, normalizer_fn=self.normalizer_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, activation_fn=self.\n activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope=\n self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=self\n .kernel_size, stride=self.stride, scope=self.scope, padding\n =self.padding, normalizer_fn=self.normalizer_fn,\n activation_fn=self.activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n return h_out\n\n\nclass ProjectionAdaptor(object):\n\n def __init__(self, scope, projection_width, 
num_outputs, dropout=False):\n self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,\n kernel_size=1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/dim_reduction')\n self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=\n 1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/output', normalizer_fn=None,\n activation_fn=None)\n\n def apply(self, h):\n reduced_space = self.dim_reduction_layer.apply(h)\n return self.output_layer.apply(reduced_space)\n\n\n<mask token>\n\n\nclass ResNextAdaptor(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, output_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.output_depth = output_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=self.\n output_depth, kernel_size=1, stride=1, scope=\n 'restore_num_outputs', padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=\n WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != self.output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n self.output_depth, 
kernel_size=1, stride=self.\n stride, padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n return residual\n\n\nclass ResNextBlock(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, bottleneck_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.bottleneck_depth = bottleneck_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n output_depth = 4 * self.num_filters\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,\n kernel_size=1, stride=1, scope='restore_num_outputs',\n padding='SAME', activation_fn=None, weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.\n contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n output_depth, kernel_size=1, stride=self.stride,\n padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n output = residual\n return output\n\n\nclass ResNextGroup(object):\n\n def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,\n 
cardinality, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.cardinality = cardinality\n self.num_blocks = num_blocks\n self.bottleneck_depth = bottleneck_depth\n self.stride = stride\n\n def apply(self, h):\n tensor_stack = [h]\n with tf.variable_scope(self.scope):\n for i in range(self.num_blocks):\n if i == 0:\n stride = self.stride\n else:\n stride = 1\n h = ResNextBlock(num_filters=self.num_filters, cardinality=\n self.cardinality, bottleneck_depth=self.\n bottleneck_depth, stride=stride, dropout=self.dropout,\n scope='block%d' % i).apply(tensor_stack[-1])\n tensor_stack.append(h)\n return tensor_stack[-1]\n\n\nclass AveragePoolLayer(object):\n\n def __init__(self, scope, axis, keep_dims):\n self.scope = scope\n self.axis = axis\n self.keep_dims = keep_dims\n\n def apply(self, h):\n with tf.variable_scope(self.scope):\n average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self\n .keep_dims)\n return average_pool\n\n\n<mask token>\n",
"step-4": "<mask token>\nWEIGHT_DECAY = 0.0005\n\n\nclass ScaledVarianceUniform(init_ops.Initializer):\n \"\"\"Initializer that generates tensors with a Uniform distribution scaled as per https://github.com/torch/nn/blob/master/Linear.lua\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate.\n seed: A Python integer. Used to create random seeds. See\n @{tf.set_random_seed}\n for behavior.\n dtype: The data type. Only floating point types are supported.\n \"\"\"\n\n def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):\n self.factor = factor\n self.seed = seed\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n if shape:\n n = float(shape[-1])\n else:\n n = 1.0\n self.stddev = np.sqrt(self.factor * 3.0 / n)\n return random_ops.random_uniform(shape, minval=-self.stddev, maxval\n =self.stddev, dtype=dtype, seed=self.seed)\n\n def get_config(self):\n return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed,\n 'dtype': self.dtype.name}\n\n\nclass ScaledVarianceRandomNormal(init_ops.Initializer):\n \"\"\"Initializer that generates tensors with a normal distribution scaled as per https://arxiv.org/pdf/1502.01852.pdf.\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate.\n seed: A Python integer. Used to create random seeds. See\n @{tf.set_random_seed}\n for behavior.\n dtype: The data type. 
Only floating point types are supported.\n \"\"\"\n\n def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):\n self.mean = mean\n self.factor = factor\n self.seed = seed\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n if shape:\n n = float(shape[-1])\n else:\n n = 1.0\n for dim in shape[:-2]:\n n *= float(dim)\n self.stddev = np.sqrt(self.factor * 2.0 / n)\n return random_ops.random_normal(shape, self.mean, self.stddev,\n dtype, seed=self.seed)\n\n def get_config(self):\n return {'mean': self.mean, 'stddev': self.stddev, 'seed': self.seed,\n 'dtype': self.dtype.name}\n\n\nclass ConvLayer(object):\n\n def __init__(self, scope, num_outputs, kernel_size, padding='SAME',\n dropout=False, stride=1, normalizer_fn=False, activation_fn=False,\n weights_initializer=tf.contrib.layers.xavier_initializer(),\n weights_regularizer=None):\n self.scope = scope\n self.dropout = dropout\n self.padding = padding\n self.num_outputs = num_outputs\n self.kernel_size = kernel_size\n self.stride = stride\n self.normalizer_fn = normalizer_fn\n self.activation_fn = activation_fn\n self.weights_initializer = weights_initializer\n self.weights_regularizer = weights_regularizer\n\n def apply(self, h):\n if self.activation_fn == False:\n if self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride,\n scope=self.scope, padding=self.padding,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = 
slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, normalizer_fn=self.normalizer_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.normalizer_fn == False:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope\n =self.scope, padding=self.padding, activation_fn=self.\n activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=\n self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n elif self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope=\n self.scope, padding=self.padding, normalizer_fn=self.\n normalizer_fn, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer,\n weights_regularizer=self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope +\n '-dropout'), num_outputs=self.num_outputs, kernel_size=self\n .kernel_size, stride=self.stride, scope=self.scope, padding\n =self.padding, normalizer_fn=self.normalizer_fn,\n activation_fn=self.activation_fn, weights_initializer=self.\n weights_initializer, weights_regularizer=self.\n 
weights_regularizer)\n return h_out\n\n\nclass ProjectionAdaptor(object):\n\n def __init__(self, scope, projection_width, num_outputs, dropout=False):\n self.dim_reduction_layer = ConvLayer(num_outputs=projection_width,\n kernel_size=1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/dim_reduction')\n self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=\n 1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY), dropout=dropout,\n scope=scope + '/adapter/output', normalizer_fn=None,\n activation_fn=None)\n\n def apply(self, h):\n reduced_space = self.dim_reduction_layer.apply(h)\n return self.output_layer.apply(reduced_space)\n\n\ndef split(input_layer, stride, bottleneck_depth):\n \"\"\"\n The split structure in Figure 3b of the paper. It takes an input tensor. Conv it by [1, 1,\n 64] filter, and then conv the result by [3, 3, 64]. Return the\n final resulted tensor, which is in shape of [batch_size, input_height, input_width, 64]\n\n :param input_layer: 4D tensor in shape of [batch_size, input_height, input_width,\n input_channel]\n :param stride: int. 1 or 2. 
If want to shrink the image size, then stride = 2\n :return: 4D tensor in shape of [batch_size, input_height, input_width, input_channel/64]\n \"\"\"\n input_depth = input_layer.get_shape().as_list()[-1]\n with tf.variable_scope('bneck_%d_1x1_%dd' % (input_depth, bottleneck_depth)\n ):\n bneck_1x1 = slim.conv2d(input_layer, num_outputs=bottleneck_depth,\n kernel_size=1, stride=1, padding='SAME', weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.\n layers.l2_regularizer(scale=WEIGHT_DECAY))\n with tf.variable_scope('bneck_%d_3x3_%dd' % (bottleneck_depth,\n bottleneck_depth)):\n bneck_3x3 = slim.conv2d(bneck_1x1, num_outputs=bottleneck_depth,\n kernel_size=3, stride=stride, padding='SAME',\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=\n WEIGHT_DECAY))\n return bneck_3x3\n\n\ndef bottleneck_b(input_layer, stride, cardinality, bottleneck_depth):\n \"\"\"\n The bottleneck strucutre in Figure 3b. Concatenates all the splits\n :param input_layer: 4D tensor in shape of [batch_size, input_height, input_width,\n input_channel]\n :param stride: int. 1 or 2. If want to shrink the image size, then stride = 2\n :return: 4D tensor in shape of [batch_size, output_height, output_width, output_channel]\n \"\"\"\n nInputPlane = input_layer.get_shape().as_list()[-1]\n split_list = []\n for i in range(cardinality):\n with tf.variable_scope('split_%i' % i):\n splits = split(input_layer=input_layer, stride=stride,\n bottleneck_depth=bottleneck_depth)\n split_list.append(splits)\n concat_bottleneck = tf.concat(values=split_list, axis=3, name=\n 'concat_splits')\n return concat_bottleneck\n\n\nclass ResNextAdaptor(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. 
Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, output_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.output_depth = output_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=self.\n output_depth, kernel_size=1, stride=1, scope=\n 'restore_num_outputs', padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=\n WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != self.output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n self.output_depth, kernel_size=1, stride=self.\n stride, padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n return residual\n\n\nclass ResNextBlock(object):\n \"\"\"\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. 
Finally adds the identity and ReLu.\n \"\"\"\n\n def __init__(self, scope, cardinality, bottleneck_depth, num_filters,\n stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.bottleneck_depth = bottleneck_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n output_depth = 4 * self.num_filters\n with tf.variable_scope(self.scope):\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride,\n bottleneck_depth=self.num_filters, cardinality=self.cardinality\n )\n restored = slim.conv2d(bottleneck_out, num_outputs=output_depth,\n kernel_size=1, stride=1, scope='restore_num_outputs',\n padding='SAME', activation_fn=None, weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.\n contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n with tf.variable_scope('shortcut'):\n if input_depth != output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=\n output_depth, kernel_size=1, stride=self.stride,\n padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n residual = tf.add(restored, padded_input, name='residual')\n output = residual\n return output\n\n\nclass ResNextGroup(object):\n\n def __init__(self, scope, num_blocks, num_filters, bottleneck_depth,\n cardinality, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.cardinality = cardinality\n self.num_blocks = num_blocks\n self.bottleneck_depth = bottleneck_depth\n self.stride = stride\n\n def apply(self, h):\n tensor_stack = [h]\n with tf.variable_scope(self.scope):\n for i in range(self.num_blocks):\n if i == 0:\n stride = self.stride\n else:\n stride = 1\n h = ResNextBlock(num_filters=self.num_filters, cardinality=\n 
self.cardinality, bottleneck_depth=self.\n bottleneck_depth, stride=stride, dropout=self.dropout,\n scope='block%d' % i).apply(tensor_stack[-1])\n tensor_stack.append(h)\n return tensor_stack[-1]\n\n\nclass AveragePoolLayer(object):\n\n def __init__(self, scope, axis, keep_dims):\n self.scope = scope\n self.axis = axis\n self.keep_dims = keep_dims\n\n def apply(self, h):\n with tf.variable_scope(self.scope):\n average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self\n .keep_dims)\n return average_pool\n\n\nArchitectureResNext = [ConvLayer(num_outputs=64, kernel_size=3, stride=1,\n scope='input/conv0', activation_fn=None, weights_initializer=\n ScaledVarianceRandomNormal(), weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY)), ResNextBlock(num_filters=64,\n cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group1/block1'), ResNextBlock(num_filters=64, cardinality=2,\n bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group1/block2'), ResNextBlock(num_filters=64, cardinality=2,\n bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group1/block3'), ResNextBlock(num_filters=128, cardinality=\n 2, bottleneck_depth=64, stride=2, dropout=False, scope=\n 'bottleneck_group2/block1'), ResNextBlock(num_filters=128, cardinality=\n 2, bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group2/block2'), ResNextBlock(num_filters=128, cardinality=\n 2, bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group2/block3'), ResNextBlock(num_filters=256, cardinality=\n 2, bottleneck_depth=64, stride=2, dropout=False, scope=\n 'bottleneck_group3/block1'), ResNextBlock(num_filters=256, cardinality=\n 2, bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group3/block2'), ResNextBlock(num_filters=256, cardinality=\n 2, bottleneck_depth=64, stride=1, dropout=False, scope=\n 'bottleneck_group3/block3'), AveragePoolLayer(scope='avg_pool', axis=[1,\n 2], 
keep_dims=True), ConvLayer(num_outputs=10, kernel_size=1, stride=1,\n normalizer_fn=None, activation_fn=None, weights_initializer=\n ScaledVarianceUniform(), weights_regularizer=tf.contrib.layers.\n l2_regularizer(scale=WEIGHT_DECAY), scope='logits/fc')]\nAdaptorStack = [None, ResNextAdaptor(cardinality=2, output_depth=256,\n num_filters=64 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group1/adaptor1'), ResNextAdaptor(cardinality=2,\n output_depth=256, num_filters=64 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group1/adaptor2'), ResNextAdaptor(cardinality=2,\n output_depth=256, num_filters=64 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group1/adaptor3'), ResNextAdaptor(cardinality=2,\n output_depth=512, num_filters=128 / 4, stride=2, dropout=False, scope=\n 'bottleneck_group2/adaptor1'), ResNextAdaptor(cardinality=2,\n output_depth=512, num_filters=128 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group2/adaptor2'), ResNextAdaptor(cardinality=2,\n output_depth=512, num_filters=128 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group2/adaptor3'), ResNextAdaptor(cardinality=2,\n output_depth=1024, num_filters=256 / 4, stride=2, dropout=False, scope=\n 'bottleneck_group3/adaptor1'), ResNextAdaptor(cardinality=2,\n output_depth=1024, num_filters=256 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group3/adaptor2'), ResNextAdaptor(cardinality=2,\n output_depth=1024, num_filters=256 / 4, stride=1, dropout=False, scope=\n 'bottleneck_group3/adaptor3'), None, ProjectionAdaptor(projection_width\n =1024 / 8, num_outputs=10, dropout=False, scope='logits/adaptor')]\n",
"step-5": "import tensorflow.contrib.slim as slim\nimport tensorflow as tf\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import init_ops\nimport numpy as np\n\nWEIGHT_DECAY = 0.0005\n\nclass ScaledVarianceUniform(init_ops.Initializer):\n \"\"\"Initializer that generates tensors with a Uniform distribution scaled as per https://github.com/torch/nn/blob/master/Linear.lua\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate.\n seed: A Python integer. Used to create random seeds. See\n @{tf.set_random_seed}\n for behavior.\n dtype: The data type. Only floating point types are supported.\n \"\"\"\n\n def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):\n self.factor = factor\n self.seed = seed\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n\n if shape:\n n = float(shape[-1])\n else:\n n = 1.0\n\n self.stddev = np.sqrt(self.factor * 3.0 / n)\n return random_ops.random_uniform(shape, minval=-self.stddev, maxval=self.stddev, dtype=dtype, seed=self.seed)\n\n def get_config(self):\n return {\"mean\": self.mean,\n \"stddev\": self.stddev,\n \"seed\": self.seed,\n \"dtype\": self.dtype.name}\n\nclass ScaledVarianceRandomNormal(init_ops.Initializer):\n \"\"\"Initializer that generates tensors with a normal distribution scaled as per https://arxiv.org/pdf/1502.01852.pdf.\n\n Args:\n mean: a python scalar or a scalar tensor. Mean of the random values\n to generate.\n stddev: a python scalar or a scalar tensor. Standard deviation of the\n random values to generate.\n seed: A Python integer. Used to create random seeds. See\n @{tf.set_random_seed}\n for behavior.\n dtype: The data type. 
Only floating point types are supported.\n \"\"\"\n\n def __init__(self, mean=0.0, factor=1.0, seed=None, dtype=dtypes.float32):\n self.mean = mean\n self.factor = factor\n self.seed = seed\n self.dtype = dtypes.as_dtype(dtype)\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n\n if shape:\n n = float(shape[-1])\n else:\n n = 1.0\n for dim in shape[:-2]:\n n *= float(dim)\n\n self.stddev = np.sqrt(self.factor * 2.0 / n)\n return random_ops.random_normal(shape, self.mean, self.stddev,\n dtype, seed=self.seed)\n\n def get_config(self):\n return {\"mean\": self.mean,\n \"stddev\": self.stddev,\n \"seed\": self.seed,\n \"dtype\": self.dtype.name}\n\nclass ConvLayer(object):\n\n def __init__(self, scope, num_outputs, kernel_size, padding='SAME', dropout=False, stride=1, normalizer_fn=False,\n activation_fn=False, weights_initializer=tf.contrib.layers.xavier_initializer(), weights_regularizer=None):\n\n self.scope = scope\n self.dropout = dropout\n self.padding = padding\n self.num_outputs = num_outputs\n self.kernel_size = kernel_size\n self.stride = stride\n self.normalizer_fn = normalizer_fn\n self.activation_fn=activation_fn\n self.weights_initializer = weights_initializer\n self.weights_regularizer = weights_regularizer\n\n def apply(self, h):\n if self.activation_fn == False:\n if self.normalizer_fn==False:\n if self.dropout==False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs, kernel_size=self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope+'-dropout'), num_outputs=self.num_outputs, kernel_size=self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n if self.dropout == False:\n h_out = 
slim.conv2d(h, num_outputs=self.num_outputs, kernel_size=self.kernel_size,\n stride=self.stride, scope=self.scope, padding=self.padding,\n normalizer_fn=self.normalizer_fn, weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope + '-dropout'), num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, normalizer_fn=self.normalizer_fn, weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n if self.normalizer_fn==False:\n if self.dropout==False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs, kernel_size=self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope+'-dropout'), num_outputs=self.num_outputs, kernel_size=self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding,\n activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n if self.dropout == False:\n h_out = slim.conv2d(h, num_outputs=self.num_outputs, kernel_size=self.kernel_size,\n stride=self.stride, scope=self.scope, padding=self.padding,\n normalizer_fn=self.normalizer_fn, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n else:\n h_out = slim.conv2d(slim.dropout(h, scope=self.scope + '-dropout'), num_outputs=self.num_outputs,\n kernel_size=self.kernel_size, stride=self.stride, scope=self.scope,\n padding=self.padding, normalizer_fn=self.normalizer_fn, activation_fn=self.activation_fn,\n weights_initializer=self.weights_initializer, weights_regularizer = self.weights_regularizer)\n return 
h_out\n\n\nclass ProjectionAdaptor(object):\n def __init__(self, scope, projection_width, num_outputs, dropout=False):\n self.dim_reduction_layer = ConvLayer(num_outputs=projection_width, kernel_size=1, stride=1, padding='SAME',\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY),\n dropout=dropout, scope=scope + '/adapter/dim_reduction')\n self.output_layer = ConvLayer(num_outputs=num_outputs, kernel_size=1, stride=1, padding='SAME',\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY),\n dropout=dropout, scope=scope + '/adapter/output', normalizer_fn=None, activation_fn=None)\n\n def apply(self, h):\n reduced_space = self.dim_reduction_layer.apply(h)\n return self.output_layer.apply(reduced_space)\n\n\ndef split(input_layer, stride, bottleneck_depth):\n '''\n The split structure in Figure 3b of the paper. It takes an input tensor. Conv it by [1, 1,\n 64] filter, and then conv the result by [3, 3, 64]. Return the\n final resulted tensor, which is in shape of [batch_size, input_height, input_width, 64]\n\n :param input_layer: 4D tensor in shape of [batch_size, input_height, input_width,\n input_channel]\n :param stride: int. 1 or 2. 
If want to shrink the image size, then stride = 2\n :return: 4D tensor in shape of [batch_size, input_height, input_width, input_channel/64]\n '''\n\n input_depth = input_layer.get_shape().as_list()[-1]\n\n with tf.variable_scope('bneck_%d_1x1_%dd' %(input_depth, bottleneck_depth)):\n bneck_1x1 = slim.conv2d(input_layer, num_outputs=bottleneck_depth, kernel_size=1, stride=1,\n padding='SAME',\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n with tf.variable_scope('bneck_%d_3x3_%dd' %(bottleneck_depth, bottleneck_depth)):\n bneck_3x3 = slim.conv2d(bneck_1x1, num_outputs=bottleneck_depth, kernel_size=3, stride=stride,\n padding='SAME',\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n\n return bneck_3x3\n\ndef bottleneck_b(input_layer, stride, cardinality, bottleneck_depth):\n '''\n The bottleneck strucutre in Figure 3b. Concatenates all the splits\n :param input_layer: 4D tensor in shape of [batch_size, input_height, input_width,\n input_channel]\n :param stride: int. 1 or 2. If want to shrink the image size, then stride = 2\n :return: 4D tensor in shape of [batch_size, output_height, output_width, output_channel]\n '''\n nInputPlane = input_layer.get_shape().as_list()[-1]\n\n split_list = []\n for i in range(cardinality):\n with tf.variable_scope('split_%i'%i):\n splits = split(input_layer=input_layer, stride=stride, bottleneck_depth=bottleneck_depth)\n split_list.append(splits)\n\n # Concatenate splits and check the dimension\n concat_bottleneck = tf.concat(values=split_list, axis=3, name='concat_splits')\n\n return concat_bottleneck\n\n\nclass ResNextAdaptor(object):\n '''\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. 
Finally adds the identity and ReLu.\n '''\n\n def __init__(self, scope, cardinality, output_depth, num_filters, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.output_depth = output_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n\n with tf.variable_scope(self.scope):\n\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride, bottleneck_depth=self.num_filters,\n cardinality=self.cardinality)\n\n restored = slim.conv2d(bottleneck_out, num_outputs=self.output_depth, kernel_size=1, stride=1,\n scope='restore_num_outputs', padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n\n with tf.variable_scope('shortcut'):\n if input_depth != self.output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=self.output_depth, kernel_size=1, stride=self.stride,\n padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n\n residual = tf.add(restored, padded_input, name='residual')\n return residual\n\nclass ResNextBlock(object):\n '''\n The block structure in Figure 3b. Takes a 4D tensor as input layer and splits, concatenates\n the tensor and restores the depth. 
Finally adds the identity and ReLu.\n '''\n def __init__(self, scope, cardinality, bottleneck_depth, num_filters, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.bottleneck_depth = bottleneck_depth\n self.cardinality = cardinality\n self.stride = stride\n\n def apply(self, input_layer):\n input_depth = input_layer.get_shape().as_list()[-1]\n # output width 4*self.num_filters as per line 96 in\n # https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua, commit 833a384\n output_depth = 4*self.num_filters\n\n with tf.variable_scope(self.scope):\n\n bottleneck_out = bottleneck_b(input_layer, stride=self.stride, bottleneck_depth = self.num_filters, cardinality=self.cardinality)\n\n\n restored = slim.conv2d(bottleneck_out, num_outputs=output_depth, kernel_size=1, stride=1,\n scope='restore_num_outputs', padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n\n with tf.variable_scope('shortcut'):\n if input_depth != output_depth:\n padded_input = slim.conv2d(input_layer, num_outputs=output_depth, kernel_size=1, stride=self.stride,\n padding='SAME', activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY))\n else:\n padded_input = input_layer\n\n residual = tf.add(restored, padded_input, name='residual')\n # output = tf.nn.relu(residual, name='residual_relu')\n output = residual\n\n return output\n\nclass ResNextGroup(object):\n\n def __init__(self, scope, num_blocks, num_filters, bottleneck_depth, cardinality, stride, dropout=False):\n self.scope = scope\n self.dropout = dropout\n self.num_filters = num_filters\n self.cardinality = cardinality\n self.num_blocks = num_blocks\n self.bottleneck_depth = bottleneck_depth\n self.stride = stride\n\n def apply(self, h):\n tensor_stack = [h]\n with 
tf.variable_scope(self.scope):\n for i in range(self.num_blocks):\n if i == 0:\n stride=self.stride\n else:\n stride=1\n h = ResNextBlock(num_filters=self.num_filters, cardinality=self.cardinality, bottleneck_depth = self.bottleneck_depth,\n stride=stride, dropout=self.dropout, scope='block%d' % i).apply(tensor_stack[-1])\n tensor_stack.append(h)\n return tensor_stack[-1]\n\nclass AveragePoolLayer(object):\n def __init__(self, scope, axis, keep_dims):\n self.scope = scope\n self.axis=axis\n self.keep_dims=keep_dims\n\n def apply(self, h):\n with tf.variable_scope(self.scope):\n average_pool = tf.reduce_mean(h, axis=self.axis, keep_dims=self.keep_dims)\n return average_pool\n\n\n# The ResNext architecture is based on the following code:\n# https://github.com/wenxinxu/ResNeXt-in-tensorflow/blob/master/resNeXt.py\n# commit 8a00577495fb01cf98bf77562422390b652e1a4e\n# ResNeXt. total layers = 1 + 3n + 3n + 3n +1 = 9n + 2\n\nArchitectureResNext = [\n ConvLayer(num_outputs=64, kernel_size=3, stride=1, scope='input/conv0',\n activation_fn=None,\n weights_initializer=ScaledVarianceRandomNormal(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY)),\n\n ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block1'),\n ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block2'),\n ResNextBlock(num_filters=64, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group1/block3'),\n\n ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=2, dropout=False, scope='bottleneck_group2/block1'),\n ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group2/block2'),\n ResNextBlock(num_filters=128, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group2/block3'),\n\n ResNextBlock(num_filters=256, cardinality=2, 
bottleneck_depth=64, stride=2, dropout=False, scope='bottleneck_group3/block1'),\n ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group3/block2'),\n ResNextBlock(num_filters=256, cardinality=2, bottleneck_depth=64, stride=1, dropout=False, scope='bottleneck_group3/block3'),\n\n AveragePoolLayer(scope='avg_pool', axis=[1,2], keep_dims=True),\n ConvLayer(num_outputs=10, kernel_size=1, stride=1, normalizer_fn=None, activation_fn=None,\n weights_initializer=ScaledVarianceUniform(),\n weights_regularizer=tf.contrib.layers.l2_regularizer(scale=WEIGHT_DECAY), scope='logits/fc'),\n]\n\nAdaptorStack = [\n None,\n\n ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64/4, stride=1, dropout=False, scope='bottleneck_group1/adaptor1'),\n ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64/4, stride=1, dropout=False, scope='bottleneck_group1/adaptor2'),\n ResNextAdaptor(cardinality=2, output_depth=256, num_filters=64/4, stride=1, dropout=False, scope='bottleneck_group1/adaptor3'),\n\n ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128/4, stride=2, dropout=False, scope='bottleneck_group2/adaptor1'),\n ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128/4, stride=1, dropout=False, scope='bottleneck_group2/adaptor2'),\n ResNextAdaptor(cardinality=2, output_depth=512, num_filters=128/4, stride=1, dropout=False, scope='bottleneck_group2/adaptor3'),\n\n ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256/4, stride=2, dropout=False, scope='bottleneck_group3/adaptor1'),\n ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256/4, stride=1, dropout=False, scope='bottleneck_group3/adaptor2'),\n ResNextAdaptor(cardinality=2, output_depth=1024, num_filters=256/4, stride=1, dropout=False, scope='bottleneck_group3/adaptor3'),\n\n None,\n ProjectionAdaptor(projection_width=1024/8, num_outputs=10, dropout=False, scope='logits/adaptor')\n]",
"step-ids": [
19,
21,
23,
33,
35
]
}
|
[
19,
21,
23,
33,
35
] |
<|reserved_special_token_0|>
def appendimages(im1, im2):
""" Return a new image that appends the two images side-by-side. """
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),
axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),
axis=0)
return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
buff = np.concatenate((im1, im2), axis=1)
return np.concatenate((buff, im3), axis=1)
<|reserved_special_token_0|>
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold
no_at_edge = x > threshold and y > threshold and fabs(x - img_width
) > threshold and fabs(y - img_height) > threshold
return no_same_point and no_at_edge
<|reserved_special_token_0|>
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2, n))
for i in range(n):
opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]
opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]
total = np.sum(opposites, axis=1)
x = int(total[0] / n)
y = int(total[1] / n)
wrist = x, y
return wrist
<|reserved_special_token_0|>
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)
)
max_indx = np.argsort(-1 * np.array(dist_from_fixedPoint))
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,
img_width, edge_thresh):
if len(idx_ok) == 0:
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = points_not_similar(hull[idx, 0, 0], hull[
idx, 0, 1], hull[idx_neighbor, 0, 0], hull[
idx_neighbor, 0, 1], neighbor_thresh)
if not not_similar:
break
if not_similar:
idx_ok.append(idx)
return idx_ok
<|reserved_special_token_0|>
def hsv_preprocessing(img):
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
skinMask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
skinMask = cv2.erode(skinMask, kernel, iterations=2)
skinMask = cv2.dilate(skinMask, kernel, iterations=2)
blur = cv2.GaussianBlur(skinMask, (5, 5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.
THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2
.CHAIN_APPROX_SIMPLE)
max_area = 0
ci = 0
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
ci = i
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints=False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def eliminate_background(img, backGround, thres_diff):
height, width, depth = img.shape
for i in range(height):
for j in range(width):
erase = True
for k in range(depth):
if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:
erase = False
if erase:
img[i, j, :] = 0
return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def appendimages(im1, im2):
""" Return a new image that appends the two images side-by-side. """
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),
axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),
axis=0)
return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
buff = np.concatenate((im1, im2), axis=1)
return np.concatenate((buff, im3), axis=1)
<|reserved_special_token_0|>
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = fabs(x - x_neighb) + fabs(y - y_neighb) > 2 * threshold
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold
no_at_edge = x > threshold and y > threshold and fabs(x - img_width
) > threshold and fabs(y - img_height) > threshold
return no_same_point and no_at_edge
<|reserved_special_token_0|>
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2, n))
for i in range(n):
opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]
opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]
total = np.sum(opposites, axis=1)
x = int(total[0] / n)
y = int(total[1] / n)
wrist = x, y
return wrist
<|reserved_special_token_0|>
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)
)
max_indx = np.argsort(-1 * np.array(dist_from_fixedPoint))
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,
img_width, edge_thresh):
if len(idx_ok) == 0:
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = points_not_similar(hull[idx, 0, 0], hull[
idx, 0, 1], hull[idx_neighbor, 0, 0], hull[
idx_neighbor, 0, 1], neighbor_thresh)
if not not_similar:
break
if not_similar:
idx_ok.append(idx)
return idx_ok
<|reserved_special_token_0|>
def hsv_preprocessing(img):
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
skinMask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
skinMask = cv2.erode(skinMask, kernel, iterations=2)
skinMask = cv2.dilate(skinMask, kernel, iterations=2)
blur = cv2.GaussianBlur(skinMask, (5, 5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.
THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2
.CHAIN_APPROX_SIMPLE)
max_area = 0
ci = 0
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
ci = i
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints=False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def eliminate_background(img, backGround, thres_diff):
height, width, depth = img.shape
for i in range(height):
for j in range(width):
erase = True
for k in range(depth):
if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:
erase = False
if erase:
img[i, j, :] = 0
return img
<|reserved_special_token_0|>
def main():
image_name = 'hand_in_BG5.png'
img = cv2.imread(image_name)
bin_image = simple_preprocessing(img)
cv2.imshow('orig', img)
cv2.imshow('bin', bin_image)
cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort(-1 * convDefs[:, 0, 3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist, 0, 2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)
hull_nbPts = hull.shape[0]
"""
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
"""
moments = cv2.moments(cnt)
if moments['m00'] != 0:
cx = int(moments['m10'] / moments['m00'])
cy = int(moments['m01'] / moments['m00'])
centr = cx, cy
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh)
max_5hull_idx = idx_ok[0:1]
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)
cv2.imshow('contour and convex hull', drawing)
k = cv2.waitKey(0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def appendimages(im1, im2):
""" Return a new image that appends the two images side-by-side. """
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),
axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),
axis=0)
return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
buff = np.concatenate((im1, im2), axis=1)
return np.concatenate((buff, im3), axis=1)
def point_not_at_edge(x, y, img_height, img_width, threshold):
no_at_edge = x > threshold and y > threshold and fabs(x - img_width
) > threshold and fabs(y - img_height) > threshold
return no_at_edge
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = fabs(x - x_neighb) + fabs(y - y_neighb) > 2 * threshold
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold
no_at_edge = x > threshold and y > threshold and fabs(x - img_width
) > threshold and fabs(y - img_height) > threshold
return no_same_point and no_at_edge
<|reserved_special_token_0|>
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2, n))
for i in range(n):
opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]
opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]
total = np.sum(opposites, axis=1)
x = int(total[0] / n)
y = int(total[1] / n)
wrist = x, y
return wrist
<|reserved_special_token_0|>
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)
)
max_indx = np.argsort(-1 * np.array(dist_from_fixedPoint))
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,
img_width, edge_thresh):
if len(idx_ok) == 0:
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = points_not_similar(hull[idx, 0, 0], hull[
idx, 0, 1], hull[idx_neighbor, 0, 0], hull[
idx_neighbor, 0, 1], neighbor_thresh)
if not not_similar:
break
if not_similar:
idx_ok.append(idx)
return idx_ok
<|reserved_special_token_0|>
def hsv_preprocessing(img):
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
skinMask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
skinMask = cv2.erode(skinMask, kernel, iterations=2)
skinMask = cv2.dilate(skinMask, kernel, iterations=2)
blur = cv2.GaussianBlur(skinMask, (5, 5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.
THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2
.CHAIN_APPROX_SIMPLE)
max_area = 0
ci = 0
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
ci = i
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints=False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def eliminate_background(img, backGround, thres_diff):
height, width, depth = img.shape
for i in range(height):
for j in range(width):
erase = True
for k in range(depth):
if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:
erase = False
if erase:
img[i, j, :] = 0
return img
<|reserved_special_token_0|>
def main():
image_name = 'hand_in_BG5.png'
img = cv2.imread(image_name)
bin_image = simple_preprocessing(img)
cv2.imshow('orig', img)
cv2.imshow('bin', bin_image)
cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort(-1 * convDefs[:, 0, 3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist, 0, 2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)
hull_nbPts = hull.shape[0]
"""
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
"""
moments = cv2.moments(cnt)
if moments['m00'] != 0:
cx = int(moments['m10'] / moments['m00'])
cy = int(moments['m01'] / moments['m00'])
centr = cx, cy
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh)
max_5hull_idx = idx_ok[0:1]
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)
cv2.imshow('contour and convex hull', drawing)
k = cv2.waitKey(0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def appendimages(im1, im2):
""" Return a new image that appends the two images side-by-side. """
rows1 = im1.shape[0]
rows2 = im2.shape[0]
if rows1 < rows2:
im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),
axis=0)
elif rows1 > rows2:
im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),
axis=0)
return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
buff = np.concatenate((im1, im2), axis=1)
return np.concatenate((buff, im3), axis=1)
def point_not_at_edge(x, y, img_height, img_width, threshold):
no_at_edge = x > threshold and y > threshold and fabs(x - img_width
) > threshold and fabs(y - img_height) > threshold
return no_at_edge
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = fabs(x - x_neighb) + fabs(y - y_neighb) > 2 * threshold
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold
no_at_edge = x > threshold and y > threshold and fabs(x - img_width
) > threshold and fabs(y - img_height) > threshold
return no_same_point and no_at_edge
<|reserved_special_token_0|>
def find_wrist(center, contour, set_idx_convDefs):
n = len(set_idx_convDefs)
opposites = np.zeros((2, n))
for i in range(n):
opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]
opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]
total = np.sum(opposites, axis=1)
x = int(total[0] / n)
y = int(total[1] / n)
wrist = x, y
return wrist
<|reserved_special_token_0|>
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh):
dist_from_fixedPoint = []
img_height, img_width = img.shape[0:2]
hull_nbPts = hull.shape[0]
for i in range(hull_nbPts):
dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)
)
max_indx = np.argsort(-1 * np.array(dist_from_fixedPoint))
idx_ok = []
for i in range(hull_nbPts):
idx = max_indx[i]
if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,
img_width, edge_thresh):
if len(idx_ok) == 0:
idx_ok.append(idx)
else:
not_similar = True
for idx_neighbor in idx_ok:
not_similar = points_not_similar(hull[idx, 0, 0], hull[
idx, 0, 1], hull[idx_neighbor, 0, 0], hull[
idx_neighbor, 0, 1], neighbor_thresh)
if not not_similar:
break
if not_similar:
idx_ok.append(idx)
return idx_ok
def simple_preprocessing(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
blur = cv2.erode(blur, kernel, iterations=2)
blur = cv2.dilate(blur, kernel, iterations=2)
ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.
THRESH_OTSU)
return bin_image
def simple_preprocessing2(img, backGround):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
gray = gray - gray2
blur = cv2.GaussianBlur(gray, (5, 5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.
THRESH_OTSU)
return bin_image
def hsv_preprocessing(img):
lower = np.array([1, 38, 51])
upper = np.array([28, 250, 242])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
skinMask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
skinMask = cv2.erode(skinMask, kernel, iterations=2)
skinMask = cv2.dilate(skinMask, kernel, iterations=2)
blur = cv2.GaussianBlur(skinMask, (5, 5), 0)
ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.
THRESH_OTSU)
return bin_image
def find_contour_hull(binary_image):
contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2
.CHAIN_APPROX_SIMPLE)
max_area = 0
ci = 0
for i in range(len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
ci = i
cnt = contours[ci]
hull = cv2.convexHull(cnt)
hull_idx = cv2.convexHull(cnt, returnPoints=False)
return cnt, hull, hull_idx
def draws_contour_hull(img, cnt, hull):
drawing = np.zeros(img.shape, np.uint8)
cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)
cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)
return drawing
def eliminate_background(img, backGround, thres_diff):
height, width, depth = img.shape
for i in range(height):
for j in range(width):
erase = True
for k in range(depth):
if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:
erase = False
if erase:
img[i, j, :] = 0
return img
<|reserved_special_token_0|>
def tracking():
camera = cv2.VideoCapture(0)
_, img = camera.read()
h, w, d = img.shape
fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
out = cv2.VideoWriter()
success = out.open('output.avi', fourcc, 15, (3 * w, h), True)
waitTime = 100
for i in range(waitTime):
_, average = camera.read()
index_im = 0
while True:
grabbed, img = camera.read()
img_diff = cv2.absdiff(img, average)
bin_image = simple_preprocessing(img_diff)
bin_image2 = bin_image.copy()
cv2.imshow('binaire', bin_image2)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort(-1 * convDefs[:, 0, 3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist, 0, 2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)
hull_nbPts = hull.shape[0]
"""
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
"""
moments = cv2.moments(cnt)
if moments['m00'] != 0:
cx = int(moments['m10'] / moments['m00'])
cy = int(moments['m01'] / moments['m00'])
centr = cx, cy
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint,
edge_thresh, neighbor_thresh)
max_5hull_idx = idx_ok[0:5]
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
"""
print img.shape
print bin_image2.shape
print drawing.shape
"""
frame = append_imgs(img, bin_image2, drawing)
cv2.imwrite('store2/' + 'img' + str(index_im) + '.jpg', frame)
index_im += 1
if cv2.waitKey(1) & 255 == ord('q'):
break
camera.release()
out.release()
cv2.destroyAllWindows()
def main():
image_name = 'hand_in_BG5.png'
img = cv2.imread(image_name)
bin_image = simple_preprocessing(img)
cv2.imshow('orig', img)
cv2.imshow('bin', bin_image)
cv2.waitKey(0)
cnt, hull, hull_idx = find_contour_hull(bin_image)
drawing = draws_contour_hull(img, cnt, hull)
convDefs = cv2.convexityDefects(cnt, hull_idx)
dist_order = np.argsort(-1 * convDefs[:, 0, 3])
max4dist = dist_order[0:4]
max4points = convDefs[max4dist, 0, 2]
for i in max4points:
cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)
hull_nbPts = hull.shape[0]
"""
#draws all the points constitue the convex hull (for debugging)
for i in range(hull_nbPts):
cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
"""
moments = cv2.moments(cnt)
if moments['m00'] != 0:
cx = int(moments['m10'] / moments['m00'])
cy = int(moments['m01'] / moments['m00'])
centr = cx, cy
cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
wrist = find_wrist(centr, cnt, max4points)
cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
edge_thresh = 20
neighbor_thresh = 20
fixedPoint = wrist
idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,
neighbor_thresh)
max_5hull_idx = idx_ok[0:1]
for i in max_5hull_idx:
cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)
cv2.imshow('contour and convex hull', drawing)
k = cv2.waitKey(0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import cv2
import numpy as np
from math import *
def appendimages(im1, im2):
    """Return a new image that appends the two images side-by-side.

    The shorter image (fewer rows) is padded at the bottom with zeros so
    both operands have the same height before horizontal concatenation.

    Parameters:
        im1, im2: numpy arrays (grayscale or color) placed left / right.

    Returns:
        A numpy array containing im1 and im2 side by side.
    """
    rows1 = im1.shape[0]
    rows2 = im2.shape[0]
    # BUG FIX: the original called bare `zeros`, which is undefined here
    # (numpy is imported as `np`), and built a 2-D pad that breaks on
    # 3-channel images.  Preserve trailing dims and dtype instead.
    if rows1 < rows2:
        pad = np.zeros((rows2 - rows1,) + im1.shape[1:], dtype=im1.dtype)
        im1 = np.concatenate((im1, pad), axis=0)
    elif rows1 > rows2:
        pad = np.zeros((rows1 - rows2,) + im2.shape[1:], dtype=im2.dtype)
        im2 = np.concatenate((im2, pad), axis=0)
    # If the heights are equal, no padding is needed.
    return np.concatenate((im1, im2), axis=1)
def append_imgs(im1, im2, im3):
    """Concatenate three equal-height images horizontally, left to right."""
    return np.concatenate((im1, im2, im3), axis=1)
#check whether the point is near edge or not
def point_not_at_edge( x, y, img_height, img_width, threshold):
no_at_edge = ( (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) )
return no_at_edge
#check whether two points are too near from each other
def points_not_similar(x, y, x_neighb, y_neighb, threshold):
no_same_point = (fabs(x - x_neighb) + fabs(y - y_neighb) > 2*threshold)
return no_same_point
def good_points(x, y, x_next, y_next, img_height, img_width, threshold):
    """Return True when (x, y) is both distinct from (x_next, y_next)
    (Manhattan distance > 2 * threshold) and farther than `threshold`
    from every image border."""
    distinct = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold
    away_from_border = (
        x > threshold
        and y > threshold
        and fabs(x - img_width) > threshold
        and fabs(y - img_height) > threshold
    )
    return distinct and away_from_border
'''
Estimate the wrist point of the hand by reflecting the selected convexity
defect points through the contour's center and averaging the reflections.
'''
def find_wrist(center, contour, set_idx_convDefs):
    """Estimate the wrist location.

    Each selected convexity-defect point of `contour` is reflected through
    `center` (opposite side of the palm); the wrist is the integer average
    of those reflected points.

    Parameters:
        center: (x, y) tuple, typically the contour centroid.
        contour: OpenCV-style contour array of shape (k, 1, 2).
        set_idx_convDefs: indices into `contour` of the defect points.

    Returns:
        (x, y) tuple of ints.
    """
    cx, cy = center
    sum_x = 0.0
    sum_y = 0.0
    for idx in set_idx_convDefs:
        # Reflection of the defect point through the center: 2*c - p.
        sum_x += 2 * cx - contour[idx, 0, 0]
        sum_y += 2 * cy - contour[idx, 0, 1]
    n = len(set_idx_convDefs)
    return int(sum_x / n), int(sum_y / n)
'''
Simple fingertip detection: rank the convex-hull points by their distance
from a fixed reference point (the centroid or the wrist) and keep the
farthest ones, discarding near-duplicates and points close to the image
border.
'''
def simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):
    """Pick candidate fingertip indices on the convex hull.

    Hull points are visited from farthest to nearest relative to
    `fixedPoint` (e.g. the wrist).  A point is kept only if it is farther
    than `edge_thresh` from every image border and its Manhattan distance
    to every already-accepted point exceeds 2 * neighbor_thresh.

    Parameters:
        hull: convex-hull array of shape (k, 1, 2).
        img: image whose height/width bound the border test.
        fixedPoint: (x, y) reference point.
        edge_thresh: minimal allowed distance to any image border.
        neighbor_thresh: half the minimal Manhattan distance between two
            accepted candidates.

    Returns:
        List of accepted hull indices, ordered from farthest to nearest.
    """
    height, width = img.shape[0], img.shape[1]
    ref = np.asarray(fixedPoint)
    num_pts = hull.shape[0]
    # Euclidean distance of every hull point from the reference point.
    distances = [np.linalg.norm(ref - hull[k]) for k in range(num_pts)]
    # Indices sorted by decreasing distance.
    order = np.argsort(-1 * np.array(distances))
    accepted = []
    for k in range(num_pts):
        idx = order[k]
        px, py = hull[idx, 0, 0], hull[idx, 0, 1]
        # Reject points hugging the image border (inlined point_not_at_edge).
        inside = (px > edge_thresh and py > edge_thresh
                  and fabs(px - width) > edge_thresh
                  and fabs(py - height) > edge_thresh)
        if not inside:
            continue
        # Reject points too close to an already-accepted candidate
        # (inlined points_not_similar).
        distinct = True
        for prev in accepted:
            qx, qy = hull[prev, 0, 0], hull[prev, 0, 1]
            if fabs(px - qx) + fabs(py - qy) <= 2 * neighbor_thresh:
                distinct = False
                break
        if distinct:
            accepted.append(idx)
    return accepted
def simple_preprocessing(img):
    """Binarize a BGR frame: grayscale, denoise, then Otsu threshold.

    Returns a binary (0/255) single-channel image.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    smoothed = cv2.GaussianBlur(gray, (5, 5), 0)
    # Morphological opening (erode then dilate) removes small speckles.
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    smoothed = cv2.erode(smoothed, ellipse, iterations=2)
    smoothed = cv2.dilate(smoothed, ellipse, iterations=2)
    _, binary = cv2.threshold(smoothed, 50, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary
def simple_preprocessing2(img, backGround):
    """Binarize a frame after subtracting a reference background frame.

    Both inputs are converted to grayscale, the background is subtracted,
    the difference is blurred and then Otsu-thresholded.

    Returns a binary (0/255) single-channel image.
    """
    fg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    bg_gray = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)
    # NOTE(review): plain subtraction of uint8 arrays wraps modulo 256;
    # cv2.absdiff looks like the intent here -- kept as-is to preserve
    # behavior, confirm before changing.
    diff = fg_gray - bg_gray
    smoothed = cv2.GaussianBlur(diff, (5, 5), 0)
    _, binary = cv2.threshold(smoothed, 70, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary
def hsv_preprocessing(img):
    """Binarize a BGR frame via an HSV skin-color mask.

    Pixels whose HSV values fall inside the configured skin range are kept;
    the mask is cleaned with morphological opening, blurred and then
    Otsu-thresholded.

    Returns a binary (0/255) single-channel image.
    """
    # Skin-tone range in OpenCV's HSV scale (H: 0-179, S/V: 0-255).
    skin_lower = np.array([1, 38, 51])
    skin_upper = np.array([28, 250, 242])
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, skin_lower, skin_upper)
    # Morphological opening removes small speckles from the mask.
    ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    mask = cv2.erode(mask, ellipse, iterations=2)
    mask = cv2.dilate(mask, ellipse, iterations=2)
    smoothed = cv2.GaussianBlur(mask, (5, 5), 0)
    _, binary = cv2.threshold(smoothed, 70, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return binary
def find_contour_hull(binary_image):
    """Find the largest contour in a binary image and its convex hull.

    Returns:
        (contour, hull_points, hull_indices): the largest-area contour,
        the hull vertex coordinates, and the hull as indices into the
        contour (the form required by cv2.convexityDefects).
    """
    contours, _ = cv2.findContours(binary_image, cv2.RETR_TREE,
                                   cv2.CHAIN_APPROX_SIMPLE)
    # Keep the contour enclosing the largest area (first one wins ties).
    best_idx = 0
    best_area = 0
    for i, candidate in enumerate(contours):
        area = cv2.contourArea(candidate)
        if area > best_area:
            best_area = area
            best_idx = i
    largest = contours[best_idx]
    hull_points = cv2.convexHull(largest)
    hull_indices = cv2.convexHull(largest, returnPoints=False)
    return largest, hull_points, hull_indices
def draws_contour_hull(img, cnt, hull):
    """Return a black canvas of img's shape with the contour drawn in
    green and its convex hull in red."""
    canvas = np.zeros(img.shape, np.uint8)
    cv2.drawContours(canvas, [cnt], 0, (0, 255, 0), 3)   # contour: green
    cv2.drawContours(canvas, [hull], 0, (0, 0, 255), 3)  # hull: red
    return canvas
def eliminate_background(img, backGround, thres_diff):
    """Zero out pixels that match a reference background frame.

    A pixel is erased (set to 0 on every channel) when the absolute
    difference to the background is <= thres_diff on ALL channels.

    Parameters:
        img: H x W x C image; modified in place and also returned.
        backGround: reference frame of the same shape.
        thres_diff: per-channel tolerance.

    Returns:
        The (modified) input image.
    """
    # BUG FIX: the original per-pixel loop subtracted uint8 scalars, which
    # wraps modulo 256 and misclassifies large differences as small ones
    # (e.g. |5 - 250| computed as 11).  Compute in a signed type instead.
    # Vectorizing also replaces the O(H*W*C) Python loop with one numpy pass.
    diff = np.abs(img.astype(np.int64) - backGround.astype(np.int64))
    background_like = np.all(diff <= thres_diff, axis=2)
    img[background_like] = 0
    return img
'''
Tracking via the camera.
NOTE: the HSV skin mask is very sensitive to color and lighting;
simple_preprocessing seems more stable in practice.
'''
'''
firstSec = 0
camera = cv2.VideoCapture(0)
for i in range(12):
camera.read()
grabbed, backGround = camera.read()
for i in range(12):
grabbed, img = camera.read()
backGround = backGround/2 + img/2
'''
def tracking():
    """Live hand-tracking loop using the default camera.

    Captures a background frame, then for each new frame: subtracts the
    background, binarizes, finds the largest contour and its convex hull,
    estimates the palm centroid and wrist, detects fingertip candidates,
    and writes a side-by-side debug frame (gray input | binary |
    annotations) to store2/imgN.jpg.  Press 'q' in the OpenCV window to
    quit.

    Side effects: opens the camera, OpenCV windows, an 'output.avi'
    writer (opened but never written to) and image files under store2/.
    """
    camera = cv2.VideoCapture(0)
    _,img = camera.read()
    h,w,d = img.shape
    #out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))
    # Legacy OpenCV 2.x codec constant; frame size is 3*w because three
    # images are appended horizontally below.
    fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')
    out = cv2.VideoWriter()
    success = out.open('output.avi',fourcc, 15, (3*w,h), True)
    # Grab frames for a while and keep the LAST one as the background.
    waitTime = 100
    for i in range(waitTime):
        _, average = camera.read()
    #average = np.float32(average)
    index_im = 0
    while True:
        grabbed, img = camera.read()
        #alpha = 0.01 #factor of forgetting
        #cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst
        img_diff = cv2.absdiff(img, average)# absolute per-pixel difference to the background frame
        #cv2.imshow('img_diff', img_diff)
        #substract background
        #img = eliminate_background(img, backGround, 20)
        #bin_image = simple_preprocessing(img, backGround)
        bin_image = simple_preprocessing(img_diff)
        # Keep a copy for display: findContours below mutates its input.
        bin_image2 = bin_image.copy()
        cv2.imshow('binaire', bin_image2)
        # bin_image = hsv_preprocessing(img)
        # cv2.imshow('orig', img)
        # cv2.imshow('bin', bin_image)
        # cv2.waitKey(0)
        cnt, hull, hull_idx = find_contour_hull(bin_image)
        drawing = draws_contour_hull(img, cnt, hull)
        # Convexity defects locate the valleys between fingers; column 3
        # is the defect depth, column 2 the farthest-point index.
        convDefs = cv2.convexityDefects(cnt, hull_idx)
        dist_order = np.argsort((-1)*convDefs[:,0,3])
        max4dist = dist_order[0:4]
        max4points = convDefs[max4dist,0,2]
        for i in max4points:
            cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
        hull_nbPts = hull.shape[0]
        '''
        #draws all the points constitue the convex hull (for debugging)
        for i in range(hull_nbPts):
            cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
            cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
        '''
        # Centroid of the contour from its image moments.
        moments = cv2.moments(cnt)
        if moments['m00']!=0:
            cx = int(moments['m10']/moments['m00']) # cx = M10/M00
            cy = int(moments['m01']/moments['m00']) # cy = M01/M00
            centr=(cx,cy)
            cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
            # Wrist estimate: defect points reflected through the centroid.
            wrist = find_wrist(centr, cnt, max4points)
            cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
        # NOTE(review): if m00 == 0 on the very first frame, `wrist` is
        # unbound here and the assignment below raises NameError.
        edge_thresh = 20
        neighbor_thresh = 20
        fixedPoint = wrist
        idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
        #print 'list of idx_ok = ', idx_ok
        # Keep at most five fingertip candidates (five fingers).
        max_5hull_idx = idx_ok[0:5]
        #print 'first five of idx_ok = ', max_5hull_idx
        for i in max_5hull_idx:
            cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
            #print hull[i]
        #print dist_from_center
        #cv2.imshow('contour and convex hull', drawing)
        # Convert both to grayscale so all three panels share one channel.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)
        '''
        print img.shape
        print bin_image2.shape
        print drawing.shape
        '''
        frame = append_imgs(img, bin_image2, drawing)
        #cv2.imshow('frame', frame)
        #out.write(frame)
        cv2.imwrite("store2/" + "img"+str(index_im) + ".jpg", frame)
        index_im += 1
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    camera.release()
    out.release()
    #self.out = None
    cv2.destroyAllWindows()
def main():
    """Single-image demo: detect the hand in a fixed test image.

    Loads the test image, binarizes it, finds the largest contour and its
    convex hull, draws the top convexity defects, centroid, wrist and the
    strongest fingertip candidate, then waits for a key press.

    Side effects: opens OpenCV display windows; blocks on cv2.waitKey.
    """
    image_name = "hand_in_BG5.png"
    img = cv2.imread(image_name)
    bin_image = simple_preprocessing(img)
    #bin_image = hsv_preprocessing(img)
    cv2.imshow('orig', img)
    cv2.imshow('bin', bin_image)
    cv2.waitKey(0)
    cnt, hull, hull_idx = find_contour_hull(bin_image)
    drawing = draws_contour_hull(img, cnt, hull)
    # Convexity defects locate the valleys between fingers; column 3 is
    # the defect depth, column 2 the farthest-point index.
    convDefs = cv2.convexityDefects(cnt, hull_idx)
    dist_order = np.argsort((-1)*convDefs[:,0,3])
    max4dist = dist_order[0:4]
    max4points = convDefs[max4dist,0,2]
    for i in max4points:
        cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2)
    hull_nbPts = hull.shape[0]
    '''
    #draws all the points constitue the convex hull (for debugging)
    for i in range(hull_nbPts):
        cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2)
        cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)
    '''
    # Centroid of the contour from its image moments.
    moments = cv2.moments(cnt)
    if moments['m00']!=0:
        cx = int(moments['m10']/moments['m00']) # cx = M10/M00
        cy = int(moments['m01']/moments['m00']) # cy = M01/M00
        centr=(cx,cy)
        cv2.circle(drawing, centr, 5, [0, 255, 255], 2)
        # Wrist estimate: defect points reflected through the centroid.
        wrist = find_wrist(centr, cnt, max4points)
        cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)
    # NOTE(review): if m00 == 0 above, `wrist` is unbound and the next
    # assignment raises NameError.
    edge_thresh = 20
    neighbor_thresh = 20
    fixedPoint = wrist
    idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)
    #print 'list of idx_ok = ', idx_ok
    # Keep only the single strongest fingertip candidate.
    max_5hull_idx = idx_ok[0:1]
    #print 'first five of idx_ok = ', max_5hull_idx
    for i in max_5hull_idx:
        cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)
        #print hull[i]
    #print dist_from_center
    cv2.imshow('contour and convex hull', drawing)
    k = cv2.waitKey(0)
if __name__ == "__main__":
    # Entry point: run the live-camera tracking loop by default; swap the
    # comments below to run the single-image demo (`main()`) instead.
    # main()
    tracking()
|
flexible
|
{
"blob_id": "c3e313805c6f91f9aac77922edfd09650143f905",
"index": 4862,
"step-1": "<mask token>\n\n\ndef appendimages(im1, im2):\n \"\"\" Return a new image that appends the two images side-by-side. \"\"\"\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),\n axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),\n axis=0)\n return np.concatenate((im1, im2), axis=1)\n\n\ndef append_imgs(im1, im2, im3):\n buff = np.concatenate((im1, im2), axis=1)\n return np.concatenate((buff, im3), axis=1)\n\n\n<mask token>\n\n\ndef good_points(x, y, x_next, y_next, img_height, img_width, threshold):\n no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold\n no_at_edge = x > threshold and y > threshold and fabs(x - img_width\n ) > threshold and fabs(y - img_height) > threshold\n return no_same_point and no_at_edge\n\n\n<mask token>\n\n\ndef find_wrist(center, contour, set_idx_convDefs):\n n = len(set_idx_convDefs)\n opposites = np.zeros((2, n))\n for i in range(n):\n opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]\n opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]\n total = np.sum(opposites, axis=1)\n x = int(total[0] / n)\n y = int(total[1] / n)\n wrist = x, y\n return wrist\n\n\n<mask token>\n\n\ndef simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh):\n dist_from_fixedPoint = []\n img_height, img_width = img.shape[0:2]\n hull_nbPts = hull.shape[0]\n for i in range(hull_nbPts):\n dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)\n )\n max_indx = np.argsort(-1 * np.array(dist_from_fixedPoint))\n idx_ok = []\n for i in range(hull_nbPts):\n idx = max_indx[i]\n if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,\n img_width, edge_thresh):\n if len(idx_ok) == 0:\n idx_ok.append(idx)\n else:\n not_similar = True\n for idx_neighbor in idx_ok:\n not_similar = points_not_similar(hull[idx, 0, 0], hull[\n idx, 0, 1], 
hull[idx_neighbor, 0, 0], hull[\n idx_neighbor, 0, 1], neighbor_thresh)\n if not not_similar:\n break\n if not_similar:\n idx_ok.append(idx)\n return idx_ok\n\n\n<mask token>\n\n\ndef hsv_preprocessing(img):\n lower = np.array([1, 38, 51])\n upper = np.array([28, 250, 242])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n skinMask = cv2.inRange(hsv, lower, upper)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n skinMask = cv2.erode(skinMask, kernel, iterations=2)\n skinMask = cv2.dilate(skinMask, kernel, iterations=2)\n blur = cv2.GaussianBlur(skinMask, (5, 5), 0)\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.\n THRESH_OTSU)\n return bin_image\n\n\ndef find_contour_hull(binary_image):\n contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2\n .CHAIN_APPROX_SIMPLE)\n max_area = 0\n ci = 0\n for i in range(len(contours)):\n cnt = contours[i]\n area = cv2.contourArea(cnt)\n if area > max_area:\n max_area = area\n ci = i\n cnt = contours[ci]\n hull = cv2.convexHull(cnt)\n hull_idx = cv2.convexHull(cnt, returnPoints=False)\n return cnt, hull, hull_idx\n\n\ndef draws_contour_hull(img, cnt, hull):\n drawing = np.zeros(img.shape, np.uint8)\n cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)\n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\n return drawing\n\n\ndef eliminate_background(img, backGround, thres_diff):\n height, width, depth = img.shape\n for i in range(height):\n for j in range(width):\n erase = True\n for k in range(depth):\n if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:\n erase = False\n if erase:\n img[i, j, :] = 0\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef appendimages(im1, im2):\n \"\"\" Return a new image that appends the two images side-by-side. \"\"\"\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),\n axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),\n axis=0)\n return np.concatenate((im1, im2), axis=1)\n\n\ndef append_imgs(im1, im2, im3):\n buff = np.concatenate((im1, im2), axis=1)\n return np.concatenate((buff, im3), axis=1)\n\n\n<mask token>\n\n\ndef points_not_similar(x, y, x_neighb, y_neighb, threshold):\n no_same_point = fabs(x - x_neighb) + fabs(y - y_neighb) > 2 * threshold\n return no_same_point\n\n\ndef good_points(x, y, x_next, y_next, img_height, img_width, threshold):\n no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold\n no_at_edge = x > threshold and y > threshold and fabs(x - img_width\n ) > threshold and fabs(y - img_height) > threshold\n return no_same_point and no_at_edge\n\n\n<mask token>\n\n\ndef find_wrist(center, contour, set_idx_convDefs):\n n = len(set_idx_convDefs)\n opposites = np.zeros((2, n))\n for i in range(n):\n opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]\n opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]\n total = np.sum(opposites, axis=1)\n x = int(total[0] / n)\n y = int(total[1] / n)\n wrist = x, y\n return wrist\n\n\n<mask token>\n\n\ndef simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh):\n dist_from_fixedPoint = []\n img_height, img_width = img.shape[0:2]\n hull_nbPts = hull.shape[0]\n for i in range(hull_nbPts):\n dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)\n )\n max_indx = np.argsort(-1 * np.array(dist_from_fixedPoint))\n idx_ok = []\n for i in range(hull_nbPts):\n idx = max_indx[i]\n if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,\n img_width, edge_thresh):\n if len(idx_ok) 
== 0:\n idx_ok.append(idx)\n else:\n not_similar = True\n for idx_neighbor in idx_ok:\n not_similar = points_not_similar(hull[idx, 0, 0], hull[\n idx, 0, 1], hull[idx_neighbor, 0, 0], hull[\n idx_neighbor, 0, 1], neighbor_thresh)\n if not not_similar:\n break\n if not_similar:\n idx_ok.append(idx)\n return idx_ok\n\n\n<mask token>\n\n\ndef hsv_preprocessing(img):\n lower = np.array([1, 38, 51])\n upper = np.array([28, 250, 242])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n skinMask = cv2.inRange(hsv, lower, upper)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n skinMask = cv2.erode(skinMask, kernel, iterations=2)\n skinMask = cv2.dilate(skinMask, kernel, iterations=2)\n blur = cv2.GaussianBlur(skinMask, (5, 5), 0)\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.\n THRESH_OTSU)\n return bin_image\n\n\ndef find_contour_hull(binary_image):\n contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2\n .CHAIN_APPROX_SIMPLE)\n max_area = 0\n ci = 0\n for i in range(len(contours)):\n cnt = contours[i]\n area = cv2.contourArea(cnt)\n if area > max_area:\n max_area = area\n ci = i\n cnt = contours[ci]\n hull = cv2.convexHull(cnt)\n hull_idx = cv2.convexHull(cnt, returnPoints=False)\n return cnt, hull, hull_idx\n\n\ndef draws_contour_hull(img, cnt, hull):\n drawing = np.zeros(img.shape, np.uint8)\n cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)\n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\n return drawing\n\n\ndef eliminate_background(img, backGround, thres_diff):\n height, width, depth = img.shape\n for i in range(height):\n for j in range(width):\n erase = True\n for k in range(depth):\n if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:\n erase = False\n if erase:\n img[i, j, :] = 0\n return img\n\n\n<mask token>\n\n\ndef main():\n image_name = 'hand_in_BG5.png'\n img = cv2.imread(image_name)\n bin_image = simple_preprocessing(img)\n cv2.imshow('orig', img)\n cv2.imshow('bin', bin_image)\n 
cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort(-1 * convDefs[:, 0, 3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist, 0, 2]\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)\n hull_nbPts = hull.shape[0]\n \"\"\"\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n \"\"\"\n moments = cv2.moments(cnt)\n if moments['m00'] != 0:\n cx = int(moments['m10'] / moments['m00'])\n cy = int(moments['m01'] / moments['m00'])\n centr = cx, cy\n cv2.circle(drawing, centr, 5, [0, 255, 255], 2)\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh)\n max_5hull_idx = idx_ok[0:1]\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)\n cv2.imshow('contour and convex hull', drawing)\n k = cv2.waitKey(0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef appendimages(im1, im2):\n \"\"\" Return a new image that appends the two images side-by-side. \"\"\"\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),\n axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),\n axis=0)\n return np.concatenate((im1, im2), axis=1)\n\n\ndef append_imgs(im1, im2, im3):\n buff = np.concatenate((im1, im2), axis=1)\n return np.concatenate((buff, im3), axis=1)\n\n\ndef point_not_at_edge(x, y, img_height, img_width, threshold):\n no_at_edge = x > threshold and y > threshold and fabs(x - img_width\n ) > threshold and fabs(y - img_height) > threshold\n return no_at_edge\n\n\ndef points_not_similar(x, y, x_neighb, y_neighb, threshold):\n no_same_point = fabs(x - x_neighb) + fabs(y - y_neighb) > 2 * threshold\n return no_same_point\n\n\ndef good_points(x, y, x_next, y_next, img_height, img_width, threshold):\n no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold\n no_at_edge = x > threshold and y > threshold and fabs(x - img_width\n ) > threshold and fabs(y - img_height) > threshold\n return no_same_point and no_at_edge\n\n\n<mask token>\n\n\ndef find_wrist(center, contour, set_idx_convDefs):\n n = len(set_idx_convDefs)\n opposites = np.zeros((2, n))\n for i in range(n):\n opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]\n opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]\n total = np.sum(opposites, axis=1)\n x = int(total[0] / n)\n y = int(total[1] / n)\n wrist = x, y\n return wrist\n\n\n<mask token>\n\n\ndef simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh):\n dist_from_fixedPoint = []\n img_height, img_width = img.shape[0:2]\n hull_nbPts = hull.shape[0]\n for i in range(hull_nbPts):\n dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)\n )\n max_indx = np.argsort(-1 * 
np.array(dist_from_fixedPoint))\n idx_ok = []\n for i in range(hull_nbPts):\n idx = max_indx[i]\n if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,\n img_width, edge_thresh):\n if len(idx_ok) == 0:\n idx_ok.append(idx)\n else:\n not_similar = True\n for idx_neighbor in idx_ok:\n not_similar = points_not_similar(hull[idx, 0, 0], hull[\n idx, 0, 1], hull[idx_neighbor, 0, 0], hull[\n idx_neighbor, 0, 1], neighbor_thresh)\n if not not_similar:\n break\n if not_similar:\n idx_ok.append(idx)\n return idx_ok\n\n\n<mask token>\n\n\ndef hsv_preprocessing(img):\n lower = np.array([1, 38, 51])\n upper = np.array([28, 250, 242])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n skinMask = cv2.inRange(hsv, lower, upper)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n skinMask = cv2.erode(skinMask, kernel, iterations=2)\n skinMask = cv2.dilate(skinMask, kernel, iterations=2)\n blur = cv2.GaussianBlur(skinMask, (5, 5), 0)\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.\n THRESH_OTSU)\n return bin_image\n\n\ndef find_contour_hull(binary_image):\n contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2\n .CHAIN_APPROX_SIMPLE)\n max_area = 0\n ci = 0\n for i in range(len(contours)):\n cnt = contours[i]\n area = cv2.contourArea(cnt)\n if area > max_area:\n max_area = area\n ci = i\n cnt = contours[ci]\n hull = cv2.convexHull(cnt)\n hull_idx = cv2.convexHull(cnt, returnPoints=False)\n return cnt, hull, hull_idx\n\n\ndef draws_contour_hull(img, cnt, hull):\n drawing = np.zeros(img.shape, np.uint8)\n cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)\n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\n return drawing\n\n\ndef eliminate_background(img, backGround, thres_diff):\n height, width, depth = img.shape\n for i in range(height):\n for j in range(width):\n erase = True\n for k in range(depth):\n if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:\n erase = False\n if erase:\n img[i, j, :] = 0\n 
return img\n\n\n<mask token>\n\n\ndef main():\n image_name = 'hand_in_BG5.png'\n img = cv2.imread(image_name)\n bin_image = simple_preprocessing(img)\n cv2.imshow('orig', img)\n cv2.imshow('bin', bin_image)\n cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort(-1 * convDefs[:, 0, 3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist, 0, 2]\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)\n hull_nbPts = hull.shape[0]\n \"\"\"\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n \"\"\"\n moments = cv2.moments(cnt)\n if moments['m00'] != 0:\n cx = int(moments['m10'] / moments['m00'])\n cy = int(moments['m01'] / moments['m00'])\n centr = cx, cy\n cv2.circle(drawing, centr, 5, [0, 255, 255], 2)\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh)\n max_5hull_idx = idx_ok[0:1]\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)\n cv2.imshow('contour and convex hull', drawing)\n k = cv2.waitKey(0)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef appendimages(im1, im2):\n \"\"\" Return a new image that appends the two images side-by-side. \"\"\"\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1, zeros((rows2 - rows1, im1.shape[1]))),\n axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2, zeros((rows1 - rows2, im2.shape[1]))),\n axis=0)\n return np.concatenate((im1, im2), axis=1)\n\n\ndef append_imgs(im1, im2, im3):\n buff = np.concatenate((im1, im2), axis=1)\n return np.concatenate((buff, im3), axis=1)\n\n\ndef point_not_at_edge(x, y, img_height, img_width, threshold):\n no_at_edge = x > threshold and y > threshold and fabs(x - img_width\n ) > threshold and fabs(y - img_height) > threshold\n return no_at_edge\n\n\ndef points_not_similar(x, y, x_neighb, y_neighb, threshold):\n no_same_point = fabs(x - x_neighb) + fabs(y - y_neighb) > 2 * threshold\n return no_same_point\n\n\ndef good_points(x, y, x_next, y_next, img_height, img_width, threshold):\n no_same_point = fabs(x - x_next) + fabs(y - y_next) > 2 * threshold\n no_at_edge = x > threshold and y > threshold and fabs(x - img_width\n ) > threshold and fabs(y - img_height) > threshold\n return no_same_point and no_at_edge\n\n\n<mask token>\n\n\ndef find_wrist(center, contour, set_idx_convDefs):\n n = len(set_idx_convDefs)\n opposites = np.zeros((2, n))\n for i in range(n):\n opposites[0, i] = 2 * center[0] - contour[set_idx_convDefs[i], 0, 0]\n opposites[1, i] = 2 * center[1] - contour[set_idx_convDefs[i], 0, 1]\n total = np.sum(opposites, axis=1)\n x = int(total[0] / n)\n y = int(total[1] / n)\n wrist = x, y\n return wrist\n\n\n<mask token>\n\n\ndef simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh):\n dist_from_fixedPoint = []\n img_height, img_width = img.shape[0:2]\n hull_nbPts = hull.shape[0]\n for i in range(hull_nbPts):\n dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2)\n )\n max_indx = np.argsort(-1 * 
np.array(dist_from_fixedPoint))\n idx_ok = []\n for i in range(hull_nbPts):\n idx = max_indx[i]\n if point_not_at_edge(hull[idx, 0, 0], hull[idx, 0, 1], img_height,\n img_width, edge_thresh):\n if len(idx_ok) == 0:\n idx_ok.append(idx)\n else:\n not_similar = True\n for idx_neighbor in idx_ok:\n not_similar = points_not_similar(hull[idx, 0, 0], hull[\n idx, 0, 1], hull[idx_neighbor, 0, 0], hull[\n idx_neighbor, 0, 1], neighbor_thresh)\n if not not_similar:\n break\n if not_similar:\n idx_ok.append(idx)\n return idx_ok\n\n\ndef simple_preprocessing(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n blur = cv2.erode(blur, kernel, iterations=2)\n blur = cv2.dilate(blur, kernel, iterations=2)\n ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.\n THRESH_OTSU)\n return bin_image\n\n\ndef simple_preprocessing2(img, backGround):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)\n gray = gray - gray2\n blur = cv2.GaussianBlur(gray, (5, 5), 0)\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.\n THRESH_OTSU)\n return bin_image\n\n\ndef hsv_preprocessing(img):\n lower = np.array([1, 38, 51])\n upper = np.array([28, 250, 242])\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n skinMask = cv2.inRange(hsv, lower, upper)\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))\n skinMask = cv2.erode(skinMask, kernel, iterations=2)\n skinMask = cv2.dilate(skinMask, kernel, iterations=2)\n blur = cv2.GaussianBlur(skinMask, (5, 5), 0)\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.\n THRESH_OTSU)\n return bin_image\n\n\ndef find_contour_hull(binary_image):\n contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2\n .CHAIN_APPROX_SIMPLE)\n max_area = 0\n ci = 0\n for i in range(len(contours)):\n cnt = contours[i]\n area = 
cv2.contourArea(cnt)\n if area > max_area:\n max_area = area\n ci = i\n cnt = contours[ci]\n hull = cv2.convexHull(cnt)\n hull_idx = cv2.convexHull(cnt, returnPoints=False)\n return cnt, hull, hull_idx\n\n\ndef draws_contour_hull(img, cnt, hull):\n drawing = np.zeros(img.shape, np.uint8)\n cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3)\n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\n return drawing\n\n\ndef eliminate_background(img, backGround, thres_diff):\n height, width, depth = img.shape\n for i in range(height):\n for j in range(width):\n erase = True\n for k in range(depth):\n if fabs(img[i, j, k] - backGround[i, j, k]) > thres_diff:\n erase = False\n if erase:\n img[i, j, :] = 0\n return img\n\n\n<mask token>\n\n\ndef tracking():\n camera = cv2.VideoCapture(0)\n _, img = camera.read()\n h, w, d = img.shape\n fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')\n out = cv2.VideoWriter()\n success = out.open('output.avi', fourcc, 15, (3 * w, h), True)\n waitTime = 100\n for i in range(waitTime):\n _, average = camera.read()\n index_im = 0\n while True:\n grabbed, img = camera.read()\n img_diff = cv2.absdiff(img, average)\n bin_image = simple_preprocessing(img_diff)\n bin_image2 = bin_image.copy()\n cv2.imshow('binaire', bin_image2)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort(-1 * convDefs[:, 0, 3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist, 0, 2]\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)\n hull_nbPts = hull.shape[0]\n \"\"\"\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n \"\"\"\n moments = cv2.moments(cnt)\n if moments['m00'] != 0:\n cx = 
int(moments['m10'] / moments['m00'])\n cy = int(moments['m01'] / moments['m00'])\n centr = cx, cy\n cv2.circle(drawing, centr, 5, [0, 255, 255], 2)\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint,\n edge_thresh, neighbor_thresh)\n max_5hull_idx = idx_ok[0:5]\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)\n \"\"\"\n print img.shape\n print bin_image2.shape\n print drawing.shape\n \"\"\"\n frame = append_imgs(img, bin_image2, drawing)\n cv2.imwrite('store2/' + 'img' + str(index_im) + '.jpg', frame)\n index_im += 1\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n camera.release()\n out.release()\n cv2.destroyAllWindows()\n\n\ndef main():\n image_name = 'hand_in_BG5.png'\n img = cv2.imread(image_name)\n bin_image = simple_preprocessing(img)\n cv2.imshow('orig', img)\n cv2.imshow('bin', bin_image)\n cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort(-1 * convDefs[:, 0, 3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist, 0, 2]\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i, 0]), 5, [255, 255, 0], 2)\n hull_nbPts = hull.shape[0]\n \"\"\"\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n \"\"\"\n moments = cv2.moments(cnt)\n if moments['m00'] != 0:\n cx = int(moments['m10'] / moments['m00'])\n cy = int(moments['m01'] / moments['m00'])\n centr = cx, cy\n cv2.circle(drawing, centr, 5, [0, 255, 255], 2)\n 
wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2)\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh,\n neighbor_thresh)\n max_5hull_idx = idx_ok[0:1]\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i, 0]), 6, [0, 255, 0], 2)\n cv2.imshow('contour and convex hull', drawing)\n k = cv2.waitKey(0)\n\n\n<mask token>\n",
"step-5": "import cv2\nimport numpy as np\nfrom math import *\n\n\ndef appendimages(im1,im2):\n \"\"\" Return a new image that appends the two images side-by-side. \"\"\"\n # select the image with the fewest rows and fill in enough empty rows\n rows1 = im1.shape[0]\n rows2 = im2.shape[0]\n if rows1 < rows2:\n im1 = np.concatenate((im1,zeros((rows2-rows1,im1.shape[1]))),axis=0)\n elif rows1 > rows2:\n im2 = np.concatenate((im2,zeros((rows1-rows2,im2.shape[1]))),axis=0)\n # if none of these cases they are equal, no filling needed.\n return np.concatenate((im1,im2), axis=1)\n\ndef append_imgs(im1, im2, im3):\n #buff = appendimages(im1,im2)\n #return appendimages(buff,im3)\n\n buff = np.concatenate((im1,im2), axis=1)\n return np.concatenate((buff,im3), axis=1)\n\n\n#check whether the point is near edge or not\ndef point_not_at_edge( x, y, img_height, img_width, threshold):\n no_at_edge = ( (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) )\n return no_at_edge\n \n#check whether two points are too near from each other \ndef points_not_similar(x, y, x_neighb, y_neighb, threshold):\n no_same_point = (fabs(x - x_neighb) + fabs(y - y_neighb) > 2*threshold)\n return no_same_point \n\n\ndef good_points(x, y, x_next, y_next, img_height, img_width, threshold):\n no_same_point = (fabs(x - x_next) + fabs(y - y_next) > 2*threshold)\n no_at_edge = (x > threshold) and (y > threshold) and ( fabs(x - img_width) > threshold ) and ( fabs(y - img_height) > threshold ) \n return (no_same_point and no_at_edge)\n\n'''\ncalculate the point on wrist of the hand\nby taking the average of opposites of convexity defects to the center\n''' \ndef find_wrist(center, contour, set_idx_convDefs):\n n = len(set_idx_convDefs)\n opposites = np.zeros((2,n))\n for i in range(n):\n opposites[0,i] = 2*center[0] - contour[set_idx_convDefs[i], 0, 0] #calcul x\n opposites[1,i] = 2*center[1] - contour[set_idx_convDefs[i], 0, 1] #calcul y\n \n 
total = np.sum(opposites, axis = 1)\n #print total\n x = int(total[0]/n)\n y = int(total[1]/n)\n wrist = (x, y)\n #print 'wrist = ', wrist\n return wrist\n\n'''\nsimple methods to detect finger tips\nby calculating the farthest points on convex hull\ncompared to a fixed point. This fixed point can be center or wrist\n'''\ndef simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh):\n dist_from_fixedPoint = []\n img_height, img_width = img.shape[0:2]\n hull_nbPts = hull.shape[0]\n\n #calculate distance to fixed Point\n for i in range(hull_nbPts):\n dist_from_fixedPoint.append(cv2.norm(fixedPoint - hull[i], cv2.NORM_L2))\n \n #sort index from farthest to nearest\n max_indx = np.argsort(-1*np.array(dist_from_fixedPoint))\n\n #need to eliminate same points and points at edge\n #results stored in idx_ok, the list of candidate indices of hulls \n idx_ok = []\n\n for i in range(hull_nbPts):\n idx = max_indx[i]\n if point_not_at_edge(hull[idx,0,0], hull[idx,0,1], img_height, img_width, edge_thresh):\n if(len(idx_ok) == 0):\n idx_ok.append(idx)\n else:\n not_similar = True\n for idx_neighbor in idx_ok:\n not_similar = (points_not_similar(hull[idx,0,0], hull[idx,0,1], hull[idx_neighbor,0,0], hull[idx_neighbor,0,1],neighbor_thresh))\n if not not_similar: #if similar break the loop \n break\n \n if(not_similar):\n idx_ok.append(idx) \n return idx_ok\n\ndef simple_preprocessing(img):\n \n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (5,5), 0)\n \n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))\n blur = cv2.erode(blur, kernel, iterations = 2)\n blur = cv2.dilate(blur, kernel, iterations = 2)\n\n ret, bin_image = cv2.threshold(blur, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return bin_image\n\n\ndef simple_preprocessing2(img, backGround):\n \n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n gray2 = cv2.cvtColor(backGround, cv2.COLOR_BGR2GRAY)\n\n gray = gray-gray2\n \n blur = cv2.GaussianBlur(gray, 
(5,5), 0)\n \n #kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))\n #blur = cv2.erode(blur, kernel, iterations = 2)\n #blur = cv2.dilate(blur, kernel, iterations = 2)\n\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return bin_image\n\n\n\ndef hsv_preprocessing(img):\n \n #define boundaries of HSV pixel intensities to be considered as 'skin'\n #H: 2-39 / 360 * 255 = 1-28\n #S: 0.15 - 0.9 / 1 * 255 = 38- 250\n #V: 0.2 - 0.95 / 1 * 255 = \n lower = np.array([1, 38, 51])\n upper = np.array([28, 250, 242])\n\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n #hsv = cv2.GaussianBlur(hsv, (5,5), 0)\n skinMask = cv2.inRange(hsv, lower, upper)\n\n #choosing a structure elements to apply noise-remove process\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10,10))\n skinMask = cv2.erode(skinMask, kernel, iterations = 2)\n skinMask = cv2.dilate(skinMask, kernel, iterations = 2)\n\n blur = cv2.GaussianBlur(skinMask, (5,5), 0)\n\n ret, bin_image = cv2.threshold(blur, 70, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n return bin_image\n\n\ndef find_contour_hull(binary_image):\n #find the contour\n contours, hierarchy = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n #search the maximum contour in the hierachy tree of contours\n max_area = 0\n ci = 0\n for i in range(len(contours)):\n cnt = contours[i]\n area = cv2.contourArea(cnt)\n if(area > max_area):\n max_area = area\n ci = i\n\n cnt = contours[ci]\n hull = cv2.convexHull(cnt)\n hull_idx = cv2.convexHull(cnt, returnPoints = False)\n\n return cnt, hull, hull_idx\n\ndef draws_contour_hull(img, cnt, hull):\n #draws the image with only the contour and its convex hull\n drawing = np.zeros(img.shape, np.uint8)\n cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 3) \n cv2.drawContours(drawing, [hull], 0, (0, 0, 255), 3)\n return drawing\n\n\ndef eliminate_background(img, backGround, thres_diff):\n height, width, depth = img.shape\n for i in 
range(height):\n for j in range(width):\n erase = True\n for k in range(depth):\n if(fabs(img[i,j,k] - backGround[i,j,k]) > thres_diff):\n erase = False\n if erase:\n img[i,j,:] = 0\n return img\n'''\nTracking by camera\nNOTE: hsv is very color and light sensitive and simple_preprocessing seems stabler\n'''\n\n \n'''\n firstSec = 0\n camera = cv2.VideoCapture(0)\n for i in range(12):\n camera.read()\n \n grabbed, backGround = camera.read()\n for i in range(12):\n grabbed, img = camera.read()\n backGround = backGround/2 + img/2\n'''\n\n\ndef tracking():\n camera = cv2.VideoCapture(0)\n _,img = camera.read()\n \n h,w,d = img.shape\n\n #out = cv2.VideoWriter('video.avi',-1,1,(3*w,h))\n \n fourcc = cv2.cv.CV_FOURCC('F', 'M', 'P', '4')\n out = cv2.VideoWriter()\n success = out.open('output.avi',fourcc, 15, (3*w,h), True)\n \n\n waitTime = 100\n for i in range(waitTime):\n _, average = camera.read()\n\n #average = np.float32(average)\n index_im = 0\n while True:\n\n grabbed, img = camera.read()\n #alpha = 0.01 #factor of forgetting\n #cv2.accumulateWeighted(img, average, alpha)#img is src, average is dst\n\n img_diff = cv2.absdiff(img, average)#convert scale and do subtract these 2 images\n #cv2.imshow('img_diff', img_diff)\n \n #substract background\n #img = eliminate_background(img, backGround, 20)\n\n #bin_image = simple_preprocessing(img, backGround)\n bin_image = simple_preprocessing(img_diff)\n bin_image2 = bin_image.copy()\n cv2.imshow('binaire', bin_image2)\n # bin_image = hsv_preprocessing(img)\n\n # cv2.imshow('orig', img)\n # cv2.imshow('bin', bin_image)\n # cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n\n\n #search the points between each finger by using convexity defects\n #see the doc of opencv to understand implementation details\n\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort((-1)*convDefs[:,0,3])\n max4dist = dist_order[0:4]\n max4points = 
convDefs[max4dist,0,2]\n\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2) \n \n hull_nbPts = hull.shape[0]\n\n '''\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n '''\n\n #find and draw center of contour\n moments = cv2.moments(cnt)\n if moments['m00']!=0:\n cx = int(moments['m10']/moments['m00']) # cx = M10/M00\n cy = int(moments['m01']/moments['m00']) # cy = M01/M00\n \n centr=(cx,cy) \n cv2.circle(drawing, centr, 5, [0, 255, 255], 2) \n\n #find and draw point represents the wrist of the hand\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2) \n\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)\n\n #print 'list of idx_ok = ', idx_ok\n max_5hull_idx = idx_ok[0:5]\n #print 'first five of idx_ok = ', max_5hull_idx\n\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)\n #print hull[i]\n\n #print dist_from_center\n #cv2.imshow('contour and convex hull', drawing)\n \n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n drawing = cv2.cvtColor(drawing, cv2.COLOR_BGR2GRAY)\n '''\n print img.shape\n print bin_image2.shape\n print drawing.shape\n '''\n \n frame = append_imgs(img, bin_image2, drawing)\n \n #cv2.imshow('frame', frame)\n #out.write(frame)\n cv2.imwrite(\"store2/\" + \"img\"+str(index_im) + \".jpg\", frame)\n index_im += 1\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n \n camera.release()\n out.release()\n #self.out = None\n cv2.destroyAllWindows()\n\ndef main():\n\n image_name = \"hand_in_BG5.png\"\n\n img = cv2.imread(image_name)\n\n bin_image = simple_preprocessing(img)\n #bin_image = hsv_preprocessing(img)\n\n cv2.imshow('orig', img)\n 
cv2.imshow('bin', bin_image)\n cv2.waitKey(0)\n cnt, hull, hull_idx = find_contour_hull(bin_image)\n drawing = draws_contour_hull(img, cnt, hull)\n\n\n #search the points between each finger by using convexity defects\n #see the doc of opencv to understand implementation details\n\n convDefs = cv2.convexityDefects(cnt, hull_idx)\n dist_order = np.argsort((-1)*convDefs[:,0,3])\n max4dist = dist_order[0:4]\n max4points = convDefs[max4dist,0,2]\n\n for i in max4points:\n cv2.circle(drawing, tuple(cnt[i,0]), 5, [255,255,0], 2) \n \n hull_nbPts = hull.shape[0]\n\n '''\n #draws all the points constitue the convex hull (for debugging)\n for i in range(hull_nbPts):\n cv2.circle(drawing, tuple(hull[i,0]), 4, [255,0,0], 2) \n cv2.putText(drawing, str(i), tuple(hull[i,0]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, [255,0,0], 1, cv2.CV_AA)\n '''\n\n #find and draw center of contour\n moments = cv2.moments(cnt)\n if moments['m00']!=0:\n cx = int(moments['m10']/moments['m00']) # cx = M10/M00\n cy = int(moments['m01']/moments['m00']) # cy = M01/M00\n \n centr=(cx,cy) \n cv2.circle(drawing, centr, 5, [0, 255, 255], 2) \n\n #find and draw point represents the wrist of the hand\n wrist = find_wrist(centr, cnt, max4points)\n cv2.circle(drawing, wrist, 5, [0, 255, 255], 2) \n\n edge_thresh = 20\n neighbor_thresh = 20\n fixedPoint = wrist\n idx_ok = simple_detect_fingerTips(hull, img, fixedPoint, edge_thresh, neighbor_thresh)\n\n #print 'list of idx_ok = ', idx_ok\n max_5hull_idx = idx_ok[0:1]\n #print 'first five of idx_ok = ', max_5hull_idx\n\n for i in max_5hull_idx:\n cv2.circle(drawing, tuple(hull[i,0]), 6, [0,255,0], 2)\n #print hull[i]\n\n #print dist_from_center\n cv2.imshow('contour and convex hull', drawing)\n k = cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n # main()\n tracking()\n",
"step-ids": [
9,
11,
12,
15,
18
]
}
|
[
9,
11,
12,
15,
18
] |
# Generated by Django 3.1.6 on 2021-02-15 12:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('name', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),
('author', models.CharField(max_length=250)),
('was_buplished', models.DateField()),
],
),
migrations.CreateModel(
name='Alias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=250)),
('start', models.DateTimeField()),
('end', models.DateTimeField(default=None)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),
],
),
]
|
normal
|
{
"blob_id": "6239cb08509b8e84a88db95479af05845876d9b6",
"index": 1502,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Book', fields=[('name',\n models.CharField(max_length=250)), ('slug', models.SlugField(\n max_length=25, primary_key=True, serialize=False, unique=True)), (\n 'author', models.CharField(max_length=250)), ('was_buplished',\n models.DateField())]), migrations.CreateModel(name='Alias', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('alias', models.CharField(\n max_length=250)), ('start', models.DateTimeField()), ('end', models\n .DateTimeField(default=None)), ('target', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Book', fields=[('name',\n models.CharField(max_length=250)), ('slug', models.SlugField(\n max_length=25, primary_key=True, serialize=False, unique=True)), (\n 'author', models.CharField(max_length=250)), ('was_buplished',\n models.DateField())]), migrations.CreateModel(name='Alias', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('alias', models.CharField(\n max_length=250)), ('start', models.DateTimeField()), ('end', models\n .DateTimeField(default=None)), ('target', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-15 12:13\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Book',\n fields=[\n ('name', models.CharField(max_length=250)),\n ('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),\n ('author', models.CharField(max_length=250)),\n ('was_buplished', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Alias',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('alias', models.CharField(max_length=250)),\n ('start', models.DateTimeField()),\n ('end', models.DateTimeField(default=None)),\n ('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PrivateFile2(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrivateFile(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class PrivateFile2(models.Model):
title = models.CharField('Title', max_length=200)
file = models.FileField('File')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PrivateFile(models.Model):
title = models.CharField('Title', max_length=200)
file = PrivateFileField('File')
class PrivateFile2(models.Model):
title = models.CharField('Title', max_length=200)
file = models.FileField('File')
<|reserved_special_token_1|>
from django.db import models
from private_storage.fields import PrivateFileField
class PrivateFile(models.Model):
title = models.CharField('Title', max_length=200)
file = PrivateFileField('File')
class PrivateFile2(models.Model):
title = models.CharField('Title', max_length=200)
file = models.FileField('File')
<|reserved_special_token_1|>
from django.db import models
from private_storage.fields import PrivateFileField
class PrivateFile(models.Model):
title = models.CharField("Title", max_length=200)
file = PrivateFileField("File")
class PrivateFile2(models.Model):
title = models.CharField("Title", max_length=200)
file = models.FileField("File")
|
flexible
|
{
"blob_id": "e12c397ca1ae91ce314cda5fe2cd8e0ec4cfa861",
"index": 2199,
"step-1": "<mask token>\n\n\nclass PrivateFile2(models.Model):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass PrivateFile(models.Model):\n <mask token>\n <mask token>\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField('Title', max_length=200)\n file = models.FileField('File')\n",
"step-3": "<mask token>\n\n\nclass PrivateFile(models.Model):\n title = models.CharField('Title', max_length=200)\n file = PrivateFileField('File')\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField('Title', max_length=200)\n file = models.FileField('File')\n",
"step-4": "from django.db import models\nfrom private_storage.fields import PrivateFileField\n\n\nclass PrivateFile(models.Model):\n title = models.CharField('Title', max_length=200)\n file = PrivateFileField('File')\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField('Title', max_length=200)\n file = models.FileField('File')\n",
"step-5": "from django.db import models\nfrom private_storage.fields import PrivateFileField\n\n\nclass PrivateFile(models.Model):\n title = models.CharField(\"Title\", max_length=200)\n file = PrivateFileField(\"File\")\n\n\nclass PrivateFile2(models.Model):\n title = models.CharField(\"Title\", max_length=200)\n file = models.FileField(\"File\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from selenium.webdriver.common.by import By
class BasePageLocators:
LOGIN_LINK = (By.CSS_SELECTOR, "#login_link")
BASKET_LINK = (By.CSS_SELECTOR, '[class="btn btn-default"]:nth-child(1)')
USER_ICON = (By.CSS_SELECTOR, ".icon-user")
class LoginPageLocators:
LOG_IN_FORM = (By.CSS_SELECTOR, "#login_form")
REGISTER_FORM = (By.CSS_SELECTOR, "#register_form")
REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')
REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')
REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')
REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name="registration_submit"]')
class BasketPageLocators:
BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')
NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')
PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')
class ProductPageLocators:
ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')
SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')
SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')
|
normal
|
{
"blob_id": "5d3b9005b8924da36a5885201339aa41082034cd",
"index": 8692,
"step-1": "<mask token>\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-2": "<mask token>\n\n\nclass LoginPageLocators:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-3": "<mask token>\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'\n REGISTER_FORM = By.CSS_SELECTOR, '#register_form'\n REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'\n REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,\n '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,\n '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-4": "<mask token>\n\n\nclass BasePageLocators:\n LOGIN_LINK = By.CSS_SELECTOR, '#login_link'\n BASKET_LINK = By.CSS_SELECTOR, '[class=\"btn btn-default\"]:nth-child(1)'\n USER_ICON = By.CSS_SELECTOR, '.icon-user'\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = By.CSS_SELECTOR, '#login_form'\n REGISTER_FORM = By.CSS_SELECTOR, '#register_form'\n REGISTRATION_EMAIL = By.CSS_SELECTOR, '#id_registration-email'\n REGISTRATION_PASSWORD = By.CSS_SELECTOR, '#id_registration-password1'\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR,\n '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR,\n '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = By.CSS_SELECTOR, '#content_inner'\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR,\n '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = By.CSS_SELECTOR, '.btn-add-to-basket'\n SHIPMENT_PRICE = By.CSS_SELECTOR, '.product_main .price_color'\n SHIPMENT_NAME = By.CSS_SELECTOR, '.product_main h1'\n",
"step-5": "from selenium.webdriver.common.by import By\n\n\nclass BasePageLocators:\n LOGIN_LINK = (By.CSS_SELECTOR, \"#login_link\")\n BASKET_LINK = (By.CSS_SELECTOR, '[class=\"btn btn-default\"]:nth-child(1)')\n USER_ICON = (By.CSS_SELECTOR, \".icon-user\")\n\n\nclass LoginPageLocators:\n LOG_IN_FORM = (By.CSS_SELECTOR, \"#login_form\")\n REGISTER_FORM = (By.CSS_SELECTOR, \"#register_form\")\n REGISTRATION_EMAIL = (By.CSS_SELECTOR, '#id_registration-email')\n REGISTRATION_PASSWORD = (By.CSS_SELECTOR, '#id_registration-password1')\n REGISTRATION_PASSWORD_CONFIRM = (By.CSS_SELECTOR, '#id_registration-password2')\n REGISTRATION_SUBMIT_BUTTON = (By.CSS_SELECTOR, '[name=\"registration_submit\"]')\n\n\nclass BasketPageLocators:\n BASKET_STATUS = (By.CSS_SELECTOR, '#content_inner')\n NAME_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(1) > .alertinner strong')\n PRICE_OF_ADDED_SHIPMENT = (By.CSS_SELECTOR, '#messages .alert:nth-child(3) > .alertinner strong')\n\n\nclass ProductPageLocators:\n ADD_IN_BASKET = (By.CSS_SELECTOR, '.btn-add-to-basket')\n SHIPMENT_PRICE = (By.CSS_SELECTOR, '.product_main .price_color')\n SHIPMENT_NAME = (By.CSS_SELECTOR, '.product_main h1')\n\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for index in range(len(train_folder_list)):
path = os.path.join(TRAIN_DIR, train_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
train_input.append([np.array(img)])
train_label.append([np.array(index)])
<|reserved_special_token_0|>
np.save('train_data.npy', train_input)
np.save('train_label.npy', train_label)
<|reserved_special_token_0|>
for index in range(len(test_folder_list)):
path = os.path.join(TEST_DIR, test_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
test_input.append([np.array(img)])
test_label.append([np.array(index)])
<|reserved_special_token_0|>
np.save('test_input.npy', test_input)
np.save('test_label.npy', test_label)
<|reserved_special_token_0|>
np.random.seed(seed)
tf.set_random_seed(seed)
<|reserved_special_token_0|>
print('X train shape')
print(X_train.shape)
print('Y train shape')
print(Y_train.shape)
print('X test shape')
print(X_test.shape)
print('y test shape')
print(Y_test.shape)
<|reserved_special_token_0|>
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),
activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
<|reserved_special_token_0|>
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
<|reserved_special_token_0|>
print("""
Test Accuracy: %.4f""" % model.evaluate(X_test, Y_test)[1])
<|reserved_special_token_0|>
plt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
TRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'
train_folder_list = array(os.listdir(TRAIN_DIR))
train_input = []
train_label = []
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(train_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
for index in range(len(train_folder_list)):
path = os.path.join(TRAIN_DIR, train_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
train_input.append([np.array(img)])
train_label.append([np.array(index)])
train_input = np.reshape(train_input, (-1, 28, 28))
train_label = np.reshape(train_label, (-1,))
train_input = np.array(train_input).astype(np.float32)
train_label = np.array(train_label).astype(np.float32)
np.save('train_data.npy', train_input)
np.save('train_label.npy', train_label)
TEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'
test_folder_list = array(os.listdir(TEST_DIR))
test_input = []
test_label = []
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(test_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
for index in range(len(test_folder_list)):
path = os.path.join(TEST_DIR, test_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
test_input.append([np.array(img)])
test_label.append([np.array(index)])
test_input = np.reshape(test_input, (-1, 28, 28))
test_label = np.reshape(test_label, (-1,))
test_input = np.array(test_input).astype(np.float32)
test_label = np.array(test_label).astype(np.float32)
np.save('test_input.npy', test_input)
np.save('test_label.npy', test_label)
<|reserved_special_token_0|>
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
X_train = train_input
Y_train = train_label
X_test = test_input
Y_test = test_label
print('X train shape')
print(X_train.shape)
print('Y train shape')
print(Y_train.shape)
print('X test shape')
print(X_test.shape)
print('y test shape')
print(Y_test.shape)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),
activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
modelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',
verbose=1, save_best_only=True)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
epochs=15, batch_size=100, verbose=0, callbacks=[
early_stopping_callback, checkpointer])
print("""
Test Accuracy: %.4f""" % model.evaluate(X_test, Y_test)[1])
y_vloss = history.history['val_loss']
y_loss = history.history['loss']
x_len = np.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
<|reserved_special_token_1|>
import os
import cv2
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from numpy import array
import tensorflow as tf
TRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'
train_folder_list = array(os.listdir(TRAIN_DIR))
train_input = []
train_label = []
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(train_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
for index in range(len(train_folder_list)):
path = os.path.join(TRAIN_DIR, train_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
train_input.append([np.array(img)])
train_label.append([np.array(index)])
train_input = np.reshape(train_input, (-1, 28, 28))
train_label = np.reshape(train_label, (-1,))
train_input = np.array(train_input).astype(np.float32)
train_label = np.array(train_label).astype(np.float32)
np.save('train_data.npy', train_input)
np.save('train_label.npy', train_label)
TEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'
test_folder_list = array(os.listdir(TEST_DIR))
test_input = []
test_label = []
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(test_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
for index in range(len(test_folder_list)):
path = os.path.join(TEST_DIR, test_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
test_input.append([np.array(img)])
test_label.append([np.array(index)])
test_input = np.reshape(test_input, (-1, 28, 28))
test_label = np.reshape(test_label, (-1,))
test_input = np.array(test_input).astype(np.float32)
test_label = np.array(test_label).astype(np.float32)
np.save('test_input.npy', test_input)
np.save('test_label.npy', test_label)
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
X_train = train_input
Y_train = train_label
X_test = test_input
Y_test = test_label
print('X train shape')
print(X_train.shape)
print('Y train shape')
print(Y_train.shape)
print('X test shape')
print(X_test.shape)
print('y test shape')
print(Y_test.shape)
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),
activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[
'accuracy'])
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
modelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',
verbose=1, save_best_only=True)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
epochs=15, batch_size=100, verbose=0, callbacks=[
early_stopping_callback, checkpointer])
print("""
Test Accuracy: %.4f""" % model.evaluate(X_test, Y_test)[1])
y_vloss = history.history['val_loss']
y_loss = history.history['loss']
x_len = np.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
<|reserved_special_token_1|>
import os
import cv2
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from numpy import array
import tensorflow as tf
TRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'
train_folder_list = array(os.listdir(TRAIN_DIR))
train_input = []
train_label = []
label_encoder = LabelEncoder() # LabelEncoder Class 호출
integer_encoded = label_encoder.fit_transform(train_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
for index in range(len(train_folder_list)):
path = os.path.join(TRAIN_DIR, train_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
train_input.append([np.array(img)])
train_label.append([np.array(index)])
train_input = np.reshape(train_input, (-1, 28, 28))
train_label = np.reshape(train_label, (-1,))
train_input = np.array(train_input).astype(np.float32)
train_label = np.array(train_label).astype(np.float32)
np.save("train_data.npy", train_input)
np.save("train_label.npy", train_label)
TEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'
test_folder_list = array(os.listdir(TEST_DIR))
test_input = []
test_label = []
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(test_folder_list)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
for index in range(len(test_folder_list)):
path = os.path.join(TEST_DIR, test_folder_list[index])
path = path + '/'
img_list = os.listdir(path)
for img in img_list:
img_path = os.path.join(path, img)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
test_input.append([np.array(img)])
test_label.append([np.array(index)])
test_input = np.reshape(test_input, (-1, 28, 28))
test_label = np.reshape(test_label, (-1,))
test_input = np.array(test_input).astype(np.float32)
test_label = np.array(test_label).astype(np.float32)
np.save("test_input.npy", test_input)
np.save("test_label.npy", test_label)
#-*- coding: utf-8 -*-
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint,EarlyStopping
import matplotlib.pyplot as plt
# seed 값 설정
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
# 데이터 불러오기
# test_input = []
# test_label = []
#
# train_input = []
# train_label = []
X_train = train_input
Y_train = train_label
X_test = test_input
Y_test = test_label
print('X train shape')
print(X_train.shape)
print('Y train shape')
print(Y_train.shape)
print('X test shape')
print(X_test.shape)
print('y test shape')
print(Y_test.shape)
#(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
Y_train = np_utils.to_categorical(Y_train)
Y_test = np_utils.to_categorical(Y_test)
# 컨볼루션 신경망의 설정
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# 모델 최적화 설정
MODEL_DIR = './model/'
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
modelpath="./model/{epoch:02d}-{val_loss:.4f}.hdf5"
checkpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)
# 모델의 실행
history = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=15, batch_size=100, verbose=0, callbacks=[early_stopping_callback,checkpointer])
# 테스트 정확도 출력
print("\n Test Accuracy: %.4f" % (model.evaluate(X_test, Y_test)[1]))
# 테스트 셋의 오차
y_vloss = history.history['val_loss']
# 학습셋의 오차
y_loss = history.history['loss']
# 그래프로 표현
x_len = np.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker='.', c="red", label='Testset_loss')
plt.plot(x_len, y_loss, marker='.', c="blue", label='Trainset_loss')
# 그래프에 그리드를 주고 레이블을 표시
plt.legend(loc='upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
|
flexible
|
{
"blob_id": "01339324ad1a11aff062e8b27efabf27c97157fb",
"index": 9908,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor index in range(len(train_folder_list)):\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n train_input.append([np.array(img)])\n train_label.append([np.array(index)])\n<mask token>\nnp.save('train_data.npy', train_input)\nnp.save('train_label.npy', train_label)\n<mask token>\nfor index in range(len(test_folder_list)):\n path = os.path.join(TEST_DIR, test_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n test_input.append([np.array(img)])\n test_label.append([np.array(index)])\n<mask token>\nnp.save('test_input.npy', test_input)\nnp.save('test_label.npy', test_label)\n<mask token>\nnp.random.seed(seed)\ntf.set_random_seed(seed)\n<mask token>\nprint('X train shape')\nprint(X_train.shape)\nprint('Y train shape')\nprint(Y_train.shape)\nprint('X test shape')\nprint(X_test.shape)\nprint('y test shape')\nprint(Y_test.shape)\n<mask token>\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),\n activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\n<mask token>\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\n<mask token>\nprint(\"\"\"\n Test Accuracy: %.4f\"\"\" % model.evaluate(X_test, Y_test)[1])\n<mask token>\nplt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')\nplt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')\nplt.legend(loc='upper 
right')\nplt.grid()\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.show()\n",
"step-3": "<mask token>\nTRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\ntrain_input = []\ntrain_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(train_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(train_folder_list)):\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n train_input.append([np.array(img)])\n train_label.append([np.array(index)])\ntrain_input = np.reshape(train_input, (-1, 28, 28))\ntrain_label = np.reshape(train_label, (-1,))\ntrain_input = np.array(train_input).astype(np.float32)\ntrain_label = np.array(train_label).astype(np.float32)\nnp.save('train_data.npy', train_input)\nnp.save('train_label.npy', train_label)\nTEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'\ntest_folder_list = array(os.listdir(TEST_DIR))\ntest_input = []\ntest_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(test_folder_list)):\n path = os.path.join(TEST_DIR, test_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n test_input.append([np.array(img)])\n test_label.append([np.array(index)])\ntest_input = np.reshape(test_input, (-1, 28, 28))\ntest_label = np.reshape(test_label, (-1,))\ntest_input = np.array(test_input).astype(np.float32)\ntest_label = 
np.array(test_label).astype(np.float32)\nnp.save('test_input.npy', test_input)\nnp.save('test_label.npy', test_label)\n<mask token>\nseed = 0\nnp.random.seed(seed)\ntf.set_random_seed(seed)\nX_train = train_input\nY_train = train_label\nX_test = test_input\nY_test = test_label\nprint('X train shape')\nprint(X_train.shape)\nprint('Y train shape')\nprint(Y_train.shape)\nprint('X test shape')\nprint(X_test.shape)\nprint('y test shape')\nprint(Y_test.shape)\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255\nY_train = np_utils.to_categorical(Y_train)\nY_test = np_utils.to_categorical(Y_test)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),\n activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nMODEL_DIR = './model/'\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\nmodelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',\n verbose=1, save_best_only=True)\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)\nhistory = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),\n epochs=15, batch_size=100, verbose=0, callbacks=[\n early_stopping_callback, checkpointer])\nprint(\"\"\"\n Test Accuracy: %.4f\"\"\" % model.evaluate(X_test, Y_test)[1])\ny_vloss = history.history['val_loss']\ny_loss = history.history['loss']\nx_len = np.arange(len(y_loss))\nplt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')\nplt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')\nplt.legend(loc='upper 
right')\nplt.grid()\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.show()\n",
"step-4": "import os\nimport cv2\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import OneHotEncoder\nfrom numpy import array\nimport tensorflow as tf\nTRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\ntrain_input = []\ntrain_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(train_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(train_folder_list)):\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n train_input.append([np.array(img)])\n train_label.append([np.array(index)])\ntrain_input = np.reshape(train_input, (-1, 28, 28))\ntrain_label = np.reshape(train_label, (-1,))\ntrain_input = np.array(train_input).astype(np.float32)\ntrain_label = np.array(train_label).astype(np.float32)\nnp.save('train_data.npy', train_input)\nnp.save('train_label.npy', train_label)\nTEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'\ntest_folder_list = array(os.listdir(TEST_DIR))\ntest_input = []\ntest_label = []\nlabel_encoder = LabelEncoder()\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\nonehot_encoder = OneHotEncoder(sparse=False)\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\nfor index in range(len(test_folder_list)):\n path = os.path.join(TEST_DIR, test_folder_list[index])\n path = path + '/'\n img_list = os.listdir(path)\n for img in img_list:\n img_path = os.path.join(path, img)\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n test_input.append([np.array(img)])\n 
test_label.append([np.array(index)])\ntest_input = np.reshape(test_input, (-1, 28, 28))\ntest_label = np.reshape(test_label, (-1,))\ntest_input = np.array(test_input).astype(np.float32)\ntest_label = np.array(test_label).astype(np.float32)\nnp.save('test_input.npy', test_input)\nnp.save('test_label.npy', test_label)\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nimport matplotlib.pyplot as plt\nseed = 0\nnp.random.seed(seed)\ntf.set_random_seed(seed)\nX_train = train_input\nY_train = train_label\nX_test = test_input\nY_test = test_label\nprint('X train shape')\nprint(X_train.shape)\nprint('Y train shape')\nprint(Y_train.shape)\nprint('X test shape')\nprint(X_test.shape)\nprint('y test shape')\nprint(Y_test.shape)\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255\nY_train = np_utils.to_categorical(Y_train)\nY_test = np_utils.to_categorical(Y_test)\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1),\n activation='relu'))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=2))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=[\n 'accuracy'])\nMODEL_DIR = './model/'\nif not os.path.exists(MODEL_DIR):\n os.mkdir(MODEL_DIR)\nmodelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss',\n verbose=1, save_best_only=True)\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)\nhistory = model.fit(X_train, Y_train, validation_data=(X_test, Y_test),\n 
epochs=15, batch_size=100, verbose=0, callbacks=[\n early_stopping_callback, checkpointer])\nprint(\"\"\"\n Test Accuracy: %.4f\"\"\" % model.evaluate(X_test, Y_test)[1])\ny_vloss = history.history['val_loss']\ny_loss = history.history['loss']\nx_len = np.arange(len(y_loss))\nplt.plot(x_len, y_vloss, marker='.', c='red', label='Testset_loss')\nplt.plot(x_len, y_loss, marker='.', c='blue', label='Trainset_loss')\nplt.legend(loc='upper right')\nplt.grid()\nplt.xlabel('epoch')\nplt.ylabel('loss')\nplt.show()\n",
"step-5": "import os\r\nimport cv2\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nfrom numpy import array\r\nimport tensorflow as tf\r\n\r\nTRAIN_DIR = 'C:/Users/vgg/untitled/MNIST/trainingSet/'\r\ntrain_folder_list = array(os.listdir(TRAIN_DIR))\r\n\r\ntrain_input = []\r\ntrain_label = []\r\n\r\nlabel_encoder = LabelEncoder() # LabelEncoder Class 호출\r\ninteger_encoded = label_encoder.fit_transform(train_folder_list)\r\nonehot_encoder = OneHotEncoder(sparse=False)\r\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n\r\nfor index in range(len(train_folder_list)):\r\n path = os.path.join(TRAIN_DIR, train_folder_list[index])\r\n path = path + '/'\r\n img_list = os.listdir(path)\r\n for img in img_list:\r\n img_path = os.path.join(path, img)\r\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\n train_input.append([np.array(img)])\r\n train_label.append([np.array(index)])\r\n\r\ntrain_input = np.reshape(train_input, (-1, 28, 28))\r\ntrain_label = np.reshape(train_label, (-1,))\r\ntrain_input = np.array(train_input).astype(np.float32)\r\ntrain_label = np.array(train_label).astype(np.float32)\r\nnp.save(\"train_data.npy\", train_input)\r\nnp.save(\"train_label.npy\", train_label)\r\n\r\nTEST_DIR = 'C:/Users/vgg/untitled/MNIST/testSet/'\r\ntest_folder_list = array(os.listdir(TEST_DIR))\r\n\r\ntest_input = []\r\ntest_label = []\r\n\r\nlabel_encoder = LabelEncoder()\r\ninteger_encoded = label_encoder.fit_transform(test_folder_list)\r\n\r\nonehot_encoder = OneHotEncoder(sparse=False)\r\ninteger_encoded = integer_encoded.reshape(len(integer_encoded), 1)\r\nonehot_encoded = onehot_encoder.fit_transform(integer_encoded)\r\n\r\nfor index in range(len(test_folder_list)):\r\n path = os.path.join(TEST_DIR, test_folder_list[index])\r\n path = path + '/'\r\n img_list = os.listdir(path)\r\n for img in img_list:\r\n 
img_path = os.path.join(path, img)\r\n img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\r\n test_input.append([np.array(img)])\r\n test_label.append([np.array(index)])\r\n\r\ntest_input = np.reshape(test_input, (-1, 28, 28))\r\ntest_label = np.reshape(test_label, (-1,))\r\ntest_input = np.array(test_input).astype(np.float32)\r\ntest_label = np.array(test_label).astype(np.float32)\r\nnp.save(\"test_input.npy\", test_input)\r\nnp.save(\"test_label.npy\", test_label)\r\n\r\n\r\n#-*- coding: utf-8 -*-\r\nfrom keras.datasets import mnist\r\nfrom keras.utils import np_utils\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D\r\nfrom keras.callbacks import ModelCheckpoint,EarlyStopping\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\n# seed 값 설정\r\nseed = 0\r\nnp.random.seed(seed)\r\ntf.set_random_seed(seed)\r\n\r\n# 데이터 불러오기\r\n\r\n# test_input = []\r\n# test_label = []\r\n#\r\n# train_input = []\r\n# train_label = []\r\nX_train = train_input\r\nY_train = train_label\r\nX_test = test_input\r\nY_test = test_label\r\n\r\nprint('X train shape')\r\nprint(X_train.shape)\r\nprint('Y train shape')\r\nprint(Y_train.shape)\r\nprint('X test shape')\r\nprint(X_test.shape)\r\nprint('y test shape')\r\nprint(Y_test.shape)\r\n\r\n#(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\r\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255\r\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255\r\nY_train = np_utils.to_categorical(Y_train)\r\nY_test = np_utils.to_categorical(Y_test)\r\n\r\n# 컨볼루션 신경망의 설정\r\nmodel = Sequential()\r\nmodel.add(Conv2D(32, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))\r\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\r\nmodel.add(MaxPooling2D(pool_size=2))\r\nmodel.add(Dropout(0.25))\r\nmodel.add(Flatten())\r\nmodel.add(Dense(128, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(10, 
activation='softmax'))\r\n\r\nmodel.compile(loss='categorical_crossentropy',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n# 모델 최적화 설정\r\nMODEL_DIR = './model/'\r\nif not os.path.exists(MODEL_DIR):\r\n os.mkdir(MODEL_DIR)\r\n\r\nmodelpath=\"./model/{epoch:02d}-{val_loss:.4f}.hdf5\"\r\ncheckpointer = ModelCheckpoint(filepath=modelpath, monitor='val_loss', verbose=1, save_best_only=True)\r\nearly_stopping_callback = EarlyStopping(monitor='val_loss', patience=10)\r\n\r\n# 모델의 실행\r\nhistory = model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=15, batch_size=100, verbose=0, callbacks=[early_stopping_callback,checkpointer])\r\n\r\n# 테스트 정확도 출력\r\nprint(\"\\n Test Accuracy: %.4f\" % (model.evaluate(X_test, Y_test)[1]))\r\n\r\n# 테스트 셋의 오차\r\ny_vloss = history.history['val_loss']\r\n\r\n# 학습셋의 오차\r\ny_loss = history.history['loss']\r\n\r\n# 그래프로 표현\r\nx_len = np.arange(len(y_loss))\r\nplt.plot(x_len, y_vloss, marker='.', c=\"red\", label='Testset_loss')\r\nplt.plot(x_len, y_loss, marker='.', c=\"blue\", label='Trainset_loss')\r\n\r\n# 그래프에 그리드를 주고 레이블을 표시\r\nplt.legend(loc='upper right')\r\nplt.grid()\r\nplt.xlabel('epoch')\r\nplt.ylabel('loss')\r\nplt.show()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from typing import Union, Tuple
import numpy as np
from dispim import Volume
def extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):
    """
    Extract a cubic region around a point in a 3d numpy array, zero padded as
    necessary so that the specified point sits at the center of the result.

    :param data: The numpy array to extract from
    :param center: The point around which to extract (float coordinates are
        truncated to integer voxel indices)
    :param half_size: The half-size of the extracted area (the full size is
        half_size*2+1 along every axis, with ``center`` at the middle)
    :return: The extracted area, always of shape (2*half_size+1,)*3
    """
    # Work in integer voxel coordinates up front: with a float center the
    # per-quantity int() truncations below could disagree and the output
    # shape came out wrong (the old FIXME).  This also avoids np.int, which
    # was removed from NumPy 1.24+.
    center = np.asarray(center).astype(int)

    imax = np.clip(center + half_size + 1, 0, data.shape)
    imin = np.clip(center - half_size, 0, data.shape)

    subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]

    # Amount clipped away on each side of each axis, restored as zero padding.
    max_missing = center + half_size + 1 - imax
    min_missing = imin - (center - half_size)

    return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)],
                  mode='constant')
def crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):
    """
    Get a cropped view of a 3d numpy array (the input is not modified).

    :param data: The numpy array to crop
    :param crop: The fraction to keep along each dimension (scalar applies to
        all three axes)
    :param center_crop: If True, the crop is centered around the middle of
        the volume; otherwise it expands from (0, 0, 0)
    :return: The cropped view
    """
    if type(crop) in (float, int):
        # Keeping (almost) everything: hand back the array untouched.
        if crop > 0.99999:
            return data
        keep_x = keep_y = keep_z = 1 - crop
    else:
        keep_x, keep_y, keep_z = 1 - crop[0], 1 - crop[1], 1 - crop[2]

    w, h, l = data.shape

    if not center_crop:
        return data[:int(w * (1 - keep_x)),
                    :int(h * (1 - keep_y)),
                    :int(l * (1 - keep_z))]

    # Trim half of the discarded fraction from each side of every axis.
    return data[int(w / 2 * keep_x):int(-w / 2 * keep_x),
                int(h / 2 * keep_y):int(-h / 2 * keep_y),
                int(l / 2 * keep_z):int(-l / 2 * keep_z)]
def plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:
    """
    Plot two axis-reduced volumes overlayed as two channels (red and green),
    taking into account the spacing of both volumes.

    :param vol_a: The first volume to plot (red channel)
    :param vol_b: The second volume to plot (green channel)
    :param axis: The axis along which both volumes are mean-reduced
    :param pixel_size: The size of a pixel, relative to the spacing of the volumes
    """
    # scipy.ndimage.interpolation was a deprecated private namespace and is
    # gone from modern SciPy; the public import path is scipy.ndimage.
    from scipy.ndimage import zoom
    import matplotlib.pyplot as plt

    # Resample each volume according to its own spacing, then collapse to 2d.
    vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)
    vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)
    b_channel = np.zeros_like(vol_a_zoomed)

    # Normalize both channels with a shared min/max so intensities stay comparable.
    max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())
    min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())

    vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)
    vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)

    plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))
    plt.show()
def show_ipv(data: np.ndarray):
    """
    Show an interactive 3d visualization of a 3d numpy array.

    :param data: The numpy array to show
    :return: The ipyvolume figure
    """
    from ipyvolume import quickvolshow
    return quickvolshow(data)
def threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:
    """
    Compute the Otsu threshold for a numpy array, skipping pixels equal to
    ``ignore`` so empty areas do not bias the histogram.

    :param image: The volume to compute the threshold for
    :param nbins: The number of histogram bins used
    :param ignore: The pixel value left out of the histogram
    :return: The Otsu threshold
    """
    from skimage.filters.thresholding import histogram

    # A constant image cannot be split into two classes.
    if image.min() == image.max():
        raise ValueError("threshold_otsu is expected to work with images "
                         "having more than one color. The input image seems "
                         "to have just one color {0}.".format(image.min()))

    values = image.ravel()
    values = values[values != ignore]
    counts, bin_centers = histogram(values, nbins)
    counts = counts.astype(float)

    # Cumulative class weights and means for every candidate threshold,
    # accumulated from both ends of the histogram.
    w_below = np.cumsum(counts)
    w_above = np.cumsum(counts[::-1])[::-1]
    mu_below = np.cumsum(counts * bin_centers) / w_below
    mu_above = (np.cumsum((counts * bin_centers)[::-1]) / w_above[::-1])[::-1]

    # Between-class variance.  The ends are clipped so the class-1 and
    # class-2 arrays stay aligned: the last w_below/mu_below entry would pair
    # with a zero-sized upper class, which does not exist.
    between = w_below[:-1] * w_above[1:] * (mu_below[:-1] - mu_above[1:]) ** 2

    return bin_centers[:-1][np.argmax(between)]
|
normal
|
{
"blob_id": "26f486131bdf514cd8e41f75d414fe647eaf1140",
"index": 9243,
"step-1": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\n<mask token>\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n 
\"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-2": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n 
\"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\n<mask token>\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. 
The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-3": "<mask token>\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n 
\"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. 
The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-4": "from typing import Union, Tuple\nimport numpy as np\nfrom dispim import Volume\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n max_missing = (center + half_size + 1 - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(\n 3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float,\n float]], center_crop: bool=True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n w, h, l = data.shape\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx), int(h / 2 *\n icropy):int(-h / 2 * icropy), int(l / 2 * icropz):int(-l / 2 *\n icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l *\n (1 - icropz))]\n return view\n\n\ndef plot_ortho_overlayed(vol_a: 
Volume, vol_b: Volume, axis=2, pixel_size:\n float=1.0) ->None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size\n ), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size\n ), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int=256, ignore: int=0) ->float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n if image.min() == image.max():\n raise ValueError(\n 'threshold_otsu is expected to work with images having more than one color. 
The input image seems to have just one color {0}.'\n .format(image.min()))\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-5": "from typing import Union, Tuple\n\nimport numpy as np\n\nfrom dispim import Volume\n\n\ndef extract_3d(data: np.ndarray, center: np.ndarray, half_size: int):\n \"\"\"\n Extract an area around a point in a 3d numpy array, zero padded as necessary such that the specified point is at the\n center\n\n :param data: The numpy array to extract from\n :param center: The point around which to extract\n :param half_size: The half-size of the extracted area (full size is half_size*2+1, where the th center point is\n center)\n :return: The extracted area\n \"\"\"\n # FIXME: Doesn't always return the expected shape\n imax = np.clip(center + half_size + 1, 0, data.shape).astype(np.int)\n imin = np.clip(center - half_size, 0, data.shape).astype(np.int)\n\n subvol = data[imin[0]:imax[0], imin[1]:imax[1], imin[2]:imax[2]]\n\n max_missing = ((center + half_size + 1) - imax).astype(np.int)\n min_missing = (imin - (center - half_size)).astype(np.int)\n\n return np.pad(subvol, [(min_missing[i], max_missing[i]) for i in range(3)], mode='constant')\n\n\ndef crop_view(data: np.ndarray, crop: Union[float, Tuple[float, float, float]], center_crop: bool = True):\n \"\"\"\n Get a cropped view of a 3d numpy array (does not modify the input)\n\n :param data: The numpy array to crop\n :param crop: The percentage to crop in each dimension\n :param center_crop: If True, the crop is centered around the middle of the volume, otherwise, the crop expands from\n (0, 0, 0)\n :return: The cropped view\n \"\"\"\n if type(crop) == float or type(crop) == int:\n if crop > 0.99999:\n return data\n icropx = 1 - crop\n icropy = 1 - crop\n icropz = 1 - crop\n else:\n icropx = 1 - crop[0]\n icropy = 1 - crop[1]\n icropz = 1 - crop[2]\n\n w, h, l = data.shape\n\n if center_crop:\n view = data[int(w / 2 * icropx):int(-w / 2 * icropx),\n int(h / 2 * icropy):int(-h / 2 * icropy),\n int(l / 2 * icropz):int(-l / 2 * icropz)]\n else:\n view = data[:int(w * (1 - icropx)), :int(h * (1 - icropy)), :int(l * (1 - 
icropz))]\n\n return view\n\n\ndef plot_ortho_overlayed(vol_a: Volume, vol_b: Volume, axis=2, pixel_size: float = 1.0) -> None:\n \"\"\"\n Plot two axis-reduced volumes overlayed as two channels (red and green), taking into account the spacing of both volumes\n\n :param vol_a: The first volume to plot (red)\n :param vol_b: The second volume to plot (green)\n :param axis: The axis along which both volumes will be reduced\n :param pixel_size: The size of a pixel, relative to the spacing of the the volumes\n \"\"\"\n from scipy.ndimage.interpolation import zoom\n import matplotlib.pyplot as plt\n\n vol_a_zoomed = np.mean(zoom(vol_a, np.array(vol_a.spacing) * pixel_size), axis=axis)\n vol_b_zoomed = np.mean(zoom(vol_b, np.array(vol_b.spacing) * pixel_size), axis=axis)\n b_channel = np.zeros_like(vol_a_zoomed)\n\n max_val = max(vol_a_zoomed.max(), vol_b_zoomed.max())\n min_val = min(vol_a_zoomed.min(), vol_b_zoomed.min())\n\n vol_a_zoomed = (vol_a_zoomed - min_val) / (max_val - min_val)\n vol_b_zoomed = (vol_b_zoomed - min_val) / (max_val - min_val)\n\n plt.imshow(np.stack([vol_a_zoomed, vol_b_zoomed, b_channel], axis=2))\n plt.show()\n\n\ndef show_ipv(data: np.ndarray):\n \"\"\"\n Show a 3d visualization of 3d numpy array\n :param data: The numpy array to show\n :return: The ipyvolume figure\n \"\"\"\n import ipyvolume as ipv\n return ipv.quickvolshow(data)\n\n\ndef threshold_otsu(image: np.ndarray, nbins: int = 256, ignore: int = 0) -> float:\n \"\"\"\n Compute the Otsu threshold for a numpy array, without taking into account empty areas\n\n :param image: The volume to compute the threshold for\n :param nbins: The number of bins used\n :param ignore: The value to ignore\n :return: The Otsu threshold\n \"\"\"\n from skimage.filters.thresholding import histogram\n # Check if the image is multi-colored or not\n if image.min() == image.max():\n raise ValueError(\"threshold_otsu is expected to work with images \"\n \"having more than one color. 
The input image seems \"\n \"to have just one color {0}.\".format(image.min()))\n\n img_flat = image.ravel()\n img_flat = img_flat[img_flat != ignore]\n hist, bin_centers = histogram(img_flat, nbins)\n hist = hist.astype(float)\n\n # class probabilities for all possible thresholds\n weight1 = np.cumsum(hist)\n weight2 = np.cumsum(hist[::-1])[::-1]\n # class means for all possible thresholds\n mean1 = np.cumsum(hist * bin_centers) / weight1\n mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]\n\n # Clip ends to align class 1 and class 2 variables:\n # The last value of `weight1`/`mean1` should pair with zero values in\n # `weight2`/`mean2`, which do not exist.\n variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2\n\n idx = np.argmax(variance12)\n threshold = bin_centers[:-1][idx]\n return threshold\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
import matplotlib.pyplot as plt
from math import *
from scipy.integrate import *
from pylab import *
from scipy.integrate import quad
# Histograms of evolved pulsars for the two braking models:
# axes are (period bin of 0.1 s, inclination-angle bin of 1 deg, field index).
MHD = np.zeros((80, 90, 5), dtype=float)
BGI = np.zeros((80, 90, 5), dtype=float)
# Marginal period distribution -- only referenced by commented-out code below.
Fp = np.zeros((80), dtype=float)
# (angle, count) pairs extracted at one period slice, used for the plot.
AngMHD = np.zeros((90,2), dtype=float)
AngBGI = np.zeros((90,2), dtype=float)
# Magnetic-field values (10^12 G) and weights; unused in the visible code --
# presumably for a weighted field ensemble. TODO confirm.
B0 = [0.5, 1.5, 3, 5, 10]
V = [0.3, 0.3, 0.2, 0.1, 0.1]
def PMHD(p, chi, b):
    """Spin-down rate dP/dt for the MHD (magnetodipole) braking model.

    p   -- pulsar period (s)
    chi -- magnetic inclination angle (rad)
    b   -- surface field in units of 10^12 G
    """
    s = sin(chi)
    return b**2/p*(1 + s*s)
def xMHD(p, chi, b):
    """Alignment rate dchi/dt for the MHD model (negative: chi shrinks)."""
    return -b**2/p**2 * sin(chi) * cos(chi)
def PBGI(p, chi, b):
    """Spin-down rate dP/dt in the BGI (Beskin-Gurevich-Istomin) model.

    The dimensionless parameter Q = 0.7 P / B12^0.57 / sqrt(cos chi) is
    capped at unity before entering the braking term; 0.01/sqrt(P) is the
    residual loss channel.
    """
    cos_chi = cos(chi)
    Q = 0.7 * p / b**0.57 / sqrt(cos_chi)
    A = min(Q, 1)
    return b**2/p*(A*cos_chi**2 + 0.01/sqrt(p))
def xBGI(p, chi, b):
    """Alignment rate dchi/dt in the BGI model (positive: chi grows)."""
    Q = 0.7 * p / b**0.57 / sqrt(cos(chi))
    capped = min(Q, 1)
    return capped * b**2/p**2 * sin(chi) * cos(chi)
# ---- Simulation parameters ----
# P0: initial spin period (s); Pend: period slice used for the angle
# histograms; B12: surface field in 10^12 G; dx: forward-Euler step size.
P0 = 0.3
Pend = 1
B12 = 4
dx = 0.0001
# ---- Evolve an ensemble of pulsars under MHD braking ----
# 450 initial inclination angles from ~0.1 to 90 degrees. Each pulsar is
# integrated until the quantity Q = 0.7 P / B12^0.57 / sqrt(cos chi)
# reaches 2, then binned by final period (0.1 s bins) and angle (1 deg bins).
for i in range(450):
    xi0 = i/5 + 0.1
    x0 = pi/180*xi0
    P = P0
    x = x0
    while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:
        P = P + PMHD(P, x, B12)*dx
        # NOTE(review): the angle update uses the freshly updated P
        # (semi-implicit Euler ordering) -- confirm this is intended.
        x = x + xMHD(P, x, B12)*dx
    gx = 180/pi*x
    iP = int(P/0.1)
    ix = int(gx)
    if iP < 80:
        MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1
# ---- Same ensemble under BGI braking ----
for i in range(450):
    xi0 = i/5 + 0.1
    x0 = pi/180*xi0
    P = P0
    x = x0
    while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:
        P = P + PBGI(P, x, B12)*dx
        x = x + xBGI(P, x, B12)*dx
    gx = 180/pi*x
    iP = int(P/0.1)
    ix = int(gx)
    if iP < 80:
        BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1
#for j in range(80):
#    for i in range(90):
#        Fp[j] = Fp[j] + PxiB[j, i, 0]
#    print(j/10, Fp[j])
# ---- Extract the angle distributions at the fixed period slice P = Pend ----
for i in range(90):
    j = int(10*Pend)
    AngMHD[i,0] = i
    AngBGI[i,0] = i
    AngMHD[i,1] = MHD[j, i, 0]
    AngBGI[i,1] = BGI[j, i, 0]
# print(i, PxiB[10, i, 0])
# ---- Scatter plot of the two angle distributions ----
ymax = np.max(AngBGI)
fig, ax = plt.subplots()
x = np.linspace(0, 90)
plt.xlim(1, 90)
plt.ylim(0, 1.2*ymax)
data1 = np.array(AngMHD)
data2 = np.array(AngBGI)
X1,Y1 = data1.T
X2,Y2 = data2.T
plt.scatter(X1,Y1, color = 'blue', s=15, label="MHD")
plt.scatter(X2,Y2, color = 'red', s=15, label="BGI")
plt.title('$P_0$ = '+str(P0)+', P = '+str(Pend)+', $B_{12}$ = '+str(B12)+'')
# NOTE(review): grid call duplicated; kept as-is (harmless).
plt.grid(True,which="both", ls="-")
plt.grid(True,which="both", ls="-")
# NOTE(review): '\c' is not a recognized escape so '$\chi$' renders fine,
# but a raw string r'$\chi$' would avoid the invalid-escape warning.
plt.xlabel('$\chi$')
#plt.ylabel('$\lambda g(x_{0})$')
plt.legend()
plt.show()
#fig, ax = plt.subplots()
#x = np.linspace(0, 1)
#plt.xlim(0.0001, 1.0)
#plt.ylim(0, 0.1)
#plt.plot(x, x**2*(cos(ch)*(1 - x**2) + 1/2*sin(ch)*(x - x**3))**3, label="fitting")
#plt.title(''+str(PSR)+', $n_{\pm}$ (P = '+str(P)+', $B_{12}$ = '+str(B12)+', $\chi$ = '+str(chi)+'$^{\circ}$), $\lambda = 92$')
#plt.grid(True,which="both", ls="-")
#plt.grid(True,which="both", ls="-")
##ax.vlines(xcr, 0, 8, color = 'black', linewidth = 1.5, linestyle = '--')
#plt.xlabel('$r_{0}/R_0$')
#plt.ylabel('$n_{\pm}$')
#plt.legend()
#plt.show()
|
normal
|
{
"blob_id": "660334be611c30397c2f33890e1bca1fc43bd01f",
"index": 2420,
"step-1": "<mask token>\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\n<mask token>\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\ndef PBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\n<mask token>\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PMHD(P, x, B12) * dx\n x = x + xMHD(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PBGI(P, x, B12) * dx\n x = x + xBGI(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\nfor i in range(90):\n j = int(10 * Pend)\n AngMHD[i, 0] = i\n AngBGI[i, 0] = i\n AngMHD[i, 1] = MHD[j, i, 0]\n AngBGI[i, 1] = BGI[j, i, 0]\n<mask token>\nplt.xlim(1, 90)\nplt.ylim(0, 1.2 * ymax)\n<mask token>\nplt.scatter(X1, Y1, color='blue', s=15, label='MHD')\nplt.scatter(X2, Y2, color='red', s=15, label='BGI')\nplt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +\n str(B12) + '')\nplt.grid(True, which='both', ls='-')\nplt.grid(True, which='both', ls='-')\nplt.xlabel('$\\\\chi$')\nplt.legend()\nplt.show()\n",
"step-3": "<mask token>\nMHD = np.zeros((80, 90, 5), dtype=float)\nBGI = np.zeros((80, 90, 5), dtype=float)\nFp = np.zeros(80, dtype=float)\nAngMHD = np.zeros((90, 2), dtype=float)\nAngBGI = np.zeros((90, 2), dtype=float)\nB0 = [0.5, 1.5, 3, 5, 10]\nV = [0.3, 0.3, 0.2, 0.1, 0.1]\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\ndef PBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\nP0 = 0.3\nPend = 1\nB12 = 4\ndx = 0.0001\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PMHD(P, x, B12) * dx\n x = x + xMHD(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PBGI(P, x, B12) * dx\n x = x + xBGI(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\nfor i in range(90):\n j = int(10 * Pend)\n AngMHD[i, 0] = i\n AngBGI[i, 0] = i\n AngMHD[i, 1] = MHD[j, i, 0]\n AngBGI[i, 1] = BGI[j, i, 0]\nymax = np.max(AngBGI)\nfig, ax = plt.subplots()\nx = np.linspace(0, 90)\nplt.xlim(1, 90)\nplt.ylim(0, 1.2 * ymax)\ndata1 = np.array(AngMHD)\ndata2 = np.array(AngBGI)\nX1, Y1 = data1.T\nX2, Y2 = data2.T\nplt.scatter(X1, Y1, color='blue', s=15, label='MHD')\nplt.scatter(X2, Y2, color='red', s=15, label='BGI')\nplt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +\n str(B12) + '')\nplt.grid(True, which='both', ls='-')\nplt.grid(True, which='both', 
ls='-')\nplt.xlabel('$\\\\chi$')\nplt.legend()\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom math import *\nfrom scipy.integrate import *\nfrom pylab import *\nfrom scipy.integrate import quad\nMHD = np.zeros((80, 90, 5), dtype=float)\nBGI = np.zeros((80, 90, 5), dtype=float)\nFp = np.zeros(80, dtype=float)\nAngMHD = np.zeros((90, 2), dtype=float)\nAngBGI = np.zeros((90, 2), dtype=float)\nB0 = [0.5, 1.5, 3, 5, 10]\nV = [0.3, 0.3, 0.2, 0.1, 0.1]\n\n\ndef PMHD(p, chi, b):\n return b ** 2 / p * (1 + sin(chi) ** 2)\n\n\ndef xMHD(p, chi, b):\n return -b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\ndef PBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return b ** 2 / p * (A * cos(chi) ** 2 + 0.01 / sqrt(p))\n\n\ndef xBGI(p, chi, b):\n Q = 0.7 * p / b ** 0.57 / sqrt(cos(chi))\n if Q > 1:\n A = 1\n else:\n A = Q\n return A * b ** 2 / p ** 2 * sin(chi) * cos(chi)\n\n\nP0 = 0.3\nPend = 1\nB12 = 4\ndx = 0.0001\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PMHD(P, x, B12) * dx\n x = x + xMHD(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\nfor i in range(450):\n xi0 = i / 5 + 0.1\n x0 = pi / 180 * xi0\n P = P0\n x = x0\n while 0.7 * P / B12 ** 0.57 / sqrt(cos(x)) < 2:\n P = P + PBGI(P, x, B12) * dx\n x = x + xBGI(P, x, B12) * dx\n gx = 180 / pi * x\n iP = int(P / 0.1)\n ix = int(gx)\n if iP < 80:\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\nfor i in range(90):\n j = int(10 * Pend)\n AngMHD[i, 0] = i\n AngBGI[i, 0] = i\n AngMHD[i, 1] = MHD[j, i, 0]\n AngBGI[i, 1] = BGI[j, i, 0]\nymax = np.max(AngBGI)\nfig, ax = plt.subplots()\nx = np.linspace(0, 90)\nplt.xlim(1, 90)\nplt.ylim(0, 1.2 * ymax)\ndata1 = np.array(AngMHD)\ndata2 = np.array(AngBGI)\nX1, Y1 = data1.T\nX2, Y2 = data2.T\nplt.scatter(X1, Y1, color='blue', s=15, label='MHD')\nplt.scatter(X2, Y2, color='red', s=15, 
label='BGI')\nplt.title('$P_0$ = ' + str(P0) + ', P = ' + str(Pend) + ', $B_{12}$ = ' +\n str(B12) + '')\nplt.grid(True, which='both', ls='-')\nplt.grid(True, which='both', ls='-')\nplt.xlabel('$\\\\chi$')\nplt.legend()\nplt.show()\n",
"step-5": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import * \r\nfrom scipy.integrate import *\r\nfrom pylab import * \r\nfrom scipy.integrate import quad\r\n\r\n\r\nMHD = np.zeros((80, 90, 5), dtype=float)\r\nBGI = np.zeros((80, 90, 5), dtype=float)\r\nFp = np.zeros((80), dtype=float) \r\nAngMHD = np.zeros((90,2), dtype=float)\r\nAngBGI = np.zeros((90,2), dtype=float) \r\nB0 = [0.5, 1.5, 3, 5, 10]\r\nV = [0.3, 0.3, 0.2, 0.1, 0.1]\r\n\r\n\r\ndef PMHD(p, chi, b):\r\n return b**2/p*(1 +(sin(chi))**2)\r\n\r\ndef xMHD(p, chi, b):\r\n return -b**2/p**2*sin(chi)*cos(chi)\r\n\r\ndef PBGI(p, chi, b):\r\n Q = 0.7*p/b**0.57/sqrt(cos(chi))\r\n if Q > 1:\r\n A = 1\r\n else:\r\n A = Q\r\n return b**2/p*(A*(cos(chi))**2 + 0.01/sqrt(p))\r\n\r\ndef xBGI(p, chi, b):\r\n Q = 0.7*p/b**0.57/sqrt(cos(chi))\r\n if Q > 1:\r\n A = 1\r\n else:\r\n A = Q\r\n return A*b**2/p**2*sin(chi)*cos(chi)\r\n\r\nP0 = 0.3\r\nPend = 1\r\nB12 = 4\r\n\r\ndx = 0.0001\r\n\r\n\r\nfor i in range(450):\r\n xi0 = i/5 + 0.1\r\n x0 = pi/180*xi0\r\n P = P0\r\n x = x0\r\n while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:\r\n P = P + PMHD(P, x, B12)*dx\r\n x = x + xMHD(P, x, B12)*dx\r\n gx = 180/pi*x\r\n iP = int(P/0.1)\r\n ix = int(gx)\r\n if iP < 80:\r\n MHD[iP, ix, 0] = MHD[iP, ix, 0] + 1\r\n\r\n\r\nfor i in range(450):\r\n xi0 = i/5 + 0.1\r\n x0 = pi/180*xi0\r\n P = P0\r\n x = x0\r\n while 0.7*P/B12**0.57/sqrt(cos(x)) < 2:\r\n P = P + PBGI(P, x, B12)*dx\r\n x = x + xBGI(P, x, B12)*dx\r\n gx = 180/pi*x\r\n iP = int(P/0.1)\r\n ix = int(gx)\r\n if iP < 80:\r\n BGI[iP, ix, 0] = BGI[iP, ix, 0] + 1\r\n\r\n#for j in range(80):\r\n# for i in range(90):\r\n# Fp[j] = Fp[j] + PxiB[j, i, 0] \r\n# print(j/10, Fp[j]) \r\n\r\n\r\nfor i in range(90):\r\n j = int(10*Pend)\r\n AngMHD[i,0] = i\r\n AngBGI[i,0] = i\r\n AngMHD[i,1] = MHD[j, i, 0]\r\n AngBGI[i,1] = BGI[j, i, 0]\r\n# print(i, PxiB[10, i, 0])\r\n\r\n\r\nymax = np.max(AngBGI)\r\n\r\nfig, ax = plt.subplots()\r\nx = np.linspace(0, 90)\r\nplt.xlim(1, 
90)\r\nplt.ylim(0, 1.2*ymax)\r\ndata1 = np.array(AngMHD)\r\ndata2 = np.array(AngBGI)\r\nX1,Y1 = data1.T\r\nX2,Y2 = data2.T\r\nplt.scatter(X1,Y1, color = 'blue', s=15, label=\"MHD\")\r\nplt.scatter(X2,Y2, color = 'red', s=15, label=\"BGI\")\r\nplt.title('$P_0$ = '+str(P0)+', P = '+str(Pend)+', $B_{12}$ = '+str(B12)+'')\r\nplt.grid(True,which=\"both\", ls=\"-\")\r\nplt.grid(True,which=\"both\", ls=\"-\")\r\nplt.xlabel('$\\chi$')\r\n#plt.ylabel('$\\lambda g(x_{0})$')\r\nplt.legend()\r\nplt.show() \r\n\r\n\r\n#fig, ax = plt.subplots()\r\n#x = np.linspace(0, 1)\r\n#plt.xlim(0.0001, 1.0)\r\n#plt.ylim(0, 0.1)\r\n#plt.plot(x, x**2*(cos(ch)*(1 - x**2) + 1/2*sin(ch)*(x - x**3))**3, label=\"fitting\")\r\n#plt.title(''+str(PSR)+', $n_{\\pm}$ (P = '+str(P)+', $B_{12}$ = '+str(B12)+', $\\chi$ = '+str(chi)+'$^{\\circ}$), $\\lambda = 92$')\r\n#plt.grid(True,which=\"both\", ls=\"-\")\r\n#plt.grid(True,which=\"both\", ls=\"-\")\r\n##ax.vlines(xcr, 0, 8, color = 'black', linewidth = 1.5, linestyle = '--')\r\n#plt.xlabel('$r_{0}/R_0$')\r\n#plt.ylabel('$n_{\\pm}$')\r\n#plt.legend()\r\n#plt.show() ",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.7 gating model from Vasylyev & Waxman.

    Y = [m, h] (activation, inactivation). The membrane potential is
    supplied by voltage_clamp_func(t, voltage_clamp_params); returns
    [dm/dt, dh/dt]. Each rate fit carries a rate constant plus one
    voltage shift and one slope factor.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    def relax(gate, a, b):
        # First-order relaxation: inf = a/(a+b), tau = 1/(a+b),
        # d(gate)/dt = (inf - gate)/tau.
        total = a + b
        inf = a / total
        tau = 1 / total
        return (inf - gate) / tau

    # Activation rates: a_m ~ closed->open, b_m ~ open->closed
    a_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))

    # Inactivation rates
    a_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))

    return [relax(m, a_m, b_m), relax(h, a_h, b_h)]
def nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.7 (rat) gating model from Choi & Waxman 2011.

    Y = [m, h, s] (activation, fast inactivation, slow inactivation);
    returns the three gate derivatives at the clamped potential v(t).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s = Y

    def relax(gate, a, b):
        # (inf - gate)/tau with inf = a/(a+b) and tau = 1/(a+b).
        total = a + b
        inf = a / total
        tau = 1 / total
        return (inf - gate) / tau

    a_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))
    b_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))
    a_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))
    # NOTE(review): this backward rate goes negative as v -> -inf, as in
    # the original fit -- confirm it is only used in a physiological range.
    b_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))
    a_s = 0.00003 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))
    b_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))

    return [relax(m, a_m, b_m), relax(h, a_h, b_h), relax(s, a_s, b_s)]
" -- Nav 1.8 models -- "
def nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Human Nav1.8 gating model from Huang & Waxman (~2014); Y = [m, h]."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    def relax(gate, a, b):
        # First-order relaxation with inf = a/(a+b), tau = 1/(a+b).
        total = a + b
        inf = a / total
        tau = 1 / total
        return (inf - gate) / tau

    # Activation rates
    a_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))
    b_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))
    # Inactivation rates
    a_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))
    b_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))

    return [relax(m, a_m, b_m), relax(h, a_h, b_h)]
def nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 gating model as used in the Tigerholm DRG neuron model.

    Four gates: Y = [m, h, s, u] (activation, fast inactivation, and two
    slow inactivation gates). Returns their derivatives at v(t).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s, u = Y

    def drive(inf, tau, gate):
        # First-order relaxation of a gate toward its steady state.
        return (inf - gate) / tau

    # m: steady state and tau from forward/backward rate fits
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    m_inf = a_m / (a_m + b_m)
    m_tau = 1 / (a_m + b_m)

    # h: Boltzmann steady state with a Gaussian tau fit
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))

    # s, u: Boltzmann steady states; taus from slow rate fits
    a_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))
    b_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))
    s_inf = 1 / (1 + np.exp((v + 45.0) / 8))
    s_tau = 1 / (a_s + b_s)

    a_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))
    b_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))
    u_inf = 1 / (1 + np.exp((v + 51.0) / 8))
    u_tau = 1.0 / (a_u + b_u)

    return [drive(m_inf, m_tau, m), drive(h_inf, h_tau, h),
            drive(s_inf, s_tau, s), drive(u_inf, u_tau, u)]
def nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.8 gating model from Choi & Waxman 2011 (m and h gates only)."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation: steady state and tau from forward/backward rates
    a_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))
    b_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))
    total = a_m + b_m
    dm = (a_m / total - m) / (1 / total)

    # Inactivation: Boltzmann steady state, Gaussian tau fit
    h_inf = 1 / (1 + np.exp((v + 32.2) / 4))
    h_tau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))
    dh = (h_inf - h) / h_tau

    return [dm, dh]
" -- Nav 1.9 models -- "
def nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 gating model from Huang & Waxman 2014.

    Y = [m, h, s] (activation, inactivation, slow inactivation); all
    three gates use forward/backward rate fits.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s = Y

    def relax(gate, a, b):
        # (inf - gate)/tau with inf = a/(a+b) and tau = 1/(a+b).
        total = a + b
        inf = a / total
        tau = 1 / total
        return (inf - gate) / tau

    a_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))
    b_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))
    a_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))
    b_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))
    a_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))
    b_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))

    return [relax(m, a_m, b_m), relax(h, a_h, b_h), relax(s, a_s, b_s)]
def nav19md(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.9 model from Maingret et al. 2008 -- NOT YET IMPLEMENTED.

    The original body returned [dm, dh, ds] without ever computing the
    derivatives, so any call died with a NameError. Raise an explicit
    NotImplementedError instead until the rate equations are filled in.
    Signature matches the other gating models in this module.
    """
    raise NotImplementedError(
        "nav19md: Maingret 2008 Nav1.9 rate equations are not implemented")
def nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Nav1.6 gating model after Zach Mainen (1994); Y = [m, h].

    h_inf comes from a separate Boltzmann fit while h_tau is built from
    the forward/backward rates.
    """
    m, h = Y[0], Y[1]
    v = voltage_clamp_func(t, voltage_clamp_params)

    def vtrap(rate, x, q):
        # Linoid rate rate*x/(1 - exp(-x/q)); singular at x == 0.
        return rate * x / (1 - np.exp(-x / q))

    # Activation gate m (half-activation at -43 mV)
    a_m = vtrap(0.182, v + 43.0, 6.)
    b_m = vtrap(0.124, -43.0 - v, 6.)
    m_inf = a_m / (a_m + b_m)
    m_tau = 1. / (a_m + b_m)

    # Inactivation gate h: tau from rates, steady state from Boltzmann fit
    a_h = vtrap(0.0091, v + 50.0, 5.0)
    b_h = vtrap(0.024, -75.0 - v, 5.0)
    h_inf = 1.0 / (1.0 + np.exp((v + 72.0) / 6.2))
    h_tau = 1. / (a_h + b_h)

    return [(m_inf - m) / m_tau, (h_inf - h) / h_tau]
" Kv models "
def kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm variant of the Sheets et al. delayed-rectifier K+ (Kdr).

    Single activation gate n; the data behind the fit were recorded at
    21 C (the q10 scaling hook is kept but disabled, i.e. q10 == 1).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n = Y[0]
    q10 = 1.0  # 3.3 would re-enable temperature scaling

    # Piecewise time-constant fit: single exponential above -31 mV,
    # double-exponential well below it.
    if v > -31.0:
        base_tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))
    else:
        base_tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5)
                                           + np.exp(-(v - 131.5) / 34.8)))

    n_steady = 1 / (1 + np.exp(-(v + 45) / 15.4))
    return [(n_steady - n) / (base_tau / q10)]
def km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm KM (slow M-type K+) current gating.

    Steady-state activation follows Maingret et al. 2008 (derived from
    Passmore 2003); the two Passmore time constants are modelled as one
    slow (ns) and one fast (nf) gate that the caller combines as
    g = gbar * (0.25*ns + 0.75*nf).
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    ns, nf = Y[0], Y[1]
    q10 = 1.0  # 3.3 would re-enable temperature scaling

    # Slow gate: flat tau below -60 mV, linear in v above it.
    if v < -60.0:
        slow_tau = 219.0 * q10
    else:
        slow_tau = 13.0 * v + 1000.0 * q10

    # Fast gate: tau from voltage-dependent forward/backward rates.
    fast_a = 0.00395 * np.exp((v + 30.0) / 40.0)
    fast_b = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10
    fast_tau = 1.0 / (fast_a + fast_b)

    # Both gates share one steady state (half-activation near -30 mV).
    n_steady = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))
    return [(n_steady - ns) / slow_tau, (n_steady - nf) / fast_tau]
def ka_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm IA (transient A-type K+) gating; g = gbar * n * h."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    n, h = Y[0], Y[1]
    q10 = 1.0  # 3.3 would re-enable temperature scaling

    # Activation: 4th-power Boltzmann steady state, Gaussian tau fit.
    n_steady = (1.0 / (1.0 + np.exp(-(v + 5.4 + 15) / 16.4))) ** 4
    n_tau = 0.25 + 10.04 * np.exp(-(v + 24.67) ** 2 / (2 * 34.8 ** 2)) * q10

    # Inactivation, with a 5 ms floor on the time constant
    # (Sheets / Choi-Waxman / Tigerholm convention).
    h_steady = 1.0 / (1.0 + np.exp((v + 49.9 + 15.0) / 4.6))
    h_tau = 20.0 + 50.0 * np.exp(-(v + 40.0) ** 2 / (2.0 * 40.0 ** 2)) * q10
    h_tau = max(h_tau, 5.0)

    return [(n_steady - n) / n_tau, (h_steady - h) / h_tau]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):
    """
    Jaffe et al. 1994 ICaL (L-type Ca) model -- activation gate only.

    Y = [m]; returns [dm/dt] at the clamped potential v(t). The helpers
    KTF/efun/calc_ghk implement the GHK driving-force term used by the
    (currently commented-out) current calculation; only alpha/beta feed
    the returned derivative.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m = Y[0]
    tfa = 1.     # activation-tau scale factor
    ki = 0.001   # (mM) half-inactivation Ca concentration for the h2 gate
    cao = 2.5    # (mM) external Ca, Davidson
    # To do: make cai variable as an input like voltage.
    cai = 1.e-4  # (mM) ~100 nM resting Ca_i (Lu, Zhang, Gold 2007)
    celsius = 37.

    def alpha(v):
        # Forward rate; NOTE(review): removable singularity at v == 81.5 mV
        # is not trapped here, as in the original fit.
        return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)

    def beta(v):
        return 0.29*np.exp(-v/10.86)

    def KTF(celsius):
        # Thermal voltage scale at the given temperature.
        return ((25./293.15)*(celsius + 273.15))

    def efun(z):
        # Safe z/(exp(z)-1): near z == 0 use the series value 1 - z/2.
        # Fixed relative to the original, which (a) crashed on scalar input
        # because it iterated over z, and (b) tested `z < 1e-4` instead of
        # |z| < 1e-4, mis-valuing every negative argument.
        z = np.atleast_1d(np.asarray(z, dtype=float))
        out = np.empty_like(z)
        small = np.abs(z) < 1e-4
        out[small] = 1 - z[small]/2
        out[~small] = z[~small]/(np.exp(z[~small]) - 1)
        return out

    def calc_ghk(v, cai, cao):
        # GHK-style driving term (used only by the commented-out current).
        f = KTF(celsius)/2
        nu = v/f
        return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)

    a = alpha(v)
    b = beta(v)
    tau = 1./(tfa*(a + b))
    minf = a/(a+b)
    dm = (minf - m)/tau
    """ Calculating the current
    # h gate
    h2 = ki/(ki+cai)
    gcalbar = 0.003
    ghk = calc_ghk(v,cai,cao)
    ical = gcalbar*m*m*h2*ghk
    """
    return [dm]
def can_mi():
    """
    Model of N-type Ca current from Migliore 95.

    Placeholder -- not implemented; calling it is a silent no-op that
    returns None. When implemented it should follow the common
    (Y, t, voltage_clamp_func, voltage_clamp_params) gating interface
    used by the other models in this module.
    """
    pass
" HCN models "
def hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Kouranova Ih (HCN) model with a non-specific current
    (reversal potential should be set near -30 mV by the caller).

    Two gates share one steady state but relax with slow (Y[0]) and
    fast (Y[1]) time constants; the tau fits switch branch at -70 mV.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]

    steady = 1 / (1 + np.exp((v + 87.2) / 9.7))  # shared by both gates

    if v > -70.0:
        slow_tau = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        fast_tau = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)
    else:
        slow_tau = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        fast_tau = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)

    return [(steady - slow) / slow_tau, (steady - fast) / fast_tau]
def hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):
    """Tigerholm Ih (HCN): gating kinetics identical to the Kouranova
    model (hcn_kn).

    The only model-level difference is at the current-calculation stage
    (not performed here): the conductance is split 50/50 between Na+ and
    K+ components instead of one non-specific reversal potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    gate_s, gate_f = Y[0], Y[1]

    inf = 1 / (1 + np.exp((v + 87.2) / 9.7))

    # The tau fits switch branch at -70 mV.
    hyperpolarized = v <= -70.0
    if hyperpolarized:
        tau_s = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)
        tau_f = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)
    else:
        tau_s = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)
        tau_f = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)

    return [(inf - gate_s) / tau_s, (inf - gate_f) / tau_f]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):
" Human Nav 1.7 from Vasylyev Waxman "
v = voltage_clamp_func(t,voltage_clamp_params)
m = Y[0]
h = Y[1]
alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)
beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed
"""
Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.
"""
minf = alpha_m/(alpha_m + beta_m)
mtau = 1/(alpha_m + beta_m)
alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
hinf = alpha_h/(alpha_h + beta_h)
htau = 1/(alpha_h + beta_h)
dm = (minf-m)/mtau
dh = (hinf-h)/htau
return [dm, dh]
|
normal
|
{
"blob_id": "012ab947f7a2c9d44f54464b3e477582ffcf3d77",
"index": 5589,
"step-1": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? 
Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 
+ np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential 
should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? 
Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 
+ np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential 
should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, 
dh]\n",
"step-3": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.8 from Huang Waxman 20(14?) 
\"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))\n beta_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))\n beta_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 
1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. 
IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n",
"step-4": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.8 from Huang Waxman 20(14?) 
\"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))\n beta_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))\n beta_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 
1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\ndef nav19md(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Maingret 2008\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n return [dm, dh, ds]\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, 
dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\ncurrent_models - library of ionic current models implemented in Python\n\nCreated on Mon Apr 10 16:30:04 2017\n\n@author: Oliver Britton\n\"\"\"\n\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\n\" Voltage clamp generator functions \"\n\n\n\" //--Nav models--\\\\ \"\n\n\" -- Nav 1.7 models -- \"\n\ndef nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.7 from Vasylyev Waxman \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)\n beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed\n \n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))\n beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n return [dm, dh]\n\ndef nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Rat? 
Nav 1.7 from Choi Waxman 2011 \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))\n beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))\n beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n\n alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))\n beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))\n\n sinf = alpha_s/(alpha_s + beta_s)\n stau = 1/(alpha_s + beta_s)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n \n return [dm, dh, ds]\n \n\" -- Nav 1.8 models -- \"\ndef nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.8 from Huang Waxman 20(14?) \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))\n beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))\n beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n\n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n \n return [dm, dh]\n\ndef nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Rat? 
Nav 1.8 used in Tigerholm model \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n \n alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))\n beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n hinf = 1/(1+np.exp((v+32.2)/4))\n htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))\n\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))\n\n sinf = 1/(1+np.exp((v+45.0)/8))\n stau = 1/(alpha_s + beta_s)\n\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))\n\n uinf = 1/(1+np.exp((v+51.0)/8))\n utau = 1.0/(alpha_u + beta_u) \n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n du = (uinf-u)/utau\n \n return [dm, dh, ds, du]\n \ndef nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.8 model used in Choi Waxman 2011 \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))\n beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n hinf = 1/(1+np.exp((v+32.2)/4))\n htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n \n return [dm, dh]\n \n\" -- Nav 1.9 models -- \"\n\ndef nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.9 model from Huang Waxman 2014\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))\n beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))\n beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n alpha_s = 0.019/(1 + 
np.exp((v+154.51)/11.46))\n beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))\n sinf = alpha_s/(alpha_s + beta_s)\n stau = 1/(alpha_s + beta_s)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n \n return [dm, dh, ds]\n \ndef nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.9 model from Maingret 2008\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n \n return [dm, dh, ds]\n \ndef nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.6 model from Zach Mainen 1994 \"\n m = Y[0]\n h = Y[1]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n vhalf = -43.0\n a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))\n b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))\n \n m_inf = a_m/(a_m + b_m)\n m_tau = 1./(a_m + b_m)\n \n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n\n vhalf_inf = -72.0\n qinf = 6.2\n\n rate_ha = 0.0091\n rate_hb = 0.024\n\n a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))\n b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))\n\n h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))\n h_tau = 1./(a_h + b_h)\n \n dm = (m_inf-m)/m_tau\n dh = (h_inf-h)/h_tau\n \n return [dm, dh]\n\n\" Kv models \"\n\ndef kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Tigerholm version of the Sheets et al. IKdr model \"\n \" Model was developed from data recorded at 21 oC \"\n \n \n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n if v > -31.0:\n tau = 0.16+0.8*np.exp(-0.0267*(v+11))\n else:\n tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))\n\t\t\n ninf = 1/(1 + np.exp(-(v+45)/15.4))\n ntau = tau/q10\n \n dn = (ninf-n)/ntau\n return [dn]\n \ndef km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n # g = gbar * (0.25*ns + 0.75*nf)\n v = voltage_clamp_func(t,voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n if v < -60.0:\n nstau = 219.0*q10\n else:\n nstau = 13.0*v + 1000.0*q10\n \n nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)\n nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10\n nftau = 1.0/(nftau_alpha + nftau_beta)\n \n ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV\n \n dns = (ninf-ns)/nstau\n dnf = (ninf-nf)/nftau\n \n return [dns,dnf]\n \ndef ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" Tigerholm version of IA.\n \"\"\"\n # g = gbar * n * h\n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n h = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4\n ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10\n\t\t\n hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))\n htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10\n \n # Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms\n if htau < 5.0:\n htau = 5.0\n\n dn = (ninf-n)/ntau\n dh = (hinf-h)/htau\n \n return [dn,dh]\n\n\"\"\" \nCa models \n\nImplemented:\ncal_ja - Jaffe et al. 1994 ICaL model. \ncan_mi - Model of N-type Ca current from Migliore 95\n\nTo do:\nSK\nBK\nCa diffusion\n\n\n\"\"\"\n\ndef cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n \n\" HCN models \"\ndef hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n\n v = voltage_clamp_func(t,voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n\n ninf_s = 1/(1 + np.exp((v+87.2)/9.7))\n ninf_f = ninf_s\n\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)\n\n dns = (ninf_s - n_s)/tau_ns\n dnf = (ninf_f - n_f)/tau_nf\n\n return [dns, dnf]\n \ndef hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the 
current between Na+ and K+, 50/50. \n \"\"\"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n \n ninf_s = 1/(1 + np.exp((v+87.2)/9.7))\n ninf_f = ninf_s\n\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)\n\n dns = (ninf_s - n_s)/tau_ns\n dnf = (ninf_f - n_f)/tau_nf\n \n return [dns, dnf]\n\n\"\"\"\n # ena, ek, + or -?\n Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)\n Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek) \n\n\"\"\"\n\n\" Test models \"\ndef nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.7 from Vasylyev Waxman \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)\n beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed\n \n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))\n beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n return [dm, dh]\n \n\n \n",
"step-ids": [
12,
13,
14,
15,
18
]
}
|
[
12,
13,
14,
15,
18
] |
<|reserved_special_token_0|>
def squeezenet_fire_module(input, input_channel_small=16,
    input_channel_large=64):
    """SqueezeNet 'fire' block: a 1x1 squeeze conv followed by parallel 1x1
    and 3x3 expand convs whose outputs are concatenated on the channel axis.

    NOTE(review): the parameter name shadows the builtin ``input``; it is
    kept because callers pass it by keyword (``input=network``).
    """
    # channels-last (NHWC) layout
    channel_axis = 3
    input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)
    input = Activation('relu')(input)
    input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input
        )
    input_branch_1 = Activation('relu')(input_branch_1)
    input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)
    input_branch_2 = Activation('relu')(input_branch_2)
    # Result carries 2 * input_channel_large channels.
    input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)
    return input
<|reserved_special_token_0|>
def main(opt):
    """Convert a keras SqueezeNet weights file (.h5) to a frozen TF graph (.pb)."""
    # Weights are resolved relative to ../weights.
    weights_path: Path = Path('../weights')
    model_path = weights_path / opt.model_path
    if not model_path.exists():
        raise ValueError(f'Invalid model path: {model_path}')
    print(f"Loading keras model: '{model_path}'")
    # Rebuild the architecture, then load the trained weights into it.
    keras_model = SqueezeNet()
    keras_model.load_weights(model_path)
    output_file = get_tf_filename(str(model_path))
    keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=
        output_file)
    print('MODEL SAVED')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def keras_to_tensorflow(keras_model, output_dir: Path, model_name,
out_prefix='output_', log_tensorboard=True):
"""Convert from keras to tf"""
if not output_dir.exists():
output_dir.mkdir(parents=True, exist_ok=True)
output_dir: str = str(output_dir)
out_nodes = []
for i in range(len(keras_model.outputs)):
out_nodes.append(out_prefix + str(i + 1))
tf.identity(keras_model.output[i], out_prefix + str(i + 1))
sess = K.get_session()
init_graph = sess.graph.as_graph_def()
main_graph = graph_util.convert_variables_to_constants(sess, init_graph,
out_nodes)
graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False
)
if log_tensorboard:
import_pb_to_tensorboard.import_to_tensorboard(os.path.join(
output_dir, model_name), output_dir)
<|reserved_special_token_0|>
def squeezenet_fire_module(input, input_channel_small=16,
input_channel_large=64):
channel_axis = 3
input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)
input = Activation('relu')(input)
input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input
)
input_branch_1 = Activation('relu')(input_branch_1)
input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)
input_branch_2 = Activation('relu')(input_branch_2)
input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)
return input
def SqueezeNet(input_shape=(224, 224, 3)):
"""Returns a new keras SqueezeNet model"""
image_input = Input(shape=input_shape)
network = Conv2D(64, (3, 3), strides=(2, 2), padding='valid')(image_input)
network = Activation('relu')(network)
network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
network = squeezenet_fire_module(input=network, input_channel_small=16,
input_channel_large=64)
network = squeezenet_fire_module(input=network, input_channel_small=16,
input_channel_large=64)
network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
network = squeezenet_fire_module(input=network, input_channel_small=32,
input_channel_large=128)
network = squeezenet_fire_module(input=network, input_channel_small=32,
input_channel_large=128)
network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
network = squeezenet_fire_module(input=network, input_channel_small=48,
input_channel_large=192)
network = squeezenet_fire_module(input=network, input_channel_small=48,
input_channel_large=192)
network = squeezenet_fire_module(input=network, input_channel_small=64,
input_channel_large=256)
network = squeezenet_fire_module(input=network, input_channel_small=64,
input_channel_large=256)
network = Conv2D(1000, kernel_size=(1, 1), padding='valid', name=
'last_conv')(network)
network = Activation('relu')(network)
network = GlobalAvgPool2D()(network)
network = Activation('softmax', name='output')(network)
input_image = image_input
model = Model(inputs=input_image, outputs=network)
return model
<|reserved_special_token_0|>
def main(opt):
"""Convert a model from keras to tensorflow lite."""
weights_path: Path = Path('../weights')
model_path = weights_path / opt.model_path
if not model_path.exists():
raise ValueError(f'Invalid model path: {model_path}')
print(f"Loading keras model: '{model_path}'")
keras_model = SqueezeNet()
keras_model.load_weights(model_path)
output_file = get_tf_filename(str(model_path))
keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=
output_file)
print('MODEL SAVED')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def keras_to_tensorflow(keras_model, output_dir: Path, model_name,
out_prefix='output_', log_tensorboard=True):
"""Convert from keras to tf"""
if not output_dir.exists():
output_dir.mkdir(parents=True, exist_ok=True)
output_dir: str = str(output_dir)
out_nodes = []
for i in range(len(keras_model.outputs)):
out_nodes.append(out_prefix + str(i + 1))
tf.identity(keras_model.output[i], out_prefix + str(i + 1))
sess = K.get_session()
init_graph = sess.graph.as_graph_def()
main_graph = graph_util.convert_variables_to_constants(sess, init_graph,
out_nodes)
graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False
)
if log_tensorboard:
import_pb_to_tensorboard.import_to_tensorboard(os.path.join(
output_dir, model_name), output_dir)
<|reserved_special_token_0|>
def squeezenet_fire_module(input, input_channel_small=16,
input_channel_large=64):
channel_axis = 3
input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)
input = Activation('relu')(input)
input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input
)
input_branch_1 = Activation('relu')(input_branch_1)
input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)
input_branch_2 = Activation('relu')(input_branch_2)
input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)
return input
def SqueezeNet(input_shape=(224, 224, 3)):
"""Returns a new keras SqueezeNet model"""
image_input = Input(shape=input_shape)
network = Conv2D(64, (3, 3), strides=(2, 2), padding='valid')(image_input)
network = Activation('relu')(network)
network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
network = squeezenet_fire_module(input=network, input_channel_small=16,
input_channel_large=64)
network = squeezenet_fire_module(input=network, input_channel_small=16,
input_channel_large=64)
network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
network = squeezenet_fire_module(input=network, input_channel_small=32,
input_channel_large=128)
network = squeezenet_fire_module(input=network, input_channel_small=32,
input_channel_large=128)
network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)
network = squeezenet_fire_module(input=network, input_channel_small=48,
input_channel_large=192)
network = squeezenet_fire_module(input=network, input_channel_small=48,
input_channel_large=192)
network = squeezenet_fire_module(input=network, input_channel_small=64,
input_channel_large=256)
network = squeezenet_fire_module(input=network, input_channel_small=64,
input_channel_large=256)
network = Conv2D(1000, kernel_size=(1, 1), padding='valid', name=
'last_conv')(network)
network = Activation('relu')(network)
network = GlobalAvgPool2D()(network)
network = Activation('softmax', name='output')(network)
input_image = image_input
model = Model(inputs=input_image, outputs=network)
return model
def get_tf_filename(keras_filename) ->str:
    """Map a keras weights filename to its TensorFlow graph counterpart.

    Only a trailing '.h5' extension is rewritten to '.pb'; the previous
    ``str.replace`` swapped every '.h5' occurrence anywhere in the path.
    Filenames without the extension are returned unchanged, as before.
    """
    if keras_filename.endswith('.h5'):
        return keras_filename[:-len('.h5')] + '.pb'
    return keras_filename
def main(opt):
"""Convert a model from keras to tensorflow lite."""
weights_path: Path = Path('../weights')
model_path = weights_path / opt.model_path
if not model_path.exists():
raise ValueError(f'Invalid model path: {model_path}')
print(f"Loading keras model: '{model_path}'")
keras_model = SqueezeNet()
keras_model.load_weights(model_path)
output_file = get_tf_filename(str(model_path))
keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=
output_file)
print('MODEL SAVED')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def keras_to_tensorflow(keras_model, output_dir: Path, model_name,
    out_prefix='output_', log_tensorboard=True):
    """Freeze a keras model and write it as a TensorFlow GraphDef (.pb).

    Args:
        keras_model: loaded keras model to export.
        output_dir: directory the frozen graph is written to (created if missing).
        model_name: filename of the .pb file inside output_dir.
        out_prefix: name prefix given to the exported output nodes.
        log_tensorboard: also import the frozen graph into TensorBoard logs.
    """
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
    output_dir: str = str(output_dir)
    out_nodes = []
    # Alias every model output to a predictable node name: output_1, output_2, ...
    for i, output_tensor in enumerate(keras_model.outputs, start=1):
        node_name = out_prefix + str(i)
        out_nodes.append(node_name)
        # BUG FIX: the original indexed keras_model.output[i], which slices
        # the single output tensor instead of selecting the i-th output.
        tf.identity(output_tensor, node_name)
    sess = K.get_session()
    init_graph = sess.graph.as_graph_def()
    # Bake current variable values into constants so the graph is standalone.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph,
        out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False
        )
    if log_tensorboard:
        import_pb_to_tensorboard.import_to_tensorboard(os.path.join(
            output_dir, model_name), output_dir)
<|reserved_special_token_0|>
def squeezenet_fire_module(input, input_channel_small=16,
    input_channel_large=64):
    """SqueezeNet 'fire' block: a 1x1 squeeze conv followed by parallel 1x1
    and 3x3 expand convs whose outputs are concatenated on the channel axis.

    Args:
        input: 4-D input tensor; the name shadows the builtin ``input`` but
            is kept for backward compatibility with keyword callers.
        input_channel_small: filters in the squeeze (1x1) layer.
        input_channel_large: filters in each expand branch.

    Returns:
        Tensor with 2 * input_channel_large channels.
    """
    channel_axis = 3  # channels-last (NHWC) layout
    # Use fresh local names instead of repeatedly rebinding the builtin-
    # shadowing parameter.
    squeezed = Conv2D(input_channel_small, (1, 1), padding='valid')(input)
    squeezed = Activation('relu')(squeezed)
    expand_1x1 = Conv2D(input_channel_large, (1, 1), padding='valid')(squeezed)
    expand_1x1 = Activation('relu')(expand_1x1)
    expand_3x3 = Conv2D(input_channel_large, (3, 3), padding='same')(squeezed)
    expand_3x3 = Activation('relu')(expand_3x3)
    return concatenate([expand_1x1, expand_3x3], axis=channel_axis)
def SqueezeNet(input_shape=(224, 224, 3)):
    """Build the SqueezeNet classifier (1000-way softmax) as a keras Model."""
    image_input = Input(shape=input_shape)
    # Stem: strided 3x3 conv plus max-pool.
    x = Conv2D(64, (3, 3), strides=(2, 2), padding='valid')(image_input)
    x = Activation('relu')(x)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
    # (squeeze, expand) channel sizes per fire module; None marks a max-pool
    # inserted between stages.  The sequence reproduces the original layout.
    stages = ((16, 64), (16, 64), None, (32, 128), (32, 128), None,
              (48, 192), (48, 192), (64, 256), (64, 256))
    for stage in stages:
        if stage is None:
            x = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)
        else:
            small, large = stage
            x = squeezenet_fire_module(input=x, input_channel_small=small,
                input_channel_large=large)
    # Head: 1x1 conv to 1000 classes, global average pool, softmax.
    x = Conv2D(1000, kernel_size=(1, 1), padding='valid', name='last_conv')(x)
    x = Activation('relu')(x)
    x = GlobalAvgPool2D()(x)
    x = Activation('softmax', name='output')(x)
    return Model(inputs=image_input, outputs=x)
def get_tf_filename(keras_filename) ->str:
    """Map a keras weights filename to its TensorFlow graph counterpart.

    Only a trailing '.h5' extension is rewritten to '.pb'; the previous
    ``str.replace`` swapped every '.h5' occurrence anywhere in the path.
    Filenames without the extension are returned unchanged, as before.
    """
    if keras_filename.endswith('.h5'):
        return keras_filename[:-len('.h5')] + '.pb'
    return keras_filename
def main(opt):
    """Load SqueezeNet keras weights and export them as a frozen TF graph."""
    weights_dir = Path('../weights')
    keras_weights = weights_dir / opt.model_path
    if not keras_weights.exists():
        raise ValueError(f'Invalid model path: {keras_weights}')
    print(f"Loading keras model: '{keras_weights}'")
    # Rebuild the architecture, then load the trained weights into it.
    model = SqueezeNet()
    model.load_weights(keras_weights)
    # NOTE(review): this name includes the directory; write_graph joins it
    # with output_dir again, which still resolves under ../weights — confirm
    # this is intended.
    pb_name = get_tf_filename(str(keras_weights))
    keras_to_tensorflow(model, output_dir=weights_dir, model_name=pb_name)
    print('MODEL SAVED')
if __name__ == '__main__':
    # CLI entry point: parse the weights filename, then run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str, default='squeezenet.h5',
        help=
        'filename of model to convert. Path should be relative to the ./training/models/ folder'
        )
    opt = parser.parse_args()
    main(opt)
<|reserved_special_token_1|>
# Standard Library imports:
import argparse
import os
from pathlib import Path
from typing import Dict, List
# 3rd Party imports:
import keras.backend as K
from keras.layers import *
from keras.models import Model
import tensorflow as tf
from tensorflow.python.framework import graph_io, graph_util
from tensorflow.python.tools import import_pb_to_tensorboard
def keras_to_tensorflow(
    keras_model,
    output_dir: Path,
    model_name,
    out_prefix="output_",
    log_tensorboard=True,
):
    """Freeze a keras model and write it as a TensorFlow GraphDef (.pb).

    Args:
        keras_model: loaded keras model to export.
        output_dir: directory the frozen graph is written to (created if missing).
        model_name: filename of the .pb file inside output_dir.
        out_prefix: name prefix given to the exported output nodes.
        log_tensorboard: also import the frozen graph into TensorBoard logs.
    """
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
    output_dir: str = str(output_dir)
    out_nodes = []
    # Alias every model output to a predictable node name: output_1, output_2, ...
    for i, output_tensor in enumerate(keras_model.outputs, start=1):
        node_name = out_prefix + str(i)
        out_nodes.append(node_name)
        # BUG FIX: was keras_model.output[i], which slices the single output
        # tensor rather than selecting the i-th model output.
        tf.identity(output_tensor, node_name)
    sess = K.get_session()
    init_graph = sess.graph.as_graph_def()
    # Bake current variable values into constants so the graph is standalone.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
    if log_tensorboard:
        import_pb_to_tensorboard.import_to_tensorboard(
            os.path.join(output_dir, model_name), output_dir
        )
"""
We explicitly redefine the SqueezeNet architecture since Keras has no
predefined SqueezeNet model.
"""
def squeezenet_fire_module(input, input_channel_small=16, input_channel_large=64):
    """One SqueezeNet "fire" module.

    A 1x1 "squeeze" convolution reduces the channel count, then two parallel
    "expand" convolutions (1x1 and 3x3) are applied and concatenated along
    the channel axis.

    Args:
        input: input tensor (channels-last). The parameter keeps the name
            ``input`` (shadowing the builtin) for keyword-call compatibility
            with existing callers.
        input_channel_small: channel count of the squeeze convolution.
        input_channel_large: channel count of each expand branch.

    Returns:
        Tensor with ``2 * input_channel_large`` channels.
    """
    channel_axis = 3  # channels-last layout

    # Squeeze: reduce channels with a 1x1 convolution.
    squeezed = Conv2D(input_channel_small, (1, 1), padding="valid")(input)
    squeezed = Activation("relu")(squeezed)

    # Expand branch 1: 1x1 convolution.
    expand_1x1 = Conv2D(input_channel_large, (1, 1), padding="valid")(squeezed)
    expand_1x1 = Activation("relu")(expand_1x1)

    # Expand branch 2: 3x3 convolution (same padding keeps spatial size).
    expand_3x3 = Conv2D(input_channel_large, (3, 3), padding="same")(squeezed)
    expand_3x3 = Activation("relu")(expand_3x3)

    return concatenate([expand_1x1, expand_3x3], axis=channel_axis)
def SqueezeNet(input_shape=(224, 224, 3)):
    """Build and return a fresh Keras SqueezeNet model.

    Stem conv/pool, a stack of eight fire modules (with max-pooling after
    the second and fourth), then a 1x1 conv to 1000 classes, global average
    pooling and a softmax output.
    """
    image_input = Input(shape=input_shape)

    # Stem: strided conv + relu + max-pool.
    x = Conv2D(64, (3, 3), strides=(2, 2), padding="valid")(image_input)
    x = Activation("relu")(x)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)

    # (squeeze channels, expand channels, pool afterwards?) per fire module.
    fire_plan = [
        (16, 64, False),
        (16, 64, True),
        (32, 128, False),
        (32, 128, True),
        (48, 192, False),
        (48, 192, False),
        (64, 256, False),
        (64, 256, False),
    ]
    for squeeze_ch, expand_ch, pool_after in fire_plan:
        x = squeezenet_fire_module(
            input=x, input_channel_small=squeeze_ch, input_channel_large=expand_ch
        )
        if pool_after:
            x = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(x)

    # Head: 1x1 conv to 1000 classes, global average pooling, softmax.
    # (Training-only layers such as Dropout/BatchNormalization are omitted.)
    x = Conv2D(1000, kernel_size=(1, 1), padding="valid", name="last_conv")(x)
    x = Activation("relu")(x)
    x = GlobalAvgPool2D()(x)
    x = Activation("softmax", name="output")(x)

    return Model(inputs=image_input, outputs=x)
def get_tf_filename(keras_filename) -> str:
    """Map a Keras weights filename (``*.h5``) to a TensorFlow graph name (``*.pb``).

    Only a trailing ``.h5`` is rewritten; the previous ``str.replace`` would
    also rewrite ``.h5`` occurring in the middle of the path. Filenames
    without a ``.h5`` suffix are returned unchanged.
    """
    if keras_filename.endswith(".h5"):
        return keras_filename[: -len(".h5")] + ".pb"
    return keras_filename
def main(opt):
    """Convert a Keras .h5 model into a frozen TensorFlow .pb graph.

    Args:
        opt: parsed argparse namespace with a ``model_path`` attribute,
            a filename relative to the ../weights directory.

    Raises:
        ValueError: if the requested weights file does not exist.
    """
    # All model files live under ../weights; opt.model_path is relative to it.
    weights_path: Path = Path("../weights")
    model_path = weights_path / opt.model_path
    if not model_path.exists():
        raise ValueError(f"Invalid model path: {model_path}")

    print(f"Loading keras model: '{model_path}'")
    # Rebuild the architecture, then load the trained weights into it.
    keras_model = SqueezeNet()
    keras_model.load_weights(model_path)
    # Pass only the bare filename: keras_to_tensorflow joins model_name onto
    # output_dir itself, so handing it the full path would double the
    # "../weights" prefix inside the written path.
    output_file = get_tf_filename(model_path.name)
    keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=output_file)
    print("MODEL SAVED")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
default="squeezenet.h5",
help="filename of model to convert. Path should be relative to the ./training/models/ folder",
)
opt = parser.parse_args()
main(opt)
|
flexible
|
{
"blob_id": "a5f3af6fc890f61eecb35bd157fc51bb65b4c586",
"index": 3958,
"step-1": "<mask token>\n\n\ndef squeezenet_fire_module(input, input_channel_small=16,\n input_channel_large=64):\n channel_axis = 3\n input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)\n input = Activation('relu')(input)\n input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input\n )\n input_branch_1 = Activation('relu')(input_branch_1)\n input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)\n input_branch_2 = Activation('relu')(input_branch_2)\n input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)\n return input\n\n\n<mask token>\n\n\ndef main(opt):\n \"\"\"Convert a model from keras to tensorflow lite.\"\"\"\n weights_path: Path = Path('../weights')\n model_path = weights_path / opt.model_path\n if not model_path.exists():\n raise ValueError(f'Invalid model path: {model_path}')\n print(f\"Loading keras model: '{model_path}'\")\n keras_model = SqueezeNet()\n keras_model.load_weights(model_path)\n output_file = get_tf_filename(str(model_path))\n keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=\n output_file)\n print('MODEL SAVED')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef keras_to_tensorflow(keras_model, output_dir: Path, model_name,\n out_prefix='output_', log_tensorboard=True):\n \"\"\"Convert from keras to tf\"\"\"\n if not output_dir.exists():\n output_dir.mkdir(parents=True, exist_ok=True)\n output_dir: str = str(output_dir)\n out_nodes = []\n for i in range(len(keras_model.outputs)):\n out_nodes.append(out_prefix + str(i + 1))\n tf.identity(keras_model.output[i], out_prefix + str(i + 1))\n sess = K.get_session()\n init_graph = sess.graph.as_graph_def()\n main_graph = graph_util.convert_variables_to_constants(sess, init_graph,\n out_nodes)\n graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False\n )\n if log_tensorboard:\n import_pb_to_tensorboard.import_to_tensorboard(os.path.join(\n output_dir, model_name), output_dir)\n\n\n<mask token>\n\n\ndef squeezenet_fire_module(input, input_channel_small=16,\n input_channel_large=64):\n channel_axis = 3\n input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)\n input = Activation('relu')(input)\n input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input\n )\n input_branch_1 = Activation('relu')(input_branch_1)\n input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)\n input_branch_2 = Activation('relu')(input_branch_2)\n input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)\n return input\n\n\ndef SqueezeNet(input_shape=(224, 224, 3)):\n \"\"\"Returns a new keras SqueezeNet model\"\"\"\n image_input = Input(shape=input_shape)\n network = Conv2D(64, (3, 3), strides=(2, 2), padding='valid')(image_input)\n network = Activation('relu')(network)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=16,\n input_channel_large=64)\n network = squeezenet_fire_module(input=network, input_channel_small=16,\n input_channel_large=64)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 
2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=32,\n input_channel_large=128)\n network = squeezenet_fire_module(input=network, input_channel_small=32,\n input_channel_large=128)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=48,\n input_channel_large=192)\n network = squeezenet_fire_module(input=network, input_channel_small=48,\n input_channel_large=192)\n network = squeezenet_fire_module(input=network, input_channel_small=64,\n input_channel_large=256)\n network = squeezenet_fire_module(input=network, input_channel_small=64,\n input_channel_large=256)\n network = Conv2D(1000, kernel_size=(1, 1), padding='valid', name=\n 'last_conv')(network)\n network = Activation('relu')(network)\n network = GlobalAvgPool2D()(network)\n network = Activation('softmax', name='output')(network)\n input_image = image_input\n model = Model(inputs=input_image, outputs=network)\n return model\n\n\n<mask token>\n\n\ndef main(opt):\n \"\"\"Convert a model from keras to tensorflow lite.\"\"\"\n weights_path: Path = Path('../weights')\n model_path = weights_path / opt.model_path\n if not model_path.exists():\n raise ValueError(f'Invalid model path: {model_path}')\n print(f\"Loading keras model: '{model_path}'\")\n keras_model = SqueezeNet()\n keras_model.load_weights(model_path)\n output_file = get_tf_filename(str(model_path))\n keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=\n output_file)\n print('MODEL SAVED')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef keras_to_tensorflow(keras_model, output_dir: Path, model_name,\n out_prefix='output_', log_tensorboard=True):\n \"\"\"Convert from keras to tf\"\"\"\n if not output_dir.exists():\n output_dir.mkdir(parents=True, exist_ok=True)\n output_dir: str = str(output_dir)\n out_nodes = []\n for i in range(len(keras_model.outputs)):\n out_nodes.append(out_prefix + str(i + 1))\n tf.identity(keras_model.output[i], out_prefix + str(i + 1))\n sess = K.get_session()\n init_graph = sess.graph.as_graph_def()\n main_graph = graph_util.convert_variables_to_constants(sess, init_graph,\n out_nodes)\n graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False\n )\n if log_tensorboard:\n import_pb_to_tensorboard.import_to_tensorboard(os.path.join(\n output_dir, model_name), output_dir)\n\n\n<mask token>\n\n\ndef squeezenet_fire_module(input, input_channel_small=16,\n input_channel_large=64):\n channel_axis = 3\n input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)\n input = Activation('relu')(input)\n input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input\n )\n input_branch_1 = Activation('relu')(input_branch_1)\n input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)\n input_branch_2 = Activation('relu')(input_branch_2)\n input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)\n return input\n\n\ndef SqueezeNet(input_shape=(224, 224, 3)):\n \"\"\"Returns a new keras SqueezeNet model\"\"\"\n image_input = Input(shape=input_shape)\n network = Conv2D(64, (3, 3), strides=(2, 2), padding='valid')(image_input)\n network = Activation('relu')(network)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=16,\n input_channel_large=64)\n network = squeezenet_fire_module(input=network, input_channel_small=16,\n input_channel_large=64)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 
2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=32,\n input_channel_large=128)\n network = squeezenet_fire_module(input=network, input_channel_small=32,\n input_channel_large=128)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=48,\n input_channel_large=192)\n network = squeezenet_fire_module(input=network, input_channel_small=48,\n input_channel_large=192)\n network = squeezenet_fire_module(input=network, input_channel_small=64,\n input_channel_large=256)\n network = squeezenet_fire_module(input=network, input_channel_small=64,\n input_channel_large=256)\n network = Conv2D(1000, kernel_size=(1, 1), padding='valid', name=\n 'last_conv')(network)\n network = Activation('relu')(network)\n network = GlobalAvgPool2D()(network)\n network = Activation('softmax', name='output')(network)\n input_image = image_input\n model = Model(inputs=input_image, outputs=network)\n return model\n\n\ndef get_tf_filename(keras_filename) ->str:\n return keras_filename.replace('.h5', '.pb')\n\n\ndef main(opt):\n \"\"\"Convert a model from keras to tensorflow lite.\"\"\"\n weights_path: Path = Path('../weights')\n model_path = weights_path / opt.model_path\n if not model_path.exists():\n raise ValueError(f'Invalid model path: {model_path}')\n print(f\"Loading keras model: '{model_path}'\")\n keras_model = SqueezeNet()\n keras_model.load_weights(model_path)\n output_file = get_tf_filename(str(model_path))\n keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=\n output_file)\n print('MODEL SAVED')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef keras_to_tensorflow(keras_model, output_dir: Path, model_name,\n out_prefix='output_', log_tensorboard=True):\n \"\"\"Convert from keras to tf\"\"\"\n if not output_dir.exists():\n output_dir.mkdir(parents=True, exist_ok=True)\n output_dir: str = str(output_dir)\n out_nodes = []\n for i in range(len(keras_model.outputs)):\n out_nodes.append(out_prefix + str(i + 1))\n tf.identity(keras_model.output[i], out_prefix + str(i + 1))\n sess = K.get_session()\n init_graph = sess.graph.as_graph_def()\n main_graph = graph_util.convert_variables_to_constants(sess, init_graph,\n out_nodes)\n graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False\n )\n if log_tensorboard:\n import_pb_to_tensorboard.import_to_tensorboard(os.path.join(\n output_dir, model_name), output_dir)\n\n\n<mask token>\n\n\ndef squeezenet_fire_module(input, input_channel_small=16,\n input_channel_large=64):\n channel_axis = 3\n input = Conv2D(input_channel_small, (1, 1), padding='valid')(input)\n input = Activation('relu')(input)\n input_branch_1 = Conv2D(input_channel_large, (1, 1), padding='valid')(input\n )\n input_branch_1 = Activation('relu')(input_branch_1)\n input_branch_2 = Conv2D(input_channel_large, (3, 3), padding='same')(input)\n input_branch_2 = Activation('relu')(input_branch_2)\n input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)\n return input\n\n\ndef SqueezeNet(input_shape=(224, 224, 3)):\n \"\"\"Returns a new keras SqueezeNet model\"\"\"\n image_input = Input(shape=input_shape)\n network = Conv2D(64, (3, 3), strides=(2, 2), padding='valid')(image_input)\n network = Activation('relu')(network)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=16,\n input_channel_large=64)\n network = squeezenet_fire_module(input=network, input_channel_small=16,\n input_channel_large=64)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 
2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=32,\n input_channel_large=128)\n network = squeezenet_fire_module(input=network, input_channel_small=32,\n input_channel_large=128)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n network = squeezenet_fire_module(input=network, input_channel_small=48,\n input_channel_large=192)\n network = squeezenet_fire_module(input=network, input_channel_small=48,\n input_channel_large=192)\n network = squeezenet_fire_module(input=network, input_channel_small=64,\n input_channel_large=256)\n network = squeezenet_fire_module(input=network, input_channel_small=64,\n input_channel_large=256)\n network = Conv2D(1000, kernel_size=(1, 1), padding='valid', name=\n 'last_conv')(network)\n network = Activation('relu')(network)\n network = GlobalAvgPool2D()(network)\n network = Activation('softmax', name='output')(network)\n input_image = image_input\n model = Model(inputs=input_image, outputs=network)\n return model\n\n\ndef get_tf_filename(keras_filename) ->str:\n return keras_filename.replace('.h5', '.pb')\n\n\ndef main(opt):\n \"\"\"Convert a model from keras to tensorflow lite.\"\"\"\n weights_path: Path = Path('../weights')\n model_path = weights_path / opt.model_path\n if not model_path.exists():\n raise ValueError(f'Invalid model path: {model_path}')\n print(f\"Loading keras model: '{model_path}'\")\n keras_model = SqueezeNet()\n keras_model.load_weights(model_path)\n output_file = get_tf_filename(str(model_path))\n keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=\n output_file)\n print('MODEL SAVED')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--model_path', type=str, default='squeezenet.h5',\n help=\n 'filename of model to convert. Path should be relative to the ./training/models/ folder'\n )\n opt = parser.parse_args()\n main(opt)\n",
"step-5": "# Standard Library imports:\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List\n\n# 3rd Party imports:\nimport keras.backend as K\nfrom keras.layers import *\nfrom keras.models import Model\nimport tensorflow as tf\nfrom tensorflow.python.framework import graph_io, graph_util\nfrom tensorflow.python.tools import import_pb_to_tensorboard\n\n\ndef keras_to_tensorflow(\n keras_model,\n output_dir: Path,\n model_name,\n out_prefix=\"output_\",\n log_tensorboard=True,\n):\n \"\"\"Convert from keras to tf\"\"\"\n if not output_dir.exists():\n output_dir.mkdir(parents=True, exist_ok=True)\n output_dir: str = str(output_dir)\n\n out_nodes = []\n\n for i in range(len(keras_model.outputs)):\n out_nodes.append(out_prefix + str(i + 1))\n tf.identity(keras_model.output[i], out_prefix + str(i + 1))\n\n sess = K.get_session()\n\n init_graph = sess.graph.as_graph_def()\n\n main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)\n\n graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)\n\n if log_tensorboard:\n import_pb_to_tensorboard.import_to_tensorboard(\n os.path.join(output_dir, model_name), output_dir\n )\n\n\n\"\"\"\nWe explicitly redefine the SqueezNet architecture since Keras has no predefined\nSqueezNet\n\"\"\"\n\n\ndef squeezenet_fire_module(input, input_channel_small=16, input_channel_large=64):\n\n channel_axis = 3\n\n input = Conv2D(input_channel_small, (1, 1), padding=\"valid\")(input)\n input = Activation(\"relu\")(input)\n\n input_branch_1 = Conv2D(input_channel_large, (1, 1), padding=\"valid\")(input)\n input_branch_1 = Activation(\"relu\")(input_branch_1)\n\n input_branch_2 = Conv2D(input_channel_large, (3, 3), padding=\"same\")(input)\n input_branch_2 = Activation(\"relu\")(input_branch_2)\n\n input = concatenate([input_branch_1, input_branch_2], axis=channel_axis)\n\n return input\n\n\ndef SqueezeNet(input_shape=(224, 224, 3)):\n \"\"\"Returns a new keras SqueezeNet 
model\"\"\"\n image_input = Input(shape=input_shape)\n\n network = Conv2D(64, (3, 3), strides=(2, 2), padding=\"valid\")(image_input)\n network = Activation(\"relu\")(network)\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=16, input_channel_large=64\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=16, input_channel_large=64\n )\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=32, input_channel_large=128\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=32, input_channel_large=128\n )\n network = MaxPool2D(pool_size=(3, 3), strides=(2, 2))(network)\n\n network = squeezenet_fire_module(\n input=network, input_channel_small=48, input_channel_large=192\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=48, input_channel_large=192\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=64, input_channel_large=256\n )\n network = squeezenet_fire_module(\n input=network, input_channel_small=64, input_channel_large=256\n )\n\n # Remove layers like Dropout and BatchNormalization, they are only needed in training\n # network = Dropout(0.5)(network)\n\n network = Conv2D(1000, kernel_size=(1, 1), padding=\"valid\", name=\"last_conv\")(\n network\n )\n network = Activation(\"relu\")(network)\n\n network = GlobalAvgPool2D()(network)\n network = Activation(\"softmax\", name=\"output\")(network)\n\n input_image = image_input\n model = Model(inputs=input_image, outputs=network)\n\n return model\n\n\ndef get_tf_filename(keras_filename) -> str:\n return keras_filename.replace(\".h5\", \".pb\")\n\n\ndef main(opt):\n \"\"\"Convert a model from keras to tensorflow lite.\"\"\"\n weights_path: Path = Path(\"../weights\")\n model_path = weights_path / opt.model_path\n if not model_path.exists():\n raise 
ValueError(f\"Invalid model path: {model_path}\")\n\n print(f\"Loading keras model: '{model_path}'\")\n keras_model = SqueezeNet()\n keras_model.load_weights(model_path)\n output_file = get_tf_filename(str(model_path))\n keras_to_tensorflow(keras_model, output_dir=weights_path, model_name=output_file)\n print(\"MODEL SAVED\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_path\",\n type=str,\n default=\"squeezenet.h5\",\n help=\"filename of model to convert. Path should be relative to the ./training/models/ folder\",\n )\n opt = parser.parse_args()\n main(opt)\n",
"step-ids": [
2,
4,
5,
6,
8
]
}
|
[
2,
4,
5,
6,
8
] |
<|reserved_special_token_0|>
def adjustMotorPowers() -> None:
    """Trim the slave (left) motor power so the robot tracks straight.

    Proportional correction on the mismatch between the two wheel encoder
    counts; mutates the module-level ``slave_power`` in place and resets the
    encoder counts for the next control cycle.
    """
    global slave_power
    global en_left
    global en_right
    global kp
    # The sum of the two counts is used as the drift error — presumably the
    # two encoders count with opposite signs (the left motor runs at negative
    # power), so a zero sum means both wheels turned equally. TODO confirm.
    error = en_right + en_left
    # NOTE(review): dividing by kp (= 0.5) *amplifies* the error; a
    # conventional P-controller multiplies by the gain (error * kp) —
    # confirm this is intended.
    slave_power -= error / kp
    # Clear the counts so the next cycle measures fresh drift, then give the
    # motors ~100 ms to respond before the next reading.
    encoders.clear()
    time.sleep(0.1)
def readEncoder() -> None:
    """Poll both wheel encoders and update snapshots plus running totals.

    Updates the module-level ``en_left``/``en_right`` snapshots and the
    ``left_num_revs``/``right_num_revs`` accumulators whenever either
    reading changed since the previous poll.
    """
    global en_left
    global en_right
    global right_num_revs
    global left_num_revs
    new_en_left, new_en_right = encoders.read()
    if new_en_right != en_right or new_en_left != en_left:
        en_right = new_en_right
        # NOTE(review): this adds the full current reading, not the delta
        # from the previous one; correct only if encoders.read() reports
        # counts since the last encoders.clear() — verify against the
        # encoders module.
        right_num_revs += en_right
        en_left = new_en_left
        left_num_revs += en_left
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('.')
<|reserved_special_token_0|>
encoders.init()
motors.init()
<|reserved_special_token_0|>
def adjustMotorPowers() -> None:
    """Trim the slave (left) motor power so the robot tracks straight.

    Proportional correction on the mismatch between the two wheel encoder
    counts; mutates the module-level ``slave_power`` in place and resets the
    encoder counts for the next control cycle.
    """
    global slave_power
    global en_left
    global en_right
    global kp
    # The sum of the two counts is used as the drift error — presumably the
    # two encoders count with opposite signs (the left motor runs at negative
    # power), so a zero sum means both wheels turned equally. TODO confirm.
    error = en_right + en_left
    # NOTE(review): dividing by kp (= 0.5) *amplifies* the error; a
    # conventional P-controller multiplies by the gain (error * kp) —
    # confirm this is intended.
    slave_power -= error / kp
    # Clear the counts so the next cycle measures fresh drift, then give the
    # motors ~100 ms to respond before the next reading.
    encoders.clear()
    time.sleep(0.1)
def readEncoder() -> None:
    """Poll both wheel encoders and update snapshots plus running totals.

    Updates the module-level ``en_left``/``en_right`` snapshots and the
    ``left_num_revs``/``right_num_revs`` accumulators whenever either
    reading changed since the previous poll.
    """
    global en_left
    global en_right
    global right_num_revs
    global left_num_revs
    new_en_left, new_en_right = encoders.read()
    if new_en_right != en_right or new_en_left != en_left:
        en_right = new_en_right
        # NOTE(review): this adds the full current reading, not the delta
        # from the previous one; correct only if encoders.read() reports
        # counts since the last encoders.clear() — verify against the
        # encoders module.
        right_num_revs += en_right
        en_left = new_en_left
        left_num_revs += en_left
print('Using settings file ' + SETTINGS_FILE + '.ini')
if not os.path.exists(SETTINGS_FILE + '.ini'):
print('Settings file does not exist, will be created')
<|reserved_special_token_0|>
print('IMU Name: ' + imu.IMUName())
if not imu.IMUInit():
print('IMU Init Failed')
sys.exit(1)
else:
print('IMU Init Succeeded')
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
<|reserved_special_token_0|>
print('Recommended Poll Interval: %dmS\n' % poll_interval)
<|reserved_special_token_0|>
while True:
if imu.IMURead():
data = imu.getIMUData()
fusionPose = data['fusionPose']
x = math.degrees(fusionPose[0])
y = math.degrees(fusionPose[1])
z = math.degrees(fusionPose[2])
if abs(x - old_x) > 0.3 or abs(y - old_y) > 0.3 or abs(z - old_z
) > 0.3:
print('r: %f p: %f y: %f' % (math.degrees(fusionPose[0]), math.
degrees(fusionPose[1]), math.degrees(fusionPose[2])))
old_x = x
old_y = y
old_z = z
time.sleep(poll_interval * 1.0 / 1000.0)
try:
print(str(right_num_revs) + ' ' + str(left_num_revs))
motors.speed(slave_power, master_power)
adjustMotorPowers()
readEncoder()
except KeyboardInterrupt:
break
motors.cleanup()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('.')
<|reserved_special_token_0|>
# Right motor is the master (fixed power); the left motor is the slave whose
# power gets trimmed by adjustMotorPowers(). The negative slave power
# presumably reflects a mirrored motor mounting — confirm against the wiring.
master_power = 0.6
slave_power = -0.6
# Cumulative encoder totals over the whole run (printed in the main loop).
right_num_revs = 0
left_num_revs = 0
# Proportional gain used by adjustMotorPowers() (the error is divided by it).
kp = 0.5
encoders.init()
motors.init()
# Initial encoder snapshot; readEncoder() compares against these to detect
# changes.
en_left, en_right = encoders.read()
# Basename of the RTIMULib settings/calibration file ('.ini' is appended).
SETTINGS_FILE = 'RTIMULib'
def adjustMotorPowers() -> None:
    """Trim the slave (left) motor power so the robot tracks straight.

    Proportional correction on the mismatch between the two wheel encoder
    counts; mutates the module-level ``slave_power`` in place and resets the
    encoder counts for the next control cycle.
    """
    global slave_power
    global en_left
    global en_right
    global kp
    # The sum of the two counts is used as the drift error — presumably the
    # two encoders count with opposite signs (the left motor runs at negative
    # power), so a zero sum means both wheels turned equally. TODO confirm.
    error = en_right + en_left
    # NOTE(review): dividing by kp (= 0.5) *amplifies* the error; a
    # conventional P-controller multiplies by the gain (error * kp) —
    # confirm this is intended.
    slave_power -= error / kp
    # Clear the counts so the next cycle measures fresh drift, then give the
    # motors ~100 ms to respond before the next reading.
    encoders.clear()
    time.sleep(0.1)
def readEncoder() -> None:
    """Poll both wheel encoders and update snapshots plus running totals.

    Updates the module-level ``en_left``/``en_right`` snapshots and the
    ``left_num_revs``/``right_num_revs`` accumulators whenever either
    reading changed since the previous poll.
    """
    global en_left
    global en_right
    global right_num_revs
    global left_num_revs
    new_en_left, new_en_right = encoders.read()
    if new_en_right != en_right or new_en_left != en_left:
        en_right = new_en_right
        # NOTE(review): this adds the full current reading, not the delta
        # from the previous one; correct only if encoders.read() reports
        # counts since the last encoders.clear() — verify against the
        # encoders module.
        right_num_revs += en_right
        en_left = new_en_left
        left_num_revs += en_left
# --- RTIMULib setup --------------------------------------------------------
print('Using settings file ' + SETTINGS_FILE + '.ini')
if not os.path.exists(SETTINGS_FILE + '.ini'):
    print('Settings file does not exist, will be created')
# RTIMULib persists calibration in <SETTINGS_FILE>.ini and creates the file
# on first use.
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
print('IMU Name: ' + imu.IMUName())
if not imu.IMUInit():
    print('IMU Init Failed')
    sys.exit(1)
else:
    print('IMU Init Succeeded')
# Fusion parameters: slerp power is RTIMULib's sensor-fusion blend factor;
# enable all three sensor sources for the pose estimate.
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
poll_interval = imu.IMUGetPollInterval()
print('Recommended Poll Interval: %dmS\n' % poll_interval)
# Last printed roll/pitch/yaw, in degrees, used to suppress duplicate output.
old_x = 0
old_y = 0
old_z = 0
# --- Main loop: report pose, drive motors, keep the robot straight ---------
while True:
    if imu.IMURead():
        data = imu.getIMUData()
        fusionPose = data['fusionPose']
        # Fused pose comes back in radians: (roll, pitch, yaw).
        x = math.degrees(fusionPose[0])
        y = math.degrees(fusionPose[1])
        z = math.degrees(fusionPose[2])
        # Only print when some axis moved by more than 0.3 degrees.
        if abs(x - old_x) > 0.3 or abs(y - old_y) > 0.3 or abs(z - old_z
            ) > 0.3:
            print('r: %f p: %f y: %f' % (math.degrees(fusionPose[0]), math.
                degrees(fusionPose[1]), math.degrees(fusionPose[2])))
        old_x = x
        old_y = y
        old_z = z
        time.sleep(poll_interval * 1.0 / 1000.0)
    try:
        print(str(right_num_revs) + ' ' + str(left_num_revs))
        # Right motor is the master, left is the slave (see module globals).
        motors.speed(slave_power, master_power)
        adjustMotorPowers()
        readEncoder()
    except KeyboardInterrupt:
        # NOTE(review): only the motor-control section is guarded; Ctrl-C
        # during the IMU read/print above will bypass motors.cleanup().
        break
motors.cleanup()
<|reserved_special_token_1|>
import sys, getopt
sys.path.append('.')
import RTIMU
import os.path
import time
import math
import encoders
import motors
# Right motor is the master (fixed power); the left motor is the slave whose
# power gets trimmed by adjustMotorPowers(). The negative slave power
# presumably reflects a mirrored motor mounting — confirm against the wiring.
master_power = 0.6
slave_power = -0.6
# Cumulative encoder totals over the whole run (printed in the main loop).
right_num_revs = 0
left_num_revs = 0
# Proportional gain used by adjustMotorPowers() (the error is divided by it).
kp = 0.5
encoders.init()
motors.init()
# Initial encoder snapshot; readEncoder() compares against these to detect
# changes.
en_left, en_right = encoders.read()
# Basename of the RTIMULib settings/calibration file ('.ini' is appended).
SETTINGS_FILE = 'RTIMULib'
def adjustMotorPowers():
    """Apply a proportional straight-line correction to the slave motor.

    Uses the combined encoder counts as the drift error, nudges the
    module-level ``slave_power`` accordingly, then clears the counters and
    pauses briefly before the next control cycle.
    """
    global slave_power
    global en_left
    global en_right
    global kp
    drift = en_left + en_right
    slave_power = slave_power - drift / kp
    encoders.clear()
    time.sleep(0.1)
def readEncoder():
    """Refresh the encoder snapshots and running totals when counts change.

    Compares a fresh ``encoders.read()`` against the stored module-level
    snapshots; on any change, stores the new values and folds them into the
    per-wheel revolution totals.
    """
    global en_left
    global en_right
    global right_num_revs
    global left_num_revs
    fresh_left, fresh_right = encoders.read()
    if (fresh_left, fresh_right) != (en_left, en_right):
        en_left = fresh_left
        en_right = fresh_right
        right_num_revs += en_right
        left_num_revs += en_left
# --- RTIMULib setup --------------------------------------------------------
print('Using settings file ' + SETTINGS_FILE + '.ini')
if not os.path.exists(SETTINGS_FILE + '.ini'):
    print('Settings file does not exist, will be created')
# RTIMULib persists calibration in <SETTINGS_FILE>.ini and creates the file
# on first use.
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
print('IMU Name: ' + imu.IMUName())
if not imu.IMUInit():
    print('IMU Init Failed')
    sys.exit(1)
else:
    print('IMU Init Succeeded')
# Fusion parameters: slerp power is RTIMULib's sensor-fusion blend factor;
# enable all three sensor sources for the pose estimate.
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
poll_interval = imu.IMUGetPollInterval()
print('Recommended Poll Interval: %dmS\n' % poll_interval)
# Last printed roll/pitch/yaw, in degrees, used to suppress duplicate output.
old_x = 0
old_y = 0
old_z = 0
# --- Main loop: report pose, drive motors, keep the robot straight ---------
while True:
    if imu.IMURead():
        data = imu.getIMUData()
        fusionPose = data['fusionPose']
        # Fused pose comes back in radians: (roll, pitch, yaw).
        x = math.degrees(fusionPose[0])
        y = math.degrees(fusionPose[1])
        z = math.degrees(fusionPose[2])
        # Only print when some axis moved by more than 0.3 degrees.
        if abs(x - old_x) > 0.3 or abs(y - old_y) > 0.3 or abs(z - old_z
            ) > 0.3:
            print('r: %f p: %f y: %f' % (math.degrees(fusionPose[0]), math.
                degrees(fusionPose[1]), math.degrees(fusionPose[2])))
        old_x = x
        old_y = y
        old_z = z
        time.sleep(poll_interval * 1.0 / 1000.0)
    try:
        print(str(right_num_revs) + ' ' + str(left_num_revs))
        # Right motor is the master, left is the slave (see module globals).
        motors.speed(slave_power, master_power)
        adjustMotorPowers()
        readEncoder()
    except KeyboardInterrupt:
        # NOTE(review): only the motor-control section is guarded; Ctrl-C
        # during the IMU read/print above will bypass motors.cleanup().
        break
motors.cleanup()
<|reserved_special_token_1|>
import sys, getopt
sys.path.append('.')
import RTIMU
import os.path
import time
import math
import encoders
import motors
# Right motor is the master (fixed power); the left motor is the slave whose
# power gets trimmed by adjustMotorPowers(). The negative slave power
# presumably reflects a mirrored motor mounting — confirm against the wiring.
master_power = .6
slave_power = -.6
# Cumulative encoder totals over the whole run (printed in the main loop).
right_num_revs = 0
left_num_revs = 0
# Proportional gain used by adjustMotorPowers() (the error is divided by it).
kp = .5
encoders.init()
motors.init()
# Initial encoder snapshot; readEncoder() compares against these to detect
# changes.
en_left, en_right = encoders.read()
# Basename of the RTIMULib settings/calibration file (".ini" is appended).
SETTINGS_FILE = "RTIMULib"
def adjustMotorPowers() -> None:
    """Trim the slave (left) motor power so the robot tracks straight.

    Proportional correction on the mismatch between the two wheel encoder
    counts; mutates the module-level ``slave_power`` in place and resets the
    encoder counts for the next control cycle.
    """
    global slave_power
    global en_left
    global en_right
    global kp
    # The sum of the two counts is used as the drift error — presumably the
    # two encoders count with opposite signs (the left motor runs at negative
    # power), so a zero sum means both wheels turned equally. TODO confirm.
    error = en_right + en_left
    # NOTE(review): dividing by kp (= .5) *amplifies* the error; a
    # conventional P-controller multiplies by the gain (error * kp) —
    # confirm this is intended.
    slave_power -= error/kp
    # Clear the counts so the next cycle measures fresh drift, then give the
    # motors ~100 ms to respond before the next reading.
    encoders.clear()
    time.sleep(.1)
def readEncoder() -> None:
    """Poll both wheel encoders and update snapshots plus running totals.

    Updates the module-level ``en_left``/``en_right`` snapshots and the
    ``left_num_revs``/``right_num_revs`` accumulators whenever either
    reading changed since the previous poll.
    """
    global en_left
    global en_right
    global right_num_revs
    global left_num_revs
    new_en_left, new_en_right = encoders.read()
    if(new_en_right != en_right or new_en_left != en_left):
        en_right = new_en_right
        # NOTE(review): this adds the full current reading, not the delta
        # from the previous one; correct only if encoders.read() reports
        # counts since the last encoders.clear() — verify against the
        # encoders module.
        right_num_revs += en_right
        en_left = new_en_left
        left_num_revs += en_left
print("Using settings file " + SETTINGS_FILE + ".ini")
if not os.path.exists(SETTINGS_FILE + ".ini"):
print("Settings file does not exist, will be created")
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
print("IMU Name: " + imu.IMUName())
if (not imu.IMUInit()):
print("IMU Init Failed")
sys.exit(1)
else:
print("IMU Init Succeeded")
# this is a good time to set any fusion parameters
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
poll_interval = imu.IMUGetPollInterval()
print("Recommended Poll Interval: %dmS\n" % poll_interval)
old_x = 0
old_y = 0
old_z = 0
while True:
if imu.IMURead():
# x, y, z = imu.getFusionData()
# print("%f %f %f" % (x,y,z))
data = imu.getIMUData()
fusionPose = data["fusionPose"]
x = math.degrees(fusionPose[0])
y = math.degrees(fusionPose[1])
z = math.degrees(fusionPose[2])
if(abs(x-old_x)>0.3 or abs(y-old_y)>0.3 or abs(z-old_z)>0.3):
print("r: %f p: %f y: %f" % (math.degrees(fusionPose[0]),math.degrees(fusionPose[1]), math.degrees(fusionPose[2])))
old_x = x
old_y = y
old_z = z
time.sleep(poll_interval*1.0/1000.0)
try:
print(str(right_num_revs)+" "+str(left_num_revs))
motors.speed(slave_power, master_power)
adjustMotorPowers()
readEncoder()
except KeyboardInterrupt:
break
motors.cleanup()
|
flexible
|
{
"blob_id": "00f8a56b160cab22bf73c0d2397eb2c411e8c966",
"index": 7714,
"step-1": "<mask token>\n\n\ndef adjustMotorPowers():\n global slave_power\n global en_left\n global en_right\n global kp\n error = en_right + en_left\n slave_power -= error / kp\n encoders.clear()\n time.sleep(0.1)\n\n\ndef readEncoder():\n global en_left\n global en_right\n global right_num_revs\n global left_num_revs\n new_en_left, new_en_right = encoders.read()\n if new_en_right != en_right or new_en_left != en_left:\n en_right = new_en_right\n right_num_revs += en_right\n en_left = new_en_left\n left_num_revs += en_left\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('.')\n<mask token>\nencoders.init()\nmotors.init()\n<mask token>\n\n\ndef adjustMotorPowers():\n global slave_power\n global en_left\n global en_right\n global kp\n error = en_right + en_left\n slave_power -= error / kp\n encoders.clear()\n time.sleep(0.1)\n\n\ndef readEncoder():\n global en_left\n global en_right\n global right_num_revs\n global left_num_revs\n new_en_left, new_en_right = encoders.read()\n if new_en_right != en_right or new_en_left != en_left:\n en_right = new_en_right\n right_num_revs += en_right\n en_left = new_en_left\n left_num_revs += en_left\n\n\nprint('Using settings file ' + SETTINGS_FILE + '.ini')\nif not os.path.exists(SETTINGS_FILE + '.ini'):\n print('Settings file does not exist, will be created')\n<mask token>\nprint('IMU Name: ' + imu.IMUName())\nif not imu.IMUInit():\n print('IMU Init Failed')\n sys.exit(1)\nelse:\n print('IMU Init Succeeded')\nimu.setSlerpPower(0.02)\nimu.setGyroEnable(True)\nimu.setAccelEnable(True)\nimu.setCompassEnable(True)\n<mask token>\nprint('Recommended Poll Interval: %dmS\\n' % poll_interval)\n<mask token>\nwhile True:\n if imu.IMURead():\n data = imu.getIMUData()\n fusionPose = data['fusionPose']\n x = math.degrees(fusionPose[0])\n y = math.degrees(fusionPose[1])\n z = math.degrees(fusionPose[2])\n if abs(x - old_x) > 0.3 or abs(y - old_y) > 0.3 or abs(z - old_z\n ) > 0.3:\n print('r: %f p: %f y: %f' % (math.degrees(fusionPose[0]), math.\n degrees(fusionPose[1]), math.degrees(fusionPose[2])))\n old_x = x\n old_y = y\n old_z = z\n time.sleep(poll_interval * 1.0 / 1000.0)\n try:\n print(str(right_num_revs) + ' ' + str(left_num_revs))\n motors.speed(slave_power, master_power)\n adjustMotorPowers()\n readEncoder()\n except KeyboardInterrupt:\n break\nmotors.cleanup()\n",
"step-3": "<mask token>\nsys.path.append('.')\n<mask token>\nmaster_power = 0.6\nslave_power = -0.6\nright_num_revs = 0\nleft_num_revs = 0\nkp = 0.5\nencoders.init()\nmotors.init()\nen_left, en_right = encoders.read()\nSETTINGS_FILE = 'RTIMULib'\n\n\ndef adjustMotorPowers():\n global slave_power\n global en_left\n global en_right\n global kp\n error = en_right + en_left\n slave_power -= error / kp\n encoders.clear()\n time.sleep(0.1)\n\n\ndef readEncoder():\n global en_left\n global en_right\n global right_num_revs\n global left_num_revs\n new_en_left, new_en_right = encoders.read()\n if new_en_right != en_right or new_en_left != en_left:\n en_right = new_en_right\n right_num_revs += en_right\n en_left = new_en_left\n left_num_revs += en_left\n\n\nprint('Using settings file ' + SETTINGS_FILE + '.ini')\nif not os.path.exists(SETTINGS_FILE + '.ini'):\n print('Settings file does not exist, will be created')\ns = RTIMU.Settings(SETTINGS_FILE)\nimu = RTIMU.RTIMU(s)\nprint('IMU Name: ' + imu.IMUName())\nif not imu.IMUInit():\n print('IMU Init Failed')\n sys.exit(1)\nelse:\n print('IMU Init Succeeded')\nimu.setSlerpPower(0.02)\nimu.setGyroEnable(True)\nimu.setAccelEnable(True)\nimu.setCompassEnable(True)\npoll_interval = imu.IMUGetPollInterval()\nprint('Recommended Poll Interval: %dmS\\n' % poll_interval)\nold_x = 0\nold_y = 0\nold_z = 0\nwhile True:\n if imu.IMURead():\n data = imu.getIMUData()\n fusionPose = data['fusionPose']\n x = math.degrees(fusionPose[0])\n y = math.degrees(fusionPose[1])\n z = math.degrees(fusionPose[2])\n if abs(x - old_x) > 0.3 or abs(y - old_y) > 0.3 or abs(z - old_z\n ) > 0.3:\n print('r: %f p: %f y: %f' % (math.degrees(fusionPose[0]), math.\n degrees(fusionPose[1]), math.degrees(fusionPose[2])))\n old_x = x\n old_y = y\n old_z = z\n time.sleep(poll_interval * 1.0 / 1000.0)\n try:\n print(str(right_num_revs) + ' ' + str(left_num_revs))\n motors.speed(slave_power, master_power)\n adjustMotorPowers()\n readEncoder()\n except KeyboardInterrupt:\n 
break\nmotors.cleanup()\n",
"step-4": "import sys, getopt\nsys.path.append('.')\nimport RTIMU\nimport os.path\nimport time\nimport math\nimport encoders\nimport motors\nmaster_power = 0.6\nslave_power = -0.6\nright_num_revs = 0\nleft_num_revs = 0\nkp = 0.5\nencoders.init()\nmotors.init()\nen_left, en_right = encoders.read()\nSETTINGS_FILE = 'RTIMULib'\n\n\ndef adjustMotorPowers():\n global slave_power\n global en_left\n global en_right\n global kp\n error = en_right + en_left\n slave_power -= error / kp\n encoders.clear()\n time.sleep(0.1)\n\n\ndef readEncoder():\n global en_left\n global en_right\n global right_num_revs\n global left_num_revs\n new_en_left, new_en_right = encoders.read()\n if new_en_right != en_right or new_en_left != en_left:\n en_right = new_en_right\n right_num_revs += en_right\n en_left = new_en_left\n left_num_revs += en_left\n\n\nprint('Using settings file ' + SETTINGS_FILE + '.ini')\nif not os.path.exists(SETTINGS_FILE + '.ini'):\n print('Settings file does not exist, will be created')\ns = RTIMU.Settings(SETTINGS_FILE)\nimu = RTIMU.RTIMU(s)\nprint('IMU Name: ' + imu.IMUName())\nif not imu.IMUInit():\n print('IMU Init Failed')\n sys.exit(1)\nelse:\n print('IMU Init Succeeded')\nimu.setSlerpPower(0.02)\nimu.setGyroEnable(True)\nimu.setAccelEnable(True)\nimu.setCompassEnable(True)\npoll_interval = imu.IMUGetPollInterval()\nprint('Recommended Poll Interval: %dmS\\n' % poll_interval)\nold_x = 0\nold_y = 0\nold_z = 0\nwhile True:\n if imu.IMURead():\n data = imu.getIMUData()\n fusionPose = data['fusionPose']\n x = math.degrees(fusionPose[0])\n y = math.degrees(fusionPose[1])\n z = math.degrees(fusionPose[2])\n if abs(x - old_x) > 0.3 or abs(y - old_y) > 0.3 or abs(z - old_z\n ) > 0.3:\n print('r: %f p: %f y: %f' % (math.degrees(fusionPose[0]), math.\n degrees(fusionPose[1]), math.degrees(fusionPose[2])))\n old_x = x\n old_y = y\n old_z = z\n time.sleep(poll_interval * 1.0 / 1000.0)\n try:\n print(str(right_num_revs) + ' ' + str(left_num_revs))\n motors.speed(slave_power, 
master_power)\n adjustMotorPowers()\n readEncoder()\n except KeyboardInterrupt:\n break\nmotors.cleanup()\n",
"step-5": "import sys, getopt\n\nsys.path.append('.')\nimport RTIMU\nimport os.path\nimport time\nimport math\nimport encoders\nimport motors\n\n#right is master, left is slave\nmaster_power = .6\nslave_power = -.6\nright_num_revs = 0\nleft_num_revs = 0\nkp = .5\n\nencoders.init()\nmotors.init()\n\nen_left, en_right = encoders.read()\n\nSETTINGS_FILE = \"RTIMULib\"\n\ndef adjustMotorPowers():\n\tglobal slave_power\n\tglobal en_left\n\tglobal en_right\n\tglobal kp\n\n\terror = en_right + en_left\n\tslave_power -= error/kp\n\tencoders.clear()\n\ttime.sleep(.1)\n\ndef readEncoder():\n\tglobal en_left\n\tglobal en_right\n\tglobal right_num_revs\n\tglobal left_num_revs\n\n\tnew_en_left, new_en_right = encoders.read()\n\tif(new_en_right != en_right or new_en_left != en_left):\n\t\ten_right = new_en_right\n\t\tright_num_revs += en_right\n\t\ten_left = new_en_left\n\t\tleft_num_revs += en_left\n\nprint(\"Using settings file \" + SETTINGS_FILE + \".ini\")\nif not os.path.exists(SETTINGS_FILE + \".ini\"):\n print(\"Settings file does not exist, will be created\")\n\ns = RTIMU.Settings(SETTINGS_FILE)\nimu = RTIMU.RTIMU(s)\n\nprint(\"IMU Name: \" + imu.IMUName())\n\nif (not imu.IMUInit()):\n print(\"IMU Init Failed\")\n sys.exit(1)\nelse:\n print(\"IMU Init Succeeded\")\n\n# this is a good time to set any fusion parameters\n\nimu.setSlerpPower(0.02)\nimu.setGyroEnable(True)\nimu.setAccelEnable(True)\nimu.setCompassEnable(True)\n\npoll_interval = imu.IMUGetPollInterval()\nprint(\"Recommended Poll Interval: %dmS\\n\" % poll_interval)\n\n\nold_x = 0\nold_y = 0\nold_z = 0\nwhile True:\n if imu.IMURead():\n # x, y, z = imu.getFusionData()\n # print(\"%f %f %f\" % (x,y,z))\n data = imu.getIMUData()\n fusionPose = data[\"fusionPose\"]\n x = math.degrees(fusionPose[0])\n y = math.degrees(fusionPose[1])\n z = math.degrees(fusionPose[2])\n \n if(abs(x-old_x)>0.3 or abs(y-old_y)>0.3 or abs(z-old_z)>0.3):\n print(\"r: %f p: %f y: %f\" % 
(math.degrees(fusionPose[0]),math.degrees(fusionPose[1]), math.degrees(fusionPose[2])))\n \n old_x = x\n old_y = y\n old_z = z\t\n time.sleep(poll_interval*1.0/1000.0)\n\t\n try:\n print(str(right_num_revs)+\" \"+str(left_num_revs))\n motors.speed(slave_power, master_power)\n adjustMotorPowers()\n readEncoder()\n except KeyboardInterrupt:\n break\n\nmotors.cleanup()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pathlib
import shutil
import os
import glob
import pandas as pd
import sqlalchemy as sqla

"""
SCRIPT TO FILL THE DATABASE FROM CSV ON MEGA IF LOSE DATA IN PARTICULAR DATE
"""

# Source directory holding the daily CSV exports to be re-imported.
PATH = "/home/thomas/Documents/TER/AJOUTER_CSV_BDD/"
folder = "test/"

# Every file found in the folder, in sorted order (Path.name keeps the
# extension; sorting makes the merge deterministic).
files_used = [entry.name for entry in sorted(pathlib.Path(PATH + folder).iterdir())]
totalFiles = len(files_used)
print(files_used)
print(totalFiles)

# Load each CSV (';'-separated, rows 1-5 of metadata skipped under the
# header, first column used as the index) and stack them into one frame.
frames = [
    pd.read_csv(PATH + folder + name, sep=';', skiprows=range(1, 6), index_col=0)
    for name in files_used
]
frame = pd.concat(frames)

frame.to_csv("merged.csv", sep=';')
print('FINISH MERGING FILES!')

# Archive the processed files so they are not merged twice.
folder_dest = 'dest'
for name in files_used:
    shutil.move(PATH + folder + name, PATH + folder_dest)
print('FINISH MOVING MERGED FILES!')

df = pd.read_csv('merged.csv', sep=';')

# Rebuild the timestamp: keep chars 0-9 and 11-18, dropping position 10
# (presumably the ISO 'T' separator -- yields "YYYY-MM-DD HH:MM:SS").
df['Date'] = df['Date'].str[0:10] + ' ' + df['Date'].str[11:19]
# Map the French export headers onto the database column names.
df = df.rename(columns={'Date': 'horodatage', 'Nom parking': 'nom', 'Type de parc': 'type_parking', "Horaires d'accès au public (pour les usagers non abonnés)": 'horaires', 'Code parking': 'code_parking', 'Type de compteur': 'type_compteur', 'Places disponibles': 'places_disponibles'})
df['horodatage'] = pd.to_datetime(df['horodatage'])
# Keep only the columns stored in the target table.
df = df.loc[:, ['code_parking', 'type_compteur', 'horodatage', 'places_disponibles']]
print('FINISH CLEAN DF!')
print(df)
df.info()

# Database credentials -- intentionally blank; fill in before running.
host = ''
port = ''
db = ''
user = ''
psw = ''
name_table = ''

# dialect+driver://username:password@host:port/database
engine = sqla.create_engine('mysql://' + user + ':' + psw + '@' + host + ':' + port + '/' + db)
print('CONNECTED!')

"""
df.to_sql(name_table,engine,if_exists='append',index=False,chunksize=1024,dtype={'id': sqla.Integer,'code_parking': sqla.String(255),'type_compteur': sqla.String(255),'horodatage': sqla.DateTime,'places_disponibles': sqla.Integer})
print('Finished export to Database!')
"""
|
normal
|
{
"blob_id": "795936dad7a9e51edf0df66207a43ac4d97e9023",
"index": 3781,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor path in sorted(contents):\n files_used.append(path.name)\n totalFiles += 1\nprint(files_used)\nprint(totalFiles)\n<mask token>\nfor filename in files_used:\n df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6\n ), index_col=0)\n li.append(df)\n<mask token>\nframe.to_csv('merged.csv', sep=';')\nprint('FINISH MERGING FILES!')\n<mask token>\nfor file in files_used:\n shutil.move(PATH + folder + file, PATH + folder_dest)\nprint('FINISH MOVING MERGED FILES!')\n<mask token>\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\n<mask token>\nprint('CONNECTED!')\n<mask token>\n",
"step-3": "<mask token>\nPATH = '/home/thomas/Documents/TER/AJOUTER_CSV_BDD/'\nfolder = 'test/'\nfiles_used = []\ntotalFiles = 0\ncontents = pathlib.Path(PATH + folder).iterdir()\nfor path in sorted(contents):\n files_used.append(path.name)\n totalFiles += 1\nprint(files_used)\nprint(totalFiles)\nli = []\nfor filename in files_used:\n df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6\n ), index_col=0)\n li.append(df)\nframe = pd.concat(li)\nframe.to_csv('merged.csv', sep=';')\nprint('FINISH MERGING FILES!')\nfolder_dest = 'dest'\nfor file in files_used:\n shutil.move(PATH + folder + file, PATH + folder_dest)\nprint('FINISH MOVING MERGED FILES!')\ndf = pd.read_csv('merged.csv', sep=';')\ndf['Date'] = df['Date'].str[0:10] + ' ' + df['Date'].str[11:19]\ndf = df.rename(columns={'Date': 'horodatage', 'Nom parking': 'nom',\n 'Type de parc': 'type_parking',\n \"Horaires d'accès au public (pour les usagers non abonnés)\": 'horaires',\n 'Code parking': 'code_parking', 'Type de compteur': 'type_compteur',\n 'Places disponibles': 'places_disponibles'})\ndf['horodatage'] = pd.to_datetime(df['horodatage'])\ndf = df.loc[:, ['code_parking', 'type_compteur', 'horodatage',\n 'places_disponibles']]\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\nhost = ''\nport = ''\ndb = ''\nuser = ''\npsw = ''\nname_table = ''\nengine = sqla.create_engine('mysql://' + user + ':' + psw + '@' + host +\n ':' + port + '/' + db)\nprint('CONNECTED!')\n<mask token>\n",
"step-4": "import pathlib\nimport shutil\nimport os\nimport glob\nimport pandas as pd\nimport sqlalchemy as sqla\n<mask token>\nPATH = '/home/thomas/Documents/TER/AJOUTER_CSV_BDD/'\nfolder = 'test/'\nfiles_used = []\ntotalFiles = 0\ncontents = pathlib.Path(PATH + folder).iterdir()\nfor path in sorted(contents):\n files_used.append(path.name)\n totalFiles += 1\nprint(files_used)\nprint(totalFiles)\nli = []\nfor filename in files_used:\n df = pd.read_csv(PATH + folder + filename, sep=';', skiprows=range(1, 6\n ), index_col=0)\n li.append(df)\nframe = pd.concat(li)\nframe.to_csv('merged.csv', sep=';')\nprint('FINISH MERGING FILES!')\nfolder_dest = 'dest'\nfor file in files_used:\n shutil.move(PATH + folder + file, PATH + folder_dest)\nprint('FINISH MOVING MERGED FILES!')\ndf = pd.read_csv('merged.csv', sep=';')\ndf['Date'] = df['Date'].str[0:10] + ' ' + df['Date'].str[11:19]\ndf = df.rename(columns={'Date': 'horodatage', 'Nom parking': 'nom',\n 'Type de parc': 'type_parking',\n \"Horaires d'accès au public (pour les usagers non abonnés)\": 'horaires',\n 'Code parking': 'code_parking', 'Type de compteur': 'type_compteur',\n 'Places disponibles': 'places_disponibles'})\ndf['horodatage'] = pd.to_datetime(df['horodatage'])\ndf = df.loc[:, ['code_parking', 'type_compteur', 'horodatage',\n 'places_disponibles']]\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\nhost = ''\nport = ''\ndb = ''\nuser = ''\npsw = ''\nname_table = ''\nengine = sqla.create_engine('mysql://' + user + ':' + psw + '@' + host +\n ':' + port + '/' + db)\nprint('CONNECTED!')\n<mask token>\n",
"step-5": "import pathlib\nimport shutil\nimport os\nimport glob\nimport pandas as pd\nimport sqlalchemy as sqla\n\n\"\"\"\nSCRIPT TO FILL THE DATABASE FROM CSV ON MEGA IF LOSE DATA IN PARTICULAR DATE\n\"\"\"\n\nPATH = \"/home/thomas/Documents/TER/AJOUTER_CSV_BDD/\"\nfolder = \"test/\"\nfiles_used = []\ntotalFiles = 0\ncontents = pathlib.Path(PATH+folder).iterdir()\nfor path in sorted(contents): # utiliser .stem -> nom sans extension fichier / .name -> nom fichier complet\n files_used.append(path.name)\n totalFiles+=1\n\nprint(files_used)\nprint(totalFiles)\n\nli = []\n\nfor filename in files_used:\n\tdf = pd.read_csv(PATH+folder+filename,sep=';',skiprows=range(1,6),index_col=0)\n\tli.append(df)\n\nframe = pd.concat(li)\n\n\nframe.to_csv(\"merged.csv\",sep=';')\nprint('FINISH MERGING FILES!')\n\n#Move all files used in folder dest\nfolder_dest = 'dest'\nfor file in files_used:\n shutil.move(PATH+folder+file, PATH+folder_dest)\nprint('FINISH MOVING MERGED FILES!')\n\n\ndf = pd.read_csv('merged.csv',sep=';') \n\n\ndf['Date'] = df['Date'].str[0:10] +' '+df['Date'].str[11:19]\ndf = df.rename(columns={'Date': 'horodatage','Nom parking': 'nom','Type de parc': 'type_parking',\"Horaires d'accès au public (pour les usagers non abonnés)\": 'horaires','Code parking': 'code_parking','Type de compteur': 'type_compteur', 'Places disponibles': 'places_disponibles'})\ndf['horodatage'] = pd.to_datetime(df['horodatage'])\ndf = df.loc[: ,['code_parking','type_compteur','horodatage','places_disponibles']]\nprint('FINISH CLEAN DF!')\nprint(df)\ndf.info()\n\nhost = ''\nport = ''\ndb = ''\nuser = ''\npsw = ''\nname_table = ''\n\n\n# dialect+driver://username:password@host:port/database\nengine = sqla.create_engine('mysql://'+user+':'+psw+'@'+host+':'+port+'/'+db)\nprint('CONNECTED!')\n\n\"\"\"\n\ndf.to_sql(name_table,engine,if_exists='append',index=False,chunksize=1024,dtype={'id': sqla.Integer,'code_parking': sqla.String(255),'type_compteur': sqla.String(255),'horodatage': 
sqla.DateTime,'places_disponibles': sqla.Integer})\nprint('Finished export to Database!')\n\"\"\"\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print("""
Employee Details:
Employee Id:""", id, '\nName:', employee[id][
'empname'], '\nDepartment:', employee[id]['Department'],
'\nDesignation:', DA[employee[id]['Designation Code']]['designation'],
'\nSalary:', employee[id]['Basic'] + employee[id]['HRA'] + employee[id]
['IT'])
<|reserved_special_token_1|>
employee = {(1001): {'empname': 'Ashish', 'Designation Code': 'E',
'Department': 'R&D', 'Basic': 20000, 'HRA': 8000, 'IT': 3000}, (1002):
{'empname': 'Sushma', 'Designation Code': 'C', 'Department': 'PM',
'Basic': 30000, 'HRA': 12000, 'IT': 9000}, (1003): {'empname': 'Rahul',
'Designation Code': 'K', 'Department': 'Account', 'Basic': 10000, 'HRA':
8000, 'IT': 1000}, (1004): {'empname': 'Chahat', 'Designation Code':
'R', 'Department': 'Front Desk', 'Basic': 12000, 'HRA': 6000, 'IT':
2000}, (1005): {'empname': 'Ranjan', 'Designation Code': 'M',
'Department': 'Engg', 'Basic': 50000, 'HRA': 20000, 'IT': 20000}, (1006
): {'empname': 'Suman', 'Designation Code': 'E', 'Department':
'Manufacturing', 'Basic': 23000, 'HRA': 9000, 'IT': 4400}, (1007): {
'empname': 'Tanmay', 'Designation Code': 'C', 'Department': 'PM',
'Basic': 29000, 'HRA': 12000, 'IT': 10000}}
DA = {'E': {'designation': 'Engineer', 'DA': 20000}, 'C': {'designation':
'Consultant', 'DA': 32000}, 'K': {'designation': 'Clerk', 'DA': 12000},
'R': {'designation': 'Receptionist', 'DA': 15000}, 'M': {'designation':
'Manager', 'DA': 40000}}
id = int(input('Enter Employee id: '))
print("""
Employee Details:
Employee Id:""", id, '\nName:', employee[id][
'empname'], '\nDepartment:', employee[id]['Department'],
'\nDesignation:', DA[employee[id]['Designation Code']]['designation'],
'\nSalary:', employee[id]['Basic'] + employee[id]['HRA'] + employee[id]
['IT'])
<|reserved_special_token_1|>
# Employee master records keyed by employee id.
employee = {
    1001: {"empname": "Ashish", "Designation Code": 'E', "Department": "R&D",
           "Basic": 20000, "HRA": 8000, "IT": 3000},
    1002: {"empname": "Sushma", "Designation Code": 'C', "Department": "PM",
           "Basic": 30000, "HRA": 12000, "IT": 9000},
    1003: {"empname": "Rahul", "Designation Code": 'K', "Department": "Account",
           "Basic": 10000, "HRA": 8000, "IT": 1000},
    1004: {"empname": "Chahat", "Designation Code": 'R', "Department": "Front Desk",
           "Basic": 12000, "HRA": 6000, "IT": 2000},
    1005: {"empname": "Ranjan", "Designation Code": 'M', "Department": "Engg",
           "Basic": 50000, "HRA": 20000, "IT": 20000},
    1006: {"empname": "Suman", "Designation Code": 'E', "Department": "Manufacturing",
           "Basic": 23000, "HRA": 9000, "IT": 4400},
    1007: {"empname": "Tanmay", "Designation Code": 'C', "Department": "PM",
           "Basic": 29000, "HRA": 12000, "IT": 10000},
}

# Designation lookup: code -> title and dearness allowance.
DA = {
    'E': {'designation': 'Engineer', 'DA': 20000},
    'C': {'designation': 'Consultant', 'DA': 32000},
    'K': {'designation': 'Clerk', 'DA': 12000},
    'R': {'designation': 'Receptionist', 'DA': 15000},
    'M': {'designation': 'Manager', 'DA': 40000},
}

emp_id = int(input("Enter Employee id: "))
record = employee[emp_id]
# Salary printed is Basic + HRA + IT. NOTE(review): the DA table is used
# for the title only and the 'DA' amount is never added to the salary --
# confirm that is intended.
print("\n\nEmployee Details:\nEmployee Id:", emp_id,
      "\nName:", record["empname"],
      "\nDepartment:", record["Department"],
      "\nDesignation:", DA[record["Designation Code"]]['designation'],
      "\nSalary:", record["Basic"] + record["HRA"] + record["IT"])
|
flexible
|
{
"blob_id": "fcb0fb439db77c4d57c449ec8f720dbd3fef5abc",
"index": 2871,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"\"\"\n\nEmployee Details:\nEmployee Id:\"\"\", id, '\\nName:', employee[id][\n 'empname'], '\\nDepartment:', employee[id]['Department'],\n '\\nDesignation:', DA[employee[id]['Designation Code']]['designation'],\n '\\nSalary:', employee[id]['Basic'] + employee[id]['HRA'] + employee[id]\n ['IT'])\n",
"step-3": "employee = {(1001): {'empname': 'Ashish', 'Designation Code': 'E',\n 'Department': 'R&D', 'Basic': 20000, 'HRA': 8000, 'IT': 3000}, (1002):\n {'empname': 'Sushma', 'Designation Code': 'C', 'Department': 'PM',\n 'Basic': 30000, 'HRA': 12000, 'IT': 9000}, (1003): {'empname': 'Rahul',\n 'Designation Code': 'K', 'Department': 'Account', 'Basic': 10000, 'HRA':\n 8000, 'IT': 1000}, (1004): {'empname': 'Chahat', 'Designation Code':\n 'R', 'Department': 'Front Desk', 'Basic': 12000, 'HRA': 6000, 'IT': \n 2000}, (1005): {'empname': 'Ranjan', 'Designation Code': 'M',\n 'Department': 'Engg', 'Basic': 50000, 'HRA': 20000, 'IT': 20000}, (1006\n ): {'empname': 'Suman', 'Designation Code': 'E', 'Department':\n 'Manufacturing', 'Basic': 23000, 'HRA': 9000, 'IT': 4400}, (1007): {\n 'empname': 'Tanmay', 'Designation Code': 'C', 'Department': 'PM',\n 'Basic': 29000, 'HRA': 12000, 'IT': 10000}}\nDA = {'E': {'designation': 'Engineer', 'DA': 20000}, 'C': {'designation':\n 'Consultant', 'DA': 32000}, 'K': {'designation': 'Clerk', 'DA': 12000},\n 'R': {'designation': 'Receptionist', 'DA': 15000}, 'M': {'designation':\n 'Manager', 'DA': 40000}}\nid = int(input('Enter Employee id: '))\nprint(\"\"\"\n\nEmployee Details:\nEmployee Id:\"\"\", id, '\\nName:', employee[id][\n 'empname'], '\\nDepartment:', employee[id]['Department'],\n '\\nDesignation:', DA[employee[id]['Designation Code']]['designation'],\n '\\nSalary:', employee[id]['Basic'] + employee[id]['HRA'] + employee[id]\n ['IT'])\n",
"step-4": "# Employee Table's Dictionary\nemployee={\n 1001:{\n \"empname\":\"Ashish\",\n \"Designation Code\":'E',\n \"Department\":\"R&D\",\n \"Basic\": 20000,\n \"HRA\": 8000,\n \"IT\": 3000\n },\n 1002:{\n \"empname\":\"Sushma\",\n \"Designation Code\":'C',\n \"Department\":\"PM\",\n \"Basic\": 30000,\n \"HRA\": 12000,\n \"IT\": 9000\n },\n 1003:{\n \"empname\":\"Rahul\",\n \"Designation Code\":'K',\n \"Department\":\"Account\",\n \"Basic\": 10000,\n \"HRA\": 8000,\n \"IT\": 1000\n },\n 1004:{\n \"empname\":\"Chahat\",\n \"Designation Code\":'R',\n \"Department\":\"Front Desk\",\n \"Basic\": 12000,\n \"HRA\": 6000,\n \"IT\": 2000\n },\n 1005:{\n \"empname\":\"Ranjan\",\n \"Designation Code\":'M',\n \"Department\":\"Engg\",\n \"Basic\": 50000,\n \"HRA\": 20000,\n \"IT\": 20000\n },\n 1006:{\n \"empname\":\"Suman\",\n \"Designation Code\":'E',\n \"Department\":\"Manufacturing\",\n \"Basic\": 23000,\n \"HRA\": 9000,\n \"IT\": 4400\n },\n 1007:{\n \"empname\":\"Tanmay\",\n \"Designation Code\":'C',\n \"Department\":\"PM\",\n \"Basic\": 29000,\n \"HRA\": 12000,\n \"IT\": 10000\n }\n}\n#DA Table's Dictionary\nDA={\n 'E':{'designation':'Engineer','DA':20000},\n 'C':{'designation':'Consultant','DA':32000},\n 'K':{'designation':'Clerk','DA':12000},\n 'R':{'designation':'Receptionist','DA':15000},\n 'M':{'designation':'Manager','DA':40000}\n}\nid=int(input(\"Enter Employee id: \"))\nprint(\"\\n\\nEmployee Details:\\nEmployee Id:\",id,\n\"\\nName:\",employee[id][\"empname\"],\n\"\\nDepartment:\",employee[id][\"Department\"],\n\"\\nDesignation:\",DA[employee[id][\"Designation Code\"]]['designation'],\n\"\\nSalary:\",employee[id][\"Basic\"]+employee[id][\"HRA\"]+employee[id][\"IT\"]) ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from random import randrange
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
from rest_framework import serializers
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from .models import EmailValidation
from ..emails.models import Email
from ..users.serializers import FullUserSerializer
# Resolve the project's active user model once at import time (supports a
# custom AUTH_USER_MODEL); all lookups below go through this reference.
User = get_user_model()
def user_with_email_not_existing(email):
    """Field validator: accept *email* only if no user owns it yet.

    Returns the email unchanged when the address is free; raises
    ValidationError('This email is taken') when a user already exists.
    """
    try:
        User.objects.get(email=email)
    except User.DoesNotExist:
        # Address is unused -> acceptable for registration.
        return email
    raise ValidationError(message='This email is taken')
def email_does_exist(email):
    """Field validator: require that *email* belongs to an existing user.

    Returns the email unchanged on success; raises
    ValidationError('User does not exist!') otherwise.
    """
    try:
        User.objects.get(email=email)
    except User.DoesNotExist:
        raise ValidationError(message='User does not exist!')
    return email
class CreatePasswordEmailValidationSerializer(serializers.Serializer):
    """Issues a validation-code e-mail for a password reset.

    The address must belong to an existing user (see email_does_exist).
    save() creates an outgoing Email plus the matching EmailValidation
    record, both carrying the same freshly generated code.
    """

    email = serializers.EmailField(validators=[email_does_exist])

    def save(self):
        # Eight-digit code (randrange's upper bound is exclusive).
        code = randrange(10000000, 100000000)
        target = self.validated_data.get('email')
        # NOTE(review): 'type' is not a declared serializer field, so this
        # presumably comes back as None here -- confirm against the models.
        kind = self.validated_data.get('type')
        outgoing = Email.objects.create(
            validation_code=code,
            to=target,
            type=kind,
        )
        return EmailValidation.objects.create(
            validation_code=code,
            email=outgoing,
            type=kind,
        )
class CreateEmailValidationSerializer(serializers.Serializer):
    """Issues a validation-code e-mail for a new registration.

    The address must NOT belong to any existing user (see
    user_with_email_not_existing). save() creates an outgoing Email plus
    the matching EmailValidation record with one shared code.
    """

    email = serializers.EmailField(validators=[user_with_email_not_existing])

    def save(self):
        # Eight-digit code (randrange's upper bound is exclusive).
        code = randrange(10000000, 100000000)
        target = self.validated_data.get('email')
        # NOTE(review): 'type' is not a declared serializer field, so this
        # presumably comes back as None here -- confirm against the models.
        kind = self.validated_data.get('type')
        outgoing = Email.objects.create(
            validation_code=code,
            to=target,
            type=kind,
        )
        return EmailValidation.objects.create(
            validation_code=code,
            email=outgoing,
            type=kind,
        )
class EmailSerializer(serializers.ModelSerializer):
    """Exposes only the e-mail address of an EmailValidation record."""
    # Declared explicitly on top of the model-derived field -- presumably
    # to force plain EmailField validation; confirm against the model.
    email = serializers.EmailField()
    class Meta:
        model = EmailValidation
        fields = ['email']
class EmailValidationSerializer(serializers.ModelSerializer):
    """Payload pairing an e-mail address with its validation code."""
    # Explicit field declaration; validation_code comes from the model.
    email = serializers.EmailField()
    class Meta:
        model = EmailValidation
        fields = ['email', 'validation_code']
class EmailValidationPasswordSerializer(serializers.ModelSerializer):
    """Payload for confirming a reset: address, code, and the new password."""
    email = serializers.EmailField()
    # NOTE(review): the password is not marked write_only=True here, so it
    # would be echoed back on serialisation -- confirm this is intended.
    password = serializers.CharField(max_length=200)
    class Meta:
        model = EmailValidation
        fields = ['email', 'validation_code', 'password']
class NewUserSerializer(serializers.ModelSerializer):
    """Minimal payload carrying just an e-mail address for a new user."""
    # NOTE(review): identical shape to EmailSerializer above -- possibly
    # redundant; kept as a distinct name for API clarity.
    email = serializers.EmailField()
    class Meta:
        model = EmailValidation
        fields = ['email']
class TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):
    """JWT pair serializer that also embeds the serialised user profile."""

    def validate(self, attrs):
        data = super().validate(attrs)
        token = self.get_token(self.user)
        # Overwrite the token pair with the one minted here and attach the
        # full profile so the client gets everything in a single response.
        data.update(
            refresh=str(token),
            access=str(token.access_token),
            user=FullUserSerializer(self.user).data,
        )
        return data
|
normal
|
{
"blob_id": "9f34bf3a0bb24db428b7af1a354aec1d3a72df98",
"index": 359,
"step-1": "<mask token>\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n <mask token>\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n 
data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n",
"step-2": "<mask token>\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = 
self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n",
"step-3": "<mask token>\n\n\ndef user_with_email_not_existing(email):\n try:\n User.objects.get(email=email)\n raise ValidationError(message='This email is taken')\n except User.DoesNotExist:\n return email\n\n\ndef email_does_exist(email):\n try:\n User.objects.get(email=email)\n return email\n except User.DoesNotExist:\n raise ValidationError(message='User does not exist!')\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 
'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n",
"step-4": "<mask token>\nUser = get_user_model()\n\n\ndef user_with_email_not_existing(email):\n try:\n User.objects.get(email=email)\n raise ValidationError(message='This email is taken')\n except User.DoesNotExist:\n return email\n\n\ndef email_does_exist(email):\n try:\n User.objects.get(email=email)\n return email\n except User.DoesNotExist:\n raise ValidationError(message='User does not exist!')\n\n\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[email_does_exist])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass CreateEmailValidationSerializer(serializers.Serializer):\n email = serializers.EmailField(validators=[user_with_email_not_existing])\n\n def save(self):\n validation_code = randrange(10000000, 100000000)\n email = Email.objects.create(validation_code=validation_code, to=\n self.validated_data.get('email'), type=self.validated_data.get(\n 'type'))\n new_validation = EmailValidation.objects.create(validation_code=\n validation_code, email=email, type=self.validated_data.get('type'))\n return new_validation\n\n\nclass EmailSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass EmailValidationSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email', 'validation_code']\n\n\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n password = serializers.CharField(max_length=200)\n\n\n class Meta:\n model = EmailValidation\n fields 
= ['email', 'validation_code', 'password']\n\n\nclass NewUserSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n\n class Meta:\n model = EmailValidation\n fields = ['email']\n\n\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['access'] = str(refresh.access_token)\n data['user'] = FullUserSerializer(self.user).data\n return data\n",
"step-5": "from random import randrange\r\n\r\nfrom django.core.exceptions import ValidationError\r\nfrom django.contrib.auth import get_user_model\r\n\r\nfrom rest_framework import serializers\r\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\r\n\r\nfrom .models import EmailValidation\r\nfrom ..emails.models import Email\r\nfrom ..users.serializers import FullUserSerializer\r\n\r\nUser = get_user_model()\r\n\r\n\r\ndef user_with_email_not_existing(email):\r\n try:\r\n User.objects.get(email=email)\r\n raise ValidationError(message='This email is taken')\r\n except User.DoesNotExist:\r\n return email\r\n\r\n\r\ndef email_does_exist(email):\r\n try:\r\n User.objects.get(email=email)\r\n return email\r\n except User.DoesNotExist:\r\n raise ValidationError(message='User does not exist!')\r\n\r\n\r\nclass CreatePasswordEmailValidationSerializer(serializers.Serializer):\r\n email = serializers.EmailField(validators=[email_does_exist])\r\n\r\n def save(self):\r\n validation_code = randrange(10000000, 100000000)\r\n email = Email.objects.create(\r\n validation_code=validation_code,\r\n to=self.validated_data.get('email'),\r\n type=self.validated_data.get('type')\r\n )\r\n new_validation = EmailValidation.objects.create(\r\n validation_code=validation_code,\r\n email=email,\r\n type=self.validated_data.get('type'))\r\n return new_validation\r\n\r\n\r\nclass CreateEmailValidationSerializer(serializers.Serializer):\r\n email = serializers.EmailField(validators=[user_with_email_not_existing])\r\n\r\n def save(self):\r\n validation_code = randrange(10000000, 100000000)\r\n email = Email.objects.create(\r\n validation_code=validation_code,\r\n to=self.validated_data.get('email'),\r\n type=self.validated_data.get('type')\r\n )\r\n new_validation = EmailValidation.objects.create(\r\n validation_code=validation_code,\r\n email=email,\r\n type=self.validated_data.get('type'))\r\n return new_validation\r\n\r\n\r\nclass 
EmailSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email']\r\n\r\n\r\nclass EmailValidationSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email', 'validation_code']\r\n\r\n\r\nclass EmailValidationPasswordSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n password = serializers.CharField(max_length=200)\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email', 'validation_code', 'password']\r\n\r\n\r\nclass NewUserSerializer(serializers.ModelSerializer):\r\n email = serializers.EmailField()\r\n\r\n class Meta:\r\n model = EmailValidation\r\n fields = ['email']\r\n\r\n\r\nclass TokenObtainPairViewWithUserProfileSerializer(TokenObtainPairSerializer):\r\n def validate(self, attrs):\r\n data = super().validate(attrs)\r\n\r\n refresh = self.get_token(self.user)\r\n\r\n data['refresh'] = str(refresh)\r\n data['access'] = str(refresh.access_token)\r\n\r\n data['user'] = FullUserSerializer(self.user).data\r\n return data\r\n",
"step-ids": [
15,
16,
18,
19,
21
]
}
|
[
15,
16,
18,
19,
21
] |
# Generated by Django 2.2.10 on 2020-05-06 14:43
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.2.10): adds an integer "plan
    # coefficient" field to Employee, defaulting to 100 (percent) and
    # capped at 200 by a MaxValueValidator.

    dependencies = [
        ('planner', '0023_auto_20191226_1330'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='coefficient',
            # verbose_name is Ukrainian for "plan coefficient"; do not translate,
            # it is user-facing text.
            field=models.PositiveSmallIntegerField(default=100, validators=[django.core.validators.MaxValueValidator(200)], verbose_name='Коєфіцієнт плану'),
        ),
    ]
|
normal
|
{
"blob_id": "c7558486fc50623f6e64b58668153b75bb6149b9",
"index": 6613,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('planner', '0023_auto_20191226_1330')]\n operations = [migrations.AddField(model_name='employee', name=\n 'coefficient', field=models.PositiveSmallIntegerField(default=100,\n validators=[django.core.validators.MaxValueValidator(200)],\n verbose_name='Коєфіцієнт плану'))]\n",
"step-4": "import django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('planner', '0023_auto_20191226_1330')]\n operations = [migrations.AddField(model_name='employee', name=\n 'coefficient', field=models.PositiveSmallIntegerField(default=100,\n validators=[django.core.validators.MaxValueValidator(200)],\n verbose_name='Коєфіцієнт плану'))]\n",
"step-5": "# Generated by Django 2.2.10 on 2020-05-06 14:43\n\nimport django.core.validators\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('planner', '0023_auto_20191226_1330'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='employee',\n name='coefficient',\n field=models.PositiveSmallIntegerField(default=100, validators=[django.core.validators.MaxValueValidator(200)], verbose_name='Коєфіцієнт плану'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from datetime import date
import config
import datetime
import numpy
import pandas
import data_sources
from data_sources import POPULATION, convert_to_ccaa_iso
import material_line_chart
import ministry_datasources
HEADER = '''<html>
<head>
<title>{}</title>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
'''
HEADER2 = '''
google.charts.load('current', {'packages':['line', 'corechart', 'controls']});
'''
DESCRIPTIONS_CCAA = {
'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',
'hospitalized': 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',
'deceased': 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',
}
DESCRIPTIONS_SPA = {
'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',
'hospitalized': 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',
'deceased': 'Número medio de fallecidos (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',
}
DESCRIPTIONS = {True: DESCRIPTIONS_SPA, False: DESCRIPTIONS_CCAA}
def calc_accumulated_indicende_per_ccaa(report, num_days=15):
    """Compute the per-CCAA accumulated incidence per 100k inhabitants.

    For each autonomous community (CCAA) and each date, sums the cases
    reported in the previous *num_days* days and normalizes by the CCAA
    population (per 100,000 inhabitants).

    report: dict with a 'dframe' entry whose 'num_casos' column is indexed
        by a MultiIndex that includes a CCAA level and a 'fecha' level
        (assumed from the data_sources helpers used here — confirm upstream).
    Returns a dict mapping CCAA id -> pandas.Series indexed by date.
    """
    ccaas = data_sources.get_ccaas_in_dset(report)
    dframe = report['dframe']
    num_cases = dframe['num_casos']
    ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)
    index = num_cases.index.to_frame(index=False)
    time_delta = numpy.timedelta64(num_days, 'D')
    accumulated_cases_by_ccaa = {}
    for ccaa in ccaas:
        # Boolean mask selecting this CCAA's rows; .values drops the index so
        # positional masking works against the MultiIndexed series.
        mask = index[ccaa_column] == ccaa
        mask = mask.values
        num_cases_for_this_ccaa = num_cases[mask]
        this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)
        this_ccaa_dates = this_ccaa_index['fecha']
        num_accumulated_cases = []
        valid_dates = []
        for date in this_ccaa_dates:
            # Sum cases in the half-open window (date - num_days, date].
            date0 = date - time_delta
            mask = numpy.logical_and(this_ccaa_dates > date0,
                                     this_ccaa_dates <= date)
            mask = mask.values
            # Skip dates without a full window of daily data (start of series).
            if numpy.sum(mask) < num_days:
                continue
            num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[mask]))
            valid_dates.append(date)
        num_accumulated_cases = pandas.Series(num_accumulated_cases, index=valid_dates)
        # Normalize to cases per 100,000 inhabitants.
        num_accumulated_cases = num_accumulated_cases / data_sources.POPULATION[ccaa] * 1e5
        accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases
    return accumulated_cases_by_ccaa
def _create_js_chart(dframe, date_range, js_function_name, div_id, title, width, height):
    """Build the JS snippet for a material line chart, one series per CCAA.

    dframe is CCAA-by-date; date_range, when given, keeps only dates in
    (date_range[0], date_range[1]].
    """
    sorted_ccaas = sorted(dframe.index)
    chart_dates = list(dframe.columns)
    if date_range is not None:
        low, high = date_range[0], date_range[1]
        chart_dates = [when for when in chart_dates if low < when <= high]
    header = [('date', 'fecha')]
    header.extend(('number', data_sources.convert_to_ccaa_name(ccaa))
                  for ccaa in sorted_ccaas)
    rows = [[when.date()] + [dframe.loc[ccaa, when] for ccaa in sorted_ccaas]
            for when in chart_dates]
    return material_line_chart.create_chart_js(js_function_name, div_id, title,
                                               header, rows,
                                               width=width, height=height)
def _write_table_from_series(series):
html = '<table>'
for index, value in zip(series.index, series.values):
html += f'<tr><td>{index}</td><td>{value}</td></tr>\n'
html += '</table>'
return html
def is_desired_ccaa(ccaa, desired_ccaas):
    """Return True when no CCAA filter is set or *ccaa* is in the filter.

    *desired_ccaas* holds ISO codes; *ccaa* is converted before the test.
    """
    if desired_ccaas is None:
        return True
    return data_sources.convert_to_ccaa_iso(ccaa) in desired_ccaas
def _create_table_for_chart_from_dict(dict_data, desired_ccaas):
    """Pivot {ccaa: date-indexed series} into per-date chart rows.

    Returns (table, ccaas, dates) where each table row is
    [date, value_for_ccaa_1, value_for_ccaa_2, ...] with CCAAs sorted and
    filtered by *desired_ccaas*. Dates are taken from one of the series
    (all series are assumed to share the same dates — confirm upstream).
    """
    some_series = list(dict_data.values())[0]
    kept_ccaas = [ccaa for ccaa in sorted(dict_data.keys())
                  if is_desired_ccaa(ccaa, desired_ccaas)]
    dates = list(some_series.index)
    table = [[when.date()] + [dict_data[ccaa][when] for ccaa in kept_ccaas]
             for when in dates]
    return table, kept_ccaas, dates
def _create_accumulate_indicence_table_for_spa_chart_from_report(report, num_days):
    """Build the whole-Spain accumulated-incidence chart table.

    Aggregates cases over all CCAAs (groupby on the date level of the
    MultiIndex), then for each date sums the previous *num_days* days and
    normalizes by the total Spanish population (per 100,000 inhabitants).

    Returns (table, dates): table is a list of (datetime.date, incidence)
    pairs, dates the matching list of datetimes.
    """
    dframe = report['dframe']
    time_delta = numpy.timedelta64(num_days, 'D')
    # level=1 is assumed to be the 'fecha' level of the MultiIndex — confirm
    # against the data_sources loader.
    num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']
    tot_pop = sum(data_sources.POPULATION.values())
    dates = numpy.array(num_cases.index)
    num_accumulated_cases = []
    valid_dates = []
    for date in dates:
        # Window (date - num_days, date]; skip dates without a full window.
        date0 = date - time_delta
        mask = numpy.logical_and(dates > date0,
                                 dates <= date)
        if numpy.sum(mask) < num_days:
            continue
        num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop * 1e5)
        # Convert numpy.datetime64 (ns resolution) to a stdlib datetime:
        # .astype('O') yields the epoch nanoseconds, /1e9 gives seconds.
        date = datetime.datetime.fromtimestamp(date.astype('O') / 1e9)
        valid_dates.append(date)
    table = [(date.date(), cases) for date, cases in zip(valid_dates, num_accumulated_cases)]
    dates = valid_dates
    return table, dates
def _create_table_for_chart_from_dframe(dframe, desired_ccaas):
    """Pivot a CCAA-by-date dframe into per-date chart rows.

    Returns (table, ccaas, dates); each table row is
    [date, value_for_ccaa_1, ...] with CCAAs sorted and filtered by
    *desired_ccaas*.
    """
    kept_ccaas = [ccaa for ccaa in sorted(dframe.index)
                  if is_desired_ccaa(ccaa, desired_ccaas)]
    dates = list(dframe.columns)
    table = [[when.date()] + [dframe.loc[ccaa, when] for ccaa in kept_ccaas]
             for when in dates]
    return table, kept_ccaas, dates
def _create_table_for_chart_from_series(series):
table = [(date.date(), value) for date, value in zip(series.index, series.values)]
return table
def write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False):
    """Generate the full Covid situation HTML report and write it to *out_path*.

    out_path: pathlib.Path-like destination for the HTML file.
    date_range: [min_date, max_date] used to initialize the chart range
        sliders. NOTE(review): although the default is None, the code below
        indexes date_range unconditionally, so passing None raises
        TypeError — confirm intended contract with callers.
    desired_ccaas: optional list of CCAA ISO codes to restrict the report to.
    spa_report: True for a single whole-Spain report; mutually exclusive
        with desired_ccaas.
    """
    if spa_report and desired_ccaas:
        raise ValueError('choose one, either spa or ccaa report')
    if desired_ccaas and len(desired_ccaas) == 1:
        only_one_ccaa = True
        ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])
    else:
        only_one_ccaa = False
    # Most recent downloaded CCAA report and most recent deaths dataset.
    ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()
    report = ccaa_info[-1]
    accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)
    deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(),
                    key=lambda x: x['max_date'])[-1]
    # Accumulated incidence table: whole-Spain or per-CCAA variant.
    if spa_report:
        accumulated_incidence_table, dates = _create_accumulate_indicence_table_for_spa_chart_from_report(report, 15)
    else:
        accumulated_incidence_table, ccaas, dates = _create_table_for_chart_from_dict(accumulaed_incidence, desired_ccaas)
    title = 'Resumen situación Covid-19'
    if spa_report:
        title += ' España'
    elif only_one_ccaa:
        title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)
    else:
        title += ' por comunidad autónoma'
    html = HEADER.format(title)
    html += HEADER2
    # --- accumulated incidence chart ---------------------------------------
    js_function_name = 'drawAccumulatedCasesIncidence'
    columns = [('date', 'fecha')]
    if spa_report:
        columns.extend([('number', 'España')])
    else:
        columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])
    title = 'Incidencia acumulada por 100.000 hab. (15 días)'
    # Pixel sizes shared by all charts; the dashboard stacks chart +
    # range slider.
    width =900
    height = 800
    rangeslider_height = 50
    js_sizes = {'dashboard': {'height': height + rangeslider_height, 'width': width},
                'chart': {'height': height, 'width': width},
                'rangeslider': {'height': rangeslider_height, 'width': 600},
                }
    # CSS-style sizes ('<n>px') derived from the numeric JS sizes.
    div_sizes = {}
    for html_element in js_sizes:
        div_sizes[html_element] = {}
        div_sizes[html_element]['height'] = f"{js_sizes[html_element]['height']}px"
        div_sizes[html_element]['width'] = f"{js_sizes[html_element]['width']}px"
    slider_config = {'column_controlled': 'fecha',
                     'min_value': dates[0],
                     'max_value': dates[-1],
                     'min_init_value': date_range[0],
                     'max_init_value': date_range[-1]}
    div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',
                                 'chart': 'accumulated_cases_chart',
                                 'rangeslider': 'accumulated_cases_rangeslider'}
    html += material_line_chart.create_chart_js_with_slider(js_function_name,
                                                            slider_config,
                                                            div_ids_accumulated_cases,
                                                            title,
                                                            columns,
                                                            accumulated_incidence_table,
                                                            sizes=js_sizes)
    # --- hospitalized / ICU / deceased charts ------------------------------
    js_function_names = {'hospitalized': 'drawHospitalized',
                         'icu': 'drawICU',
                         'deceased': 'drawDeceased'}
    div_ids = {'hospitalized': 'hospitalized_chart',
               'icu': 'icu_chart',
               'deceased': 'deceased_chart'
               }
    titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
              'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
              'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
              }
    # NOTE(review): dead code — the hospitalized/ICU chart generation below
    # is disabled with `if False:` (the ministry rolling-mean source was
    # apparently turned off). Kept as-is; consider removing.
    if False:
        if spa_report:
            rolling_means = ministry_datasources.get_ministry_rolling_mean_spa()
            titles = {'hospitalized': 'Num. hospitalizaciones. (media 7 días)',
                      'icu': 'Num. ingresos UCI. (media 7 días)',
                      'deceased': 'Num. fallecidos. (media 7 días)'
                      }
        else:
            rolling_means = ministry_datasources.get_ministry_rolling_mean()
            titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',
                      'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',
                      'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'
                      }
    div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard',
                            'chart': 'hospitalized_chart',
                            'rangeslider': 'hospitalized_rangeslider'}
    div_ids_deceased = {'dashboard': 'deceased_dashboard',
                        'chart': 'deceased_chart',
                        'rangeslider': 'deceased_rangeslider'}
    div_ids = {'hospitalized': div_ids_hospitalized,
               'deceased': div_ids_deceased,
               }
    # NOTE(review): more disabled hospitalized-chart code.
    if False:
        dframe = rolling_means['hospitalized']
        if spa_report:
            columns = [('date', 'fecha'), ('number', 'España')]
            table = _create_table_for_chart_from_series(dframe)
        else:
            populations = [data_sources.get_population(ccaa) for ccaa in dframe.index]
            dframe = dframe.divide(populations, axis=0) * 1e5
            table, ccaas, _ = _create_table_for_chart_from_dframe(dframe, desired_ccaas)
            columns = [('date', 'fecha')]
            columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
        key = 'hospitalized'
        hospitalized_slider_config = {'column_controlled': 'fecha',
                                      'min_value': dates[0],
                                      'max_value': dates[-1],
                                      'min_init_value': date_range[0],
                                      'max_init_value': datetime.datetime.now()}
        html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
                                                                hospitalized_slider_config,
                                                                div_ids[key],
                                                                title=titles[key],
                                                                columns=columns,
                                                                data_table=table,
                                                                sizes=js_sizes)
    # --- deceased chart: 7-day centered rolling mean -----------------------
    num_days = 7
    key = 'deceased'
    deaths_dframe = deaths['dframe']
    if spa_report:
        spa_deaths = deaths_dframe.sum(axis=0)
        deaths_rolling_mean = spa_deaths.rolling(num_days, center=True, min_periods=num_days).mean().dropna()
        table = _create_table_for_chart_from_series(deaths_rolling_mean)
        columns = [('date', 'fecha'), ('number', 'España')]
    else:
        deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True, min_periods=num_days, axis=1).mean()
        deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')
        # Normalize deaths per 100,000 inhabitants of each CCAA.
        populations = [data_sources.get_population(ccaa) for ccaa in deaths_rolling_mean.index]
        deaths_rolling_mean = deaths_rolling_mean.divide(populations, axis=0) * 1e5
        table, ccaas, _ = _create_table_for_chart_from_dframe(deaths_rolling_mean, desired_ccaas)
        columns = [('date', 'fecha')]
        columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])
    html += material_line_chart.create_chart_js_with_slider(js_function_names[key],
                                                            slider_config,
                                                            div_ids[key],
                                                            title=titles[key],
                                                            columns=columns,
                                                            data_table=table,
                                                            sizes=js_sizes)
    # --- page body: intro paragraphs, totals, death-rate table, charts -----
    html += '    </script>\n  </head>\n  <body>\n'
    today = datetime.datetime.now()
    html += '<p><a href="../">Menu</a></p>'
    html += f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'
    html += f'<p>Este informe está generado para uso personal por <a href="https://twitter.com/jblanca42">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'
    html += f'<p>El código utilizado para generarlo se encuentra en <a href="https://github.com/JoseBlanca/seguimiento_covid">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'
    if desired_ccaas:
        index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa(ccaa, desired_ccaas)]
        tot_deaths = deaths['dframe'].loc[index, :].values.sum()
    else:
        # 'unassinged_deaths' (sic) is the key used by the ministry dataset
        # loader for deaths not attributed to any CCAA.
        tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths']
    html += f'<p>Número total de fallecidos: {tot_deaths}</p>'
    if spa_report:
        # "1 in N people have died" figure for the whole country.
        death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)
        html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'
    elif desired_ccaas and len(desired_ccaas) == 1:
        death_rate = round(data_sources.get_population(desired_ccaas[0]) / tot_deaths)
        html += f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'
    else:
        # Per-CCAA "1 in N" table, sorted ascending (worst-hit first).
        deaths_per_ccaa = deaths['dframe'].sum(axis=1)
        populations = [data_sources.get_population(ccaa) for ccaa in deaths_per_ccaa.index]
        populations = pandas.Series(populations, index=deaths_per_ccaa.index)
        death_rate = (populations / deaths_per_ccaa).round().sort_values().astype(int)
        html += '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'
        html += _write_table_from_series(death_rate)
    # NOTE(review): disabled hospitalized-chart divs (matches the disabled
    # JS generation above).
    if False:
        for key in ['hospitalized']:
            html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
            html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
                                                                      sizes=div_sizes)
    html += f"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\n"
    html += material_line_chart.create_chart_with_slider_divs(div_ids_accumulated_cases,
                                                              sizes=div_sizes)
    for key in ['deceased']:
        html += f"<p>{DESCRIPTIONS[spa_report][key]}</p>\n"
        html += material_line_chart.create_chart_with_slider_divs(div_ids[key],
                                                                  sizes=div_sizes)
    html += '  </body>\n</html>'
    out_path.open('wt').write(html)
if __name__ == '__main__':
    # Default report window: from 40 days ago up to 10 days ago (the most
    # recent days are excluded because their data is still incomplete).
    ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)
    forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)
    # NOTE(review): first_date is currently unused — likely a leftover
    # alternative window start.
    first_date = datetime.datetime(2020, 9, 1)
    out_dir = config.HTML_REPORTS_DIR
    out_dir.mkdir(exist_ok=True)
    out_path = out_dir / 'situacion_covid_por_ca.html'
    write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])
|
normal
|
{
"blob_id": "4c5b3042a785342d6ef06fdc882e0dcf91a787c3",
"index": 7816,
"step-1": "<mask token>\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos']\n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n time_delta = numpy.timedelta64(num_days, 'D')\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0, \n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[\n mask]))\n valid_dates.append(date)\n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=\n valid_dates)\n num_accumulated_cases = (num_accumulated_cases / data_sources.\n POPULATION[ccaa] * 100000.0)\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\n<mask token>\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa\n ) in desired_ccaas\n\n\n<mask token>\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index,\n series.values)]\n return table\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos']\n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n time_delta = numpy.timedelta64(num_days, 'D')\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0, \n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[\n mask]))\n valid_dates.append(date)\n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=\n valid_dates)\n num_accumulated_cases = (num_accumulated_cases / data_sources.\n POPULATION[ccaa] * 100000.0)\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\ndef _create_js_chart(dframe, date_range, js_function_name, div_id, title,\n width, height):\n table = []\n ccaas = sorted(dframe.index)\n dates = list(dframe.columns)\n if date_range is not None:\n dates = [date for date in dates if date > date_range[0] and date <=\n date_range[1]]\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n value = dframe.loc[ccaa, date]\n row.append(value)\n table.append(row)\n js_function_name = js_function_name\n html = material_line_chart.create_chart_js(js_function_name, div_id,\n title, columns, table, width=width, height=height)\n return 
html\n\n\ndef _write_table_from_series(series):\n html = '<table>'\n for index, value in zip(series.index, series.values):\n html += f'<tr><td>{index}</td><td>{value}</td></tr>\\n'\n html += '</table>'\n return html\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa\n ) in desired_ccaas\n\n\ndef _create_table_for_chart_from_dict(dict_data, desired_ccaas):\n one_data = list(dict_data.values())[0]\n ccaas = sorted(dict_data.keys())\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(one_data.index)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dict_data[ccaa][date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_accumulate_indicence_table_for_spa_chart_from_report(report,\n num_days):\n dframe = report['dframe']\n time_delta = numpy.timedelta64(num_days, 'D')\n num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']\n tot_pop = sum(data_sources.POPULATION.values())\n dates = numpy.array(num_cases.index)\n num_accumulated_cases = []\n valid_dates = []\n for date in dates:\n date0 = date - time_delta\n mask = numpy.logical_and(dates > date0, dates <= date)\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop *\n 100000.0)\n date = datetime.datetime.fromtimestamp(date.astype('O') / 1000000000.0)\n valid_dates.append(date)\n table = [(date.date(), cases) for date, cases in zip(valid_dates,\n num_accumulated_cases)]\n dates = valid_dates\n return table, dates\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef 
_create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index,\n series.values)]\n return table\n\n\ndef write_html_report(out_path, date_range=None, desired_ccaas=None,\n spa_report=False):\n if spa_report and desired_ccaas:\n raise ValueError('choose one, either spa or ccaa report')\n if desired_ccaas and len(desired_ccaas) == 1:\n only_one_ccaa = True\n ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])\n else:\n only_one_ccaa = False\n ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()\n report = ccaa_info[-1]\n accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)\n deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files\n (), key=lambda x: x['max_date'])[-1]\n if spa_report:\n accumulated_incidence_table, dates = (\n _create_accumulate_indicence_table_for_spa_chart_from_report(\n report, 15))\n else:\n accumulated_incidence_table, ccaas, dates = (\n _create_table_for_chart_from_dict(accumulaed_incidence,\n desired_ccaas))\n title = 'Resumen situación Covid-19'\n if spa_report:\n title += ' España'\n elif only_one_ccaa:\n title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)\n else:\n title += ' por comunidad autónoma'\n html = HEADER.format(title)\n html += HEADER2\n js_function_name = 'drawAccumulatedCasesIncidence'\n columns = [('date', 'fecha')]\n if spa_report:\n columns.extend([('number', 'España')])\n else:\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])\n title = 'Incidencia acumulada por 100.000 hab. 
(15 días)'\n width = 900\n height = 800\n rangeslider_height = 50\n js_sizes = {'dashboard': {'height': height + rangeslider_height,\n 'width': width}, 'chart': {'height': height, 'width': width},\n 'rangeslider': {'height': rangeslider_height, 'width': 600}}\n div_sizes = {}\n for html_element in js_sizes:\n div_sizes[html_element] = {}\n div_sizes[html_element]['height'\n ] = f\"{js_sizes[html_element]['height']}px\"\n div_sizes[html_element]['width'\n ] = f\"{js_sizes[html_element]['width']}px\"\n slider_config = {'column_controlled': 'fecha', 'min_value': dates[0],\n 'max_value': dates[-1], 'min_init_value': date_range[0],\n 'max_init_value': date_range[-1]}\n div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',\n 'chart': 'accumulated_cases_chart', 'rangeslider':\n 'accumulated_cases_rangeslider'}\n html += material_line_chart.create_chart_js_with_slider(js_function_name,\n slider_config, div_ids_accumulated_cases, title, columns,\n accumulated_incidence_table, sizes=js_sizes)\n js_function_names = {'hospitalized': 'drawHospitalized', 'icu':\n 'drawICU', 'deceased': 'drawDeceased'}\n div_ids = {'hospitalized': 'hospitalized_chart', 'icu': 'icu_chart',\n 'deceased': 'deceased_chart'}\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)', 'icu':\n 'Num. ingresos UCI por 100.000 hab. (media 7 días)', 'deceased':\n 'Num. fallecidos por 100.000 hab. (media 7 días)'}\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa(\n )\n titles = {'hospitalized':\n 'Num. hospitalizaciones. (media 7 días)', 'icu':\n 'Num. ingresos UCI. (media 7 días)', 'deceased':\n 'Num. fallecidos. (media 7 días)'}\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. 
(media 7 días)'}\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard', 'chart':\n 'hospitalized_chart', 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard', 'chart':\n 'deceased_chart', 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized, 'deceased':\n div_ids_deceased}\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in\n dframe.index]\n dframe = dframe.divide(populations, axis=0) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe,\n desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(\n ccaa)) for ccaa in ccaas])\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0], 'max_value': dates[-1], 'min_init_value':\n date_range[0], 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(\n js_function_names[key], hospitalized_slider_config, div_ids[key\n ], title=titles[key], columns=columns, data_table=table, sizes=\n js_sizes)\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True,\n min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True,\n min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_rolling_mean.index]\n deaths_rolling_mean = 
deaths_rolling_mean.divide(populations, axis=0\n ) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(\n deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n html += material_line_chart.create_chart_js_with_slider(js_function_names\n [key], slider_config, div_ids[key], title=titles[key], columns=\n columns, data_table=table, sizes=js_sizes)\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += (\n f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n )\n html += (\n f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n )\n html += (\n f'<p>El código utilizado para generarlo se encuentra en <a href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n )\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa\n (ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths'\n ]\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) /\n tot_deaths)\n html += (\n f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n )\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_per_ccaa.index]\n 
populations = pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values(\n ).astype(int)\n html += (\n '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n )\n html += _write_table_from_series(death_rate)\n if False:\n for key in ['hospitalized']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids\n [key], sizes=div_sizes)\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(\n div_ids_accumulated_cases, sizes=div_sizes)\n for key in ['deceased']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids[\n key], sizes=div_sizes)\n html += ' </body>\\n</html>'\n out_path.open('wt').write(html)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos']\n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n time_delta = numpy.timedelta64(num_days, 'D')\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0, \n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[\n mask]))\n valid_dates.append(date)\n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=\n valid_dates)\n num_accumulated_cases = (num_accumulated_cases / data_sources.\n POPULATION[ccaa] * 100000.0)\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\ndef _create_js_chart(dframe, date_range, js_function_name, div_id, title,\n width, height):\n table = []\n ccaas = sorted(dframe.index)\n dates = list(dframe.columns)\n if date_range is not None:\n dates = [date for date in dates if date > date_range[0] and date <=\n date_range[1]]\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n value = dframe.loc[ccaa, date]\n row.append(value)\n table.append(row)\n js_function_name = js_function_name\n html = material_line_chart.create_chart_js(js_function_name, div_id,\n title, columns, table, width=width, height=height)\n return 
html\n\n\ndef _write_table_from_series(series):\n html = '<table>'\n for index, value in zip(series.index, series.values):\n html += f'<tr><td>{index}</td><td>{value}</td></tr>\\n'\n html += '</table>'\n return html\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa\n ) in desired_ccaas\n\n\ndef _create_table_for_chart_from_dict(dict_data, desired_ccaas):\n one_data = list(dict_data.values())[0]\n ccaas = sorted(dict_data.keys())\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(one_data.index)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dict_data[ccaa][date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_accumulate_indicence_table_for_spa_chart_from_report(report,\n num_days):\n dframe = report['dframe']\n time_delta = numpy.timedelta64(num_days, 'D')\n num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']\n tot_pop = sum(data_sources.POPULATION.values())\n dates = numpy.array(num_cases.index)\n num_accumulated_cases = []\n valid_dates = []\n for date in dates:\n date0 = date - time_delta\n mask = numpy.logical_and(dates > date0, dates <= date)\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop *\n 100000.0)\n date = datetime.datetime.fromtimestamp(date.astype('O') / 1000000000.0)\n valid_dates.append(date)\n table = [(date.date(), cases) for date, cases in zip(valid_dates,\n num_accumulated_cases)]\n dates = valid_dates\n return table, dates\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef 
_create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index,\n series.values)]\n return table\n\n\ndef write_html_report(out_path, date_range=None, desired_ccaas=None,\n spa_report=False):\n if spa_report and desired_ccaas:\n raise ValueError('choose one, either spa or ccaa report')\n if desired_ccaas and len(desired_ccaas) == 1:\n only_one_ccaa = True\n ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])\n else:\n only_one_ccaa = False\n ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()\n report = ccaa_info[-1]\n accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)\n deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files\n (), key=lambda x: x['max_date'])[-1]\n if spa_report:\n accumulated_incidence_table, dates = (\n _create_accumulate_indicence_table_for_spa_chart_from_report(\n report, 15))\n else:\n accumulated_incidence_table, ccaas, dates = (\n _create_table_for_chart_from_dict(accumulaed_incidence,\n desired_ccaas))\n title = 'Resumen situación Covid-19'\n if spa_report:\n title += ' España'\n elif only_one_ccaa:\n title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)\n else:\n title += ' por comunidad autónoma'\n html = HEADER.format(title)\n html += HEADER2\n js_function_name = 'drawAccumulatedCasesIncidence'\n columns = [('date', 'fecha')]\n if spa_report:\n columns.extend([('number', 'España')])\n else:\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])\n title = 'Incidencia acumulada por 100.000 hab. 
(15 días)'\n width = 900\n height = 800\n rangeslider_height = 50\n js_sizes = {'dashboard': {'height': height + rangeslider_height,\n 'width': width}, 'chart': {'height': height, 'width': width},\n 'rangeslider': {'height': rangeslider_height, 'width': 600}}\n div_sizes = {}\n for html_element in js_sizes:\n div_sizes[html_element] = {}\n div_sizes[html_element]['height'\n ] = f\"{js_sizes[html_element]['height']}px\"\n div_sizes[html_element]['width'\n ] = f\"{js_sizes[html_element]['width']}px\"\n slider_config = {'column_controlled': 'fecha', 'min_value': dates[0],\n 'max_value': dates[-1], 'min_init_value': date_range[0],\n 'max_init_value': date_range[-1]}\n div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',\n 'chart': 'accumulated_cases_chart', 'rangeslider':\n 'accumulated_cases_rangeslider'}\n html += material_line_chart.create_chart_js_with_slider(js_function_name,\n slider_config, div_ids_accumulated_cases, title, columns,\n accumulated_incidence_table, sizes=js_sizes)\n js_function_names = {'hospitalized': 'drawHospitalized', 'icu':\n 'drawICU', 'deceased': 'drawDeceased'}\n div_ids = {'hospitalized': 'hospitalized_chart', 'icu': 'icu_chart',\n 'deceased': 'deceased_chart'}\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)', 'icu':\n 'Num. ingresos UCI por 100.000 hab. (media 7 días)', 'deceased':\n 'Num. fallecidos por 100.000 hab. (media 7 días)'}\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa(\n )\n titles = {'hospitalized':\n 'Num. hospitalizaciones. (media 7 días)', 'icu':\n 'Num. ingresos UCI. (media 7 días)', 'deceased':\n 'Num. fallecidos. (media 7 días)'}\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. 
(media 7 días)'}\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard', 'chart':\n 'hospitalized_chart', 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard', 'chart':\n 'deceased_chart', 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized, 'deceased':\n div_ids_deceased}\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in\n dframe.index]\n dframe = dframe.divide(populations, axis=0) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe,\n desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(\n ccaa)) for ccaa in ccaas])\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0], 'max_value': dates[-1], 'min_init_value':\n date_range[0], 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(\n js_function_names[key], hospitalized_slider_config, div_ids[key\n ], title=titles[key], columns=columns, data_table=table, sizes=\n js_sizes)\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True,\n min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True,\n min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_rolling_mean.index]\n deaths_rolling_mean = 
deaths_rolling_mean.divide(populations, axis=0\n ) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(\n deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n html += material_line_chart.create_chart_js_with_slider(js_function_names\n [key], slider_config, div_ids[key], title=titles[key], columns=\n columns, data_table=table, sizes=js_sizes)\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += (\n f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n )\n html += (\n f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n )\n html += (\n f'<p>El código utilizado para generarlo se encuentra en <a href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n )\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa\n (ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths'\n ]\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) /\n tot_deaths)\n html += (\n f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n )\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_per_ccaa.index]\n 
populations = pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values(\n ).astype(int)\n html += (\n '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n )\n html += _write_table_from_series(death_rate)\n if False:\n for key in ['hospitalized']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids\n [key], sizes=div_sizes)\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(\n div_ids_accumulated_cases, sizes=div_sizes)\n for key in ['deceased']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids[\n key], sizes=div_sizes)\n html += ' </body>\\n</html>'\n out_path.open('wt').write(html)\n\n\nif __name__ == '__main__':\n ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)\n forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)\n first_date = datetime.datetime(2020, 9, 1)\n out_dir = config.HTML_REPORTS_DIR\n out_dir.mkdir(exist_ok=True)\n out_path = out_dir / 'situacion_covid_por_ca.html'\n write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])\n",
"step-4": "<mask token>\nHEADER = \"\"\"<html>\n <head>\n <title>{}</title>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n\"\"\"\nHEADER2 = \"\"\"\n google.charts.load('current', {'packages':['line', 'corechart', 'controls']});\n\n\"\"\"\nDESCRIPTIONS_CCAA = {'incidencia_acumulada':\n 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.'\n , 'hospitalized':\n 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.'\n , 'deceased':\n 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.'\n }\nDESCRIPTIONS_SPA = {'incidencia_acumulada':\n 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.'\n , 'hospitalized':\n 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.'\n , 'deceased':\n 'Número medio de fallecidos (media de 7 días). 
Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.'\n }\nDESCRIPTIONS = {(True): DESCRIPTIONS_SPA, (False): DESCRIPTIONS_CCAA}\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos']\n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n time_delta = numpy.timedelta64(num_days, 'D')\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0, \n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[\n mask]))\n valid_dates.append(date)\n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=\n valid_dates)\n num_accumulated_cases = (num_accumulated_cases / data_sources.\n POPULATION[ccaa] * 100000.0)\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\ndef _create_js_chart(dframe, date_range, js_function_name, div_id, title,\n width, height):\n table = []\n ccaas = sorted(dframe.index)\n dates = list(dframe.columns)\n if date_range is not None:\n dates = [date for date in dates if date > date_range[0] and date <=\n date_range[1]]\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n value = dframe.loc[ccaa, date]\n row.append(value)\n table.append(row)\n js_function_name = js_function_name\n html 
= material_line_chart.create_chart_js(js_function_name, div_id,\n title, columns, table, width=width, height=height)\n return html\n\n\ndef _write_table_from_series(series):\n html = '<table>'\n for index, value in zip(series.index, series.values):\n html += f'<tr><td>{index}</td><td>{value}</td></tr>\\n'\n html += '</table>'\n return html\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa\n ) in desired_ccaas\n\n\ndef _create_table_for_chart_from_dict(dict_data, desired_ccaas):\n one_data = list(dict_data.values())[0]\n ccaas = sorted(dict_data.keys())\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(one_data.index)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dict_data[ccaa][date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_accumulate_indicence_table_for_spa_chart_from_report(report,\n num_days):\n dframe = report['dframe']\n time_delta = numpy.timedelta64(num_days, 'D')\n num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']\n tot_pop = sum(data_sources.POPULATION.values())\n dates = numpy.array(num_cases.index)\n num_accumulated_cases = []\n valid_dates = []\n for date in dates:\n date0 = date - time_delta\n mask = numpy.logical_and(dates > date0, dates <= date)\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop *\n 100000.0)\n date = datetime.datetime.fromtimestamp(date.astype('O') / 1000000000.0)\n valid_dates.append(date)\n table = [(date.date(), cases) for date, cases in zip(valid_dates,\n num_accumulated_cases)]\n dates = valid_dates\n return table, dates\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n 
for ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index,\n series.values)]\n return table\n\n\ndef write_html_report(out_path, date_range=None, desired_ccaas=None,\n spa_report=False):\n if spa_report and desired_ccaas:\n raise ValueError('choose one, either spa or ccaa report')\n if desired_ccaas and len(desired_ccaas) == 1:\n only_one_ccaa = True\n ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])\n else:\n only_one_ccaa = False\n ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()\n report = ccaa_info[-1]\n accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)\n deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files\n (), key=lambda x: x['max_date'])[-1]\n if spa_report:\n accumulated_incidence_table, dates = (\n _create_accumulate_indicence_table_for_spa_chart_from_report(\n report, 15))\n else:\n accumulated_incidence_table, ccaas, dates = (\n _create_table_for_chart_from_dict(accumulaed_incidence,\n desired_ccaas))\n title = 'Resumen situación Covid-19'\n if spa_report:\n title += ' España'\n elif only_one_ccaa:\n title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)\n else:\n title += ' por comunidad autónoma'\n html = HEADER.format(title)\n html += HEADER2\n js_function_name = 'drawAccumulatedCasesIncidence'\n columns = [('date', 'fecha')]\n if spa_report:\n columns.extend([('number', 'España')])\n else:\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])\n title = 'Incidencia acumulada por 100.000 hab. 
(15 días)'\n width = 900\n height = 800\n rangeslider_height = 50\n js_sizes = {'dashboard': {'height': height + rangeslider_height,\n 'width': width}, 'chart': {'height': height, 'width': width},\n 'rangeslider': {'height': rangeslider_height, 'width': 600}}\n div_sizes = {}\n for html_element in js_sizes:\n div_sizes[html_element] = {}\n div_sizes[html_element]['height'\n ] = f\"{js_sizes[html_element]['height']}px\"\n div_sizes[html_element]['width'\n ] = f\"{js_sizes[html_element]['width']}px\"\n slider_config = {'column_controlled': 'fecha', 'min_value': dates[0],\n 'max_value': dates[-1], 'min_init_value': date_range[0],\n 'max_init_value': date_range[-1]}\n div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',\n 'chart': 'accumulated_cases_chart', 'rangeslider':\n 'accumulated_cases_rangeslider'}\n html += material_line_chart.create_chart_js_with_slider(js_function_name,\n slider_config, div_ids_accumulated_cases, title, columns,\n accumulated_incidence_table, sizes=js_sizes)\n js_function_names = {'hospitalized': 'drawHospitalized', 'icu':\n 'drawICU', 'deceased': 'drawDeceased'}\n div_ids = {'hospitalized': 'hospitalized_chart', 'icu': 'icu_chart',\n 'deceased': 'deceased_chart'}\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)', 'icu':\n 'Num. ingresos UCI por 100.000 hab. (media 7 días)', 'deceased':\n 'Num. fallecidos por 100.000 hab. (media 7 días)'}\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa(\n )\n titles = {'hospitalized':\n 'Num. hospitalizaciones. (media 7 días)', 'icu':\n 'Num. ingresos UCI. (media 7 días)', 'deceased':\n 'Num. fallecidos. (media 7 días)'}\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized':\n 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. 
(media 7 días)'}\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard', 'chart':\n 'hospitalized_chart', 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard', 'chart':\n 'deceased_chart', 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized, 'deceased':\n div_ids_deceased}\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in\n dframe.index]\n dframe = dframe.divide(populations, axis=0) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe,\n desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(\n ccaa)) for ccaa in ccaas])\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0], 'max_value': dates[-1], 'min_init_value':\n date_range[0], 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(\n js_function_names[key], hospitalized_slider_config, div_ids[key\n ], title=titles[key], columns=columns, data_table=table, sizes=\n js_sizes)\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True,\n min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True,\n min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_rolling_mean.index]\n deaths_rolling_mean = 
deaths_rolling_mean.divide(populations, axis=0\n ) * 100000.0\n table, ccaas, _ = _create_table_for_chart_from_dframe(\n deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for\n ccaa in ccaas])\n html += material_line_chart.create_chart_js_with_slider(js_function_names\n [key], slider_config, div_ids[key], title=titles[key], columns=\n columns, data_table=table, sizes=js_sizes)\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += (\n f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n )\n html += (\n f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n )\n html += (\n f'<p>El código utilizado para generarlo se encuentra en <a href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n )\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa\n (ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths'\n ]\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) /\n tot_deaths)\n html += (\n f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n )\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in\n deaths_per_ccaa.index]\n 
populations = pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values(\n ).astype(int)\n html += (\n '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n )\n html += _write_table_from_series(death_rate)\n if False:\n for key in ['hospitalized']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids\n [key], sizes=div_sizes)\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(\n div_ids_accumulated_cases, sizes=div_sizes)\n for key in ['deceased']:\n html += f'<p>{DESCRIPTIONS[spa_report][key]}</p>\\n'\n html += material_line_chart.create_chart_with_slider_divs(div_ids[\n key], sizes=div_sizes)\n html += ' </body>\\n</html>'\n out_path.open('wt').write(html)\n\n\nif __name__ == '__main__':\n ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)\n forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)\n first_date = datetime.datetime(2020, 9, 1)\n out_dir = config.HTML_REPORTS_DIR\n out_dir.mkdir(exist_ok=True)\n out_path = out_dir / 'situacion_covid_por_ca.html'\n write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])\n",
"step-5": "\nfrom datetime import date\nimport config\n\nimport datetime\n\nimport numpy\nimport pandas\n\nimport data_sources\nfrom data_sources import POPULATION, convert_to_ccaa_iso\nimport material_line_chart\nimport ministry_datasources\n\n\nHEADER = '''<html>\n <head>\n <title>{}</title>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n'''\n\nHEADER2 = '''\n google.charts.load('current', {'packages':['line', 'corechart', 'controls']});\n\n'''\n\n\nDESCRIPTIONS_CCAA = {\n'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',\n'hospitalized': 'Número medio de hospitalizaciones por cien mil habitantes (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',\n'deceased': 'Número medio de fallecidos por cien mil habitantes (media de 7 días). Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',\n}\nDESCRIPTIONS_SPA = {\n'incidencia_acumulada': 'Número de casos informados en los 15 días anteriores por cien mil habitantes. Datos obtenidos de los informes del Carlos III.',\n'hospitalized': 'Número medio de hospitalizaciones (media de 7 días). Datos obtenidos a partir de las cifras acumuladas que aparecen en los informes diarios del ministerio.',\n'deceased': 'Número medio de fallecidos (media de 7 días). 
Datos obtenidos a partir del excel con datos de fallecidos diarios del ministerio.',\n}\nDESCRIPTIONS = {True: DESCRIPTIONS_SPA, False: DESCRIPTIONS_CCAA}\n\n\ndef calc_accumulated_indicende_per_ccaa(report, num_days=15):\n ccaas = data_sources.get_ccaas_in_dset(report)\n dframe = report['dframe']\n num_cases = dframe['num_casos'] \n ccaa_column = data_sources.get_ccaa_column_in_index(num_cases.index)\n index = num_cases.index.to_frame(index=False)\n\n time_delta = numpy.timedelta64(num_days, 'D')\n\n accumulated_cases_by_ccaa = {}\n for ccaa in ccaas:\n mask = index[ccaa_column] == ccaa\n mask = mask.values\n num_cases_for_this_ccaa = num_cases[mask]\n this_ccaa_index = num_cases_for_this_ccaa.index.to_frame(index=False)\n this_ccaa_dates = this_ccaa_index['fecha']\n num_accumulated_cases = []\n valid_dates = []\n for date in this_ccaa_dates:\n date0 = date - time_delta\n mask = numpy.logical_and(this_ccaa_dates > date0,\n this_ccaa_dates <= date)\n mask = mask.values\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases_for_this_ccaa[mask]))\n valid_dates.append(date)\n \n num_accumulated_cases = pandas.Series(num_accumulated_cases, index=valid_dates)\n num_accumulated_cases = num_accumulated_cases / data_sources.POPULATION[ccaa] * 1e5\n accumulated_cases_by_ccaa[ccaa] = num_accumulated_cases\n return accumulated_cases_by_ccaa\n\n\ndef _create_js_chart(dframe, date_range, js_function_name, div_id, title, width, height):\n table = []\n ccaas = sorted(dframe.index)\n dates = list(dframe.columns)\n\n if date_range is not None:\n dates = [date for date in dates if date > date_range[0] and date <= date_range[1]]\n\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])\n\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n value = dframe.loc[ccaa, date]\n row.append(value)\n table.append(row)\n js_function_name = js_function_name\n html = 
material_line_chart.create_chart_js(js_function_name, div_id, title,\n columns, table,\n width=width, height=height)\n return html\n\n\ndef _write_table_from_series(series):\n html = '<table>'\n for index, value in zip(series.index, series.values):\n html += f'<tr><td>{index}</td><td>{value}</td></tr>\\n'\n html += '</table>'\n return html\n\n\ndef is_desired_ccaa(ccaa, desired_ccaas):\n return desired_ccaas is None or data_sources.convert_to_ccaa_iso(ccaa) in desired_ccaas\n\n\ndef _create_table_for_chart_from_dict(dict_data, desired_ccaas):\n one_data = list(dict_data.values())[0]\n\n ccaas = sorted(dict_data.keys())\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n\n dates = list(one_data.index)\n table = []\n for date in dates:\n row = [date.date()]\n for ccaa in ccaas:\n row.append(dict_data[ccaa][date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_accumulate_indicence_table_for_spa_chart_from_report(report, num_days):\n dframe = report['dframe']\n time_delta = numpy.timedelta64(num_days, 'D')\n\n num_cases = dframe.groupby(level=1).sum().loc[:, 'num_casos']\n\n tot_pop = sum(data_sources.POPULATION.values())\n dates = numpy.array(num_cases.index)\n num_accumulated_cases = []\n valid_dates = []\n for date in dates:\n date0 = date - time_delta\n mask = numpy.logical_and(dates > date0,\n dates <= date)\n if numpy.sum(mask) < num_days:\n continue\n num_accumulated_cases.append(numpy.sum(num_cases[mask]) / tot_pop * 1e5)\n date = datetime.datetime.fromtimestamp(date.astype('O') / 1e9)\n valid_dates.append(date)\n\n table = [(date.date(), cases) for date, cases in zip(valid_dates, num_accumulated_cases)]\n dates = valid_dates\n\n return table, dates\n\n\ndef _create_table_for_chart_from_dframe(dframe, desired_ccaas):\n\n ccaas = sorted(dframe.index)\n ccaas = [ccaa for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)]\n dates = list(dframe.columns)\n table = []\n for date in dates:\n row = [date.date()]\n for 
ccaa in ccaas:\n row.append(dframe.loc[ccaa, date])\n table.append(row)\n return table, ccaas, dates\n\n\ndef _create_table_for_chart_from_series(series):\n table = [(date.date(), value) for date, value in zip(series.index, series.values)]\n return table\n\n\ndef write_html_report(out_path, date_range=None, desired_ccaas=None, spa_report=False):\n\n if spa_report and desired_ccaas:\n raise ValueError('choose one, either spa or ccaa report')\n\n if desired_ccaas and len(desired_ccaas) == 1:\n only_one_ccaa = True\n ccaa_iso = convert_to_ccaa_iso(desired_ccaas[0])\n else:\n only_one_ccaa = False\n\n ccaa_info = data_sources.get_sorted_downloaded_ccaa_info()\n report = ccaa_info[-1]\n accumulaed_incidence = calc_accumulated_indicende_per_ccaa(report)\n\n deaths = sorted(ministry_datasources.read_deceased_excel_ministry_files(),\n key=lambda x: x['max_date'])[-1]\n\n if spa_report:\n accumulated_incidence_table, dates = _create_accumulate_indicence_table_for_spa_chart_from_report(report, 15)\n else:\n accumulated_incidence_table, ccaas, dates = _create_table_for_chart_from_dict(accumulaed_incidence, desired_ccaas)\n\n title = 'Resumen situación Covid-19'\n if spa_report:\n title += ' España'\n elif only_one_ccaa:\n title += ': ' + data_sources.convert_to_ccaa_name(ccaa_iso)\n else:\n title += ' por comunidad autónoma'\n html = HEADER.format(title)\n html += HEADER2\n\n js_function_name = 'drawAccumulatedCasesIncidence'\n columns = [('date', 'fecha')]\n if spa_report:\n columns.extend([('number', 'España')])\n else:\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas if is_desired_ccaa(ccaa, desired_ccaas)])\n title = 'Incidencia acumulada por 100.000 hab. 
(15 días)'\n\n width =900\n height = 800\n rangeslider_height = 50\n js_sizes = {'dashboard': {'height': height + rangeslider_height, 'width': width},\n 'chart': {'height': height, 'width': width},\n 'rangeslider': {'height': rangeslider_height, 'width': 600},\n }\n div_sizes = {}\n for html_element in js_sizes:\n div_sizes[html_element] = {}\n div_sizes[html_element]['height'] = f\"{js_sizes[html_element]['height']}px\"\n div_sizes[html_element]['width'] = f\"{js_sizes[html_element]['width']}px\"\n\n slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0],\n 'max_value': dates[-1],\n 'min_init_value': date_range[0],\n 'max_init_value': date_range[-1]}\n div_ids_accumulated_cases = {'dashboard': 'accumulated_cases_dashboard',\n 'chart': 'accumulated_cases_chart',\n 'rangeslider': 'accumulated_cases_rangeslider'}\n\n html += material_line_chart.create_chart_js_with_slider(js_function_name,\n slider_config,\n div_ids_accumulated_cases,\n title,\n columns,\n accumulated_incidence_table,\n sizes=js_sizes)\n\n js_function_names = {'hospitalized': 'drawHospitalized',\n 'icu': 'drawICU',\n 'deceased': 'drawDeceased'}\n div_ids = {'hospitalized': 'hospitalized_chart',\n 'icu': 'icu_chart',\n 'deceased': 'deceased_chart'\n }\n titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. fallecidos por 100.000 hab. (media 7 días)'\n }\n\n if False:\n if spa_report:\n rolling_means = ministry_datasources.get_ministry_rolling_mean_spa()\n titles = {'hospitalized': 'Num. hospitalizaciones. (media 7 días)',\n 'icu': 'Num. ingresos UCI. (media 7 días)',\n 'deceased': 'Num. fallecidos. (media 7 días)'\n }\n else:\n rolling_means = ministry_datasources.get_ministry_rolling_mean()\n titles = {'hospitalized': 'Num. hospitalizaciones por 100.000 hab. (media 7 días)',\n 'icu': 'Num. ingresos UCI por 100.000 hab. (media 7 días)',\n 'deceased': 'Num. 
fallecidos por 100.000 hab. (media 7 días)'\n }\n\n div_ids_hospitalized = {'dashboard': 'hospitalized_dashboard',\n 'chart': 'hospitalized_chart',\n 'rangeslider': 'hospitalized_rangeslider'}\n div_ids_deceased = {'dashboard': 'deceased_dashboard',\n 'chart': 'deceased_chart',\n 'rangeslider': 'deceased_rangeslider'}\n div_ids = {'hospitalized': div_ids_hospitalized,\n 'deceased': div_ids_deceased,\n }\n\n if False:\n dframe = rolling_means['hospitalized']\n if spa_report:\n columns = [('date', 'fecha'), ('number', 'España')]\n table = _create_table_for_chart_from_series(dframe)\n else:\n populations = [data_sources.get_population(ccaa) for ccaa in dframe.index]\n dframe = dframe.divide(populations, axis=0) * 1e5\n table, ccaas, _ = _create_table_for_chart_from_dframe(dframe, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])\n\n key = 'hospitalized'\n hospitalized_slider_config = {'column_controlled': 'fecha',\n 'min_value': dates[0],\n 'max_value': dates[-1],\n 'min_init_value': date_range[0],\n 'max_init_value': datetime.datetime.now()}\n html += material_line_chart.create_chart_js_with_slider(js_function_names[key],\n hospitalized_slider_config,\n div_ids[key],\n title=titles[key],\n columns=columns,\n data_table=table,\n sizes=js_sizes)\n\n num_days = 7\n key = 'deceased'\n deaths_dframe = deaths['dframe']\n if spa_report:\n spa_deaths = deaths_dframe.sum(axis=0)\n deaths_rolling_mean = spa_deaths.rolling(num_days, center=True, min_periods=num_days).mean().dropna()\n table = _create_table_for_chart_from_series(deaths_rolling_mean)\n columns = [('date', 'fecha'), ('number', 'España')]\n else:\n deaths_rolling_mean = deaths_dframe.rolling(num_days, center=True, min_periods=num_days, axis=1).mean()\n deaths_rolling_mean = deaths_rolling_mean.dropna(axis=1, how='all')\n populations = [data_sources.get_population(ccaa) for ccaa in deaths_rolling_mean.index]\n deaths_rolling_mean 
= deaths_rolling_mean.divide(populations, axis=0) * 1e5\n\n table, ccaas, _ = _create_table_for_chart_from_dframe(deaths_rolling_mean, desired_ccaas)\n columns = [('date', 'fecha')]\n columns.extend([('number', data_sources.convert_to_ccaa_name(ccaa)) for ccaa in ccaas])\n\n html += material_line_chart.create_chart_js_with_slider(js_function_names[key],\n slider_config,\n div_ids[key],\n title=titles[key],\n columns=columns,\n data_table=table,\n sizes=js_sizes)\n\n html += ' </script>\\n </head>\\n <body>\\n'\n today = datetime.datetime.now()\n html += '<p><a href=\"../\">Menu</a></p>'\n html += f'<p>Informe generado el día: {today.day}-{today.month}-{today.year}</p>'\n\n html += f'<p>Este informe está generado para uso personal por <a href=\"https://twitter.com/jblanca42\">@jblanca42</a>, pero lo sube a la web por si le pudiese ser de utilidad a alguien más.</p>'\n html += f'<p>El código utilizado para generarlo se encuentra en <a href=\"https://github.com/JoseBlanca/seguimiento_covid\">github</a>, si encuentras algún fallo o quieres mejorar algo envía un mensaje o haz un pull request.</p>'\n\n if desired_ccaas:\n index = [ccaa for ccaa in deaths['dframe'].index if is_desired_ccaa(ccaa, desired_ccaas)]\n tot_deaths = deaths['dframe'].loc[index, :].values.sum()\n else:\n tot_deaths = deaths['dframe'].values.sum() + deaths['unassinged_deaths']\n html += f'<p>Número total de fallecidos: {tot_deaths}</p>'\n\n if spa_report:\n death_rate = round(sum(data_sources.POPULATION.values()) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido.</p>'\n elif desired_ccaas and len(desired_ccaas) == 1:\n death_rate = round(data_sources.get_population(desired_ccaas[0]) / tot_deaths)\n html += f'<p>Una de cada {death_rate} personas han fallecido en esta comunidad autónoma.</p>'\n else:\n deaths_per_ccaa = deaths['dframe'].sum(axis=1)\n populations = [data_sources.get_population(ccaa) for ccaa in deaths_per_ccaa.index]\n populations = 
pandas.Series(populations, index=deaths_per_ccaa.index)\n death_rate = (populations / deaths_per_ccaa).round().sort_values().astype(int)\n html += '<p>¿Una de cada cuántas personas han fallecido por comunidad autónoma?</p>'\n html += _write_table_from_series(death_rate)\n\n if False:\n for key in ['hospitalized']:\n html += f\"<p>{DESCRIPTIONS[spa_report][key]}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(div_ids[key],\n sizes=div_sizes)\n\n html += f\"<p>{DESCRIPTIONS[spa_report]['incidencia_acumulada']}</p>\\n\"\n\n html += material_line_chart.create_chart_with_slider_divs(div_ids_accumulated_cases,\n sizes=div_sizes)\n for key in ['deceased']:\n html += f\"<p>{DESCRIPTIONS[spa_report][key]}</p>\\n\"\n html += material_line_chart.create_chart_with_slider_divs(div_ids[key],\n sizes=div_sizes)\n\n html += ' </body>\\n</html>'\n\n out_path.open('wt').write(html)\n\n\nif __name__ == '__main__':\n\n ten_days_ago = datetime.datetime.now() - datetime.timedelta(days=10)\n forty_days_ago = datetime.datetime.now() - datetime.timedelta(days=40)\n first_date = datetime.datetime(2020, 9, 1)\n\n out_dir = config.HTML_REPORTS_DIR\n out_dir.mkdir(exist_ok=True)\n out_path = out_dir / 'situacion_covid_por_ca.html'\n write_html_report(out_path, date_range=[forty_days_ago, ten_days_ago])\n",
"step-ids": [
4,
9,
10,
11,
13
]
}
|
[
4,
9,
10,
11,
13
] |
from flask_wtf import FlaskForm
from wtforms import StringField, DateField, DecimalField
class HoursForm(FlaskForm):
    """WTForms form for recording a date together with begin and end values."""

    # Date entered as free text (a DateField is imported at module level but
    # not used here).  NOTE(review): DateField would give built-in parsing —
    # confirm whether callers rely on receiving a raw string before switching.
    date = StringField("Date")
    # Numeric begin/end values; the units (clock hours? elapsed hours?) are
    # not established by this module — presumably decimal hours, TODO confirm.
    begins = DecimalField("Begins")
    ends = DecimalField("Ends")

    class Meta:
        # Disables Flask-WTF CSRF protection for this form.
        csrf = False
normal
|
{
"blob_id": "b1a808e76008edec02d37ec596461e3a00a1d349",
"index": 4553,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass HoursForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n csrf = False\n",
"step-3": "<mask token>\n\n\nclass HoursForm(FlaskForm):\n date = StringField('Date')\n begins = DecimalField('Begins')\n ends = DecimalField('Ends')\n\n\n class Meta:\n csrf = False\n",
"step-4": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, DateField, DecimalField\n\n\nclass HoursForm(FlaskForm):\n date = StringField('Date')\n begins = DecimalField('Begins')\n ends = DecimalField('Ends')\n\n\n class Meta:\n csrf = False\n",
"step-5": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, DateField, DecimalField\n\nclass HoursForm(FlaskForm):\n date = StringField(\"Date\")\n begins = DecimalField(\"Begins\")\n ends = DecimalField(\"Ends\")\n \n class Meta:\n csrf = False\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Video(ABC):
def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,
gradient: typing.Union[int, str]=0, verbose: int=False):
if not os.path.isfile(filename):
raise FileNotFound(filename)
self.filename = filename
self.video = cv2.VideoCapture(filename)
self.frames = []
self.fps = self.video.get(cv2.CAP_PROP_FPS)
self.width = self.video.get(3)
self.height = self.video.get(4)
if scale > 1:
scale /= 100
self.scale = scale
self.w_stretch = w_stretch
self.scaled_width = int(self.width * self.scale * self.w_stretch)
self.scaled_height = int(self.height * self.scale)
if type(gradient) == int:
if 0 > gradient > len(gradients) - 1:
raise IndexError(
f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'
)
else:
self.gradient = gradients[gradient]
else:
self.gradient = gradient
self.gradient = tuple([c for c in self.gradient])
self.gradient_len = len(self.gradient)
self.verbose = verbose
self.current_frame = 0
self.end_frame = None
if os.name == 'nt':
self.clear_cmd = 'cls'
else:
self.clear_cmd = 'clear'
if self.verbose:
print(f'Dimensions: {self.width}x{self.height}')
print(f'Scale Factor: {self.scale}')
print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')
print(f"Gradient: '{''.join(self.gradient)}'")
print(f'FPS: {self.fps}')
<|reserved_special_token_0|>
def view(self, *, fps: float=None):
if fps is None:
spf = 1 / self.fps
else:
spf = 1 / fps
try:
for frame in self.frames:
start = time.perf_counter()
print(frame)
diff = start - time.perf_counter()
time.sleep((spf - diff + abs(spf - diff)) / 2)
os.system(self.clear_cmd)
except KeyboardInterrupt:
pass
def __iter__(self):
return self
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Video(ABC):
def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,
gradient: typing.Union[int, str]=0, verbose: int=False):
if not os.path.isfile(filename):
raise FileNotFound(filename)
self.filename = filename
self.video = cv2.VideoCapture(filename)
self.frames = []
self.fps = self.video.get(cv2.CAP_PROP_FPS)
self.width = self.video.get(3)
self.height = self.video.get(4)
if scale > 1:
scale /= 100
self.scale = scale
self.w_stretch = w_stretch
self.scaled_width = int(self.width * self.scale * self.w_stretch)
self.scaled_height = int(self.height * self.scale)
if type(gradient) == int:
if 0 > gradient > len(gradients) - 1:
raise IndexError(
f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'
)
else:
self.gradient = gradients[gradient]
else:
self.gradient = gradient
self.gradient = tuple([c for c in self.gradient])
self.gradient_len = len(self.gradient)
self.verbose = verbose
self.current_frame = 0
self.end_frame = None
if os.name == 'nt':
self.clear_cmd = 'cls'
else:
self.clear_cmd = 'clear'
if self.verbose:
print(f'Dimensions: {self.width}x{self.height}')
print(f'Scale Factor: {self.scale}')
print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')
print(f"Gradient: '{''.join(self.gradient)}'")
print(f'FPS: {self.fps}')
<|reserved_special_token_0|>
def view(self, *, fps: float=None):
if fps is None:
spf = 1 / self.fps
else:
spf = 1 / fps
try:
for frame in self.frames:
start = time.perf_counter()
print(frame)
diff = start - time.perf_counter()
time.sleep((spf - diff + abs(spf - diff)) / 2)
os.system(self.clear_cmd)
except KeyboardInterrupt:
pass
def __iter__(self):
return self
def __next__(self):
if self.current_frame > self.end_frame:
raise StopIteration
self.current_frame += 1
return self.frames[self.current_frame - 1]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Video(ABC):
    """Converts a video file into ASCII-art frames and plays them in the console.

    Per-image ASCII conversion is delegated to ``self.asciify_img``, which is
    provided by the ``ABC`` base class.
    """

    def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,
        gradient: typing.Union[int, str]=0, verbose: int=False):
        """Open *filename* and precompute the scaled output dimensions.

        :param filename: path to the source video; raises FileNotFound if absent.
        :param scale: factor applied to both dimensions; values > 1 are treated
            as percentages (e.g. 50 -> 0.5).
        :param w_stretch: extra horizontal stretch, compensating for terminal
            characters being taller than they are wide.
        :param gradient: index into the prebuilt ``gradients`` table, or a
            custom character ramp given as a string.
        :param verbose: when truthy, print diagnostic information.
        """
        if not os.path.isfile(filename):
            raise FileNotFound(filename)
        self.filename = filename
        self.video = cv2.VideoCapture(filename)
        self.frames = []  # populated by convert(); one ASCII string per frame
        self.fps = self.video.get(cv2.CAP_PROP_FPS)
        self.width = self.video.get(3)   # CAP_PROP_FRAME_WIDTH
        self.height = self.video.get(4)  # CAP_PROP_FRAME_HEIGHT
        # Accept percentage-style scales (e.g. 50 meaning 50%).
        if scale > 1:
            scale /= 100
        self.scale = scale
        self.w_stretch = w_stretch
        self.scaled_width = int(self.width * self.scale * self.w_stretch)
        self.scaled_height = int(self.height * self.scale)
        if type(gradient) == int:
            # BUGFIX: the original test `0 > gradient > len(gradients) - 1`
            # can never be true (a chained comparison requiring gradient to be
            # both negative and above the maximum), so out-of-range indices
            # slipped through to the lookup below.
            if not 0 <= gradient <= len(gradients) - 1:
                raise IndexError(
                    f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'
                    )
            self.gradient = gradients[gradient]
        else:
            self.gradient = gradient
        self.gradient = tuple([c for c in self.gradient])
        self.gradient_len = len(self.gradient)
        self.verbose = verbose
        # Iteration state used by __iter__/__next__.
        self.current_frame = 0
        self.end_frame = None
        # Console-clear command differs between Windows and POSIX.
        if os.name == 'nt':
            self.clear_cmd = 'cls'
        else:
            self.clear_cmd = 'clear'
        if self.verbose:
            print(f'Dimensions: {self.width}x{self.height}')
            print(f'Scale Factor: {self.scale}')
            print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')
            print(f"Gradient: '{''.join(self.gradient)}'")
            print(f'FPS: {self.fps}')

    def convert(self):
        """Read every frame, resize it, and store its ASCII rendering.

        Returns self so calls can be chained (``Video(f).convert().view()``).
        """
        if self.verbose:
            print('Converting...')
        while True:
            succ, img = self.video.read()
            if not succ:  # end of stream (or read failure)
                break
            img = cv2.resize(img, (self.scaled_width, self.scaled_height))
            self.frames.append(self.asciify_img(img))
        self.end_frame = len(self.frames)
        if self.verbose:
            print('Done.')
        return self

    def view(self, *, fps: float=None):
        """Play the converted frames in the console.

        :param fps: playback rate; defaults to the source video's FPS.
        """
        spf = 1 / (self.fps if fps is None else fps)  # seconds per frame
        try:
            for frame in self.frames:
                start = time.perf_counter()
                print(frame)
                # BUGFIX: the original computed `start - time.perf_counter()`
                # (a negated elapsed time), which made every frame sleep for
                # longer than one frame period.  Sleep only for the remainder.
                elapsed = time.perf_counter() - start
                time.sleep(max(spf - elapsed, 0))
                os.system(self.clear_cmd)
        except KeyboardInterrupt:
            pass  # allow the user to stop playback cleanly with Ctrl-C

    def __iter__(self):
        return self

    def __next__(self):
        # BUGFIX: the original used `>`, which advanced one step past the end
        # and raised IndexError instead of StopIteration on exhaustion.
        if self.current_frame >= self.end_frame:
            raise StopIteration
        self.current_frame += 1
        return self.frames[self.current_frame - 1]
<|reserved_special_token_1|>
import typing
import time
import cv2
import os
from .ABC import ABC
from .Exceptions import *
from .Constants import *
class Video(ABC):
    """Converts a video file into ASCII-art frames and plays them in the console.

    Per-image ASCII conversion is delegated to ``self.asciify_img``, which is
    provided by the ``ABC`` base class.
    """

    def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,
        gradient: typing.Union[int, str]=0, verbose: int=False):
        """Open *filename* and precompute the scaled output dimensions.

        :param filename: path to the source video; raises FileNotFound if absent.
        :param scale: factor applied to both dimensions; values > 1 are treated
            as percentages (e.g. 50 -> 0.5).
        :param w_stretch: extra horizontal stretch, compensating for terminal
            characters being taller than they are wide.
        :param gradient: index into the prebuilt ``gradients`` table, or a
            custom character ramp given as a string.
        :param verbose: when truthy, print diagnostic information.
        """
        if not os.path.isfile(filename):
            raise FileNotFound(filename)
        self.filename = filename
        self.video = cv2.VideoCapture(filename)
        self.frames = []  # populated by convert(); one ASCII string per frame
        self.fps = self.video.get(cv2.CAP_PROP_FPS)
        self.width = self.video.get(3)   # CAP_PROP_FRAME_WIDTH
        self.height = self.video.get(4)  # CAP_PROP_FRAME_HEIGHT
        # Accept percentage-style scales (e.g. 50 meaning 50%).
        if scale > 1:
            scale /= 100
        self.scale = scale
        self.w_stretch = w_stretch
        self.scaled_width = int(self.width * self.scale * self.w_stretch)
        self.scaled_height = int(self.height * self.scale)
        if type(gradient) == int:
            # BUGFIX: the original test `0 > gradient > len(gradients) - 1`
            # can never be true (a chained comparison requiring gradient to be
            # both negative and above the maximum), so out-of-range indices
            # slipped through to the lookup below.
            if not 0 <= gradient <= len(gradients) - 1:
                raise IndexError(
                    f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'
                    )
            self.gradient = gradients[gradient]
        else:
            self.gradient = gradient
        self.gradient = tuple([c for c in self.gradient])
        self.gradient_len = len(self.gradient)
        self.verbose = verbose
        # Iteration state used by __iter__/__next__.
        self.current_frame = 0
        self.end_frame = None
        # Console-clear command differs between Windows and POSIX.
        if os.name == 'nt':
            self.clear_cmd = 'cls'
        else:
            self.clear_cmd = 'clear'
        if self.verbose:
            print(f'Dimensions: {self.width}x{self.height}')
            print(f'Scale Factor: {self.scale}')
            print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')
            print(f"Gradient: '{''.join(self.gradient)}'")
            print(f'FPS: {self.fps}')

    def convert(self):
        """Read every frame, resize it, and store its ASCII rendering.

        Returns self so calls can be chained (``Video(f).convert().view()``).
        """
        if self.verbose:
            print('Converting...')
        while True:
            succ, img = self.video.read()
            if not succ:  # end of stream (or read failure)
                break
            img = cv2.resize(img, (self.scaled_width, self.scaled_height))
            self.frames.append(self.asciify_img(img))
        self.end_frame = len(self.frames)
        if self.verbose:
            print('Done.')
        return self

    def view(self, *, fps: float=None):
        """Play the converted frames in the console.

        :param fps: playback rate; defaults to the source video's FPS.
        """
        spf = 1 / (self.fps if fps is None else fps)  # seconds per frame
        try:
            for frame in self.frames:
                start = time.perf_counter()
                print(frame)
                # BUGFIX: the original computed `start - time.perf_counter()`
                # (a negated elapsed time), which made every frame sleep for
                # longer than one frame period.  Sleep only for the remainder.
                elapsed = time.perf_counter() - start
                time.sleep(max(spf - elapsed, 0))
                os.system(self.clear_cmd)
        except KeyboardInterrupt:
            pass  # allow the user to stop playback cleanly with Ctrl-C

    def __iter__(self):
        return self

    def __next__(self):
        # BUGFIX: the original used `>`, which advanced one step past the end
        # and raised IndexError instead of StopIteration on exhaustion.
        if self.current_frame >= self.end_frame:
            raise StopIteration
        self.current_frame += 1
        return self.frames[self.current_frame - 1]
<|reserved_special_token_1|>
import typing
import time
import cv2
import os
from .ABC import ABC
from .Exceptions import *
from .Constants import *
class Video(ABC):
    """Converts a video file into ASCII-art frames and plays them in the console.

    Per-image ASCII conversion is delegated to ``self.asciify_img``, which is
    provided by the ``ABC`` base class.
    """

    def __init__(self, filename: str, *, scale: float = 1, w_stretch: float = 2, gradient: typing.Union[int, str] = 0, verbose: int = False):
        """Open *filename* and precompute the scaled output dimensions.

        :param filename: path to the source video; raises FileNotFound if absent.
        :param scale: factor applied to both dimensions; values > 1 are treated
            as percentages (e.g. 50 -> 0.5).
        :param w_stretch: extra horizontal stretch, compensating for terminal
            characters being taller than they are wide.
        :param gradient: index into the prebuilt ``gradients`` table, or a
            custom character ramp given as a string.
        :param verbose: when truthy, print diagnostic information.
        """
        if not os.path.isfile(filename):  # check to make sure file actually exists
            raise FileNotFound(filename)  # FileNotFound is from .Exceptions

        self.filename = filename
        self.video = cv2.VideoCapture(filename)

        # self.frames is a frames[frame[row[char, char,..], row[],..], frame[],..]
        self.frames = []  # converted frames (will be populated when convert() is called)

        self.fps = self.video.get(cv2.CAP_PROP_FPS)  # fps of the origin video
        self.width = self.video.get(3)  # float, width of the video
        self.height = self.video.get(4)  # float, height of the video

        # if scale was given as a percentage (out of 100 rather than out of 1)
        if scale > 1:
            scale /= 100

        self.scale = scale  # scale which both dimensions are multiplied by
        self.w_stretch = w_stretch  # scale which the width dimension is multiplied by (to account for text which is taller than it is wide)

        # scaled dimensions
        self.scaled_width = int(self.width*self.scale*self.w_stretch)
        self.scaled_height = int(self.height*self.scale)

        # determine what the gradient / brightness to character mapping will be
        if type(gradient) == int:
            # BUGFIX: the original test `0 > gradient > (len(gradients) - 1)`
            # can never be true (a chained comparison requiring gradient to be
            # both negative and above the maximum), so out-of-range indices
            # slipped through to the lookup below.
            if not 0 <= gradient <= (len(gradients) - 1):
                raise IndexError(f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.')
            self.gradient = gradients[gradient]
        else:
            self.gradient = gradient

        self.gradient = tuple([c for c in self.gradient])  # turn self.gradient into a tuple
        self.gradient_len = len(self.gradient)

        self.verbose = verbose  # whether or not to do extra logging of information

        # for __iter__ to allow this to be used in a for loop to iterate through the frames
        self.current_frame = 0
        self.end_frame = None

        # determine what the clear command will be when viewing the final asciified frames
        if os.name == 'nt':
            self.clear_cmd = 'cls'
        else:
            self.clear_cmd = 'clear'

        if self.verbose:
            print(f'Dimensions: {self.width}x{self.height}')
            print(f'Scale Factor: {self.scale}')
            print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')
            print(f'Gradient: \'{"".join(self.gradient)}\'')
            print(f'FPS: {self.fps}')

    def convert(self):  # function which is called to populate the list of converted frames (self.frames)
        """Read every frame, resize it, and store its ASCII rendering.

        Returns self so calls can be chained (``Video(f).convert().view()``).
        """
        if self.verbose: print('Converting...')

        while True:
            succ, img = self.video.read()  # read frame from video
            if not succ: break  # if failed when reading

            # resize image to the scale specified in __init__
            img = cv2.resize(img, (self.scaled_width, self.scaled_height,))
            self.frames.append(self.asciify_img(img))  # add the asciified image to the list of converted frames

        self.end_frame = len(self.frames)

        if self.verbose: print('Done.')

        return self  # returns self for fluent chaining

    def view(self, *, fps: float=None):  # function to view all the frames in the console like a video
        """Play the converted frames in the console.

        :param fps: playback rate; defaults to the source video's FPS.
        """
        spf = 1 / (self.fps if fps is None else fps)  # seconds per frame

        try:
            for frame in self.frames:
                start = time.perf_counter()
                print(frame)
                # BUGFIX: the original computed `start - time.perf_counter()`
                # (a negated elapsed time), which made every frame sleep for
                # longer than one frame period.  Sleep only for the remainder.
                elapsed = time.perf_counter() - start
                time.sleep(max(spf - elapsed, 0))
                os.system(self.clear_cmd)
        except KeyboardInterrupt:
            pass  # allow the user to stop playback cleanly with Ctrl-C

    def __iter__(self):  # allow iteration over the frames (like in a for loop)
        return self

    def __next__(self):  # allow iteration over the frames (like in a for loop)
        # BUGFIX: the original used `>`, which advanced one step past the end
        # and raised IndexError instead of StopIteration on exhaustion.
        if self.current_frame >= self.end_frame:
            raise StopIteration
        self.current_frame += 1
        return self.frames[self.current_frame - 1]
|
flexible
|
{
"blob_id": "24368b6c607c0524f8b52b279a6dce0fde72294b",
"index": 8936,
"step-1": "<mask token>\n\n\nclass Video(ABC):\n\n def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,\n gradient: typing.Union[int, str]=0, verbose: int=False):\n if not os.path.isfile(filename):\n raise FileNotFound(filename)\n self.filename = filename\n self.video = cv2.VideoCapture(filename)\n self.frames = []\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.width = self.video.get(3)\n self.height = self.video.get(4)\n if scale > 1:\n scale /= 100\n self.scale = scale\n self.w_stretch = w_stretch\n self.scaled_width = int(self.width * self.scale * self.w_stretch)\n self.scaled_height = int(self.height * self.scale)\n if type(gradient) == int:\n if 0 > gradient > len(gradients) - 1:\n raise IndexError(\n f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'\n )\n else:\n self.gradient = gradients[gradient]\n else:\n self.gradient = gradient\n self.gradient = tuple([c for c in self.gradient])\n self.gradient_len = len(self.gradient)\n self.verbose = verbose\n self.current_frame = 0\n self.end_frame = None\n if os.name == 'nt':\n self.clear_cmd = 'cls'\n else:\n self.clear_cmd = 'clear'\n if self.verbose:\n print(f'Dimensions: {self.width}x{self.height}')\n print(f'Scale Factor: {self.scale}')\n print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')\n print(f\"Gradient: '{''.join(self.gradient)}'\")\n print(f'FPS: {self.fps}')\n <mask token>\n\n def view(self, *, fps: float=None):\n if fps is None:\n spf = 1 / self.fps\n else:\n spf = 1 / fps\n try:\n for frame in self.frames:\n start = time.perf_counter()\n print(frame)\n diff = start - time.perf_counter()\n time.sleep((spf - diff + abs(spf - diff)) / 2)\n os.system(self.clear_cmd)\n except KeyboardInterrupt:\n pass\n\n def __iter__(self):\n return self\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Video(ABC):\n\n def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,\n gradient: typing.Union[int, str]=0, verbose: int=False):\n if not os.path.isfile(filename):\n raise FileNotFound(filename)\n self.filename = filename\n self.video = cv2.VideoCapture(filename)\n self.frames = []\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.width = self.video.get(3)\n self.height = self.video.get(4)\n if scale > 1:\n scale /= 100\n self.scale = scale\n self.w_stretch = w_stretch\n self.scaled_width = int(self.width * self.scale * self.w_stretch)\n self.scaled_height = int(self.height * self.scale)\n if type(gradient) == int:\n if 0 > gradient > len(gradients) - 1:\n raise IndexError(\n f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'\n )\n else:\n self.gradient = gradients[gradient]\n else:\n self.gradient = gradient\n self.gradient = tuple([c for c in self.gradient])\n self.gradient_len = len(self.gradient)\n self.verbose = verbose\n self.current_frame = 0\n self.end_frame = None\n if os.name == 'nt':\n self.clear_cmd = 'cls'\n else:\n self.clear_cmd = 'clear'\n if self.verbose:\n print(f'Dimensions: {self.width}x{self.height}')\n print(f'Scale Factor: {self.scale}')\n print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')\n print(f\"Gradient: '{''.join(self.gradient)}'\")\n print(f'FPS: {self.fps}')\n <mask token>\n\n def view(self, *, fps: float=None):\n if fps is None:\n spf = 1 / self.fps\n else:\n spf = 1 / fps\n try:\n for frame in self.frames:\n start = time.perf_counter()\n print(frame)\n diff = start - time.perf_counter()\n time.sleep((spf - diff + abs(spf - diff)) / 2)\n os.system(self.clear_cmd)\n except KeyboardInterrupt:\n pass\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.current_frame > self.end_frame:\n raise StopIteration\n self.current_frame += 1\n return self.frames[self.current_frame - 1]\n",
"step-3": "<mask token>\n\n\nclass Video(ABC):\n\n def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,\n gradient: typing.Union[int, str]=0, verbose: int=False):\n if not os.path.isfile(filename):\n raise FileNotFound(filename)\n self.filename = filename\n self.video = cv2.VideoCapture(filename)\n self.frames = []\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.width = self.video.get(3)\n self.height = self.video.get(4)\n if scale > 1:\n scale /= 100\n self.scale = scale\n self.w_stretch = w_stretch\n self.scaled_width = int(self.width * self.scale * self.w_stretch)\n self.scaled_height = int(self.height * self.scale)\n if type(gradient) == int:\n if 0 > gradient > len(gradients) - 1:\n raise IndexError(\n f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'\n )\n else:\n self.gradient = gradients[gradient]\n else:\n self.gradient = gradient\n self.gradient = tuple([c for c in self.gradient])\n self.gradient_len = len(self.gradient)\n self.verbose = verbose\n self.current_frame = 0\n self.end_frame = None\n if os.name == 'nt':\n self.clear_cmd = 'cls'\n else:\n self.clear_cmd = 'clear'\n if self.verbose:\n print(f'Dimensions: {self.width}x{self.height}')\n print(f'Scale Factor: {self.scale}')\n print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')\n print(f\"Gradient: '{''.join(self.gradient)}'\")\n print(f'FPS: {self.fps}')\n\n def convert(self):\n if self.verbose:\n print('Converting...')\n while True:\n succ, img = self.video.read()\n if not succ:\n break\n img = cv2.resize(img, (self.scaled_width, self.scaled_height))\n self.frames.append(self.asciify_img(img))\n self.end_frame = len(self.frames)\n if self.verbose:\n print('Done.')\n return self\n\n def view(self, *, fps: float=None):\n if fps is None:\n spf = 1 / self.fps\n else:\n spf = 1 / fps\n try:\n for frame in self.frames:\n start = time.perf_counter()\n print(frame)\n diff = start - time.perf_counter()\n time.sleep((spf - 
diff + abs(spf - diff)) / 2)\n os.system(self.clear_cmd)\n except KeyboardInterrupt:\n pass\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.current_frame > self.end_frame:\n raise StopIteration\n self.current_frame += 1\n return self.frames[self.current_frame - 1]\n",
"step-4": "import typing\nimport time\nimport cv2\nimport os\nfrom .ABC import ABC\nfrom .Exceptions import *\nfrom .Constants import *\n\n\nclass Video(ABC):\n\n def __init__(self, filename: str, *, scale: float=1, w_stretch: float=2,\n gradient: typing.Union[int, str]=0, verbose: int=False):\n if not os.path.isfile(filename):\n raise FileNotFound(filename)\n self.filename = filename\n self.video = cv2.VideoCapture(filename)\n self.frames = []\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.width = self.video.get(3)\n self.height = self.video.get(4)\n if scale > 1:\n scale /= 100\n self.scale = scale\n self.w_stretch = w_stretch\n self.scaled_width = int(self.width * self.scale * self.w_stretch)\n self.scaled_height = int(self.height * self.scale)\n if type(gradient) == int:\n if 0 > gradient > len(gradients) - 1:\n raise IndexError(\n f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.'\n )\n else:\n self.gradient = gradients[gradient]\n else:\n self.gradient = gradient\n self.gradient = tuple([c for c in self.gradient])\n self.gradient_len = len(self.gradient)\n self.verbose = verbose\n self.current_frame = 0\n self.end_frame = None\n if os.name == 'nt':\n self.clear_cmd = 'cls'\n else:\n self.clear_cmd = 'clear'\n if self.verbose:\n print(f'Dimensions: {self.width}x{self.height}')\n print(f'Scale Factor: {self.scale}')\n print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')\n print(f\"Gradient: '{''.join(self.gradient)}'\")\n print(f'FPS: {self.fps}')\n\n def convert(self):\n if self.verbose:\n print('Converting...')\n while True:\n succ, img = self.video.read()\n if not succ:\n break\n img = cv2.resize(img, (self.scaled_width, self.scaled_height))\n self.frames.append(self.asciify_img(img))\n self.end_frame = len(self.frames)\n if self.verbose:\n print('Done.')\n return self\n\n def view(self, *, fps: float=None):\n if fps is None:\n spf = 1 / self.fps\n else:\n spf = 1 / fps\n try:\n for frame in 
self.frames:\n start = time.perf_counter()\n print(frame)\n diff = start - time.perf_counter()\n time.sleep((spf - diff + abs(spf - diff)) / 2)\n os.system(self.clear_cmd)\n except KeyboardInterrupt:\n pass\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.current_frame > self.end_frame:\n raise StopIteration\n self.current_frame += 1\n return self.frames[self.current_frame - 1]\n",
"step-5": "import typing\nimport time\nimport cv2\nimport os\n\nfrom .ABC import ABC\nfrom .Exceptions import *\nfrom .Constants import *\n\n\nclass Video(ABC):\n def __init__(self, filename: str, *, scale: float = 1, w_stretch: float = 2, gradient: typing.Union[int, str] = 0, verbose: int = False):\n if not os.path.isfile(filename): # check to make sure file actually exists\n raise FileNotFound(filename) # FileNotFound is from .Exceptions\n\n self.filename = filename\n self.video = cv2.VideoCapture(filename)\n\n # self.frames is a frames[frame[row[char, char,..], row[],..], frame[],..]\n self.frames = [] # converted frames (will be populated when convert() is called)\n\n self.fps = self.video.get(cv2.CAP_PROP_FPS) # fps of the origin video\n\n self.width = self.video.get(3) # float, width of the video\n self.height = self.video.get(4) # float, height of the video\n\n # if scale was given as a percentage (out of 100 rather than out of 1)\n if scale > 1:\n scale /= 100\n\n self.scale = scale # scale which both dimensions are multiplied by\n self.w_stretch = w_stretch # scale which the width dimension is multiplied by (to account for text which is taller than it is wide)\n\n # scaled dimensions\n self.scaled_width = int(self.width*self.scale*self.w_stretch)\n self.scaled_height = int(self.height*self.scale)\n\n # determine what the gradient / brightness to character mapping will be\n if type(gradient) == int:\n if 0 > gradient > (len(gradients) - 1):\n raise IndexError(f'The gradient must either be a string or an integer between the value of 0 and {len(gradients)}.')\n else:\n self.gradient = gradients[gradient]\n else:\n self.gradient = gradient\n\n self.gradient = tuple([c for c in self.gradient]) # turn self.gradient into a tuple\n self.gradient_len = len(self.gradient)\n\n self.verbose = verbose # whether or not to do extra logging of information\n\n # for __iter__ to allow this to be used in a for loop to iterate through the frames\n self.current_frame = 0\n 
self.end_frame = None\n\n # determine what the clear command will be when viewing the final asciified frames\n if os.name == 'nt':\n self.clear_cmd = 'cls'\n else:\n self.clear_cmd = 'clear'\n\n if self.verbose:\n print(f'Dimensions: {self.width}x{self.height}')\n print(f'Scale Factor: {self.scale}')\n print(f'Scaled Dims: {self.scaled_width}x{self.scaled_height}')\n print(f'Gradient: \\'{\"\".join(self.gradient)}\\'')\n print(f'FPS: {self.fps}')\n\n def convert(self): # function which is called to populate the list of converted frames (self.frames)\n if self.verbose: print('Converting...')\n\n while True:\n succ, img = self.video.read() # read frame from video\n\n if not succ: break # if failed when reading\n\n # resize image to the scale specified in __init__\n img = cv2.resize(img, (self.scaled_width, self.scaled_height,))\n\n self.frames.append(self.asciify_img(img)) # add the asciified image to the list of converted frames\n\n self.end_frame = len(self.frames)\n\n if self.verbose: print('Done.')\n\n return self # returns self for fluent chaining\n\n def view(self, *, fps: float=None): # function to view all the frames in the console like a video\n if fps is None:\n spf = 1/self.fps\n else:\n spf = 1/fps\n\n try:\n for frame in self.frames:\n start = time.perf_counter()\n print(frame)\n diff = start - time.perf_counter()\n time.sleep((spf - diff + abs(spf - diff)) / 2)\n os.system(self.clear_cmd)\n except KeyboardInterrupt:\n pass\n\n def __iter__(self): # allow iteration over the frames (like in a for loop)\n return self\n\n def __next__(self): # allow iteration over the frames (like in a for loop)\n if self.current_frame > self.end_frame:\n raise StopIteration\n\n self.current_frame += 1\n return self.frames[self.current_frame - 1]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""
Problem Link: https://practice.geeksforgeeks.org/problems/palindrome/0
Given an integer, check whether it is a palindrome or not.
Input:
The first line of input contains an integer T denoting the number of test cases.
For each test case there will be single line containing single integer N.
Output:
Print "Yes" or "No" (without quotes) depending on whether the number is palindrome or not.
Constraints:
1 <= T <= 1000
1 <= N <= 10000
Example:
Input:
3
6
167
55555
Output:
Yes
No
Yes
"""
for _ in range(int(input())):
n = int(input())
temp = n
rev = 0
while temp:
rev = (rev*10)+(temp%10)
temp //= 10
print("Yes" if rev == n else "No")
|
normal
|
{
"blob_id": "ea12ede51881f6e826a044df5d7aba457c434658",
"index": 6050,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(int(input())):\n n = int(input())\n temp = n\n rev = 0\n while temp:\n rev = rev * 10 + temp % 10\n temp //= 10\n print('Yes' if rev == n else 'No')\n",
"step-3": "\"\"\"\nProblem Link: https://practice.geeksforgeeks.org/problems/palindrome/0\n\nGiven an integer, check whether it is a palindrome or not.\n\nInput:\nThe first line of input contains an integer T denoting the number of test cases. \nFor each test case there will be single line containing single integer N.\n\nOutput:\nPrint \"Yes\" or \"No\" (without quotes) depending on whether the number is palindrome or not.\n\nConstraints:\n1 <= T <= 1000\n1 <= N <= 10000\n\nExample:\nInput:\n3\n6\n167\n55555\n\nOutput:\nYes\nNo\nYes\n\"\"\"\nfor _ in range(int(input())):\n n = int(input())\n temp = n\n rev = 0\n while temp:\n rev = (rev*10)+(temp%10)\n temp //= 10\n print(\"Yes\" if rev == n else \"No\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
This handy script will download all wallpapears from simpledesktops.com
Requirements
============
BeautifulSoup - http://www.crummy.com/software/BeautifulSoup/
Python-Requests - http://docs.python-requests.org/en/latest/index.html
Usage
=====
cd /path/to/the/script/
python simpledesktops.py
"""
from StringIO import StringIO
from bs4 import BeautifulSoup
import requests
import os
try:
os.mkdir('walls')
except OSError:
pass
page = 1
while True:
page_request = requests.get('http://simpledesktops.com/browse/%s/' % page)
if page_request.status_code != 200:
print 'page %s does not exist' % page
break
html = BeautifulSoup(page_request.text)
images = html.findAll('img')
for image in images:
img_src = image['src']
if 'static.simpledesktops.com/desktops/' in img_src:
full_size_img = img_src.replace('.295x184_q100.png', '')
img_name = full_size_img.split('/')[-1]
img_request = requests.get(full_size_img)
img_buffer = StringIO(img_request.content)
img_file = open('walls/%s' % img_name, 'wb')
img_file.write(img_buffer.getvalue())
img_file.close()
print '%s downloaded' % img_name
print '\n================'
print 'page %s finished' % page
print '================\n'
page += 1
|
normal
|
{
"blob_id": "452d5d98b6c0b82a1f4ec18f29d9710a8c0f4dc9",
"index": 7371,
"step-1": "\"\"\"\nThis handy script will download all wallpapears from simpledesktops.com\n\nRequirements\n============\nBeautifulSoup - http://www.crummy.com/software/BeautifulSoup/\nPython-Requests - http://docs.python-requests.org/en/latest/index.html\n\nUsage\n=====\ncd /path/to/the/script/\npython simpledesktops.py\n\"\"\"\n\nfrom StringIO import StringIO\nfrom bs4 import BeautifulSoup\nimport requests\nimport os\n\ntry:\n\tos.mkdir('walls')\nexcept OSError:\n\tpass\n\npage = 1\nwhile True:\n\tpage_request = requests.get('http://simpledesktops.com/browse/%s/' % page)\n\tif page_request.status_code != 200:\n\t\tprint 'page %s does not exist' % page\n\t\tbreak\n\thtml = BeautifulSoup(page_request.text)\n\timages = html.findAll('img')\n\tfor image in images:\n\t\timg_src = image['src']\n\t\tif 'static.simpledesktops.com/desktops/' in img_src:\n\t\t\tfull_size_img = img_src.replace('.295x184_q100.png', '')\n\t\t\timg_name = full_size_img.split('/')[-1]\n\t\t\timg_request = requests.get(full_size_img)\n\t\t\timg_buffer = StringIO(img_request.content)\n\t\t\t\n\t\t\timg_file = open('walls/%s' % img_name, 'wb')\n\t\t\timg_file.write(img_buffer.getvalue())\n\t\t\timg_file.close()\n\t\t\tprint '%s downloaded' % img_name\n\tprint '\\n================'\n\tprint 'page %s finished' % page\n\tprint '================\\n'\n\tpage += 1",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in range(N):
n = int(input())
arr = collections.defaultdict(list)
parent = [i for i in range(n + 1)]
for i in range(n - 1):
a, b = map(int, input().split())
arr[a].append(b)
parent[b] = a
node_1, node_2 = map(int, input().split())
p = [i for i, e in enumerate(parent) if i > 0 and i == e]
def bfs(p, goal):
queue = collections.deque()
queue.append([p, [p]])
discoverd = [False] * (n + 1)
while queue:
m, r = queue.popleft()
if not discoverd[m]:
discoverd[m] = True
if m == goal:
return r
for i in arr[m]:
queue.append([i, r + [i]])
for i in p:
a = bfs(i, node_1)
b = bfs(i, node_2)
result = 0
for aa, bb in zip(a, b):
if aa == bb:
result = aa
print(result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input = sys.stdin.readline
N = int(input())
for _ in range(N):
n = int(input())
arr = collections.defaultdict(list)
parent = [i for i in range(n + 1)]
for i in range(n - 1):
a, b = map(int, input().split())
arr[a].append(b)
parent[b] = a
node_1, node_2 = map(int, input().split())
p = [i for i, e in enumerate(parent) if i > 0 and i == e]
def bfs(p, goal):
queue = collections.deque()
queue.append([p, [p]])
discoverd = [False] * (n + 1)
while queue:
m, r = queue.popleft()
if not discoverd[m]:
discoverd[m] = True
if m == goal:
return r
for i in arr[m]:
queue.append([i, r + [i]])
for i in p:
a = bfs(i, node_1)
b = bfs(i, node_2)
result = 0
for aa, bb in zip(a, b):
if aa == bb:
result = aa
print(result)
<|reserved_special_token_1|>
import sys, collections
input = sys.stdin.readline
N = int(input())
for _ in range(N):
n = int(input())
arr = collections.defaultdict(list)
parent = [i for i in range(n + 1)]
for i in range(n - 1):
a, b = map(int, input().split())
arr[a].append(b)
parent[b] = a
node_1, node_2 = map(int, input().split())
p = [i for i, e in enumerate(parent) if i > 0 and i == e]
def bfs(p, goal):
queue = collections.deque()
queue.append([p, [p]])
discoverd = [False] * (n + 1)
while queue:
m, r = queue.popleft()
if not discoverd[m]:
discoverd[m] = True
if m == goal:
return r
for i in arr[m]:
queue.append([i, r + [i]])
for i in p:
a = bfs(i, node_1)
b = bfs(i, node_2)
result = 0
for aa, bb in zip(a, b):
if aa == bb:
result = aa
print(result)
<|reserved_special_token_1|>
# https://www.acmicpc.net/problem/3584
import sys, collections
input = sys.stdin.readline
N = int(input())
for _ in range(N):
n = int(input())
arr = collections.defaultdict(list)
parent = [i for i in range(n + 1)]
for i in range(n - 1):
a, b = map(int, input().split())
arr[a].append(b)
parent[b] = a
node_1, node_2 = map(int, input().split())
p = [i for i, e in enumerate(parent) if i > 0 and i == e]
def bfs(p, goal):
queue = collections.deque()
queue.append([p, [p]])
discoverd = [False] * (n + 1)
while queue:
m, r = queue.popleft()
if not discoverd[m]:
discoverd[m] = True
if m == goal:
return r
for i in arr[m]:
queue.append([i, r + [i]])
for i in p:
a = bfs(i, node_1)
b = bfs(i, node_2)
result = 0
for aa, bb in zip(a,b):
if aa==bb:
result = aa
print(result)
|
flexible
|
{
"blob_id": "d60a2d4c819f701e8e439b8839415aa2838df185",
"index": 6415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-4": "import sys, collections\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-5": "# https://www.acmicpc.net/problem/3584\nimport sys, collections\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a,b):\n if aa==bb:\n result = aa\n print(result)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Logins(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
<|reserved_special_token_1|>
from django.db import models
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class Logins(models.Model):
created = models.DateTimeField(auto_now_add=True)
login_addr = models.GenericIPAddressField()
hostname = models.CharField(max_length=200)
|
flexible
|
{
"blob_id": "9a55ccf758b4b2cc440153ab3b1f97823863a848",
"index": 165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Logins(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-4": "from django.db import models\n\n\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Logins(models.Model):\n created = models.DateTimeField(auto_now_add=True)\n login_addr = models.GenericIPAddressField()\n hostname = models.CharField(max_length=200)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#! /usr/bin/env python
import sys
import socket
def handle_connection(sock):
do_close = False
while 1:
try:
data = sock.recv(4096)
if not data: # closed! stop monitoring this socket.
do_close = True
break
print 'data:', (data,)
sock.sendall(data)
if '.\r\n' in data:
sock.close()
do_close = True # stop monitoring this socket.
break
except socket.error:
print 'no data waiting...'
break
return do_close
if __name__ == '__main__':
interface, port = sys.argv[1:3]
port = int(port)
print 'binding', interface, port
sock = socket.socket()
sock.bind( (interface, port) )
sock.listen(5)
sock.setblocking(0)
connections = []
while 1:
# loop, doing two things:
# first, get a new connection
# second, process (receive/send) data for each existing connection
# first, do we have a new connection waiting?
try:
print 'testing for new connection'
(client_sock, client_address) = sock.accept()
# if this succeeds, we got a new connection... no new connection
# raises a 'socket.error'
print 'got connection', client_address
client_sock.setblocking(0)
connections.append((client_sock, client_address))
except socket.error: # no new connection! do nothing.
pass
# now, process data for existing connections.
open_connections = []
for (client_sock, client_address) in connections:
print 'processing data for', client_address
do_close = handle_connection(client_sock)
if not do_close:
open_connections.append((client_sock, client_address))
connections = open_connections
|
normal
|
{
"blob_id": "fde4c10e2ed0ed38d683a220e2985c3f3f336601",
"index": 7258,
"step-1": "#! /usr/bin/env python\nimport sys\nimport socket\n\ndef handle_connection(sock):\n do_close = False\n \n while 1:\n try:\n data = sock.recv(4096)\n if not data: # closed! stop monitoring this socket.\n do_close = True\n break\n\n print 'data:', (data,)\n\n sock.sendall(data)\n\n if '.\\r\\n' in data:\n sock.close()\n do_close = True # stop monitoring this socket.\n break\n except socket.error:\n print 'no data waiting...'\n break\n\n return do_close\n\nif __name__ == '__main__':\n interface, port = sys.argv[1:3]\n port = int(port)\n\n print 'binding', interface, port\n sock = socket.socket()\n sock.bind( (interface, port) )\n sock.listen(5)\n\n sock.setblocking(0)\n\n connections = []\n while 1:\n\n # loop, doing two things:\n # first, get a new connection\n # second, process (receive/send) data for each existing connection\n\n # first, do we have a new connection waiting?\n try:\n print 'testing for new connection'\n (client_sock, client_address) = sock.accept()\n\n # if this succeeds, we got a new connection... no new connection\n # raises a 'socket.error'\n print 'got connection', client_address\n client_sock.setblocking(0)\n connections.append((client_sock, client_address))\n except socket.error: # no new connection! do nothing.\n pass\n\n # now, process data for existing connections.\n open_connections = []\n for (client_sock, client_address) in connections:\n print 'processing data for', client_address\n do_close = handle_connection(client_sock)\n\n if not do_close:\n open_connections.append((client_sock, client_address))\n\n connections = open_connections\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class BaseSaver:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AttachmentsSaver(BaseSaver):
    """Document saver context handling attachments.

    Attachment additions/deletions are staged during the context and
    executed in wrapup(), since CouchDB attachments can only be modified
    after the document itself has been stored.
    """

    def prepare(self):
        "Set up the staging areas for attachment operations."
        self._delete_attachments = set()
        self._add_attachments = []

    def wrapup(self):
        """Delete any specified attachments.
        Store the input files as attachments.
        Must be done after the document has been saved.
        """
        for filename in self._delete_attachments:
            rev = flask.g.db.delete_attachment(self.doc, filename)
            self.doc['_rev'] = rev
        for attachment in self._add_attachments:
            # NOTE(review): assumes put_attachment refreshes doc['_rev']
            # in place (couchdb2 behaviour) — confirm with the db library.
            flask.g.db.put_attachment(self.doc, attachment['content'],
                filename=attachment['filename'], content_type=attachment[
                'mimetype'])

    def add_attachment(self, filename, content, mimetype):
        "Stage an attachment for addition when the document is saved."
        self._add_attachments.append({'filename': filename, 'content':
            content, 'mimetype': mimetype})

    def delete_attachment(self, filename):
        "Stage an attachment for deletion when the document is saved."
        self._delete_attachments.add(filename)

    def modify_log_entry(self, entry):
        """Modify the log entry to add info about attachment changes.

        Renamed from 'modify_log_items': BaseSaver.add_log invokes the
        hook as 'modify_log_entry', so the old name was never called.
        """
        if self._delete_attachments:
            # Sorted list, not a set: sets are not JSON-serializable,
            # and the entry is stored in the database by add_log.
            entry['attachments_deleted'] = sorted(self._delete_attachments)
        if self._add_attachments:
            for att in self._add_attachments:
                # Replace the (possibly large) content by its size.
                att['size'] = len(att.pop('content'))
            entry['attachments_added'] = self._add_attachments

    # Backwards-compatible alias for the previous hook name.
    modify_log_items = modify_log_entry
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseSaver:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def __enter__(self):
        "Enter the saver context; returns the saver itself for 'with ... as' use."
        return self
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def __setitem__(self, key, value):
        "Set an item in the document being edited: 'saver[key] = value'."
        self.doc[key] = value
    def initialize(self):
        """Initialize the new document.

        Hook for subclasses to set initial values; no-op by default.
        """
        pass
<|reserved_special_token_0|>
    def finish(self):
        """Final changes and checks on the document before storing it.

        Hook for subclasses; no-op by default.
        """
        pass
    def wrapup(self):
        """Wrap up the save operation by performing actions that
        must be done after the document has been stored.

        Hook for subclasses; no-op by default (see AttachmentsSaver,
        which uses it to store/delete attachments).
        """
        pass
    def add_log(self):
        """Add a log entry recording the difference between the current and
        the original document, hiding values of specified keys.

        The entry's 'diff' field (see the diff method) may contain:
        'added': dictionary of items added in the current document.
        'updated': dictionary of items updated; new and original values.
        'removed': dictionary of items removed; original values.
        """
        # Reset the key-path stack used by diff for the
        # EXCLUDE_PATHS / HIDDEN_VALUE_PATHS membership checks.
        self.stack = []
        diff = self.diff(self.original, self.doc)
        entry = {'_id': utils.get_iuid(), 'doctype': constants.DOCTYPE_LOG,
            'docid': self.doc['_id'], 'diff': diff, 'timestamp': utils.
            get_time()}
        # Allow subclasses to add information (e.g. attachment changes).
        self.modify_log_entry(entry)
        # Record who made the change, if a user is logged in.
        if hasattr(flask.g, 'current_user') and flask.g.current_user:
            entry['username'] = flask.g.current_user['username']
        else:
            entry['username'] = None
        # Within a request: record the client address and user agent.
        # Outside a request (e.g. a CLI script): record the script name.
        if flask.has_request_context():
            entry['remote_addr'] = str(flask.request.remote_addr)
            entry['user_agent'] = str(flask.request.user_agent)
        else:
            entry['remote_addr'] = None
            entry['user_agent'] = os.path.basename(sys.argv[0])
        flask.g.db.put(entry)
def diff(self, old, new):
"""Find the differences between the old and the new documents.
Uses a fairly simple algorithm which is OK for shallow hierarchies.
"""
added = {}
removed = {}
updated = {}
new_keys = set(new.keys())
old_keys = set(old.keys())
for key in new_keys.difference(old_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
if self.stack in self.HIDDEN_VALUE_PATHS:
added[key] = '<hidden>'
else:
added[key] = new[key]
self.stack.pop()
for key in old_keys.difference(new_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
if self.stack in self.HIDDEN_VALUE_PATHS:
removed[key] = '<hidden>'
else:
removed[key] = old[key]
self.stack.pop()
for key in new_keys.intersection(old_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
new_value = new[key]
old_value = old[key]
if isinstance(new_value, dict) and isinstance(old_value, dict):
changes = self.diff(old_value, new_value)
if changes:
if self.stack in self.HIDDEN_VALUE_PATHS:
updated[key] = '<hidden>'
else:
updated[key] = changes
elif new_value != old_value:
if self.stack in self.HIDDEN_VALUE_PATHS:
updated[key] = dict(new_value='<hidden>', old_value
='<hidden>')
else:
updated[key] = dict(new_value=new_value, old_value=
old_value)
self.stack.pop()
result = {}
if added:
result['added'] = added
if removed:
result['removed'] = removed
if updated:
result['updated'] = updated
return result
<|reserved_special_token_0|>
class AttachmentsSaver(BaseSaver):
"""Document saver context handling attachments."""
def prepare(self):
self._delete_attachments = set()
self._add_attachments = []
def wrapup(self):
"""Delete any specified attachments.
Store the input files as attachments.
Must be done after document is saved.
"""
for filename in self._delete_attachments:
rev = flask.g.db.delete_attachment(self.doc, filename)
self.doc['_rev'] = rev
for attachment in self._add_attachments:
flask.g.db.put_attachment(self.doc, attachment['content'],
filename=attachment['filename'], content_type=attachment[
'mimetype'])
def add_attachment(self, filename, content, mimetype):
self._add_attachments.append({'filename': filename, 'content':
content, 'mimetype': mimetype})
def delete_attachment(self, filename):
self._delete_attachments.add(filename)
def modify_log_items(self, entry):
"""Modify the log entry to add info about attachment changes."""
if self._delete_attachments:
entry['attachments_deleted'] = self._delete_attachments
if self._add_attachments:
for att in self._add_attachments:
att['size'] = len(att.pop('content'))
entry['attachments_added'] = self._add_attachments
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseSaver:
<|reserved_special_token_0|>
DOCTYPE = None
EXCLUDE_PATHS = [['_id'], ['_rev'], ['doctype'], ['modified']]
HIDDEN_VALUE_PATHS = []
def __init__(self, doc=None):
if doc is None:
self.original = {}
self.doc = {'_id': utils.get_iuid(), 'created': utils.get_time()}
self.initialize()
else:
self.original = copy.deepcopy(doc)
self.doc = doc
self.prepare()
def __enter__(self):
return self
def __exit__(self, etyp, einst, etb):
if etyp is not None:
return False
self.finish()
self.doc['doctype'] = self.DOCTYPE
self.doc['modified'] = utils.get_time()
flask.g.db.put(self.doc)
self.add_log()
def __getitem__(self, key):
return self.doc[key]
def __setitem__(self, key, value):
self.doc[key] = value
def initialize(self):
"""Initialize the new document."""
pass
def prepare(self):
"""Preparations before making any changes."""
pass
def finish(self):
"""Final changes and checks on the document before storing it."""
pass
def wrapup(self):
"""Wrap up the save operation by performing actions that
must be done after the document has been stored.
"""
pass
def add_log(self):
"""Add a log entry recording the the difference betweens the current and
the original document, hiding values of specified keys.
'added': list of keys for items added in the current.
'updated': dictionary of items updated; original values.
'removed': dictionary of items removed; original values.
"""
self.stack = []
diff = self.diff(self.original, self.doc)
entry = {'_id': utils.get_iuid(), 'doctype': constants.DOCTYPE_LOG,
'docid': self.doc['_id'], 'diff': diff, 'timestamp': utils.
get_time()}
self.modify_log_entry(entry)
if hasattr(flask.g, 'current_user') and flask.g.current_user:
entry['username'] = flask.g.current_user['username']
else:
entry['username'] = None
if flask.has_request_context():
entry['remote_addr'] = str(flask.request.remote_addr)
entry['user_agent'] = str(flask.request.user_agent)
else:
entry['remote_addr'] = None
entry['user_agent'] = os.path.basename(sys.argv[0])
flask.g.db.put(entry)
def diff(self, old, new):
"""Find the differences between the old and the new documents.
Uses a fairly simple algorithm which is OK for shallow hierarchies.
"""
added = {}
removed = {}
updated = {}
new_keys = set(new.keys())
old_keys = set(old.keys())
for key in new_keys.difference(old_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
if self.stack in self.HIDDEN_VALUE_PATHS:
added[key] = '<hidden>'
else:
added[key] = new[key]
self.stack.pop()
for key in old_keys.difference(new_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
if self.stack in self.HIDDEN_VALUE_PATHS:
removed[key] = '<hidden>'
else:
removed[key] = old[key]
self.stack.pop()
for key in new_keys.intersection(old_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
new_value = new[key]
old_value = old[key]
if isinstance(new_value, dict) and isinstance(old_value, dict):
changes = self.diff(old_value, new_value)
if changes:
if self.stack in self.HIDDEN_VALUE_PATHS:
updated[key] = '<hidden>'
else:
updated[key] = changes
elif new_value != old_value:
if self.stack in self.HIDDEN_VALUE_PATHS:
updated[key] = dict(new_value='<hidden>', old_value
='<hidden>')
else:
updated[key] = dict(new_value=new_value, old_value=
old_value)
self.stack.pop()
result = {}
if added:
result['added'] = added
if removed:
result['removed'] = removed
if updated:
result['updated'] = updated
return result
def modify_log_entry(self, entry):
"""Modify the log entry, if required."""
pass
class AttachmentsSaver(BaseSaver):
"""Document saver context handling attachments."""
def prepare(self):
self._delete_attachments = set()
self._add_attachments = []
def wrapup(self):
"""Delete any specified attachments.
Store the input files as attachments.
Must be done after document is saved.
"""
for filename in self._delete_attachments:
rev = flask.g.db.delete_attachment(self.doc, filename)
self.doc['_rev'] = rev
for attachment in self._add_attachments:
flask.g.db.put_attachment(self.doc, attachment['content'],
filename=attachment['filename'], content_type=attachment[
'mimetype'])
def add_attachment(self, filename, content, mimetype):
self._add_attachments.append({'filename': filename, 'content':
content, 'mimetype': mimetype})
def delete_attachment(self, filename):
self._delete_attachments.add(filename)
def modify_log_items(self, entry):
"""Modify the log entry to add info about attachment changes."""
if self._delete_attachments:
entry['attachments_deleted'] = self._delete_attachments
if self._add_attachments:
for att in self._add_attachments:
att['size'] = len(att.pop('content'))
entry['attachments_added'] = self._add_attachments
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseSaver:
"""Base document saver context."""
DOCTYPE = None
EXCLUDE_PATHS = [['_id'], ['_rev'], ['doctype'], ['modified']]
HIDDEN_VALUE_PATHS = []
def __init__(self, doc=None):
if doc is None:
self.original = {}
self.doc = {'_id': utils.get_iuid(), 'created': utils.get_time()}
self.initialize()
else:
self.original = copy.deepcopy(doc)
self.doc = doc
self.prepare()
def __enter__(self):
return self
def __exit__(self, etyp, einst, etb):
if etyp is not None:
return False
self.finish()
self.doc['doctype'] = self.DOCTYPE
self.doc['modified'] = utils.get_time()
flask.g.db.put(self.doc)
self.add_log()
def __getitem__(self, key):
return self.doc[key]
def __setitem__(self, key, value):
self.doc[key] = value
def initialize(self):
"""Initialize the new document."""
pass
def prepare(self):
"""Preparations before making any changes."""
pass
def finish(self):
"""Final changes and checks on the document before storing it."""
pass
def wrapup(self):
"""Wrap up the save operation by performing actions that
must be done after the document has been stored.
"""
pass
def add_log(self):
"""Add a log entry recording the the difference betweens the current and
the original document, hiding values of specified keys.
'added': list of keys for items added in the current.
'updated': dictionary of items updated; original values.
'removed': dictionary of items removed; original values.
"""
self.stack = []
diff = self.diff(self.original, self.doc)
entry = {'_id': utils.get_iuid(), 'doctype': constants.DOCTYPE_LOG,
'docid': self.doc['_id'], 'diff': diff, 'timestamp': utils.
get_time()}
self.modify_log_entry(entry)
if hasattr(flask.g, 'current_user') and flask.g.current_user:
entry['username'] = flask.g.current_user['username']
else:
entry['username'] = None
if flask.has_request_context():
entry['remote_addr'] = str(flask.request.remote_addr)
entry['user_agent'] = str(flask.request.user_agent)
else:
entry['remote_addr'] = None
entry['user_agent'] = os.path.basename(sys.argv[0])
flask.g.db.put(entry)
def diff(self, old, new):
"""Find the differences between the old and the new documents.
Uses a fairly simple algorithm which is OK for shallow hierarchies.
"""
added = {}
removed = {}
updated = {}
new_keys = set(new.keys())
old_keys = set(old.keys())
for key in new_keys.difference(old_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
if self.stack in self.HIDDEN_VALUE_PATHS:
added[key] = '<hidden>'
else:
added[key] = new[key]
self.stack.pop()
for key in old_keys.difference(new_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
if self.stack in self.HIDDEN_VALUE_PATHS:
removed[key] = '<hidden>'
else:
removed[key] = old[key]
self.stack.pop()
for key in new_keys.intersection(old_keys):
self.stack.append(key)
if self.stack not in self.EXCLUDE_PATHS:
new_value = new[key]
old_value = old[key]
if isinstance(new_value, dict) and isinstance(old_value, dict):
changes = self.diff(old_value, new_value)
if changes:
if self.stack in self.HIDDEN_VALUE_PATHS:
updated[key] = '<hidden>'
else:
updated[key] = changes
elif new_value != old_value:
if self.stack in self.HIDDEN_VALUE_PATHS:
updated[key] = dict(new_value='<hidden>', old_value
='<hidden>')
else:
updated[key] = dict(new_value=new_value, old_value=
old_value)
self.stack.pop()
result = {}
if added:
result['added'] = added
if removed:
result['removed'] = removed
if updated:
result['updated'] = updated
return result
def modify_log_entry(self, entry):
"""Modify the log entry, if required."""
pass
class AttachmentsSaver(BaseSaver):
"""Document saver context handling attachments."""
def prepare(self):
self._delete_attachments = set()
self._add_attachments = []
def wrapup(self):
"""Delete any specified attachments.
Store the input files as attachments.
Must be done after document is saved.
"""
for filename in self._delete_attachments:
rev = flask.g.db.delete_attachment(self.doc, filename)
self.doc['_rev'] = rev
for attachment in self._add_attachments:
flask.g.db.put_attachment(self.doc, attachment['content'],
filename=attachment['filename'], content_type=attachment[
'mimetype'])
def add_attachment(self, filename, content, mimetype):
self._add_attachments.append({'filename': filename, 'content':
content, 'mimetype': mimetype})
def delete_attachment(self, filename):
self._delete_attachments.add(filename)
def modify_log_items(self, entry):
"""Modify the log entry to add info about attachment changes."""
if self._delete_attachments:
entry['attachments_deleted'] = self._delete_attachments
if self._add_attachments:
for att in self._add_attachments:
att['size'] = len(att.pop('content'))
entry['attachments_added'] = self._add_attachments
<|reserved_special_token_1|>
"Base document saver context classes."
import copy
import os.path
import sys
import flask
from . import constants
from . import utils
class BaseSaver:
    """Base context manager for creating or updating a document.
    On clean exit from the 'with' block: calls finish(), stamps 'doctype'
    and 'modified' on the document, stores it via flask.g.db.put, and
    records a log entry describing the difference against the original.
    NOTE(review): wrapup() is defined but never invoked by __exit__;
    presumably the caller runs it after the 'with' block -- confirm.
    """
    # Value stored in doc['doctype']; concrete subclasses must override.
    DOCTYPE = None
    # Key paths (lists of keys) that diff() ignores entirely.
    EXCLUDE_PATHS = [["_id"], ["_rev"], ["doctype"], ["modified"]]
    # Key paths whose values are replaced by '<hidden>' in log entries.
    HIDDEN_VALUE_PATHS = []
    def __init__(self, doc=None):
        # Either start a brand-new document or wrap an existing one.
        if doc is None:
            self.original = {}
            self.doc = {"_id": utils.get_iuid(),
                        "created": utils.get_time()}
            self.initialize()
        else:
            # Keep a deep copy so add_log() can diff against it later.
            self.original = copy.deepcopy(doc)
            self.doc = doc
        self.prepare()
    def __enter__(self):
        return self
    def __exit__(self, etyp, einst, etb):
        # Propagate any exception raised inside the 'with' block unchanged.
        if etyp is not None: return False
        self.finish()
        self.doc["doctype"] = self.DOCTYPE
        self.doc["modified"] = utils.get_time()
        flask.g.db.put(self.doc)
        self.add_log()
    def __getitem__(self, key):
        # Dict-like read access to the wrapped document.
        return self.doc[key]
    def __setitem__(self, key, value):
        # Dict-like write access to the wrapped document.
        self.doc[key] = value
    def initialize(self):
        "Initialize the new document. Hook for subclasses; default is a no-op."
        pass
    def prepare(self):
        "Preparations before making any changes. Hook for subclasses; default is a no-op."
        pass
    def finish(self):
        "Final changes and checks on the document before storing it. Hook; default is a no-op."
        pass
    def wrapup(self):
        """Wrap up the save operation by performing actions that
        must be done after the document has been stored.
        Hook for subclasses; default is a no-op.
        """
        pass
    def add_log(self):
        """Add a log entry recording the difference between the current and
        the original document, hiding values of specified keys.
        The diff contains up to three parts:
        'added': dictionary of items added in the current document.
        'updated': dictionary of items updated; new and original values.
        'removed': dictionary of items removed; original values.
        Also records username, remote address and user agent when running
        inside a Flask request context; script name otherwise.
        """
        self.stack = []  # Current key path; maintained by diff() recursion.
        diff = self.diff(self.original, self.doc)
        entry = {"_id": utils.get_iuid(),
                 "doctype": constants.DOCTYPE_LOG,
                 "docid": self.doc["_id"],
                 "diff": diff,
                 "timestamp": utils.get_time()}
        self.modify_log_entry(entry)
        # Record who made the change, if any user is logged in.
        if hasattr(flask.g, "current_user") and flask.g.current_user:
            entry["username"] = flask.g.current_user["username"]
        else:
            entry["username"] = None
        if flask.has_request_context():
            entry["remote_addr"] = str(flask.request.remote_addr)
            entry["user_agent"] = str(flask.request.user_agent)
        else:
            # Not a web request; e.g. a CLI script. Record the script name.
            entry["remote_addr"] = None
            entry["user_agent"] = os.path.basename(sys.argv[0])
        flask.g.db.put(entry)
    def diff(self, old, new):
        """Find the differences between the old and the new documents.
        Returns a dictionary with any of the keys 'added', 'removed', 'updated'.
        Uses a fairly simple algorithm which is OK for shallow hierarchies.
        Relies on self.stack holding the key path of the current recursion
        level; the whole path list is matched against EXCLUDE_PATHS and
        HIDDEN_VALUE_PATHS.
        """
        added = {}
        removed = {}
        updated = {}
        new_keys = set(new.keys())
        old_keys = set(old.keys())
        # Keys present only in the new document.
        for key in new_keys.difference(old_keys):
            self.stack.append(key)
            if self.stack not in self.EXCLUDE_PATHS:
                if self.stack in self.HIDDEN_VALUE_PATHS:
                    added[key] = "<hidden>"
                else:
                    added[key] = new[key]
            self.stack.pop()
        # Keys that have been removed from the old document.
        for key in old_keys.difference(new_keys):
            self.stack.append(key)
            if self.stack not in self.EXCLUDE_PATHS:
                if self.stack in self.HIDDEN_VALUE_PATHS:
                    removed[key] = "<hidden>"
                else:
                    removed[key] = old[key]
            self.stack.pop()
        # Keys present in both documents; recurse into sub-dictionaries.
        for key in new_keys.intersection(old_keys):
            self.stack.append(key)
            if self.stack not in self.EXCLUDE_PATHS:
                new_value = new[key]
                old_value = old[key]
                if isinstance(new_value, dict) and isinstance(old_value, dict):
                    changes = self.diff(old_value, new_value)
                    if changes:
                        if self.stack in self.HIDDEN_VALUE_PATHS:
                            updated[key] = "<hidden>"
                        else:
                            updated[key] = changes
                elif new_value != old_value:
                    if self.stack in self.HIDDEN_VALUE_PATHS:
                        updated[key]= dict(new_value="<hidden>",
                                           old_value="<hidden>")
                    else:
                        updated[key]= dict(new_value= new_value,
                                           old_value=old_value)
            self.stack.pop()
        # Include only the non-empty parts in the result.
        result = {}
        if added:
            result['added'] = added
        if removed:
            result['removed'] = removed
        if updated:
            result['updated'] = updated
        return result
    def modify_log_entry(self, entry):
        "Modify the log entry, if required. Hook for subclasses; default is a no-op."
        pass
class AttachmentsSaver(BaseSaver):
    """Document saver context handling attachments.
    Attachment additions and deletions are queued during the 'with' block
    and performed by wrapup(), which must be called after the document has
    been stored (the stored revision is needed for attachment operations).
    """
    def prepare(self):
        "Set up the queues of pending attachment operations."
        self._delete_attachments = set()
        self._add_attachments = []
    def wrapup(self):
        """Delete any specified attachments.
        Store the input files as attachments.
        Must be done after document is saved.
        """
        for filename in self._delete_attachments:
            rev = flask.g.db.delete_attachment(self.doc, filename)
            # Keep the document's revision current for subsequent operations.
            self.doc["_rev"] = rev
        for attachment in self._add_attachments:
            flask.g.db.put_attachment(self.doc,
                                      attachment["content"],
                                      filename=attachment["filename"],
                                      content_type=attachment["mimetype"])
    def add_attachment(self, filename, content, mimetype):
        "Queue the given content to be stored as an attachment by wrapup()."
        self._add_attachments.append({"filename": filename,
                                      "content": content,
                                      "mimetype": mimetype})
    def delete_attachment(self, filename):
        "Queue the named attachment for deletion by wrapup()."
        self._delete_attachments.add(filename)
    def modify_log_entry(self, entry):
        """Modify the log entry to add info about attachment changes.
        Bug fix: this override was named 'modify_log_items', but the base
        class calls 'modify_log_entry', so it was never invoked and
        attachment changes never reached the log.
        """
        if self._delete_attachments:
            # Store a sorted list; a set is not JSON-serializable.
            entry["attachments_deleted"] = sorted(self._delete_attachments)
        if self._add_attachments:
            # Record metadata only; do not log (nor pop, i.e. destroy) the
            # content, which wrapup() still needs for put_attachment.
            entry["attachments_added"] = [
                {"filename": att["filename"],
                 "mimetype": att["mimetype"],
                 "size": len(att["content"])}
                for att in self._add_attachments]
    # Deprecated alias kept for backward compatibility with any external caller.
    modify_log_items = modify_log_entry
|
flexible
|
{
"blob_id": "83fe635e35711c2c41d043a59d00a50cc87e69fa",
"index": 7696,
"step-1": "<mask token>\n\n\nclass BaseSaver:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AttachmentsSaver(BaseSaver):\n \"\"\"Document saver context handling attachments.\"\"\"\n\n def prepare(self):\n self._delete_attachments = set()\n self._add_attachments = []\n\n def wrapup(self):\n \"\"\"Delete any specified attachments.\n Store the input files as attachments.\n Must be done after document is saved.\n \"\"\"\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc['_rev'] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc, attachment['content'],\n filename=attachment['filename'], content_type=attachment[\n 'mimetype'])\n\n def add_attachment(self, filename, content, mimetype):\n self._add_attachments.append({'filename': filename, 'content':\n content, 'mimetype': mimetype})\n\n def delete_attachment(self, filename):\n self._delete_attachments.add(filename)\n\n def modify_log_items(self, entry):\n \"\"\"Modify the log entry to add info about attachment changes.\"\"\"\n if self._delete_attachments:\n entry['attachments_deleted'] = self._delete_attachments\n if self._add_attachments:\n for att in self._add_attachments:\n att['size'] = len(att.pop('content'))\n entry['attachments_added'] = self._add_attachments\n",
"step-2": "<mask token>\n\n\nclass BaseSaver:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __enter__(self):\n return self\n <mask token>\n <mask token>\n\n def __setitem__(self, key, value):\n self.doc[key] = value\n\n def initialize(self):\n \"\"\"Initialize the new document.\"\"\"\n pass\n <mask token>\n\n def finish(self):\n \"\"\"Final changes and checks on the document before storing it.\"\"\"\n pass\n\n def wrapup(self):\n \"\"\"Wrap up the save operation by performing actions that\n must be done after the document has been stored.\n \"\"\"\n pass\n\n def add_log(self):\n \"\"\"Add a log entry recording the the difference betweens the current and\n the original document, hiding values of specified keys.\n 'added': list of keys for items added in the current.\n 'updated': dictionary of items updated; original values.\n 'removed': dictionary of items removed; original values.\n \"\"\"\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {'_id': utils.get_iuid(), 'doctype': constants.DOCTYPE_LOG,\n 'docid': self.doc['_id'], 'diff': diff, 'timestamp': utils.\n get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, 'current_user') and flask.g.current_user:\n entry['username'] = flask.g.current_user['username']\n else:\n entry['username'] = None\n if flask.has_request_context():\n entry['remote_addr'] = str(flask.request.remote_addr)\n entry['user_agent'] = str(flask.request.user_agent)\n else:\n entry['remote_addr'] = None\n entry['user_agent'] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)\n\n def diff(self, old, new):\n \"\"\"Find the differences between the old and the new documents.\n Uses a fairly simple algorithm which is OK for shallow hierarchies.\n \"\"\"\n added = {}\n removed = {}\n updated = {}\n new_keys = set(new.keys())\n old_keys = set(old.keys())\n for key in new_keys.difference(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack 
in self.HIDDEN_VALUE_PATHS:\n added[key] = '<hidden>'\n else:\n added[key] = new[key]\n self.stack.pop()\n for key in old_keys.difference(new_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n removed[key] = '<hidden>'\n else:\n removed[key] = old[key]\n self.stack.pop()\n for key in new_keys.intersection(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n new_value = new[key]\n old_value = old[key]\n if isinstance(new_value, dict) and isinstance(old_value, dict):\n changes = self.diff(old_value, new_value)\n if changes:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = '<hidden>'\n else:\n updated[key] = changes\n elif new_value != old_value:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = dict(new_value='<hidden>', old_value\n ='<hidden>')\n else:\n updated[key] = dict(new_value=new_value, old_value=\n old_value)\n self.stack.pop()\n result = {}\n if added:\n result['added'] = added\n if removed:\n result['removed'] = removed\n if updated:\n result['updated'] = updated\n return result\n <mask token>\n\n\nclass AttachmentsSaver(BaseSaver):\n \"\"\"Document saver context handling attachments.\"\"\"\n\n def prepare(self):\n self._delete_attachments = set()\n self._add_attachments = []\n\n def wrapup(self):\n \"\"\"Delete any specified attachments.\n Store the input files as attachments.\n Must be done after document is saved.\n \"\"\"\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc['_rev'] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc, attachment['content'],\n filename=attachment['filename'], content_type=attachment[\n 'mimetype'])\n\n def add_attachment(self, filename, content, mimetype):\n self._add_attachments.append({'filename': filename, 'content':\n content, 'mimetype': mimetype})\n\n def delete_attachment(self, filename):\n 
self._delete_attachments.add(filename)\n\n def modify_log_items(self, entry):\n \"\"\"Modify the log entry to add info about attachment changes.\"\"\"\n if self._delete_attachments:\n entry['attachments_deleted'] = self._delete_attachments\n if self._add_attachments:\n for att in self._add_attachments:\n att['size'] = len(att.pop('content'))\n entry['attachments_added'] = self._add_attachments\n",
"step-3": "<mask token>\n\n\nclass BaseSaver:\n <mask token>\n DOCTYPE = None\n EXCLUDE_PATHS = [['_id'], ['_rev'], ['doctype'], ['modified']]\n HIDDEN_VALUE_PATHS = []\n\n def __init__(self, doc=None):\n if doc is None:\n self.original = {}\n self.doc = {'_id': utils.get_iuid(), 'created': utils.get_time()}\n self.initialize()\n else:\n self.original = copy.deepcopy(doc)\n self.doc = doc\n self.prepare()\n\n def __enter__(self):\n return self\n\n def __exit__(self, etyp, einst, etb):\n if etyp is not None:\n return False\n self.finish()\n self.doc['doctype'] = self.DOCTYPE\n self.doc['modified'] = utils.get_time()\n flask.g.db.put(self.doc)\n self.add_log()\n\n def __getitem__(self, key):\n return self.doc[key]\n\n def __setitem__(self, key, value):\n self.doc[key] = value\n\n def initialize(self):\n \"\"\"Initialize the new document.\"\"\"\n pass\n\n def prepare(self):\n \"\"\"Preparations before making any changes.\"\"\"\n pass\n\n def finish(self):\n \"\"\"Final changes and checks on the document before storing it.\"\"\"\n pass\n\n def wrapup(self):\n \"\"\"Wrap up the save operation by performing actions that\n must be done after the document has been stored.\n \"\"\"\n pass\n\n def add_log(self):\n \"\"\"Add a log entry recording the the difference betweens the current and\n the original document, hiding values of specified keys.\n 'added': list of keys for items added in the current.\n 'updated': dictionary of items updated; original values.\n 'removed': dictionary of items removed; original values.\n \"\"\"\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {'_id': utils.get_iuid(), 'doctype': constants.DOCTYPE_LOG,\n 'docid': self.doc['_id'], 'diff': diff, 'timestamp': utils.\n get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, 'current_user') and flask.g.current_user:\n entry['username'] = flask.g.current_user['username']\n else:\n entry['username'] = None\n if flask.has_request_context():\n entry['remote_addr'] = 
str(flask.request.remote_addr)\n entry['user_agent'] = str(flask.request.user_agent)\n else:\n entry['remote_addr'] = None\n entry['user_agent'] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)\n\n def diff(self, old, new):\n \"\"\"Find the differences between the old and the new documents.\n Uses a fairly simple algorithm which is OK for shallow hierarchies.\n \"\"\"\n added = {}\n removed = {}\n updated = {}\n new_keys = set(new.keys())\n old_keys = set(old.keys())\n for key in new_keys.difference(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n added[key] = '<hidden>'\n else:\n added[key] = new[key]\n self.stack.pop()\n for key in old_keys.difference(new_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n removed[key] = '<hidden>'\n else:\n removed[key] = old[key]\n self.stack.pop()\n for key in new_keys.intersection(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n new_value = new[key]\n old_value = old[key]\n if isinstance(new_value, dict) and isinstance(old_value, dict):\n changes = self.diff(old_value, new_value)\n if changes:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = '<hidden>'\n else:\n updated[key] = changes\n elif new_value != old_value:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = dict(new_value='<hidden>', old_value\n ='<hidden>')\n else:\n updated[key] = dict(new_value=new_value, old_value=\n old_value)\n self.stack.pop()\n result = {}\n if added:\n result['added'] = added\n if removed:\n result['removed'] = removed\n if updated:\n result['updated'] = updated\n return result\n\n def modify_log_entry(self, entry):\n \"\"\"Modify the log entry, if required.\"\"\"\n pass\n\n\nclass AttachmentsSaver(BaseSaver):\n \"\"\"Document saver context handling attachments.\"\"\"\n\n def prepare(self):\n self._delete_attachments = set()\n 
self._add_attachments = []\n\n def wrapup(self):\n \"\"\"Delete any specified attachments.\n Store the input files as attachments.\n Must be done after document is saved.\n \"\"\"\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc['_rev'] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc, attachment['content'],\n filename=attachment['filename'], content_type=attachment[\n 'mimetype'])\n\n def add_attachment(self, filename, content, mimetype):\n self._add_attachments.append({'filename': filename, 'content':\n content, 'mimetype': mimetype})\n\n def delete_attachment(self, filename):\n self._delete_attachments.add(filename)\n\n def modify_log_items(self, entry):\n \"\"\"Modify the log entry to add info about attachment changes.\"\"\"\n if self._delete_attachments:\n entry['attachments_deleted'] = self._delete_attachments\n if self._add_attachments:\n for att in self._add_attachments:\n att['size'] = len(att.pop('content'))\n entry['attachments_added'] = self._add_attachments\n",
"step-4": "<mask token>\n\n\nclass BaseSaver:\n \"\"\"Base document saver context.\"\"\"\n DOCTYPE = None\n EXCLUDE_PATHS = [['_id'], ['_rev'], ['doctype'], ['modified']]\n HIDDEN_VALUE_PATHS = []\n\n def __init__(self, doc=None):\n if doc is None:\n self.original = {}\n self.doc = {'_id': utils.get_iuid(), 'created': utils.get_time()}\n self.initialize()\n else:\n self.original = copy.deepcopy(doc)\n self.doc = doc\n self.prepare()\n\n def __enter__(self):\n return self\n\n def __exit__(self, etyp, einst, etb):\n if etyp is not None:\n return False\n self.finish()\n self.doc['doctype'] = self.DOCTYPE\n self.doc['modified'] = utils.get_time()\n flask.g.db.put(self.doc)\n self.add_log()\n\n def __getitem__(self, key):\n return self.doc[key]\n\n def __setitem__(self, key, value):\n self.doc[key] = value\n\n def initialize(self):\n \"\"\"Initialize the new document.\"\"\"\n pass\n\n def prepare(self):\n \"\"\"Preparations before making any changes.\"\"\"\n pass\n\n def finish(self):\n \"\"\"Final changes and checks on the document before storing it.\"\"\"\n pass\n\n def wrapup(self):\n \"\"\"Wrap up the save operation by performing actions that\n must be done after the document has been stored.\n \"\"\"\n pass\n\n def add_log(self):\n \"\"\"Add a log entry recording the the difference betweens the current and\n the original document, hiding values of specified keys.\n 'added': list of keys for items added in the current.\n 'updated': dictionary of items updated; original values.\n 'removed': dictionary of items removed; original values.\n \"\"\"\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {'_id': utils.get_iuid(), 'doctype': constants.DOCTYPE_LOG,\n 'docid': self.doc['_id'], 'diff': diff, 'timestamp': utils.\n get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, 'current_user') and flask.g.current_user:\n entry['username'] = flask.g.current_user['username']\n else:\n entry['username'] = None\n if flask.has_request_context():\n 
entry['remote_addr'] = str(flask.request.remote_addr)\n entry['user_agent'] = str(flask.request.user_agent)\n else:\n entry['remote_addr'] = None\n entry['user_agent'] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)\n\n def diff(self, old, new):\n \"\"\"Find the differences between the old and the new documents.\n Uses a fairly simple algorithm which is OK for shallow hierarchies.\n \"\"\"\n added = {}\n removed = {}\n updated = {}\n new_keys = set(new.keys())\n old_keys = set(old.keys())\n for key in new_keys.difference(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n added[key] = '<hidden>'\n else:\n added[key] = new[key]\n self.stack.pop()\n for key in old_keys.difference(new_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n removed[key] = '<hidden>'\n else:\n removed[key] = old[key]\n self.stack.pop()\n for key in new_keys.intersection(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n new_value = new[key]\n old_value = old[key]\n if isinstance(new_value, dict) and isinstance(old_value, dict):\n changes = self.diff(old_value, new_value)\n if changes:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = '<hidden>'\n else:\n updated[key] = changes\n elif new_value != old_value:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = dict(new_value='<hidden>', old_value\n ='<hidden>')\n else:\n updated[key] = dict(new_value=new_value, old_value=\n old_value)\n self.stack.pop()\n result = {}\n if added:\n result['added'] = added\n if removed:\n result['removed'] = removed\n if updated:\n result['updated'] = updated\n return result\n\n def modify_log_entry(self, entry):\n \"\"\"Modify the log entry, if required.\"\"\"\n pass\n\n\nclass AttachmentsSaver(BaseSaver):\n \"\"\"Document saver context handling attachments.\"\"\"\n\n def prepare(self):\n self._delete_attachments 
= set()\n self._add_attachments = []\n\n def wrapup(self):\n \"\"\"Delete any specified attachments.\n Store the input files as attachments.\n Must be done after document is saved.\n \"\"\"\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc['_rev'] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc, attachment['content'],\n filename=attachment['filename'], content_type=attachment[\n 'mimetype'])\n\n def add_attachment(self, filename, content, mimetype):\n self._add_attachments.append({'filename': filename, 'content':\n content, 'mimetype': mimetype})\n\n def delete_attachment(self, filename):\n self._delete_attachments.add(filename)\n\n def modify_log_items(self, entry):\n \"\"\"Modify the log entry to add info about attachment changes.\"\"\"\n if self._delete_attachments:\n entry['attachments_deleted'] = self._delete_attachments\n if self._add_attachments:\n for att in self._add_attachments:\n att['size'] = len(att.pop('content'))\n entry['attachments_added'] = self._add_attachments\n",
"step-5": "\"Base document saver context classes.\"\n\nimport copy\nimport os.path\nimport sys\n\nimport flask\n\nfrom . import constants\nfrom . import utils\n\n\nclass BaseSaver:\n \"Base document saver context.\"\n\n DOCTYPE = None\n EXCLUDE_PATHS = [[\"_id\"], [\"_rev\"], [\"doctype\"], [\"modified\"]]\n HIDDEN_VALUE_PATHS = []\n\n def __init__(self, doc=None):\n if doc is None:\n self.original = {}\n self.doc = {\"_id\": utils.get_iuid(),\n \"created\": utils.get_time()}\n self.initialize()\n else:\n self.original = copy.deepcopy(doc)\n self.doc = doc\n self.prepare()\n\n def __enter__(self):\n return self\n\n def __exit__(self, etyp, einst, etb):\n if etyp is not None: return False\n self.finish()\n self.doc[\"doctype\"] = self.DOCTYPE\n self.doc[\"modified\"] = utils.get_time()\n flask.g.db.put(self.doc)\n self.add_log()\n\n def __getitem__(self, key):\n return self.doc[key]\n\n def __setitem__(self, key, value):\n self.doc[key] = value\n\n def initialize(self):\n \"Initialize the new document.\"\n pass\n\n def prepare(self):\n \"Preparations before making any changes.\"\n pass\n\n def finish(self):\n \"Final changes and checks on the document before storing it.\"\n pass\n\n def wrapup(self):\n \"\"\"Wrap up the save operation by performing actions that\n must be done after the document has been stored.\n \"\"\"\n pass\n\n def add_log(self):\n \"\"\"Add a log entry recording the the difference betweens the current and\n the original document, hiding values of specified keys.\n 'added': list of keys for items added in the current.\n 'updated': dictionary of items updated; original values.\n 'removed': dictionary of items removed; original values.\n \"\"\"\n self.stack = []\n diff = self.diff(self.original, self.doc)\n entry = {\"_id\": utils.get_iuid(),\n \"doctype\": constants.DOCTYPE_LOG,\n \"docid\": self.doc[\"_id\"],\n \"diff\": diff,\n \"timestamp\": utils.get_time()}\n self.modify_log_entry(entry)\n if hasattr(flask.g, \"current_user\") and 
flask.g.current_user:\n entry[\"username\"] = flask.g.current_user[\"username\"]\n else:\n entry[\"username\"] = None\n if flask.has_request_context():\n entry[\"remote_addr\"] = str(flask.request.remote_addr)\n entry[\"user_agent\"] = str(flask.request.user_agent)\n else:\n entry[\"remote_addr\"] = None\n entry[\"user_agent\"] = os.path.basename(sys.argv[0])\n flask.g.db.put(entry)\n\n def diff(self, old, new):\n \"\"\"Find the differences between the old and the new documents.\n Uses a fairly simple algorithm which is OK for shallow hierarchies.\n \"\"\"\n added = {}\n removed = {}\n updated = {}\n new_keys = set(new.keys())\n old_keys = set(old.keys())\n for key in new_keys.difference(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n added[key] = \"<hidden>\"\n else:\n added[key] = new[key]\n self.stack.pop()\n for key in old_keys.difference(new_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n removed[key] = \"<hidden>\"\n else:\n removed[key] = old[key]\n self.stack.pop()\n for key in new_keys.intersection(old_keys):\n self.stack.append(key)\n if self.stack not in self.EXCLUDE_PATHS:\n new_value = new[key]\n old_value = old[key]\n if isinstance(new_value, dict) and isinstance(old_value, dict):\n changes = self.diff(old_value, new_value)\n if changes:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key] = \"<hidden>\"\n else:\n updated[key] = changes\n elif new_value != old_value:\n if self.stack in self.HIDDEN_VALUE_PATHS:\n updated[key]= dict(new_value=\"<hidden>\",\n old_value=\"<hidden>\")\n else:\n updated[key]= dict(new_value= new_value,\n old_value=old_value)\n self.stack.pop()\n result = {}\n if added:\n result['added'] = added\n if removed:\n result['removed'] = removed\n if updated:\n result['updated'] = updated\n return result\n\n def modify_log_entry(self, entry):\n \"Modify the log entry, if 
required.\"\n pass\n\n\nclass AttachmentsSaver(BaseSaver):\n \"Document saver context handling attachments.\"\n\n def prepare(self):\n self._delete_attachments = set()\n self._add_attachments = []\n\n def wrapup(self):\n \"\"\"Delete any specified attachments.\n Store the input files as attachments.\n Must be done after document is saved.\n \"\"\"\n for filename in self._delete_attachments:\n rev = flask.g.db.delete_attachment(self.doc, filename)\n self.doc[\"_rev\"] = rev\n for attachment in self._add_attachments:\n flask.g.db.put_attachment(self.doc,\n attachment[\"content\"],\n filename=attachment[\"filename\"],\n content_type=attachment[\"mimetype\"])\n\n def add_attachment(self, filename, content, mimetype):\n self._add_attachments.append({\"filename\": filename,\n \"content\": content,\n \"mimetype\": mimetype})\n\n def delete_attachment(self, filename):\n self._delete_attachments.add(filename)\n\n def modify_log_items(self, entry):\n \"Modify the log entry to add info about attachment changes.\"\n if self._delete_attachments:\n entry[\"attachments_deleted\"] = self._delete_attachments\n if self._add_attachments:\n for att in self._add_attachments:\n att[\"size\"] = len(att.pop(\"content\"))\n entry[\"attachments_added\"] = self._add_attachments\n",
"step-ids": [
8,
15,
21,
22,
24
]
}
|
[
8,
15,
21,
22,
24
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print(180 / 4)
|
flexible
|
{
"blob_id": "509129052f97bb32b4ba0e71ecd7b1061d5f8da2",
"index": 38,
"step-1": "<mask token>\n",
"step-2": "print(180 / 4)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#d
#b
#c
#b,c
|
normal
|
{
"blob_id": "8ecd1d6b43027153e05c771eb7183c062319eebc",
"index": 2716,
"step-1": "#d\n#b\n#c\n#b,c",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
# -*- coding: utf-8 -*-
class Config(object):
    """Central configuration store for data paths and model hyper-parameters.

    Values are grouped into named sections and looked up with
    get(section, name).
    """

    def __init__(self):
        # Locations of the vocabulary and of the train/val/test splits.
        data_paths = {
            "vocab_path": "../data/rumor/cnews.vocab.txt",
            "trainingSet_path": "../data/rumor/train_list.txt",
            "valSet_path": "../data/rumor/val_list.txt",
            "testingSet_path": "../data/rumor/test_list.txt",
        }
        # Hyper-parameters for the CNN text classifier.
        cnn_rule = {
            "embedding_dim": 64,
            "seq_length": 200,
            "num_classes": 2,
            "conv1_num_filters": 128,
            "conv1_kernel_size": 1,
            "conv2_num_filters": 128,
            "conv2_kernel_size": 1,
            "vocab_size": 5000,
            "hidden_dim": 256,
            "dropout_keep_prob": 0.5,
            "learning_rate": 1e-3,
            "batch_size": 64,
            "epochs": 5,
            "print_per_batch": 50,
            "save_per_batch": 500,
        }
        # Hyper-parameters for the LSTM text classifier.
        lstm_rule = {
            "seq_length": 300,
            "num_classes": 2,
            "vocab_size": 5000,
            "batch_size": 64,
        }
        # Output locations for the trained models.
        results = {
            "CNN_model_path": "CNN_model.h5",
            "LSTM_model_path": "LSTM_model.h5",
        }
        self.config_dict = {
            "data_path": data_paths,
            "CNN_training_rule": cnn_rule,
            "LSTM": lstm_rule,
            "result": results,
        }

    def get(self, section, name):
        """Return the configuration value *name* from *section*."""
        return self.config_dict[section][name]
|
normal
|
{
"blob_id": "9cb4e550a0d19b44ec8357882f353b04748b213b",
"index": 2589,
"step-1": "<mask token>\n",
"step-2": "class Config(object):\n <mask token>\n <mask token>\n",
"step-3": "class Config(object):\n <mask token>\n\n def get(self, section, name):\n return self.config_dict[section][name]\n",
"step-4": "class Config(object):\n\n def __init__(self):\n self.config_dict = {'data_path': {'vocab_path':\n '../data/rumor/cnews.vocab.txt', 'trainingSet_path':\n '../data/rumor/train_list.txt', 'valSet_path':\n '../data/rumor/val_list.txt', 'testingSet_path':\n '../data/rumor/test_list.txt'}, 'CNN_training_rule': {\n 'embedding_dim': 64, 'seq_length': 200, 'num_classes': 2,\n 'conv1_num_filters': 128, 'conv1_kernel_size': 1,\n 'conv2_num_filters': 128, 'conv2_kernel_size': 1, 'vocab_size':\n 5000, 'hidden_dim': 256, 'dropout_keep_prob': 0.5,\n 'learning_rate': 0.001, 'batch_size': 64, 'epochs': 5,\n 'print_per_batch': 50, 'save_per_batch': 500}, 'LSTM': {\n 'seq_length': 300, 'num_classes': 2, 'vocab_size': 5000,\n 'batch_size': 64}, 'result': {'CNN_model_path': 'CNN_model.h5',\n 'LSTM_model_path': 'LSTM_model.h5'}}\n\n def get(self, section, name):\n return self.config_dict[section][name]\n",
"step-5": "# -*- coding: utf-8 -*-\n\n\n\nclass Config(object):\n def __init__(self):\n self.config_dict = {\n \"data_path\": {\n # \"vocab_path\": \"../data/cnews/cnews.vocab.txt\",\n \"vocab_path\": \"../data/rumor/cnews.vocab.txt\",\n # \"trainingSet_path\": \"../data/cnews/cnews.train.txt\",\n \"trainingSet_path\": \"../data/rumor/train_list.txt\",\n # \"valSet_path\": \"../data/cnews/cnews.val.txt\",\n \"valSet_path\": \"../data/rumor/val_list.txt\",\n # \"testingSet_path\": \"../data/cnews/cnews.test.txt\",\n \"testingSet_path\": \"../data/rumor/test_list.txt\"\n },\n \"CNN_training_rule\": {\n \"embedding_dim\": 64,\n \"seq_length\": 200,\n \"num_classes\": 2,\n\n \"conv1_num_filters\": 128,\n \"conv1_kernel_size\": 1,\n\n \"conv2_num_filters\": 128,\n \"conv2_kernel_size\": 1,\n\n \"vocab_size\": 5000,\n\n \"hidden_dim\": 256,\n\n \"dropout_keep_prob\": 0.5,\n \"learning_rate\": 1e-3,\n\n \"batch_size\": 64,\n \"epochs\": 5,\n\n \"print_per_batch\": 50,\n \"save_per_batch\": 500\n },\n \"LSTM\": {\n \"seq_length\": 300,\n \"num_classes\": 2,\n \"vocab_size\": 5000,\n \"batch_size\": 64\n },\n \"result\": {\n \"CNN_model_path\": \"CNN_model.h5\",\n \"LSTM_model_path\": \"LSTM_model.h5\"\n }\n }\n\n def get(self, section, name):\n return self.config_dict[section][name]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TflearnDataSourceExtraTemplate(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TflearnDataSourceExtraTemplate(object):
<|reserved_special_token_0|>
def __init__(self, rewrite_data_aug=False):
self.rewrite_data_aug = rewrite_data_aug
<|reserved_special_token_1|>
class TflearnDataSourceExtraTemplate(object):
"""
Base class for TFLearn's DataSource (if we use wrapping).
Parameters:
----------
rewrite_data_aug : bool
use wrapper for data augmentation
"""
def __init__(self, rewrite_data_aug=False):
self.rewrite_data_aug = rewrite_data_aug
|
flexible
|
{
"blob_id": "70c084dab8469ca34b0e3e5174101111e695f1ca",
"index": 6638,
"step-1": "<mask token>\n",
"step-2": "class TflearnDataSourceExtraTemplate(object):\n <mask token>\n <mask token>\n",
"step-3": "class TflearnDataSourceExtraTemplate(object):\n <mask token>\n\n def __init__(self, rewrite_data_aug=False):\n self.rewrite_data_aug = rewrite_data_aug\n",
"step-4": "class TflearnDataSourceExtraTemplate(object):\n \"\"\"\n Base class for TFLearn's DataSource (if we use wrapping).\n\n Parameters:\n ----------\n rewrite_data_aug : bool\n use wrapper for data augmentation\n \"\"\"\n\n def __init__(self, rewrite_data_aug=False):\n self.rewrite_data_aug = rewrite_data_aug\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).
"""
def symetrisch(x, y):
    """Return True if the two two-digit numbers x and y are mirror images.

    (x, y) is symmetric when the tens digit of one equals the units digit
    of the other, e.g. 12 and 21.

    :param x: an element of the list
    :param y: an element of the list
    :return: True if x and y are symmetric, False otherwise
    """
    return x % 10 == y // 10 and x // 10 == y % 10
def anz_von_sym(lst):
    """Print and return the number of symmetric pairs (xy)/(yx) in *lst*.

    Each unordered pair of distinct positions is checked exactly once
    against symetrisch().

    Bug fix: the inner loop previously started at ``i`` instead of
    ``i + 1``, so palindromic numbers such as 11 or 22 were counted as
    pairs with themselves.

    :param lst: list of two-digit numbers
    :return: the number of symmetric pairs (also printed)
    """
    anz = 0
    for i in range(len(lst) - 1):
        for j in range(i + 1, len(lst)):
            if symetrisch(lst[i], lst[j]):
                anz += 1
    print("Anzahl symmetrischer Paaren:", anz)
    return anz
|
normal
|
{
"blob_id": "2c6dc4d55f64d7c3c01b3f504a72904451cb4610",
"index": 6532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if x % 10 == y // 10 and x // 10 == y % 10:\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if x % 10 == y // 10 and x // 10 == y % 10:\n return True\n else:\n return False\n\n\ndef anz_von_sym(lst):\n \"\"\"\n mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste\n :param lst: die Liste\n :return: Anzahl der symetrischen Paaren der Liste\n \"\"\"\n anz = 0\n for i in range(len(lst) - 1):\n for j in range(i, len(lst)):\n if symetrisch(lst[i], lst[j]):\n anz += 1\n print('Anzahl symmetrischer Paaren:', anz)\n",
"step-4": "\"\"\"\n2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).\n\"\"\"\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):\n return True\n else:\n return False\n\n\ndef anz_von_sym(lst):\n \"\"\"\n mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste\n :param lst: die Liste\n :return: Anzahl der symetrischen Paaren der Liste\n \"\"\"\n anz = 0\n for i in range(len(lst) - 1):\n for j in range(i, len(lst)):\n if symetrisch(lst[i], lst[j]):\n anz += 1\n print(\"Anzahl symmetrischer Paaren:\", anz)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Circle:
<|reserved_special_token_0|>
def __init__(self, radius=1):
self.radius = radius
self.area = radius * radius * Circle.pi
def get_circumference(self):
return self.radius * Circle.pi * 2
<|reserved_special_token_0|>
class Animal:
def __init__(self):
print('Animal Created')
def who_am_i(self):
print('I am an animal')
def eat(self):
print('I am eating')
<|reserved_special_token_0|>
class Dog(Animal):
def __init__(self):
Animal.__init__(self)
print('Dog Created')
def bark(self):
print('Woof! Woof!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Circle:
pi = 3.14
def __init__(self, radius=1):
self.radius = radius
self.area = radius * radius * Circle.pi
def get_circumference(self):
return self.radius * Circle.pi * 2
<|reserved_special_token_0|>
class Animal:
def __init__(self):
print('Animal Created')
def who_am_i(self):
print('I am an animal')
def eat(self):
print('I am eating')
<|reserved_special_token_0|>
class Dog(Animal):
def __init__(self):
Animal.__init__(self)
print('Dog Created')
def bark(self):
print('Woof! Woof!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Dog:
<|reserved_special_token_0|>
def __init__(self, breed, name):
self.breed = breed
self.name = name
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Circle:
pi = 3.14
def __init__(self, radius=1):
self.radius = radius
self.area = radius * radius * Circle.pi
def get_circumference(self):
return self.radius * Circle.pi * 2
<|reserved_special_token_0|>
class Animal:
def __init__(self):
print('Animal Created')
def who_am_i(self):
print('I am an animal')
def eat(self):
print('I am eating')
<|reserved_special_token_0|>
class Dog(Animal):
def __init__(self):
Animal.__init__(self)
print('Dog Created')
def bark(self):
print('Woof! Woof!')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Dog:
species = 'mammal'
def __init__(self, breed, name):
self.breed = breed
self.name = name
def bark(self, number):
print(f'Woof! My name is {self.name} and the number is {number}')
<|reserved_special_token_0|>
print(type(my_dog))
print(my_dog.breed)
print(my_dog.name)
my_dog.bark(10)
class Circle:
pi = 3.14
def __init__(self, radius=1):
self.radius = radius
self.area = radius * radius * Circle.pi
def get_circumference(self):
return self.radius * Circle.pi * 2
<|reserved_special_token_0|>
print(my_circle.area)
<|reserved_special_token_0|>
print(test)
class Animal:
def __init__(self):
print('Animal Created')
def who_am_i(self):
print('I am an animal')
def eat(self):
print('I am eating')
print('\n')
class Dog(Animal):
def __init__(self):
Animal.__init__(self)
print('Dog Created')
def bark(self):
print('Woof! Woof!')
<|reserved_special_token_0|>
print(mydog.bark())
<|reserved_special_token_1|>
# Getting familiar with OOP and using Functions and Classes :)
class Dog():
    """A dog with a breed, a name, and a class-wide species label."""

    # Class attribute: shared by every Dog instance.
    species = 'mammal'

    def __init__(self,breed,name):
        """Store the per-instance breed and name."""
        self.breed = breed
        self.name = name

    def bark(self,number):
        """Print a greeting containing this dog's name and *number*."""
        print(f'Woof! My name is {self.name} and the number is {number}')

# Demo: build a Dog and exercise its attributes and bark() method.
my_dog = Dog('Corgi','RTZY')
print(type(my_dog))
print(my_dog.breed)
print(my_dog.name)
my_dog.bark(10)
class Circle():
    """A circle whose area is precomputed from its radius at construction."""

    pi = 3.14  # class-wide approximation of pi

    def __init__(self, radius=1):
        """Remember the radius and cache the corresponding area."""
        self.radius = radius
        squared = radius * radius
        self.area = squared * Circle.pi

    def get_circumference(self):
        """Return the perimeter of this circle."""
        rim = self.radius * Circle.pi
        return rim * 2
# Demo: area was computed at construction; circumference on demand.
my_circle = Circle(30)
print(my_circle.area)
test = my_circle.get_circumference()
print(test)

class Animal():
    """Base class whose methods simply announce themselves on stdout."""

    def __init__(self):
        print('Animal Created')

    def who_am_i(self):
        print('I am an animal')

    def eat(self):
        print('I am eating')

print('\n')

class Dog(Animal):
    """Animal subclass. NOTE: rebinds the Dog name defined earlier in this script."""

    def __init__(self):
        # Run the base-class initializer first, then announce the Dog.
        Animal.__init__(self)
        print('Dog Created')

    def bark(self):
        print('Woof! Woof!')

# bark() returns None, so this prints the woof line followed by "None".
mydog = Dog()
print(mydog.bark())
|
flexible
|
{
"blob_id": "c8137aacfb0f35c9630515442d5bdda870e9908a",
"index": 4827,
"step-1": "<mask token>\n\n\nclass Circle:\n <mask token>\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\n<mask token>\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Circle:\n pi = 3.14\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\n<mask token>\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\n",
"step-3": "class Dog:\n <mask token>\n\n def __init__(self, breed, name):\n self.breed = breed\n self.name = name\n <mask token>\n\n\n<mask token>\n\n\nclass Circle:\n pi = 3.14\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\n<mask token>\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\n",
"step-4": "class Dog:\n species = 'mammal'\n\n def __init__(self, breed, name):\n self.breed = breed\n self.name = name\n\n def bark(self, number):\n print(f'Woof! My name is {self.name} and the number is {number}')\n\n\n<mask token>\nprint(type(my_dog))\nprint(my_dog.breed)\nprint(my_dog.name)\nmy_dog.bark(10)\n\n\nclass Circle:\n pi = 3.14\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\nprint(my_circle.area)\n<mask token>\nprint(test)\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\nprint('\\n')\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\nprint(mydog.bark())\n",
"step-5": "# Getting familiar with OOP and using Functions and Classes :)\nclass Dog():\n \n species = 'mammal'\n\n def __init__(self,breed,name):\n\n self.breed = breed\n self.name = name\n \n def bark(self,number):\n print(f'Woof! My name is {self.name} and the number is {number}')\n\nmy_dog = Dog('Corgi','RTZY')\nprint(type(my_dog))\nprint(my_dog.breed)\nprint(my_dog.name)\nmy_dog.bark(10)\n\nclass Circle():\n \n pi = 3.14\n\n def __init__(self,radius = 1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n \n def get_circumference(self):\n return (self.radius * Circle.pi) * 2\n\nmy_circle = Circle(30)\nprint(my_circle.area)\ntest = my_circle.get_circumference()\nprint(test)\n\nclass Animal():\n\n def __init__(self):\n print('Animal Created')\n \n def who_am_i(self):\n print('I am an animal')\n \n def eat(self):\n print('I am eating')\n\nprint('\\n')\nclass Dog(Animal):\n \n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n def bark(self):\n print('Woof! Woof!')\n\nmydog = Dog()\nprint(mydog.bark())",
"step-ids": [
10,
11,
13,
16,
18
]
}
|
[
10,
11,
13,
16,
18
] |
def filter(txt):  # NOTE: shadows the built-in filter; kept for the caller below
    """Return only the round/square bracket characters of txt, in order.

    Could alternatively be written with a regular expression.
    """
    brackets = {"(", ")", "[", "]"}
    return [ch for ch in txt if ch in brackets]
# Read lines until a lone "." and, for each line, record whether its
# round/square brackets are properly balanced and nested ("yes"/"no").
result = []
while True:
	raw_input = input()  # NOTE: this name shadows Python 2's raw_input builtin
	line = filter(raw_input)  # keep only the bracket characters
	if raw_input != ".":
		stack = []
		err = False
		for l in line:
			try:
				if l == "(" or l == "[":
					# Openers wait on the stack for their matching closer.
					stack.append(l)
				elif l == "]":
					if stack[len(stack) - 1] == "[":
						stack.pop()
					else:
						err = True
						break
				elif l == ")":
					if stack[len(stack) - 1] == "(":
						stack.pop()
					else:
						err = True
						break
			except:
				# stack[len(stack) - 1] raises IndexError on an empty stack:
				# a closer arrived with no opener left to match it.
				err = True
				break
		# Balanced only if no mismatch occurred and every opener was closed.
		if err == True or len(stack) != 0:
			result.append("no")
		else:
			result.append("yes")
	else:
		break
# All answers are printed after the terminating "." is seen.
for r in result:
	print(r)
|
normal
|
{
"blob_id": "9ca769ae8bbabee20b5dd4d75ab91d3c30e8d1bf",
"index": 8387,
"step-1": "<mask token>\n",
"step-2": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\n<mask token>\n",
"step-3": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\n<mask token>\nwhile True:\n raw_input = input()\n line = filter(raw_input)\n if raw_input != '.':\n stack = []\n err = False\n for l in line:\n try:\n if l == '(' or l == '[':\n stack.append(l)\n elif l == ']':\n if stack[len(stack) - 1] == '[':\n stack.pop()\n else:\n err = True\n break\n elif l == ')':\n if stack[len(stack) - 1] == '(':\n stack.pop()\n else:\n err = True\n break\n except:\n err = True\n break\n if err == True or len(stack) != 0:\n result.append('no')\n else:\n result.append('yes')\n else:\n break\nfor r in result:\n print(r)\n",
"step-4": "def filter(txt):\n output = []\n for t in txt:\n if t == '(' or t == ')' or t == '[' or t == ']':\n output.append(t)\n return output\n\n\nresult = []\nwhile True:\n raw_input = input()\n line = filter(raw_input)\n if raw_input != '.':\n stack = []\n err = False\n for l in line:\n try:\n if l == '(' or l == '[':\n stack.append(l)\n elif l == ']':\n if stack[len(stack) - 1] == '[':\n stack.pop()\n else:\n err = True\n break\n elif l == ')':\n if stack[len(stack) - 1] == '(':\n stack.pop()\n else:\n err = True\n break\n except:\n err = True\n break\n if err == True or len(stack) != 0:\n result.append('no')\n else:\n result.append('yes')\n else:\n break\nfor r in result:\n print(r)\n",
"step-5": "def filter(txt): # can be improved using regular expression\n\toutput = []\n\tfor t in txt:\n\t\tif t == \"(\" or t == \")\" or t == \"[\" or t == \"]\":\n\t\t\toutput.append(t)\n\treturn output\n\nresult = []\nwhile True:\n\traw_input = input()\n\tline = filter(raw_input)\n\t\n\tif raw_input != \".\":\n\t\tstack = []\n\t\terr = False\n\t\t\n\t\tfor l in line:\n\t\t\ttry:\n\t\t\t\tif l == \"(\" or l == \"[\":\n\t\t\t\t\tstack.append(l)\n\t\t\t\telif l == \"]\":\n\t\t\t\t\tif stack[len(stack) - 1] == \"[\":\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\terr = True\n\t\t\t\t\t\tbreak\n\t\t\t\telif l == \")\":\n\t\t\t\t\tif stack[len(stack) - 1] == \"(\":\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\telse:\n\t\t\t\t\t\terr = True\n\t\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\terr = True\n\t\t\t\tbreak\n\t\tif err == True or len(stack) != 0:\n\t\t\tresult.append(\"no\")\n\t\telse:\n\t\t\tresult.append(\"yes\")\n\telse:\n\t\tbreak\n\nfor r in result:\n\tprint(r)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(statuses)
admin.site.register(sites)
admin.site.register(redirects)
<|reserved_special_token_1|>
from django.contrib import admin
from main_app.models import sites, statuses, redirects
admin.site.register(statuses)
admin.site.register(sites)
admin.site.register(redirects)
<|reserved_special_token_1|>
from django.contrib import admin
from main_app.models import sites, statuses, redirects
# Register your models here.
admin.site.register(statuses)
admin.site.register(sites)
admin.site.register(redirects)
|
flexible
|
{
"blob_id": "2b8ca0c8c7878536da4f31652976988cdba62d89",
"index": 491,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(statuses)\nadmin.site.register(sites)\nadmin.site.register(redirects)\n",
"step-3": "from django.contrib import admin\nfrom main_app.models import sites, statuses, redirects\nadmin.site.register(statuses)\nadmin.site.register(sites)\nadmin.site.register(redirects)\n",
"step-4": "from django.contrib import admin\nfrom main_app.models import sites, statuses, redirects\n# Register your models here.\nadmin.site.register(statuses)\nadmin.site.register(sites)\nadmin.site.register(redirects)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from math import ceil

# Read n values and a modulus k, pair values so that each pair's sum is
# divisible by k, and print the total number of values used in such pairs.
# NOTE: n is read but not otherwise used; len(d) plays the same role.
n, k = map(int, input().split())
d = list(map(int, input().split()))

# packs[r] = how many input values leave remainder r when divided by k.
packs = [0]*k
for i in d:
    packs[i%k] += 1

# Values with remainder 0 pair among themselves.
counter = packs[0]//2
if (k % 2) == 0:
    # For even k, remainder k/2 values also pair among themselves.
    counter += packs[k//2]//2
# Otherwise remainders i and k-i must pair with each other.
for i in range(1, ceil(k/2)):
    counter += min(packs[i], packs[k-i])

# Each pair contributes two values to the answer.
print(counter*2)
|
normal
|
{
"blob_id": "2226382c494af33957a44d9f1682f7deacf574a2",
"index": 2075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in d:\n packs[i % k] += 1\n<mask token>\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n",
"step-3": "<mask token>\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\npacks = [0] * k\nfor i in d:\n packs[i % k] += 1\ncounter = packs[0] // 2\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n",
"step-4": "from math import ceil\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\npacks = [0] * k\nfor i in d:\n packs[i % k] += 1\ncounter = packs[0] // 2\nif k % 2 == 0:\n counter += packs[k // 2] // 2\nfor i in range(1, ceil(k / 2)):\n counter += min(packs[i], packs[k - i])\nprint(counter * 2)\n",
"step-5": "from math import ceil\n\nn, k = map(int, input().split())\nd = list(map(int, input().split()))\n\npacks = [0]*k\nfor i in d:\n packs[i%k] += 1\n\ncounter = packs[0]//2\nif (k % 2) == 0:\n counter += packs[k//2]//2\nfor i in range(1, ceil(k/2)):\n counter += min(packs[i], packs[k-i])\n\nprint(counter*2)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
[Interactive Programming with Python - Part 1]
[Arithmetic Expressions]
# numbers - two types, an integer or a decimal number
# two correspending data types int() and float()
print 3, -1, 3.14159, -2.8
# we can convert between data types using int() and float()
# note that int() take the "whole" part of a decimal number
# float() applied to integers is boring
print type(3), type(3.14159), type(3.0)
#=> <type 'int'><type 'float'><type 'float'>
print int(3.14159), int(-2.8)
#=> 3 -2
print float(3), float(-1)
#=> 3.0 -1.0
# floating point numbers have around 15 decimal digits of accuracy
# pi is 3.1415926535897932384626433832795028841971...
# square root of two is 1.4142135623730950488016887242096980785696...
# approximation of pi, Python displays 12 decimal digits
print 3.1415926535897932384626433832795028841971
#=> 3.14159265359
# appoximation of square root of two, Python displays 12 decimal digits
print 1.4142135623730950488016887242096980785696
#=> 1.41421356237
# arithmetic operators
# + plus addition
# - minus subtraction
# * times multiplication
# / divided by division
# ** power exponentiation
# If one operand is a decimal (float), the answer is decimal
print 1.0 / 3, 5.0 / 2.0, -7 / 3.0
#=> 0.333333333333 2.5 -2.33333333333
# If both operands are ints, the answer is an int (rounded down)
print 1 / 3, 5 / 2, -7 / 3
#=> 0 2 -3
# expressions - number or a binary operator applied to two expressions
# minus is also a unary operator and can be applied to a single expression
print 1 + 2 * 3, 4.0 - 5.0 / 6.0, 7 * 8 + 9 * 10
# expressions are entered as sequence of numbers and operations
# how are the number and operators grouped to form expressions?
# operator precedence - "Please Excuse My Dear Aunt Sally" = (), **, *, /, +, -
print 1 * 2 + 3 * 4
print 2 + 12
# always manually group using parentheses when in doubt
print 1 * (2 + 3) * 4
print 1 * 5 * 4
[Variables]
# valid variable names - consists of letters, numbers, underscore (_)
# starts with letter or underscore
# case sensitive (capitalization matters)
# legal names - ninja, Ninja, n_i_n_j_a
# illegal names - 1337, 1337ninja
# Python convention - multiple words joined by _
# legal names - elite_ninja, leet_ninja, ninja_1337
# illegal name 1337_ninja
# assign to variable name using single equal sign =
# (remember that double equals == is used to test equality)
# examples
my_name = "Joe Warren"
print my_name
my_age = 51
print my_age
my_age = my_age + 1  # same as the shorthand: my_age += 1
# the story of the magic pill
magic_pill = 30
print my_age - magic_pill
my_grand_dad = 74
print my_grand_dad - 2 * magic_pill
# Temperature examples
# convert from Fahrenheit to Celsuis
# c = 5 / 9 * (f - 32)
# use explanatory names
temp_Fahrenheit = 212
temp_Celsius = 5.0 / 9.0 * (temp_Fahrenheit - 32)
print temp_Celsius
# test it! 32 Fahrenheit is 0 Celsius, 212 Fahrenheit is 100 Celsius
# convert from Celsius to Fahrenheit
# f = 9 / 5 * c + 32
temp_Celsius = 100
temp_Fahrenheit = 9.0 / 5.0 * temp_Celsius + 32
print temp_Fahrenheit
[Functions]
# computes the area of a triangle
def triangle_area(base, height): # header - ends in colon
area = (1.0 / 2) * base * height # body - all of body is indented
return area # body - return outputs value
a1 = triangle_area(3, 8)
print a1
a2 = triangle_area(14, 2)
print a2
# converts fahrenheit to celsius
def fahrenheit2celsius(fahrenheit):
celsius = (5.0 / 9) * (fahrenheit - 32)
return celsius
# test!!!
c1 = fahrenheit2celsius(32)
c2 = fahrenheit2celsius(212)
print c1, c2
# converts fahrenheit to kelvin
def fahrenheit2kelvin(fahrenheit):
celsius = fahrenheit2celsius(fahrenheit)
kelvin = celsius + 273.15
return kelvin
# test!!!
k1 = fahrenheit2kelvin(32)
k2 = fahrenheit2kelvin(212)
print k1, k2
# prints hello, world!
def hello():
print "Hello, world!"
# test!!!
hello() # call to hello prints "Hello, world!"
h = hello() # call to hello prints "Hello, world!" a second time
print h # prints None since there was no return value
Do not forget:
- :
- return
- indentation
[More Operations]
# Remainder / % / modulo - modular arithmetic works both in negative as positive direction
# systematically restrict computation to a range
# long division - divide by a number, we get a quotient plus a remainder
# quotient is integer division //, the remainder is % (Docs)
# problem - get the ones digit of a number
num = 49
tens = num // 10 # --> 4
ones = num % 10 # --> 9
print tens, ones
print 10 * tens + ones, num
# application - 24 hour clock
# http://en.wikipedia.org/wiki/24-hour_clock
hour = 20
shift = 8
print (hour + shift) % 24
# application - screen wraparound
# Spaceship from week seven
width = 800
position = 797
move = 5
position = (position + move) % width
print position # --> 2
width = 800
position = 797
move = -5
position = (position + move) % width
print position # --> 797
# Data conversion operations
# convert an integer into string - str
# convert an hour into 24-hour format "03:00", always print leading zero
hour = 3
ones = hour % 10 # --> 3
tens = hour // 10 # --> 0
print tens, ones, ":00" # --> 0 3 :00
print str(tens), str(ones), ":00" # --> 0 3 :00
print str(tens) + str(ones) + ":00" # --> 03:00
# convert a string into numbers using int and float
# Python modules - extra functions implemented outside basic Python
import simplegui # access to drawing operations for interactive applications
import math # access to standard math functions, e.g; trig
import random # functions to generate random numbers
# look in Docs for useful functions
print math.pi
[Logic and Comparisons]
Evaluation hierarchy: NOT - AND - OR
-- Comparison Operators
# >
# <
# >=
# <=
# ==
# !=
[Conditionals]
def greet(friend, money):
if friend and (money > 20):
print "Hi!"
money = money - 20
elif friend:
print "Hello"
else:
print "Ha ha"
money = money + 10
return money
money = 15
money = greet(True, money)
print "Money:", money
print ""
money = greet(False, money)
print "Money:", money
print ""
money = greet(True, money)
print "Money:", money
print ""
[Programming Tips]
import random
def random_dice():
die1 = random.randrange(1, 7)
die2 = random.randrange(1, 7)
return die1 + die2
def volume_sphere(radius):
return 4.0/3.0 * math.pi * (radius ** 3)
# => a mistyped name after the '.' raises an AttributeError at runtime, not a syntax error
def area_triangle(base, height):
return 0.5 * base * height
# Poor readability
def area(a,b,c):
s = (a+b+c)/2.0
return math.sqrt(s*(s-a)*(s-b)*(s-c))
# Improved readability
def area_triangle_sss(side1, side2, side3):
"""
Returns the area of a triangle, given the lengths of [Documentation String]
its three sides.
"""
# Use Heron's formula
semiperim = (side1 + side2 + side3) / 2.0
return math.sqrt(semiperim *
(semiperim - side1) *
(semiperim - side2) *
(semiperim - side3))
[Rock-paper-scissors-lizard-Spock]
n = 123
print n % 100 #=> 23
print n % 10 #=> 3
print n // 10 #=> 12
[Event-driven Programming]
Start --> Initialize --> Wait <---> (Event +) Handler
Events
- Input (e.g. button, text box)
- Keyboard (e.g key down, key up)
- Mouse (e.g. click, drag)
- Timer
# Example of a simple event-driven program
# CodeSkulptor GUI module
import simplegui
# Event handler
def tick():
print "tick!"
# Register handler
timer = simplegui.create_timer(1000, tick)
# Start timer
timer.start()
Event Queue
- System puts events in this (invisible) queue
[Local vs. Global Variables]
# global vs local examples
# num1 is a global variable
num1 = 1
print num1
# num2 is a local variable
def fun():
num1 = 2
num2 = num1 + 1
print num2
fun()
# the scope of global num1 is the whole program, num 1 remains defined
print num1
# the scope of the variable num2 is fun(), num2 is now undefined
# print num2 #=> error 'num2' not defined
# why use local variables?
# give a descriptive name to a quantity
# avoid computing something multiple times
def fahren_to_kelvin(fahren):
celsius = 5.0 / 9 * (fahren - 32)
zero_celsius_in_kelvin = 273.15
return celsius + zero_celsius_in_kelvin
print fahren_to_kelvin(212)
# the risk/reward of using global variables
# risk - consider the software system for an airliner
# critical piece - flight control system
# non-critical piece - in-flight entertainment system
# both systems might use a variable called "dial"
# we don't want possibility that change the volume on your audio
# causes the plane's flaps to change!
# example
num = 4
def fun1():
global num # to access global variable
num = 5
def fun2():
global num
num = 6
# note that num changes after each call with no obvious explanation
print num
fun1()
print num
fun2()
print num
# global variables are an easy way for event handlers
# to communicate game information.
# safer method - but they required more sophisticated
# object-programming techniques
[SimpleGUI]
import simplegui
message = "Welcome!"
# Handler for mouse click
def click():
global message
message = "Good job!"
# Handler to draw on canvas
def draw(canvas):
canvas.draw_text(message, [50,112], 36, "Red")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 300, 200)
frame.add_button("Click me", click)
frame.set_draw_handler(draw)
# Start the frame animation
frame.start()
-- Program Structure
1 - Define globals (state)
2 - Define Helper functions
3 - Define Classes
4 - Define event handlers
5 - Create a frame
6 - Register event handlers
7 - Start the frame & timers
# SimpleGUI program template
# Import the module
import simplegui
# Define global variables (program state)
counter = 0
# Define "helper" functions
def increment():
global counter
counter = counter + 1
# Define event handler functions
def tick():
increment()
print counter
def buttonpress():
    global counter
counter = 0
# Create a frame
frame = simplegui.create_frame("SimpleGUI Test", 100, 100)

# Register event handlers
timer = simplegui.create_timer(1000, tick)
frame.add_button("Click me!", buttonpress)
# Start frame and timers
frame.start()
timer.start()
[Buttons & Input Fields]
# calculator with all buttons
import simplegui
# intialize globals
store = 0
operand = 0
# event handlers for calculator with a store and operand
def output():
"""prints contents of store and operand"""
print "Store = ", store
print "Operand = ", operand
print ""
def swap():
""" swap contents of store and operand"""
global store, operand
store, operand = operand, store
output()
def add():
""" add operand to store"""
global store
store = store + operand
output()
def sub():
""" subtract operand from store"""
global store
store = store - operand
output()
def mult():
""" multiply store by operand"""
global store
store = store * operand
output()
def div():
""" divide store by operand"""
global store
store = store / operand
output()
def enter(t):
""" enter a new operand"""
global operand
operand = float(t)
output()
# create frame
f = simplegui.create_frame("Calculator",300,300)
# register event handlers and create control elements
f.add_button("Print", output, 100)
f.add_button("Swap", swap, 100)
f.add_button("Add", add, 100)
f.add_button("Sub", sub, 100)
f.add_button("Mult", mult, 100)
f.add_button("Div", div, 100)
f.add_input("Enter", enter, 100)
# get frame rolling
f.start()
[Programming Tips]
##############
# Example of missing "global"
n1 = 0
def increment():
n1 = n1 + 1
increment()
increment()
increment()
print n1
##############
# Example of missing "global"
n2 = 0
def assign(x):
n2 = x
assign(2)
assign(15)
assign(7)
print n2
##############
# Example of missing "return"
n3 = 0
def decrement():
global n3
n3 = n3 - 1
x = decrement()
print "x = ", x
print "n = ", n
##############
# Example of print debugging
import simplegui
x = 0
def f(n):
print "f: n,x = ", n, x
result = n ** x
print "f: result = ",result
return result
def button_handler():
global x
print "bh : x = ", x
x += 1
print "bh : x = ", x
def input_handler(text):
print "ih : text = ", text
print f(float(text))
frame = simplegui.create_frame("Example", 200, 200)
frame.add_button("Increment", button_handler)
frame.add_input("Number:", input_handler, 100)
frame.start()
##############
# Examples of simplifying conditionals
def f1(a, b):
"""Returns True exactly when a is False and b is True."""
if a == False and b == True:
return True
else:
return False
def f2(a, b):
"""Returns True exactly when a is False and b is True."""
if not a and b:
return True
else:
return False
def f3(a, b):
"""Returns True exactly when a is False and b is True."""
return not a and b
def g1(a, b):
"""Returns False eactly when a and b are both True."""
if a == True and b == True:
return False
else:
return True
def g2(a, b):
"""Returns False eactly when a and b are both True."""
if a and b:
return False
else:
return True
def g3(a, b):
"""Returns False eactly when a and b are both True."""
return not (a and b)
[PEP 8 - Styleguide]
- Use 4-space indentation, and no tabs.
- 4 spaces are a good compromise between small indentation (allows greater nesting depth) and large indentation (easier to read). Tabs introduce confusion, and are best left out.
- Wrap lines so that they don’t exceed 79 characters.
- This helps users with small displays and makes it possible to have several code files side-by-side on larger displays.
- Use blank lines to separate functions and classes, and larger blocks of code inside functions.
- When possible, put comments on a line of their own.
- Use docstrings.
- Use spaces around operators and after commas, but not directly inside bracketing constructs: a = f(1, 2) + g(3, 4).
- Name your classes and functions consistently; the convention is to use CamelCase for classes and lower_case_with_underscores for functions and methods. Always use self as the name for the first method argument (see A First Look at Classes for more on classes and methods).
- Don’t use fancy encodings if your code is meant to be used in international environments. Plain ASCII works best in any case.
[Guess the Number - http://www.codeskulptor.org/#user40_QwCzfXhK4H_9.py]
# template for "Guess the number" mini-project
import simplegui
import random
import math
# Global Variables
num_range = 100
num_guesses = 7
secret_number = 0
# Helper Function
def new_game():
global secret_number, num_range, num_guesses
secret_number = random.randint(0,num_range)
calculation_n_1 = max(0,num_range) - min(0,num_range) + 1
calculation_n_2 = math.ceil(math.log(calculation_n_1,2))
num_guesses = int(calculation_n_2)
print "New game started with range 0 - ", num_range, "!"
print "Number of guesses left: ", num_guesses
# Event Handlers
def range100():
global num_range
num_range = 100
new_game()
def range1000():
global num_range
num_range = 1000
new_game()
def input_guess(guess):
global secret_number, num_guesses
value = int(guess)
print "Guess was ", value
if value > secret_number:
num_guesses -= 1
if num_guesses == 0:
print "Lower & Game Over. Guesses left: ", num_guesses
new_game()
else:
print "Lower, number of guesses left: ", num_guesses
elif value < secret_number:
num_guesses -= 1
if num_guesses == 0:
print "Higher & Game Over. Guesses left: ", num_guesses
new_game()
else:
print "Higher, number of guesses left: ", num_guesses
elif value == secret_number:
num_guesses -= 1
print "Correct!"
new_game()
else:
print "Error"
# Create Frame
f = simplegui.create_frame("Guess the number", 200, 200)
# Registration Event Handlers & Start Frame
f.add_button("Range is (0, 100)", range100, 200)
f.add_button("range is (0, 1000)", range1000, 200)
f.add_input("Enter a guess", input_guess, 200)
# Starting the Game
new_game()
[Canvas and Drawing]
Event-Driven Drawing
- Computor monitor - 2D grid of pixels stored logically in a frame buffer (something which keeps track of the values of the pixels)
- Computers update the monitor based on the frame buffer at rate of around 60-72 times a second (refresh rate)
- Many applications will register a special function called a "draw handler" which will update the frame buffer.
 - In CodeSkulptor we will register a simple draw handler using a SimpleGUI command. CodeSkulptor calls the draw handler at around 60 times per second.
- The draw handler updates the canvas using a collection of draw commands that include things like draw_text, draw_line, draw_circle.
Canvas Coordinates
 - Origin (0, 0) is always in the upper left corner, not the lower!
# first example of drawing on the canvas
import simplegui
# define draw handler
def draw(canvas):
canvas.draw_text("Hello!",[100, 100], 24, "White")
canvas.draw_circle([100, 100], 2, 2, "Red")
# create frame
frame = simplegui.create_frame("Text drawing", 300, 200)
# register draw handler
frame.set_draw_handler(draw)
# start frame
frame.start()
 - Text is anchored at [X, Y], the lower left corner of the string.
# example of drawing operations in simplegui
# standard HMTL color such as "Red" and "Green"
# note later drawing operations overwrite earlier drawing operations
import simplegui
# Handler to draw on canvas
def draw(canvas):
canvas.draw_circle([100, 100], 50, 2, "Red", "Pink")
canvas.draw_circle([300, 300], 50, 2, "Red", "Pink")
canvas.draw_line([100, 100],[300, 300], 2, "Black")
canvas.draw_circle([100, 300], 50, 2, "Green", "Lime")
canvas.draw_circle([300, 100], 50, 2, "Green", "Lime")
canvas.draw_line([100, 300],[300, 100], 2, "Black")
canvas.draw_polygon([[150, 150], [250, 150], [250, 250], [150, 250]], 2,
"Blue", "Aqua")
canvas.draw_text("An example of drawing", [60, 385], 24, "Black")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 400, 400)
frame.set_draw_handler(draw)
frame.set_canvas_background("Yellow")
# Start the frame animation
frame.start()
[String Processing]
# String literals
s1 = "Rixner's funny"
s2 = 'Warren wears nice ties!'
s3 = " t-shirts!"
#print s1, s2
#print s3
# Combining strings
a = ' and '
s4 = "Warren" + a + "Rixner" + ' are nuts!'
print s4
# Characters and slices
print s1[3] #=> n
print s1[-1] #=> y
print s1[-2] #=> n
print len(s1)
print s1[0:6] + s2[6:]  # slice end index is exclusive: up to but NOT including
print s2[:13] + s1[9:] + s3
# Converting strings
s5 = str(375)
print s5[1:]
i1 = int(s5[1:])
print i1 + 38
# Handle single quantity
def convert_units(val, name):
result = str(val) + " " + name
if val > 1:
result = result + "s"
return result
# convert xx.yy to xx dollars and yy cents
def convert(val):
# Split into dollars and cents
dollars = int(val)
cents = int(round(100 * (val - dollars)))
# Convert to strings
dollars_string = convert_units(dollars, "dollar")
cents_string = convert_units(cents, "cent")
# return composite string
if dollars == 0 and cents == 0:
return "Broke!"
elif dollars == 0:
return cents_string
elif cents == 0:
return dollars_string
else:
return dollars_string + " and " + cents_string
# Tests
print convert(11.23)
print convert(11.20)
print convert(1.12)
print convert(12.01)
print convert(1.01)
print convert(0.01)
print convert(1.00)
print convert(0)
[Interactive Drawing]
# interactive application to convert a float in dollars and cents
import simplegui
# define global value
value = 3.12
# Handle single quantity
def convert_units(val, name):
result = str(val) + " " + name
if val > 1:
result = result + "s"
return result
# convert xx.yy to xx dollars and yy cents
def convert(val):
# Split into dollars and cents
dollars = int(val)
cents = int(round(100 * (val - dollars)))
# Convert to strings
dollars_string = convert_units(dollars, "dollar")
cents_string = convert_units(cents, "cent")
# return composite string
if dollars == 0 and cents == 0:
return "Broke!"
elif dollars == 0:
return cents_string
elif cents == 0:
return dollars_string
else:
return dollars_string + " and " + cents_string
# define draw handler
def draw(canvas):
canvas.draw_text(convert(value), [60, 110], 24, "White")
# define an input field handler
def input_handler(text):
    """Input-field handler: parse the typed text as the new float value."""
    global value
    value = float(text)
# create a frame
frame = simplegui.create_frame("Converter", 400, 200)
frame.add_input("Enter value", input_handler, 100)
# register event handlers
frame.set_draw_handler(draw)
# start the frame
frame.start()
---
string = '1lll1l1l1l1ll1l111ll1l1ll1l1ll1ll111ll1ll1ll1l1ll1ll1ll1ll1lll1l1l1l1l1l1l1l1l1l1l1l1ll1lll1l111ll1l1l1l1l1'
print len(string)
ones = 0
els = 0
other = 0
for i in range(0,len(string)):
if string[i] == '1':
ones += 1
elif string[i] == 'l':
els += 1
else:
other += 1
print "Ones: ", ones
print "L's: ", els
print "Other: ", other
[Timers]
# Simple "screensaver" program.
# Import modules
import simplegui
import random
# Global state
message = "Python is Fun!"
position = [50, 50]
width = 500
height = 500
interval = 2000
# Handler for text box
def update(text):
    """Input-field handler: replace the displayed message."""
    global message
    message = text
# Handler for timer
def tick():
    """Timer handler: move the message to a random canvas position."""
    x = random.randrange(0, width)
    y = random.randrange(0, height)
    position[0] = x #=> When you are changing elements of a global variable, the global declaration is optional!
    position[1] = y #=> When you are changing elements of a global variable, the global declaration is optional!
# Handler to draw on canvas
def draw(canvas):
    """Draw handler: render the message at its current position."""
    canvas.draw_text(message, position, 36, "Red")
# Create a frame
frame = simplegui.create_frame("Home", width, height)
# Register event handlers
text = frame.add_input("Message:", update, 150)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(interval, tick)
# Start the frame animation
frame.start()
timer.start()
[Programming Tips - Week 3]
#####################
# Example of event-driven code, buggy version
import simplegui
size = 10
radius = 10
# Define event handlers.
def incr_button_handler():
    """Increment the size."""
    global size
    size += 1
    # Mirror the new size in the on-screen label.
    label.set_text("Value: " + str(size))
def decr_button_handler():
    """Decrement the size, keeping it strictly positive.

    The lower bound matters because draw_circle throws an error
    if it is ever handed a zero radius (see change_circle_handler,
    which copies size into radius).
    """
    global size
    # Only decrement while size > 1 so it can never reach zero.
    if size > 1:
        size -= 1
    # Mirror the (possibly unchanged) size in the on-screen label.
    label.set_text("Value: " + str(size))
def change_circle_handler():
    """Copy the current size into the circle radius."""
    global radius
    radius = size
    # Insert code to make radius label change.
def draw_handler(canvas):
    """Draw the circle at a fixed center with the current radius."""
    canvas.draw_circle((100, 100), radius, 5, "Red")
# Create a frame and assign callbacks to event handlers.
frame = simplegui.create_frame("Home", 200, 200)
label = frame.add_label("Value: " + str(size))
frame.add_button("Increase", incr_button_handler)
frame.add_button("Decrease", decr_button_handler)
frame.add_label("Radius: " + str(radius))
frame.add_button("Change circle", change_circle_handler)
frame.set_draw_handler(draw_handler)
# Start the frame animation
frame.start()
---
import simplegui
#####################
# Buggy code -- doesn't start frame
message = "Welcome!"
def click():
    """Change message on mouse click."""
    global message
    message = "Good job!"
def draw(canvas):
    """Draw message."""
    canvas.draw_text(message, [50,112], 36, "Red")
# Create a frame and assign callbacks to event handlers
frame = simplegui.create_frame("Home", 300, 200)
frame.add_button("Click me", click)
frame.set_draw_handler(draw)
frame.start()
#####################
# Buggy code -- doesn't start timers
def timer1_handler():
    """Timer handler for the 100 ms timer: print its tag."""
    print "1"
def timer2_handler():
    """Timer handler for the 300 ms timer: print its tag."""
    print "2"
timer1 = simplegui.create_timer(100, timer1_handler)
timer2 = simplegui.create_timer(300, timer2_handler)
timer1.start()
timer2.start()
Mini-Project 3 - [Stopwatch: The Game]
http://www.codeskulptor.org/#user40_6D32nD7Dqj_6.py
# template for "Stopwatch: The Game"
import simplegui
# define global variables
time = 0
X = 0
Y = 0
XY = str(X) + '/' + str(Y)
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(time):
    """Render a count of tenths-of-seconds as the string A:BC:D.

    A = minutes, BC = zero-padded seconds, D = tenths.
    """
    minutes, remainder = divmod(time, 600)
    tens_of_seconds = remainder // 100
    seconds = remainder % 100 // 10
    tenths = remainder % 10
    return str(minutes) + ':' + str(tens_of_seconds) + str(seconds) + ':' + str(tenths)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start():
    """Start-button handler: begin running the stopwatch timer."""
    timer.start()
def stop():
    """Stop-button handler: stop the timer and score the attempt.

    If the timer was actually running, count one attempt (Y) and,
    when stopped exactly on a whole second (tenths digit zero),
    also count a win (X); refresh the X/Y scoreboard string.
    """
    global X, Y, XY
    if timer.is_running():
        Y += 1
        if time % 10 == 0:
            X += 1
        XY = str(X) + '/' + str(Y)
    timer.stop()
def reset():
    """Reset-button handler: zero the elapsed time and both scores."""
    global time, X, Y, XY
    time = 0
    X = 0
    Y = 0
    XY = str(X) + '/' + str(Y)
# define event handler for timer with 0.1 sec interval
def tick():
    """Timer handler (0.1 s interval): advance time by one tenth."""
    global time
    time += 1
# define draw handler
def draw(canvas):
    """Draw handler: render the formatted time and the X/Y score."""
    canvas.draw_text(format(time), [110, 120], 36, 'White', 'sans-serif')
    canvas.draw_text(XY, [215, 35], 36, 'Green', 'sans-serif')
# create frame
frame = simplegui.create_frame("Stopwatch", 300, 200)
timer = simplegui.create_timer(100, tick)
# register event handlers
frame.add_button('Start', start)
frame.add_button('Stop', stop)
frame.add_button('Reset', reset)
frame.set_draw_handler(draw)
# start frame
frame.start()
# Please remember to review the grading rubric
- In Python, the time module can be used to determine the current time. This module includes the method time, which returns the current system time in seconds since a date referred to as the Epoch. The Epoch is a fixed common date shared by all Python installations. Using the date of the Epoch and the current system time, an application such as a clock or calendar can compute the current time/date using basic arithmetic.
import simplegui
n = 23
def collatz_conjecture():
    """Timer handler: advance n one Collatz step, stopping at n == 1."""
    global n
    if n == 1:
        timer.stop()
    elif n % 2 == 0:
        # Even: halve.
        n = n / 2
        print n
    else:
        # Odd: triple and add one.
        n = (n * 3) + 1
        print n
timer = simplegui.create_timer(100, collatz_conjecture)
timer.start()
[Lists]
- A list is a sequence type
- lists use square brackets
- [] = empty list
- position = [x, y]
l = [1, 3, 4, -7, 62, 43]
l2 = ['milk', 'eggs', 'bread', 'butter']
l3 = [[3, 4], ['a', 'b', 'c'], []]
print len(l) #=> 6
print len(l2) #=> 4
print len(l3) #=> 3
print "first element: ", l[0] #=> 1
print "last element: ", l[-1] #=> 43
print l3[1] #=> ['a', 'b', 'c'] -- start counting at 0
print l3[0][1] #=> 4
l4 = l2[1:3] # starting at element 1 but up to (not including) 3
print l4 #=> ['eggs', 'bread']
l2[0] = 'cheese'
print l2 #=> ['cheese', 'eggs', 'bread', 'butter']
- Good programmers keep their lists monogamous (basically vectors) --> all data types of the same type, strings, numerics, objects, etc.
[Keyboard Input]
===
# Keyboard echo
import simplegui
# initialize state
current_key = ' '
# event handlers
def keydown(key):
    """Key-down handler: remember the pressed key as a character."""
    global current_key
    current_key = chr(key) # chr turns a number into a string
def keyup(key):
    """Key-up handler: clear the remembered character."""
    global current_key
    current_key = ' '
def draw(c):
    """Draw handler: echo the current key, alphanumerics only."""
    # NOTE draw_text now throws an error on some non-printable characters
    # Since keydown event key codes do not all map directly to
    # the printable character via ord(), this example now restricts
    # keys to alphanumerics
    if current_key in "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789":
        c.draw_text(current_key, [10, 25], 20, "Red")
# create frame
f = simplegui.create_frame("Echo", 35, 35)
# register event handlers
f.set_keydown_handler(keydown)
f.set_keyup_handler(keyup)
f.set_draw_handler(draw)
# start frame
f.start()
# <18> are the acutal key codes
===
# control the position of a ball using the arrow keys
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
# define event handlers
def draw(canvas):
    """Draw handler: render the ball at its current position."""
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
def keydown(key):
    """Key-down handler: nudge the ball position by a fixed step."""
    vel = 4 # velocity
    if key == simplegui.KEY_MAP["left"]:
        ball_pos[0] -= vel
    elif key == simplegui.KEY_MAP["right"]:
        ball_pos[0] += vel
    elif key == simplegui.KEY_MAP["down"]:
        ball_pos[1] += vel
    elif key == simplegui.KEY_MAP["up"]:
        ball_pos[1] -= vel
# create frame
frame = simplegui.create_frame("Positional ball control", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
# start frame
frame.start()
===
[Motion]
position = velocity * time [p = v * t]
# assumes velocity is constant
===
# Ball motion with an explicit timer
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
init_pos = [WIDTH / 2, HEIGHT / 2] # middle of canvas
vel = [0, 3] # pixels per tick
time = 0
# define event handlers
def tick():
    """Timer handler: advance the elapsed-tick counter."""
    global time
    time = time + 1
def draw(canvas):
    """Draw handler: position = initial position + time * velocity."""
    # create a list to hold ball position
    ball_pos = [0, 0]
    # calculate ball position
    ball_pos[0] = init_pos[0] + time * vel[0]
    ball_pos[1] = init_pos[1] + time * vel[1]
    # draw ball
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
# create frame
frame = simplegui.create_frame("Motion", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, tick)
# start frame
frame.start()
timer.start()
===
- [3,3] + vector [6,1] == [9,4]
P(0) ----> P(1) ----> P(2) ----------> P(3)
V(0) V(1) V(2)
P(t+1) = P(t) + (1 * V(t))
P[0] = P[0] + V[0]
P[1] = P[1] + V[1]
===
# Ball motion with an implicit timer
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [0, 1] # pixels per update (1/60 seconds -- implicit to the draw handler)
# define event handlers
def draw(canvas):
    """Draw handler: step the ball by its velocity each frame, then draw."""
    # Update ball position
    ball_pos[0] += vel[0]
    ball_pos[1] += vel[1]
    # Draw ball
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
# create frame
frame = simplegui.create_frame("Motion", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
# start frame
frame.start()
===
[Collisions and Reflections]
# Distance between two points
Point 1 == p[x,y] # end
Point 2 == q[x,y] # start
math
dist(p,q)^2 == (p[0] - q[0])^2 + (p[1] - q[1])^2 # C^2 = A^2 + B^2
Python
def dist(p, q):
    """Return the Euclidean distance between 2-D points p and q.

    Fixes the original line, which referenced an undefined name P
    (capitalized typo for p) and carried trailing garbage text.
    """
    # Pythagorean theorem: dist^2 = dx^2 + dy^2.
    return math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)
# Vectors and Motion
v[0] = p[0] - q[0]
v[1] = p[1] - q[1]
Moving/translate a point using a vector: p = q + v
p[0] = q[0] + v[0]
p[1] = q[1] + v[1]
# Update for Motion
Math - point at position p with velocity v
p = p + a * v # 'a' is 'some' constant multiple of the velocity
p[0] = p[0] + a * v[0]
p[1] = p[1] + a * v[1]
[Collisions]
Left wall
p[0] <= 0
Right wall
p[0] >= width - 1
Collision of ball with center p and radius r with wall
Left wall
p[0] <= r
Right wall
p[0] >= (width - 1) - r
Bottom wall
p[1] >= (height - 1) - r
Reflections - update the velocity vector v
Left wall - compute reflected velocity vector (negate it)
v[0] = -v[0] # negate
v[1] = v[1] # stays the same
===
0 == x == horizontal
1 == y == vertical
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [-40.0 / 60.0, 5.0 / 60.0]
# define event handlers
def draw(canvas):
    """Draw handler: move the ball and reflect it off the left wall."""
    # Update ball position
    ball_pos[0] += vel[0]
    ball_pos[1] += vel[1]
    # collide and reflect off of left hand side of canvas
    if ball_pos[0] <= BALL_RADIUS:
        vel[0] = - vel[0]
    # Draw ball
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
# create frame
frame = simplegui.create_frame("Ball physics", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
# start frame
frame.start()
===
[Velocity Control]
===
# control the position of a ball using the arrow keys
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
# define event handlers
def draw(canvas):
    """Draw handler: render the ball at its current position."""
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
def keydown(key):
    """Key-down handler: move the ball position directly by a fixed step."""
    vel = 4
    if key == simplegui.KEY_MAP["left"]:
        ball_pos[0] -= vel
    elif key == simplegui.KEY_MAP["right"]:
        ball_pos[0] += vel
    elif key == simplegui.KEY_MAP["down"]:
        ball_pos[1] += vel
    elif key == simplegui.KEY_MAP["up"]:
        ball_pos[1] -= vel
    print ball_pos
# create frame
frame = simplegui.create_frame("Positional ball control", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
# start frame
frame.start()
===
# control the velocity of a ball using the arrow keys
import simplegui
# Initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
ball_pos = [WIDTH / 2, HEIGHT / 2]
vel = [0, 0]
# define event handlers
def draw(canvas):
    """Draw handler: step the ball by its velocity each frame, then draw."""
    # Update ball position
    ball_pos[0] += vel[0]
    ball_pos[1] += vel[1]
    # Draw ball
    canvas.draw_circle(ball_pos, BALL_RADIUS, 2, "Red", "White")
def keydown(key):
    """Key-down handler: change the ball's velocity (not its position)."""
    acc = 1
    if key==simplegui.KEY_MAP["left"]:
        vel[0] -= acc
    elif key==simplegui.KEY_MAP["right"]:
        vel[0] += acc
    elif key==simplegui.KEY_MAP["down"]:
        vel[1] += acc
    elif key==simplegui.KEY_MAP["up"]:
        vel[1] -= acc
    print ball_pos
# create frame
frame = simplegui.create_frame("Velocity ball control", WIDTH, HEIGHT)
# register event handlers
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
# start frame
frame.start()
[Visualizing Lists and Mutation]
###################################
# Mutation vs. assignment
is == ==
################
# Look alike, but different
a = [4, 5, 6]
b = [4, 5, 6]
print "Original a and b:", a, b
print "Are they same thing?", a is b #=> False
a[1] = 20
print "New a and b:", a, b
print
################
# Aliased
c = [4, 5, 6]
d = c
print "Original c and d:", c, d
print "Are they same thing?", c is d #=> True
c[1] = 20
print "New c and d:", c, d
print
################
# Copied
e = [4, 5, 6]
f = list(e)
print "Original e and f:", e, f
print "Are they same thing?", e is f
e[1] = 20
print "New e and f:", e, f
print
###################################
# Interaction with globals
a = [4, 5, 6]
def mutate_part(x):
    """Mutate one element of the global list a in place."""
    a[1] = x #=> for item assignment (mutation) you don't need to specify global, it assumes it
def assign_whole(x):
    """Bind a LOCAL name a; the global list is untouched."""
    a = x #=> here it assumes a is a local variable
def assign_whole_global(x):
    """Rebind the GLOBAL name a via an explicit global declaration."""
    global a
    a = x
mutate_part(100)
print a
assign_whole(200)
print a
assign_whole_global(300)
print a
[Programming Tips]
print 1 is 1 # True
print 1.0 is 1.0 # True
print True is True # True
print "abc" is "abc" # True
print [4, 5, 6] is [4, 5, 6] # False - only type that is mutable // two different lists that happen to look-a-like
print 1 is 1.0 # False - integers are not floating type
print (4, 5, 6) is (4, 5, 6) # False - Tuple
Tuples
- Look like lists but are NOT mutable.
- Tuples and lists support the same non-mutation operations. Like lists, you can loop on tuples.
- The benefit is that sometimes you want to make sure your data is not changed, to protect you data.
# Lists (mutable) vs. tuples (immutable)
print [4, 5, 6] #=> [4, 5, 6]
print (4, 5, 6) #=> (4, 5, 6)
print type([4, 5, 6]) #=> <class 'list'>
print type((4, 5, 6)) #=> <class 'tuple'>
a = [4, 5, 6]
a[1] = 100
print a #=> [4, 100, 6]
b = (4, 5, 6)
b[1] = 100
print b #=> Error - 'tuple' does not support item assignment
[Pong]
===
# Implementation of classic arcade game Pong
import simplegui
import random
# initialize globals
WIDTH = 600
HEIGHT = 400
BALL_RADIUS = 20
PAD_WIDTH = 8
PAD_HEIGHT = 80
HALF_PAD_WIDTH = PAD_WIDTH / 2
HALF_PAD_HEIGHT = PAD_HEIGHT / 2
LEFT = False
RIGHT = True
paddle1_vel = [0] # only one item since we do not move horizontally
paddle1_pos = [(WIDTH - 4.0),(HEIGHT / 2.0)]
paddle2_vel = [0] # only one item since we do not move horizontally
paddle2_pos = [(WIDTH - (PAD_WIDTH / 2.0)),(HEIGHT / 2.0)]
ball_pos = [(WIDTH/2), (HEIGHT/2)]
ball_vel = [0.0, 0.0]
acc = 4
vel_increase = 0.1
score_left = 0
score_right = 0
def spawn_ball(direction):
    """Re-center the ball and give it a random serve velocity.

    direction is 'LEFT' or 'RIGHT' and sets the sign of the
    horizontal velocity; the vertical component is always negative.
    """
    global ball_pos
    ball_pos = [(WIDTH/2), (HEIGHT/2)]
    if direction == 'LEFT':
        # draw handler draws 60x per second: 120/60 = 2 & 240/60 = 4
        ball_vel[0] = (random.randrange(2.0, 4.0, 1) * -1)
        ball_vel[1] = (random.randrange(1.0, 3.0, 1) * -1)
    elif direction == 'RIGHT':
        # draw handler draws 60x per second: 60/60 = 1 & 180/60 = 3
        ball_vel[0] = random.randrange(2.0, 4.0, 1)
        ball_vel[1] = (random.randrange(1.0, 3.0, 1) * -1)
    else:
        print "Direction parameter of spawn_ball() not recognized.."
# define event handlers
def new_game():
    """Reset both scores and serve the ball toward a random side."""
    global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel
    global score_left, score_right
    score_left = 0
    score_right = 0
    random_side = random.randint(1, 2)
    if random_side == 1:
        spawn_ball('LEFT')
    elif random_side == 2:
        spawn_ball('RIGHT')
    else:
        print "Error new_game() direction not recognized"
def draw(canvas):
    """Per-frame handler: update and render paddles, ball, court, scores.

    Handles paddle clamping to the canvas, ball/wall and ball/paddle
    reflection (each paddle hit scales the velocity by vel_increase),
    and respawns plus scoring when a paddle is missed.
    """
    global vel_increase, score_left, score_right
    # draw mid line and gutters
    canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, "White")
    canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, "White")
    canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, "White")
    # draw ball
    canvas.draw_circle([(ball_pos[0] + ball_vel[0]),(ball_pos[1] + ball_vel[1])], BALL_RADIUS, 5, "White", "White")
    # Paddle 1 position + keep on screen
    if paddle1_pos[1] - HALF_PAD_HEIGHT < 0:
        paddle1_pos[1] = HALF_PAD_HEIGHT
    elif paddle1_pos[1] + HALF_PAD_HEIGHT > HEIGHT:
        paddle1_pos[1] = (HEIGHT - HALF_PAD_HEIGHT)
    else:
        paddle1_pos[1] += paddle1_vel[0]
    # Paddle 2 position + keep on screen
    if paddle2_pos[1] - HALF_PAD_HEIGHT < 0:
        paddle2_pos[1] = HALF_PAD_HEIGHT
    elif paddle2_pos[1] + HALF_PAD_HEIGHT > HEIGHT:
        paddle2_pos[1] = (HEIGHT - HALF_PAD_HEIGHT)
    else:
        paddle2_pos[1] += paddle2_vel[0]
    # Ball position + collision (top/bottom walls reflect vertically)
    if ball_pos[1] >= (HEIGHT - 1) - BALL_RADIUS:
        ball_vel[1] = -ball_vel[1]
    elif ball_pos[1] < BALL_RADIUS + 1:
        ball_vel[1] = -ball_vel[1]
    elif ball_pos[0] + BALL_RADIUS >= WIDTH - PAD_WIDTH:
        # Right gutter: reflect off paddle 2 or score and respawn.
        if ball_pos[1] > (paddle2_pos[1] - HALF_PAD_HEIGHT) and ball_pos[1] < (paddle2_pos[1] + HALF_PAD_HEIGHT):
            ball_vel[0] = -ball_vel[0]
            ball_vel[0] = ball_vel[0] * (1 + vel_increase)
            ball_vel[1] = ball_vel[1] * (1 + vel_increase)
        else:
            spawn_ball('LEFT')
            score_right += 1
    elif ball_pos[0] - BALL_RADIUS <= PAD_WIDTH:
        # Left gutter: reflect off paddle 1 or score and respawn.
        if ball_pos[1] > (paddle1_pos[1] - HALF_PAD_HEIGHT) and ball_pos[1] < (paddle1_pos[1] + HALF_PAD_HEIGHT):
            ball_vel[0] = -ball_vel[0]
            ball_vel[0] = ball_vel[0] * (1 + vel_increase)
            ball_vel[1] = ball_vel[1] * (1 + vel_increase)
        else:
            spawn_ball('RIGHT')
            score_left += 1
    ball_pos[0] += ball_vel[0]
    ball_pos[1] += ball_vel[1]
    # Draw Paddle 1
    canvas.draw_line([(PAD_WIDTH / 2),(paddle1_pos[1] + HALF_PAD_HEIGHT)], [(PAD_WIDTH / 2),(paddle1_pos[1] - HALF_PAD_HEIGHT)], PAD_WIDTH, "White")
    # Draw Paddle 2
    canvas.draw_line([(WIDTH - (PAD_WIDTH / 2)),(paddle2_pos[1] + HALF_PAD_HEIGHT)], [(WIDTH - (PAD_WIDTH / 2)),(paddle2_pos[1] - HALF_PAD_HEIGHT)], PAD_WIDTH, "White")
    # draw scores
    canvas.draw_text(str(score_left), (450, 30), 24, "White", "monospace")
    canvas.draw_text(str(score_right), (150, 30), 24, "White", "monospace")
def keydown(key):
    """Key-down handler: w/s drive paddle 1, up/down drive paddle 2."""
    global acc
    if key == simplegui.KEY_MAP["w"]:
        paddle1_vel[0] -= acc
    elif key == simplegui.KEY_MAP["s"]:
        paddle1_vel[0] += acc
    elif key == simplegui.KEY_MAP["up"]:
        paddle2_vel[0] -= acc
    elif key == simplegui.KEY_MAP["down"]:
        paddle2_vel[0] += acc
def keyup(key):
    """Key-up handler: stop the paddle driven by the released key."""
    if key == simplegui.KEY_MAP["w"]:
        paddle1_vel[0] = 0
    elif key == simplegui.KEY_MAP["s"]:
        paddle1_vel[0] = 0
    elif key == simplegui.KEY_MAP["up"]:
        paddle2_vel[0] = 0
    elif key == simplegui.KEY_MAP["down"]:
        paddle2_vel[0] = 0
# create frame
frame = simplegui.create_frame("Pong", WIDTH, HEIGHT)
frame.set_draw_handler(draw)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.add_button('Restart', new_game)
# start frame
new_game()
frame.start()
http://www.codeskulptor.org/#user40_zOy9sLlDqc_31.py
===
Dividing lists:
my_list[: len(my_list) // 2] and my_list[len(my_list) // 2 :]
my_list[0 : len(my_list) // 2] and my_list[len(my_list) // 2 : len(my_list)]
import math
def dist(p, q):
    """Return the Euclidean distance between p and q minus a radius of 2."""
    ball_radius = 2
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    # Subtract the fixed radius from the center-to-center separation.
    return math.sqrt(dx ** 2 + dy ** 2) - ball_radius
p = [4, 7]
q = [2, 9]
print dist(p,q)
===
import simplegui
global_var = 5
def draw(canvas):
    """Draw handler: render the current value of global_var."""
    global global_var
    canvas.draw_text(str(global_var), (10, 50), 24, "White", "monospace")
def keydown(key):
    """Key-down handler: double global_var when 'w' is pressed."""
    global global_var
    if key == simplegui.KEY_MAP["w"]:
        global_var *= 2
def keyup(key):
    """Subtract 3 from global_var whenever the 'w' key is released."""
    global global_var
    if key == simplegui.KEY_MAP["w"]:
        global_var = global_var - 3
# Build a small frame for the quiz example and wire up its handlers.
frame = simplegui.create_frame("Quiz", 100, 100)
frame.set_keydown_handler(keydown)
frame.set_keyup_handler(keyup)
frame.set_draw_handler(draw)
# Start the event loop; global_var changes on 'w' key press/release.
frame.start()
|
normal
|
{
"blob_id": "f1396179152641abf76256dfeab346907cb1e386",
"index": 2738,
"step-1": "[Interactive Programming with Python - Part 1]\n\n[Arithmetic Expressions]\n\n# numbers - two types, an integer or a decimal number\n# two correspending data types int() and float()\n\nprint 3, -1, 3.14159, -2.8\n\n# we can convert between data types using int() and float()\n# note that int() take the \"whole\" part of a decimal number\n# float() applied to integers is boring\n\nprint type(3), type(3.14159), type(3.0)\n#=> <type 'int'><type 'float'><type 'float'>\n\nprint int(3.14159), int(-2.8)\n#=> 3 -2\nprint float(3), float(-1)\n#=> 3.0 -1.0\n\n# floating point number have around 15 decimal digits of accuracy\n# pi is 3.1415926535897932384626433832795028841971...\n# square root of two is 1.4142135623730950488016887242096980785696...\n\n# approximation of pi, Python displays 12 decimal digits\n\nprint 3.1415926535897932384626433832795028841971\n#=> 3.14159265359\n\n# appoximation of square root of two, Python displays 12 decimal digits\n\nprint 1.4142135623730950488016887242096980785696\n#=> 1.41421356237\n\n# arithmetic operators\n# +\t\tplus\t\taddition\n# -\t\tminus\t\tsubtraction\n# *\t\ttimes\t\tmultiplication\n# /\t\tdivided by \tdivision\n# ** power\t\texponentiation\n\n# If one operand is a decimal (float), the answer is decimal\n\nprint 1.0 / 3, 5.0 / 2.0, -7 / 3.0\n#=> 0.333333333333 2.5 -2.33333333333\n\n# If both operands are ints, the answer is an int (rounded down)\n\nprint 1 / 3, 5 / 2, -7 / 3\n#=> 0 2 -3 \n\n# expressions - number or a binary operator applied to two expressions\n# minus is also a unary operator and can be applied to a single expression\n\nprint 1 + 2 * 3, 4.0 - 5.0 / 6.0, 7 * 8 + 9 * 10\n\n# expressions are entered as sequence of numbers and operations\n# how are the number and operators grouped to form expressions?\n# operator precedence - \"Please Excuse My Dear Aunt Sallie\" = (), **, *, /, +,-\n\nprint 1 * 2 + 3 * 4\nprint 2 + 12\n\n# always manually group using parentheses when in doubt\n\nprint 1 * (2 + 3) * 
4\nprint 1 * 5 * 4\n\n[Variables]\n\n# valid variable names - consists of letters, numbers, underscore (_)\n# starts with letter or underscore\n# case sensitive (capitalization matters)\n\n# legal names - ninja, Ninja, n_i_n_j_a\n# illegal names - 1337, 1337ninja\n\n# Python convention - multiple words joined by _\n# legal names - elite_ninja, leet_ninja, ninja_1337\n# illegal name 1337_ninja\n\n# assign to variable name using single equal sign =\n# (remember that double equals == is used to test equality)\n\n# examples \n\nmy_name = \"Joe Warren\"\nprint my_name\n\nmy_age = 51\nprint my_age\n\nmy_age = my_age + 1 == my_age += 1\n\n# the story of the magic pill\n\nmagic_pill = 30\nprint my_age - magic_pill\n\nmy_grand_dad = 74\n\nprint my_grand_dad - 2 * magic_pill\n\n# Temperature examples\n\n# convert from Fahrenheit to Celsuis\n# c = 5 / 9 * (f - 32)\n# use explanatory names\n\ntemp_Fahrenheit = 212\n\ntemp_Celsius = 5.0 / 9.0 * (temp_Fahrenheit - 32)\n\nprint temp_Celsius\n\n# test it! 
32 Fahrenheit is 0 Celsius, 212 Fahrenheit is 100 Celsius\n\n\n# convert from Celsius to Fahrenheit\n# f = 9 / 5 * c + 32\n\ntemp_Celsius = 100\n\ntemp_Fahrenheit = 9.0 / 5.0 * temp_Celsius + 32\n\nprint temp_Fahrenheit\n\n[Functions]\n\n# computes the area of a triangle\ndef triangle_area(base, height): # header - ends in colon\n area = (1.0 / 2) * base * height # body - all of body is indented\n return area # body - return outputs value\n\na1 = triangle_area(3, 8)\nprint a1\na2 = triangle_area(14, 2)\nprint a2\n\n# converts fahrenheit to celsius\ndef fahrenheit2celsius(fahrenheit):\n celsius = (5.0 / 9) * (fahrenheit - 32)\n return celsius\n\n# test!!!\nc1 = fahrenheit2celsius(32)\nc2 = fahrenheit2celsius(212)\nprint c1, c2\n\n# converts fahrenheit to kelvin\ndef fahrenheit2kelvin(fahrenheit):\n celsius = fahrenheit2celsius(fahrenheit)\n kelvin = celsius + 273.15\n return kelvin\n\n# test!!!\nk1 = fahrenheit2kelvin(32)\nk2 = fahrenheit2kelvin(212)\nprint k1, k2\n\n# prints hello, world!\ndef hello():\n print \"Hello, world!\"\n\n# test!!!\nhello() # call to hello prints \"Hello, world!\"\nh = hello() # call to hello prints \"Hello, world!\" a second time\nprint h # prints None since there was no return value\n\nDo not forget:\n- :\n- return\n- indentation\n\n[More Operations]\n\n# Remainder / % / modulo - modular arithmetic works both in negative as positive direction\n\n# systematically restrict computation to a range\n# long division - divide by a number, we get a quotient plus a remainder\n# quotient is integer division //, the remainder is % (Docs)\n\n# problem - get the ones digit of a number\nnum = 49\ntens = num // 10 # --> 4\nones = num % 10 # --> 9\nprint tens, ones\nprint 10 * tens + ones, num\n\n# application - 24 hour clock\n# http://en.wikipedia.org/wiki/24-hour_clock\n\nhour = 20\nshift = 8\nprint (hour + shift) % 24\n\n# application - screen wraparound\n# Spaceship from week seven\n\nwidth = 800\nposition = 797\nmove = 5\nposition = (position + 
move) % width\nprint position # --> 2\n\nwidth = 800\nposition = 797\nmove = -5\nposition = (position + move) % width\nprint position # --> 797\n\n# Data conversion operations\n\n# convert an integer into string - str\n# convert an hour into 24-hour format \"03:00\", always print leading zero\n\nhour = 3\nones = hour % 10 \t\t\t\t\t# --> 3\ntens = hour // 10 \t\t\t\t\t# --> 0\nprint tens, ones, \":00\" \t\t\t# --> 0 3 :00\nprint str(tens), str(ones), \":00\" \t# --> 0 3 :00\nprint str(tens) + str(ones) + \":00\" # --> 03:00\n\n# convert a string into numbers using int and float\n\n# Python modules - extra functions implemented outside basic Python\n\nimport simplegui\t# access to drawing operations for interactive applications\n\nimport math\t \t\t# access to standard math functions, e.g; trig\n\nimport random \t# functions to generate random numbers\n\n# look in Docs for useful functions\n\nprint math.pi\n\n[Logic and Comparisons]\n\nEvaluation hierarchy: NOT - AND - OR\n\n\n-- Comparison Operators\n\n# >\n# <\n# >=\n# <=\n# ==\n# !=\n\n[Conditionals]\n\ndef greet(friend, money):\n if friend and (money > 20):\n print \"Hi!\"\n money = money - 20\n elif friend:\n print \"Hello\"\n else:\n print \"Ha ha\"\n money = money + 10\n return money\n\n\nmoney = 15\n\nmoney = greet(True, money)\nprint \"Money:\", money\nprint \"\"\n\nmoney = greet(False, money)\nprint \"Money:\", money\nprint \"\"\n\nmoney = greet(True, money)\nprint \"Money:\", money\nprint \"\"\n\n[Programming Tips]\n\nimport random\n\ndef random_dice():\n die1 = random.randrange(1, 7)\n die2 = random.randrange(1, 7)\n return die1 + die2\n\ndef volume_sphere(radius):\n return 4.0/3.0 * math.pi * (radius ** 3)\n\n# => attribute error is a syntax error after the '.'\n\ndef area_triangle(base, height):\n return 0.5 * base * height\n\n# Poor readability\ndef area(a,b,c):\n s = (a+b+c)/2.0\n return math.sqrt(s*(s-a)*(s-b)*(s-c))\n\n# Improved readability\ndef area_triangle_sss(side1, side2, side3):\n \n 
\"\"\"\n Returns the area of a triangle, given the lengths of [Documentation String]\n its three sides.\n \"\"\"\n \n # Use Heron's formula\n semiperim = (side1 + side2 + side3) / 2.0\n return math.sqrt(semiperim *\n (semiperim - side1) *\n (semiperim - side2) * \n (semiperim - side3))\n\n[Rock-paper-scissors-lizard-Spock]\n\nn = 123\n\nprint n % 100 #=> 23\nprint n % 10 #=> 3\nprint n // 10 #=> 12\n\n[Event-driven Programming]\n\nStart --> Initialize --> Wait <---> (Event +) Handler\n\nEvents\n- Input (e.g. button, text box)\n- Keyboard (e.g key down, key up)\n- Mouse (e.g. click, drag)\n- Timer\n\n# Example of a simple event-driven program\n\n# CodeSkulptor GUI module\nimport simplegui\n\n# Event handler\ndef tick():\n print \"tick!\"\n\n# Register handler\ntimer = simplegui.create_timer(1000, tick)\n\n# Start timer\ntimer.start()\n\nEvent Queue\n- System puts events in this (invisible) queue\n\n[Local vs. Global Variables]\n\n# global vs local examples\n\n# num1 is a global variable\n\nnum1 = 1\nprint num1\n\n# num2 is a local variable\n\ndef fun():\n num1 = 2\n num2 = num1 + 1\n print num2\n \nfun()\n\n# the scope of global num1 is the whole program, num 1 remains defined\nprint num1\n\n# the scope of the variable num2 is fun(), num2 is now undefined\n# print num2 #=> error 'num2' not defined\n\n# why use local variables?\n# give a descriptive name to a quantity\n# avoid computing something multiple times\n\ndef fahren_to_kelvin(fahren):\n celsius = 5.0 / 9 * (fahren - 32)\n zero_celsius_in_kelvin = 273.15\n return celsius + zero_celsius_in_kelvin\n\nprint fahren_to_kelvin(212)\n\n# the risk/reward of using global variables\n\n# risk - consider the software system for an airliner\n# critical piece - flight control system\n# non-critical piece - in-flight entertainment system\n\n# both systems might use a variable called \"dial\"\n# we don't want possibility that change the volume on your audio\n# causes the plane's flaps to change!\n\n# example\nnum = 4\n\ndef 
fun1():\n global num # to access global variable\n num = 5\n \ndef fun2():\n global num\n num = 6\n\n# note that num changes after each call with no obvious explanation \nprint num\nfun1()\nprint num\nfun2()\nprint num\n\n# global variables are an easy way for event handlers\n# to communicate game information.\n\n# safer method - but they required more sophisticated\n# object-programming techniques\n\n[SimpleGUI]\n\nimport simplegui\n\nmessage = \"Welcome!\"\n\n# Handler for mouse click\ndef click():\n global message\n message = \"Good job!\"\n\n# Handler to draw on canvas\ndef draw(canvas):\n canvas.draw_text(message, [50,112], 36, \"Red\")\n\n# Create a frame and assign callbacks to event handlers\nframe = simplegui.create_frame(\"Home\", 300, 200)\nframe.add_button(\"Click me\", click)\nframe.set_draw_handler(draw)\n\n# Start the frame animation\nframe.start()\n\n-- Program Structure\n1 - Define globals (state)\n2 - Define Helper functions\n3 - Define Classes\n4 - Define event handlers\n5 - Create a frame\n6 - Register event handlers\n7 - Start the frame & timers\n\n# SimpleGUI program template\n\n# Import the module\nimport simplegui\n\n# Define global variables (program state)\ncounter = 0\n\n# Define \"helper\" functions\ndef increment():\n global counter \n counter = counter + 1\n\n# Define event handler functions\ndef tick():\n increment()\n print counter\n\ndef buttonpress():\n global counter:\n counter = 0\n\n# Create a frame\nframe = simplegui.create_frame[\"SimpelGUI Test\", 100, 100]\n\n# Register event handlers\ntimer = simplegui.create_timer[1000, tick]\nframe.add_button(\"Click me!\", buttonpress)\n\n# Start frame and timers\nframe.start()\ntimer.start()\n\n[Buttons & Input Fields]\n\n# calculator with all buttons\n\nimport simplegui\n\n# intialize globals\nstore = 0\noperand = 0\n\n\n# event handlers for calculator with a store and operand\n\ndef output():\n \"\"\"prints contents of store and operand\"\"\"\n print \"Store = \", store\n print 
\"Operand = \", operand\n print \"\"\n \ndef swap():\n \"\"\" swap contents of store and operand\"\"\"\n global store, operand\n store, operand = operand, store\n output()\n \ndef add():\n \"\"\" add operand to store\"\"\"\n global store\n store = store + operand\n output()\n\ndef sub():\n \"\"\" subtract operand from store\"\"\"\n global store\n store = store - operand\n output()\n\ndef mult():\n \"\"\" multiply store by operand\"\"\"\n global store\n store = store * operand\n output()\n\ndef div():\n \"\"\" divide store by operand\"\"\"\n global store\n store = store / operand\n output()\n\ndef enter(t):\n \"\"\" enter a new operand\"\"\"\n global operand\n operand = float(t)\n output()\n \n# create frame\nf = simplegui.create_frame(\"Calculator\",300,300)\n\n# register event handlers and create control elements\nf.add_button(\"Print\", output, 100)\nf.add_button(\"Swap\", swap, 100)\nf.add_button(\"Add\", add, 100)\nf.add_button(\"Sub\", sub, 100)\nf.add_button(\"Mult\", mult, 100)\nf.add_button(\"Div\", div, 100)\nf.add_input(\"Enter\", enter, 100)\n\n\n# get frame rolling\nf.start()\n\n[Programming Tips]\n\n##############\n# Example of missing \"global\"\n\nn1 = 0\n\ndef increment():\n n1 = n1 + 1\n\nincrement()\nincrement()\nincrement()\n\nprint n1\n\n\n##############\n# Example of missing \"global\"\n\nn2 = 0\n\ndef assign(x):\n n2 = x\n\nassign(2)\nassign(15)\nassign(7)\n\nprint n2\n\n\n##############\n# Example of missing \"return\"\n\nn3 = 0\n\ndef decrement():\n global n3\n n3 = n3 - 1\n\nx = decrement()\n\nprint \"x = \", x\nprint \"n = \", n\n\n\n##############\n# Example of print debugging\n\nimport simplegui\n\nx = 0\n\ndef f(n):\n print \"f: n,x = \", n, x\n result = n ** x\n print \"f: result = \",result\n return result\n \ndef button_handler():\n global x\n print \"bh : x = \", x\n x += 1\n print \"bh : x = \", x\n\ndef input_handler(text):\n print \"ih : text = \", text\n print f(float(text))\n \nframe = simplegui.create_frame(\"Example\", 200, 
200)\nframe.add_button(\"Increment\", button_handler)\nframe.add_input(\"Number:\", input_handler, 100)\n\nframe.start()\n\n\n##############\n# Examples of simplifying conditionals\n\ndef f1(a, b):\n \"\"\"Returns True exactly when a is False and b is True.\"\"\" \n if a == False and b == True:\n return True\n else:\n return False\n\ndef f2(a, b):\n \"\"\"Returns True exactly when a is False and b is True.\"\"\" \n if not a and b:\n return True\n else:\n return False \n\ndef f3(a, b):\n \"\"\"Returns True exactly when a is False and b is True.\"\"\" \n return not a and b\n\ndef g1(a, b):\n \"\"\"Returns False eactly when a and b are both True.\"\"\" \n if a == True and b == True:\n return False\n else:\n return True\n \ndef g2(a, b):\n \"\"\"Returns False eactly when a and b are both True.\"\"\" \n if a and b:\n return False\n else:\n return True\n\ndef g3(a, b):\n \"\"\"Returns False eactly when a and b are both True.\"\"\" \n return not (a and b)\n\n[PEP 8 - Styleguide]\n\n- Use 4-space indentation, and no tabs.\n\n- 4 spaces are a good compromise between small indentation (allows greater nesting depth) and large indentation (easier to read). Tabs introduce confusion, and are best left out.\n\n- Wrap lines so that they don’t exceed 79 characters.\n\n- This helps users with small displays and makes it possible to have several code files side-by-side on larger displays.\n\n- Use blank lines to separate functions and classes, and larger blocks of code inside functions.\n\n- When possible, put comments on a line of their own.\n\n- Use docstrings.\n\n- Use spaces around operators and after commas, but not directly inside bracketing constructs: a = f(1, 2) + g(3, 4).\n\n- Name your classes and functions consistently; the convention is to use CamelCase for classes and lower_case_with_underscores for functions and methods. 
Always use self as the name for the first method argument (see A First Look at Classes for more on classes and methods).\n\n- Don’t use fancy encodings if your code is meant to be used in international environments. Plain ASCII works best in any case.\n\n[Guess the Number - http://www.codeskulptor.org/#user40_QwCzfXhK4H_9.py]\n\n# template for \"Guess the number\" mini-project\n\nimport simplegui\nimport random\nimport math\n\n# Global Variables\n\nnum_range = 100\nnum_guesses = 7\nsecret_number = 0\n\n# Helper Function\n\ndef new_game():\n global secret_number, num_range, num_guesses\n secret_number = random.randint(0,num_range)\n calculation_n_1 = max(0,num_range) - min(0,num_range) + 1\n calculation_n_2 = math.ceil(math.log(calculation_n_1,2))\n num_guesses = int(calculation_n_2)\n print \"New game started with range 0 - \", num_range, \"!\"\n print \"Number of guesses left: \", num_guesses\n \n# Event Handlers\n\ndef range100():\n global num_range\n num_range = 100\n new_game()\n \ndef range1000():\n global num_range\n num_range = 1000\n new_game()\n \ndef input_guess(guess):\n global secret_number, num_guesses\n value = int(guess)\n print \"Guess was \", value\n \n if value > secret_number:\n num_guesses -= 1\n if num_guesses == 0:\n print \"Lower & Game Over. Guesses left: \", num_guesses\n new_game()\n else:\n print \"Lower, number of guesses left: \", num_guesses\n elif value < secret_number:\n num_guesses -= 1\n if num_guesses == 0:\n print \"Higher & Game Over. 
Guesses left: \", num_guesses\n new_game()\n else:\n print \"Higher, number of guesses left: \", num_guesses\n elif value == secret_number:\n num_guesses -= 1 \n print \"Correct!\"\n new_game()\n else:\n print \"Error\"\n \n# Create Frame\n\nf = simplegui.create_frame(\"Guess the number\", 200, 200)\n\n# Registration Event Handlers & Start Frame\n\nf.add_button(\"Range is (0, 100)\", range100, 200)\nf.add_button(\"range is (0, 1000)\", range1000, 200)\nf.add_input(\"Enter a guess\", input_guess, 200)\n\n# Starting the Game\n\nnew_game()\n\n[Canvas and Drawing]\n\nEvent-Driven Drawing\n- Computor monitor - 2D grid of pixels stored logically in a frame buffer (something which keeps track of the values of the pixels)\n- Computers update the monitor based on the frame buffer at rate of around 60-72 times a second (refresh rate)\n- Many applications will register a special function called a \"draw handler\" which will update the frame buffer.\n- In CodeSkulptur we will register a simple draw handler using a simpleGUI command. CodeSkultor calls the draw handler at around 60 times per second.\n- The draw handler updates the canvas using a collection of draw commands that include things like draw_text, draw_line, draw_circle. 
\n\nCanvas Coordinates\n- Origin (0) is always in the left uppper corner, not lower!\n\n# first example of drawing on the canvas\n\nimport simplegui\n\n# define draw handler\ndef draw(canvas):\n canvas.draw_text(\"Hello!\",[100, 100], 24, \"White\")\n canvas.draw_circle([100, 100], 2, 2, \"Red\")\n\n# create frame\nframe = simplegui.create_frame(\"Text drawing\", 300, 200)\n\n# register draw handler \nframe.set_draw_handler(draw)\n\n# start frame\nframe.start()\n\n- You start text at the lower left of the string [X,Y.\n\n# example of drawing operations in simplegui\n# standard HMTL color such as \"Red\" and \"Green\"\n# note later drawing operations overwrite earlier drawing operations\n\nimport simplegui\n\n\n# Handler to draw on canvas\ndef draw(canvas):\n canvas.draw_circle([100, 100], 50, 2, \"Red\", \"Pink\")\n canvas.draw_circle([300, 300], 50, 2, \"Red\", \"Pink\")\n canvas.draw_line([100, 100],[300, 300], 2, \"Black\")\n canvas.draw_circle([100, 300], 50, 2, \"Green\", \"Lime\")\n canvas.draw_circle([300, 100], 50, 2, \"Green\", \"Lime\")\n canvas.draw_line([100, 300],[300, 100], 2, \"Black\")\n canvas.draw_polygon([[150, 150], [250, 150], [250, 250], [150, 250]], 2, \n \"Blue\", \"Aqua\")\n canvas.draw_text(\"An example of drawing\", [60, 385], 24, \"Black\")\n\n \n# Create a frame and assign callbacks to event handlers\nframe = simplegui.create_frame(\"Home\", 400, 400)\nframe.set_draw_handler(draw)\nframe.set_canvas_background(\"Yellow\")\n\n\n# Start the frame animation\nframe.start()\n\n[String Processing]\n\n# String literals\ns1 = \"Rixner's funny\"\ns2 = 'Warren wears nice ties!'\ns3 = \" t-shirts!\"\n#print s1, s2\n#print s3\n\n# Combining strings\na = ' and '\ns4 = \"Warren\" + a + \"Rixner\" + ' are nuts!'\nprint s4\n\n# Characters and slices\nprint s1[3] #=> n\nprint s1[-1] #=> y\nprint s1[-2] #=> n\nprint len(s1)\nprint s1[0:6] + s2[6:] --> up to but NOT including.\nprint s2[:13] + s1[9:] + s3\n\n# Converting strings\ns5 = str(375)\nprint 
s5[1:]\ni1 = int(s5[1:])\nprint i1 + 38\n\n# Handle single quantity\ndef convert_units(val, name):\n result = str(val) + \" \" + name\n if val > 1:\n result = result + \"s\"\n return result\n \n# convert xx.yy to xx dollars and yy cents\ndef convert(val):\n # Split into dollars and cents\n dollars = int(val)\n cents = int(round(100 * (val - dollars)))\n\n # Convert to strings\n dollars_string = convert_units(dollars, \"dollar\")\n cents_string = convert_units(cents, \"cent\")\n\n # return composite string\n if dollars == 0 and cents == 0:\n return \"Broke!\"\n elif dollars == 0:\n return cents_string\n elif cents == 0:\n return dollars_string\n else:\n return dollars_string + \" and \" + cents_string\n \n \n# Tests\nprint convert(11.23)\nprint convert(11.20) \nprint convert(1.12)\nprint convert(12.01)\nprint convert(1.01)\nprint convert(0.01)\nprint convert(1.00)\nprint convert(0)\n\n[Interactive Drawing]\n\n# interactive application to convert a float in dollars and cents\n\nimport simplegui\n\n# define global value\n\nvalue = 3.12\n\n# Handle single quantity\ndef convert_units(val, name):\n result = str(val) + \" \" + name\n if val > 1:\n result = result + \"s\"\n return result\n \n# convert xx.yy to xx dollars and yy cents\ndef convert(val):\n # Split into dollars and cents\n dollars = int(val)\n cents = int(round(100 * (val - dollars)))\n\n # Convert to strings\n dollars_string = convert_units(dollars, \"dollar\")\n cents_string = convert_units(cents, \"cent\")\n\n # return composite string\n if dollars == 0 and cents == 0:\n return \"Broke!\"\n elif dollars == 0:\n return cents_string\n elif cents == 0:\n return dollars_string\n else:\n return dollars_string + \" and \" + cents_string \n\n# define draw handler\ndef draw(canvas):\n canvas.draw_text(convert(value), [60, 110], 24, \"White\")\n\n# define an input field handler\ndef input_handler(text):\n global value\n value = float(text)\n\n# create a frame\nframe = simplegui.create_frame(\"Converter\", 400, 
200)\nframe.add_input(\"Enter value\", input_handler, 100)\n\n\n# register event handlers\nframe.set_draw_handler(draw)\n\n# start the frame\nframe.start()\n\n---\n\nstring = '1lll1l1l1l1ll1l111ll1l1ll1l1ll1ll111ll1ll1ll1l1ll1ll1ll1ll1lll1l1l1l1l1l1l1l1l1l1l1l1ll1lll1l111ll1l1l1l1l1'\nprint len(string)\n\nones = 0\nels = 0\nother = 0\n \nfor i in range(0,len(string)):\n if string[i] == '1':\n ones += 1\n elif string[i] == 'l':\n els += 1\n else:\n other += 1\n\nprint \"Ones: \", ones\nprint \"L's: \", els\nprint \"Other: \", other\n\n[Timers]\n\n# Simple \"screensaver\" program.\n\n# Import modules\nimport simplegui\nimport random\n\n# Global state\nmessage = \"Python is Fun!\"\nposition = [50, 50]\nwidth = 500\nheight = 500\ninterval = 2000\n\n# Handler for text box\ndef update(text):\n global message\n message = text\n \n# Handler for timer\ndef tick():\n x = random.randrange(0, width)\n y = random.randrange(0, height)\n position[0] = x #=> When you are changing elements of a global variable, the global declaration is optional!\n position[1] = y #=> When you are changing elements of a global variable, the global declaration is optional!\n\n# Handler to draw on canvas\ndef draw(canvas):\n canvas.draw_text(message, position, 36, \"Red\")\n\n# Create a frame \nframe = simplegui.create_frame(\"Home\", width, height)\n\n# Register event handlers\ntext = frame.add_input(\"Message:\", update, 150)\nframe.set_draw_handler(draw)\ntimer = simplegui.create_timer(interval, tick)\n\n# Start the frame animation\nframe.start()\ntimer.start()\n\n[Programming Tips - Week 3]\n\n#####################\n# Example of event-driven code, buggy version\n\nimport simplegui\n\nsize = 10\nradius = 10\n\n# Define event handlers.\n\ndef incr_button_handler():\n \"\"\"Increment the size.\"\"\"\n global size\n size += 1\n label.set_text(\"Value: \" + str(size))\n \ndef decr_button_handler():\n \"\"\"Decrement the size.\"\"\"\n global size\n # Insert check that size > 1, to make sure it stays 
positive\n # NOTE that this restriction has changed from the video\n # since draw_circle now throws an error if radius is zero\n size -= 1\n label.set_text(\"Value: \" + str(size))\n\ndef change_circle_handler():\n \"\"\"Change the circle radius.\"\"\"\n global radius\n radius = size\n # Insert code to make radius label change.\n \ndef draw_handler(canvas):\n \"\"\"Draw the circle.\"\"\"\n canvas.draw_circle((100, 100), radius, 5, \"Red\")\n\n# Create a frame and assign callbacks to event handlers.\n\nframe = simplegui.create_frame(\"Home\", 200, 200)\nlabel = frame.add_label(\"Value: \" + str(size))\nframe.add_button(\"Increase\", incr_button_handler)\nframe.add_button(\"Decrease\", decr_button_handler)\nframe.add_label(\"Radius: \" + str(radius))\nframe.add_button(\"Change circle\", change_circle_handler)\nframe.set_draw_handler(draw_handler)\n\n# Start the frame animation\n\nframe.start()\n\n---\n\nimport simplegui\n\n#####################\n# Buggy code -- doesn't start frame\n\nmessage = \"Welcome!\"\n\ndef click():\n \"\"\"Change message on mouse click.\"\"\"\n global message\n message = \"Good job!\"\n\ndef draw(canvas):\n \"\"\"Draw message.\"\"\"\n canvas.draw_text(message, [50,112], 36, \"Red\")\n\n# Create a frame and assign callbacks to event handlers\n\nframe = simplegui.create_frame(\"Home\", 300, 200)\nframe.add_button(\"Click me\", click)\nframe.set_draw_handler(draw)\n\nframe.start()\n\n#####################\n# Buggy code -- doesn't start timers\n\ndef timer1_handler():\n print \"1\"\n \ndef timer2_handler():\n print \"2\"\n\ntimer1 = simplegui.create_timer(100, timer1_handler)\ntimer2 = simplegui.create_timer(300, timer2_handler)\n\ntimer1.start()\ntimer2.start()\n\nMini-Project 3 - [Stopwatch: The Game]\n\nhttp://www.codeskulptor.org/#user40_6D32nD7Dqj_6.py\n\n# template for \"Stopwatch: The Game\"\n\nimport simplegui\n\n# define global variables\n\ntime = 0\nX = 0\nY = 0\nXY = str(X) + '/' + str(Y)\n\n# define helper function format that converts 
time\n# in tenths of seconds into formatted string A:BC.D\n\ndef format(time):\n A = time // 600 \n B = (time - A * 600) // 100\n C = time % 100 // 10\n D = time % 10\n return str(A) + ':' + str(B) + str(C) + ':' + str(D)\n \n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\n\ndef start():\n timer.start()\n\ndef stop():\n global X, Y, XY\n if timer.is_running():\n Y += 1\n if time % 10 == 0:\n X += 1\n XY = str(X) + '/' + str(Y)\n timer.stop()\n\ndef reset():\n global time, X, Y, XY\n time = 0\n X = 0\n Y = 0 \n XY = str(X) + '/' + str(Y)\n \n# define event handler for timer with 0.1 sec interval\n\ndef tick():\n global time\n time += 1\n\n# define draw handler\ndef draw(canvas):\n canvas.draw_text(format(time), [110, 120], 36, 'White', 'sans-serif')\n canvas.draw_text(XY, [215, 35], 36, 'Green', 'sans-serif')\n \n# create frame\nframe = simplegui.create_frame(\"Stopwatch\", 300, 200)\ntimer = simplegui.create_timer(100, tick)\n\n# register event handlers\n\nframe.add_button('Start', start)\nframe.add_button('Stop', stop)\nframe.add_button('Reset', reset)\nframe.set_draw_handler(draw)\n\n# start frame\n\nframe.start()\n\n# Please remember to review the grading rubric\n\n- In Python, the time module can be used to determine the current time. This module includes the method time which returns the current system time in seconds since a date referred as the Epoch. The Epoch is fixed common date shared by all Python installations. 
Using the date of the Epoch and the current system time, an application such as a clock or calendar can compute the current time/date using basic arithmetic.\n\nimport simplegui\n\nn = 23\n\ndef collatz_conjecture():\n global n\n if n == 1:\n timer.stop()\n elif n % 2 == 0:\n n = n / 2\n print n\n else:\n n = (n * 3) + 1\n print n\n \ntimer = simplegui.create_timer(100, collatz_conjecture)\ntimer.start()\n\n[Lists]\n\n- A list is a sequence type\n- lists use square brackets \n- [] = empty list\n- position = [x, y]\n\nl = [1, 3, 4, -7, 62, 43]\nl2 = ['milk', 'eggs', 'bread', 'butter']\nl3 = [[3, 4], ['a', 'b', 'c'], []]\n\nprint len(l) #=> 6\nprint len(l2) #=> 4\nprint len(l3) #=> 3\n\nprint \"first element: \", l[0] #=> 1\nprint \"last element: \", l[-1] #=> 43\nprint l3[1] #=> ['a', 'b', 'c'] -- start counting at 0\nprint l3[0][1] #=> 4\nl4 = 12[1:3] # starting at element 1 but up to (not including) 3\nprint l4 #=> ['eggs', 'bread']\n\nl2[0] = 'cheese'\nprint l2 #=> ['cheese', 'eggs', 'bread', 'butter']\n- Good programmers keep their lists monogamous (basically vectors) --> all data types of the same type, strings, numerics, objects, etc.\n\n[Keyboard Input]\n\n===\n\n# Keyboard echo\n\nimport simplegui\n\n# initialize state\ncurrent_key = ' '\n\n# event handlers\ndef keydown(key):\n global current_key\n current_key = chr(key) # chr turns a number into a string\n \ndef keyup(key):\n global current_key\n current_key = ' '\n \ndef draw(c):\n # NOTE draw_text now throws an error on some non-printable characters\n # Since keydown event key codes do not all map directly to\n # the printable character via ord(), this example now restricts\n # keys to alphanumerics\n \n if current_key in \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\":\n c.draw_text(current_key, [10, 25], 20, \"Red\") \n \n# create frame \nf = simplegui.create_frame(\"Echo\", 35, 35)\n\n# register event handlers\nf.set_keydown_handler(keydown)\nf.set_keyup_handler(keyup)\nf.set_draw_handler(draw)\n\n# start 
frame\nf.start()\n\n# <18> are the acutal key codes\n\n===\n\n# control the position of a ball using the arrow keys\n\nimport simplegui\n\n# Initialize globals\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\n\nball_pos = [WIDTH / 2, HEIGHT / 2]\n\n# define event handlers\ndef draw(canvas):\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\ndef keydown(key):\n vel = 4 # velocity\n if key == simplegui.KEY_MAP[\"left\"]:\n ball_pos[0] -= vel\n elif key == simplegui.KEY_MAP[\"right\"]:\n ball_pos[0] += vel\n elif key == simplegui.KEY_MAP[\"down\"]:\n ball_pos[1] += vel\n elif key == simplegui.KEY_MAP[\"up\"]:\n ball_pos[1] -= vel \n \n# create frame \nframe = simplegui.create_frame(\"Positional ball control\", WIDTH, HEIGHT)\n\n# register event handlers\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\n\n# start frame\nframe.start()\n\n===\n\n[Motion]\n\nposition = velocity * time [p = v * t]\n# assumes velocity is constant \n\n===\n\n# Ball motion with an explicit timer\n\nimport simplegui\n\n# Initialize globals\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\n\ninit_pos = [WIDTH / 2, HEIGHT / 2] # middle of canvas\nvel = [0, 3] # pixels per tick\ntime = 0\n\n# define event handlers\ndef tick():\n global time\n time = time + 1\n\ndef draw(canvas):\n # create a list to hold ball position\n ball_pos = [0, 0]\n\n # calculate ball position\n ball_pos[0] = init_pos[0] + time * vel[0]\n ball_pos[1] = init_pos[1] + time * vel[1]\n \n # draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\n# create frame\nframe = simplegui.create_frame(\"Motion\", WIDTH, HEIGHT)\n\n# register event handlers\nframe.set_draw_handler(draw)\n\ntimer = simplegui.create_timer(100, tick)\n\n# start frame\nframe.start()\ntimer.start()\n\n===\n\n- [3,3] + vector [6,1] == [9,4]\n\nP(0) ----> P(1) ----> P(2) ----------> P(3)\n V(0) V(1) V(2)\n\nP(t+1) = P(t) + (1 * V(t))\n\nP[0] = P[0] + V[0]\nP[1] = P[1] + V[1]\n\n===\n\n# Ball motion with an 
implicit timer\n\nimport simplegui\n\n# Initialize globals\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\n\nball_pos = [WIDTH / 2, HEIGHT / 2]\nvel = [0, 1] # pixels per update (1/60 seconds -- implicit to the draw handler)\n\n# define event handlers\ndef draw(canvas):\n # Update ball position\n ball_pos[0] += vel[0]\n ball_pos[1] += vel[1]\n\n # Draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\n# create frame\nframe = simplegui.create_frame(\"Motion\", WIDTH, HEIGHT)\n\n# register event handlers\nframe.set_draw_handler(draw)\n\n# start frame\nframe.start()\n\n===\n\n[Collisions and Reflections]\n\n# Distance between two points\n\nPoint 1 == p[x,y] # end\nPoint 2 == q[x,y] # start\n\nmath\ndist(p,q)^2 == (p[0] - q[0])^2 + (p[1] - q[1])^2 # C^2 = A^2 + B^2\n\nPython\ndef dist(p, q):\n return math.sqrt((p[0] - q[0])**2 + (P[1] - q[1])**2)a=\n \n# Vectors and Motion\n\nv[0] = p[0] - q[0]\nv[1] = p[1] - v[1]\n\nMoving/translate a point using a vector: p = q + v\n\np[0] = q[0] + v[0]\np[1] = q[1] + v[1]\n\n# Update for Motion\n\nMath - point at position p with velocity v\np = p + a * v # 'a' is 'some' constant multiple of the velocity\n\np[0] = p[0] + a * v[0]\np[1] = p[1] + a * v[1]\n\n[Collisions]\n\nLeft wall\np[0] <= 0\n\nRight wall\np[0] >= width - 1\n\nCollision of ball with center p and radius r with wall\n\nLeft wall\np[0] <= r\n\nRight wall\np[0] >= (width - 1) - r\n\nBottom wall\np[1] >= (height - 1) - r \n\nReflections - update the velocity vector v\n\nLeft wall - compute reflected velocity vector (negate it)\nv[0] = -v[0] # negate\nv[1] = v[1] # stays the same\n\n===\n\n0 == x == horizontal\n1 == y == vertical\n\nimport simplegui\n\n# Initialize globals\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\n\nball_pos = [WIDTH / 2, HEIGHT / 2]\nvel = [-40.0 / 60.0, 5.0 / 60.0]\n\n# define event handlers\ndef draw(canvas):\n # Update ball position\n ball_pos[0] += vel[0]\n ball_pos[1] += vel[1]\n \n # collide and reflect off of left hand 
side of canvas\n if ball_pos[0] <= BALL_RADIUS:\n vel[0] = - vel[0]\n \n # Draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\n# create frame\nframe = simplegui.create_frame(\"Ball physics\", WIDTH, HEIGHT)\n\n# register event handlers\nframe.set_draw_handler(draw)\n\n# start frame\nframe.start()\n\n===\n\n[Velocity Control]\n\n===\n\n# control the position of a ball using the arrow keys\n\nimport simplegui\n\n# Initialize globals\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\n\nball_pos = [WIDTH / 2, HEIGHT / 2]\n\n# define event handlers\ndef draw(canvas):\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\ndef keydown(key):\n vel = 4\n if key == simplegui.KEY_MAP[\"left\"]:\n ball_pos[0] -= vel\n elif key == simplegui.KEY_MAP[\"right\"]:\n ball_pos[0] += vel\n elif key == simplegui.KEY_MAP[\"down\"]:\n ball_pos[1] += vel\n elif key == simplegui.KEY_MAP[\"up\"]:\n ball_pos[1] -= vel \n \n print ball_pos\n\n# create frame \nframe = simplegui.create_frame(\"Positional ball control\", WIDTH, HEIGHT)\n\n# register event handlers\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\n\n# start frame\nframe.start()\n\n===\n\n# control the velocity of a ball using the arrow keys\n\nimport simplegui\n\n# Initialize globals\nWIDTH = 600\nHEIGHT = 400\nBALL_RADIUS = 20\n\nball_pos = [WIDTH / 2, HEIGHT / 2]\nvel = [0, 0]\n\n# define event handlers\ndef draw(canvas):\n # Update ball position\n ball_pos[0] += vel[0]\n ball_pos[1] += vel[1]\n\n # Draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 2, \"Red\", \"White\")\n\ndef keydown(key):\n acc = 1\n if key==simplegui.KEY_MAP[\"left\"]:\n vel[0] -= acc\n elif key==simplegui.KEY_MAP[\"right\"]:\n vel[0] += acc\n elif key==simplegui.KEY_MAP[\"down\"]:\n vel[1] += acc\n elif key==simplegui.KEY_MAP[\"up\"]:\n vel[1] -= acc\n \n print ball_pos\n \n# create frame \nframe = simplegui.create_frame(\"Velocity ball control\", WIDTH, HEIGHT)\n\n# register event 
handlers\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\n\n# start frame\nframe.start()\n\n[Visualizing Lists and Mutation]\n\n###################################\n# Mutation vs. assignment\n\nis == ==\n\n################\n# Look alike, but different\n\na = [4, 5, 6]\nb = [4, 5, 6]\nprint \"Original a and b:\", a, b\nprint \"Are they same thing?\", a is b #=> False\n\na[1] = 20\nprint \"New a and b:\", a, b\nprint\n\n################\n# Aliased\n\nc = [4, 5, 6]\nd = c\nprint \"Original c and d:\", c, d\nprint \"Are they same thing?\", c is d #=> True\n\nc[1] = 20\nprint \"New c and d:\", c, d\nprint\n\n################\n# Copied\n\ne = [4, 5, 6]\nf = list(e)\nprint \"Original e and f:\", e, f\nprint \"Are they same thing?\", e is f\n\ne[1] = 20\nprint \"New e and f:\", e, f\nprint\n\n\n###################################\n# Interaction with globals\n\n\na = [4, 5, 6]\n\ndef mutate_part(x):\n a[1] = x #=> for item assignment (mutation) you don't need to specify global, it assumes it\n\ndef assign_whole(x):\n a = x #=> here it assumes a is a local variable\n\ndef assign_whole_global(x):\n global a\n a = x\n\nmutate_part(100)\nprint a\n\nassign_whole(200)\nprint a\n\nassign_whole_global(300)\nprint a\n\n[Programming Tips]\n\nprint 1 is 1 # True\nprint 1.0 is 1.0 # True\nprint True is True # True\nprint \"abc\" is \"abc\" # True\nprint [4, 5, 6] is [4, 5, 6] # False - only type that is mutable // two different lists that happen to look-a-like\nprint 1 is 1.0 # False - integers are not floating type\nprint (4, 5, 6) is (4, 5, 6) # False - Tuple\n\nTuples\n- Look like lists but are NOT mutable.\n- Tuples and lists support the same non-mutation operations. Like lists, you can loop on tuples.\n- The benefit is that sometimes you want to make sure your data is not changed, to protect you data.\n\n# Lists (mutable) vs. 
tuples (immutable)\n\nprint [4, 5, 6] #=> [4, 5, 6]\nprint (4, 5, 6) #=> (4, 5, 6)\n\nprint type([4, 5, 6]) #=> <class 'list'>\nprint type((4, 5, 6)) #=> <class 'tuple'>\n\na = [4, 5, 6]\na[1] = 100\nprint a #=> [4, 100, 6]\n\nb = (4, 5, 6)\nb[1] = 100\nprint b #=> Error - 'tuple' does not support item assignment\n\n[Pong]\n\n===\n\n# Implementation of classic arcade game Pong\n\nimport simplegui\nimport random\n\n# initialize globals \n\nWIDTH = 600\nHEIGHT = 400 \nBALL_RADIUS = 20\nPAD_WIDTH = 8\nPAD_HEIGHT = 80\nHALF_PAD_WIDTH = PAD_WIDTH / 2\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\nLEFT = False\nRIGHT = True\n\npaddle1_vel = [0] # only one item since we do not move horizontally\npaddle1_pos = [(WIDTH - 4.0),(HEIGHT / 2.0)]\npaddle2_vel = [0] # only one item since we do not move horizontally\npaddle2_pos = [(WIDTH - (PAD_WIDTH / 2.0)),(HEIGHT / 2.0)]\n\nball_pos = [(WIDTH/2), (HEIGHT/2)]\nball_vel = [0.0, 0.0]\nacc = 4\nvel_increase = 0.1\n\nscore_left = 0\nscore_right = 1\n\ndef spawn_ball(direction):\n global ball_pos \n ball_pos = [(WIDTH/2), (HEIGHT/2)]\n \n if direction == 'LEFT':\n # draw handler draws 60x per second: 120/60 = 2 & 240/60 = 4\n ball_vel[0] = (random.randrange(2.0, 4.0, 1) * -1) \n ball_vel[1] = (random.randrange(1.0, 3.0, 1) * -1)\n elif direction == 'RIGHT':\n # draw handler draws 60x per second: 60/60 = 1 & 180/60 = 3\n ball_vel[0] = random.randrange(2.0, 4.0, 1)\n ball_vel[1] = (random.randrange(1.0, 3.0, 1) * -1)\n else: \n print \"Direction parameter of spawn_ball() not recognized..\"\n\n# define event handlers\n\ndef new_game():\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel\n global score_left, score_right\n score_left = 0\n score_right = 0\n random_side = random.randint(1, 2)\n \n if random_side == 1:\n spawn_ball('LEFT')\n elif random_side == 2:\n spawn_ball('RIGHT')\n else:\n print \"Error new_game() direction not recognized\"\n \ndef draw(canvas):\n global vel_increase, score_left, score_right\n \n # draw mid line and 
gutters\n \n canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\n canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\n canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\n \n # draw ball\n\n canvas.draw_circle([(ball_pos[0] + ball_vel[0]),(ball_pos[1] + ball_vel[1])], BALL_RADIUS, 5, \"White\", \"White\")\n \n # Paddle 1 position + keep on screen\n \n if paddle1_pos[1] - HALF_PAD_HEIGHT < 0:\n paddle1_pos[1] = HALF_PAD_HEIGHT\n elif paddle1_pos[1] + HALF_PAD_HEIGHT > HEIGHT:\n paddle1_pos[1] = (HEIGHT - HALF_PAD_HEIGHT)\n else:\n paddle1_pos[1] += paddle1_vel[0]\n \n # Paddle 2 position + keep on screen\n \n if paddle2_pos[1] - HALF_PAD_HEIGHT < 0:\n paddle2_pos[1] = HALF_PAD_HEIGHT\n elif paddle2_pos[1] + HALF_PAD_HEIGHT > HEIGHT:\n paddle2_pos[1] = (HEIGHT - HALF_PAD_HEIGHT)\n else:\n paddle2_pos[1] += paddle2_vel[0]\n \n # Ball position + collision\n \n if ball_pos[1] >= (HEIGHT - 1) - BALL_RADIUS:\n ball_vel[1] = -ball_vel[1]\n elif ball_pos[1] < BALL_RADIUS + 1:\n ball_vel[1] = -ball_vel[1]\n elif ball_pos[0] + BALL_RADIUS >= WIDTH - PAD_WIDTH:\n \n if ball_pos[1] > (paddle2_pos[1] - HALF_PAD_HEIGHT) and ball_pos[1] < (paddle2_pos[1] + HALF_PAD_HEIGHT):\n ball_vel[0] = -ball_vel[0]\n ball_vel[0] = ball_vel[0] * (1 + vel_increase)\n ball_vel[1] = ball_vel[1] * (1 + vel_increase)\n else:\n spawn_ball('LEFT')\n score_right += 1\n \n elif ball_pos[0] - BALL_RADIUS <= PAD_WIDTH:\n \n if ball_pos[1] > (paddle1_pos[1] - HALF_PAD_HEIGHT) and ball_pos[1] < (paddle1_pos[1] + HALF_PAD_HEIGHT):\n ball_vel[0] = -ball_vel[0]\n ball_vel[0] = ball_vel[0] * (1 + vel_increase)\n ball_vel[1] = ball_vel[1] * (1 + vel_increase)\n else:\n spawn_ball('RIGHT')\n score_left += 1\n \n ball_pos[0] += ball_vel[0] \n ball_pos[1] += ball_vel[1]\n \n # Draw Paddle 1\n\n canvas.draw_line([(PAD_WIDTH / 2),(paddle1_pos[1] + HALF_PAD_HEIGHT)], [(PAD_WIDTH / 2),(paddle1_pos[1] - HALF_PAD_HEIGHT)], PAD_WIDTH, \"White\")\n \n # Draw 
Paddle 2\n\n canvas.draw_line([(WIDTH - (PAD_WIDTH / 2)),(paddle2_pos[1] + HALF_PAD_HEIGHT)], [(WIDTH - (PAD_WIDTH / 2)),(paddle2_pos[1] - HALF_PAD_HEIGHT)], PAD_WIDTH, \"White\") \n \n # draw scores\n\n canvas.draw_text(str(score_left), (450, 30), 24, \"White\", \"monospace\")\n canvas.draw_text(str(score_right), (150, 30), 24, \"White\", \"monospace\")\n\ndef keydown(key):\n global acc\n if key == simplegui.KEY_MAP[\"w\"]:\n paddle1_vel[0] -= acc\n elif key == simplegui.KEY_MAP[\"s\"]:\n paddle1_vel[0] += acc\n elif key == simplegui.KEY_MAP[\"up\"]:\n paddle2_vel[0] -= acc\n elif key == simplegui.KEY_MAP[\"down\"]:\n paddle2_vel[0] += acc\n\ndef keyup(key):\n if key == simplegui.KEY_MAP[\"w\"]:\n paddle1_vel[0] = 0\n elif key == simplegui.KEY_MAP[\"s\"]:\n paddle1_vel[0] = 0\n elif key == simplegui.KEY_MAP[\"up\"]:\n paddle2_vel[0] = 0\n elif key == simplegui.KEY_MAP[\"down\"]:\n paddle2_vel[0] = 0\n \n# create frame\n\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nframe.add_button('Restart', new_game)\n\n# start frame\n\nnew_game()\nframe.start()\n\nhttp://www.codeskulptor.org/#user40_zOy9sLlDqc_31.py\n\n===\n\nDividing lists:\n\nmy_list[: len(my_list) // 2] and my_list[len(my_list) // 2 :]\nmy_list[0 : len(my_list) // 2] and my_list[len(my_list) // 2 : len(my_list)]\n\nimport math\n\ndef dist(p, q):\n radius = 2\n distance = math.sqrt((p[0] - q[0])**2 + (p[1] - q[1])**2)\n result = distance - radius\n return result\n \np = [4, 7]\nq = [2, 9]\n\nprint dist(p,q)\n\n===\n\nimport simplegui\n\nglobal_var = 5\n\ndef draw(canvas):\n global global_var\n canvas.draw_text(str(global_var), (10, 50), 24, \"White\", \"monospace\")\n\ndef keydown(key):\n global global_var\n if key == simplegui.KEY_MAP[\"w\"]:\n global_var *= 2\n \ndef keyup(key):\n global global_var\n if key == simplegui.KEY_MAP[\"w\"]:\n global_var -= 3\n\nframe = simplegui.create_frame(\"Quiz\", 
100, 100)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nframe.set_draw_handler(draw)\n\nframe.start()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compute(plus, minus, total, inp):
if plus == 1 and minus == 0:
print(total)
return
elif plus == 1 and minus == 1:
print('Impossible')
return
elif abs(plus - minus) > total:
plus
temp = total
total += minus
res = []
if int(total / plus) > temp:
print('Impossible')
return
elif int(total % plus) == 0:
res = [int(total / plus) for i in range(0, plus)]
else:
res = [int(total / (plus - 1)) for i in range(0, plus - 1)]
res.append(total % (plus - 1))
j = 0
prev = 0
for i in inp.split():
if j == 0:
print(res[j], end=' ')
j += 1
elif i == '+' or i == '-':
print(i, end=' ')
prev = i
elif i == '?':
if prev == '+':
print(res[j], end=' ')
j += 1
else:
print('1', end=' ')
else:
print(i, end=' ')
<|reserved_special_token_0|>
for i in inp.split():
if i == '?' or i == '=':
continue
elif i == '+':
plus += 1
elif i == '-':
minus += 1
else:
total = int(i)
compute(plus, minus, total, inp)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.stdin = open('/home/shiva/Learning/1.txt', 'r')
sys.stdout = open('/home/shiva/Learning/2.txt', 'w')
def compute(plus, minus, total, inp):
if plus == 1 and minus == 0:
print(total)
return
elif plus == 1 and minus == 1:
print('Impossible')
return
elif abs(plus - minus) > total:
plus
temp = total
total += minus
res = []
if int(total / plus) > temp:
print('Impossible')
return
elif int(total % plus) == 0:
res = [int(total / plus) for i in range(0, plus)]
else:
res = [int(total / (plus - 1)) for i in range(0, plus - 1)]
res.append(total % (plus - 1))
j = 0
prev = 0
for i in inp.split():
if j == 0:
print(res[j], end=' ')
j += 1
elif i == '+' or i == '-':
print(i, end=' ')
prev = i
elif i == '?':
if prev == '+':
print(res[j], end=' ')
j += 1
else:
print('1', end=' ')
else:
print(i, end=' ')
inp = input()
plus = 1
minus = 0
total = 0
for i in inp.split():
if i == '?' or i == '=':
continue
elif i == '+':
plus += 1
elif i == '-':
minus += 1
else:
total = int(i)
compute(plus, minus, total, inp)
<|reserved_special_token_1|>
import sys
import psyco
sys.stdin = open('/home/shiva/Learning/1.txt', 'r')
sys.stdout = open('/home/shiva/Learning/2.txt', 'w')
def compute(plus, minus, total, inp):
if plus == 1 and minus == 0:
print(total)
return
elif plus == 1 and minus == 1:
print('Impossible')
return
elif abs(plus - minus) > total:
plus
temp = total
total += minus
res = []
if int(total / plus) > temp:
print('Impossible')
return
elif int(total % plus) == 0:
res = [int(total / plus) for i in range(0, plus)]
else:
res = [int(total / (plus - 1)) for i in range(0, plus - 1)]
res.append(total % (plus - 1))
j = 0
prev = 0
for i in inp.split():
if j == 0:
print(res[j], end=' ')
j += 1
elif i == '+' or i == '-':
print(i, end=' ')
prev = i
elif i == '?':
if prev == '+':
print(res[j], end=' ')
j += 1
else:
print('1', end=' ')
else:
print(i, end=' ')
inp = input()
plus = 1
minus = 0
total = 0
for i in inp.split():
if i == '?' or i == '=':
continue
elif i == '+':
plus += 1
elif i == '-':
minus += 1
else:
total = int(i)
compute(plus, minus, total, inp)
<|reserved_special_token_1|>
import sys
import psyco
sys.stdin = open("/home/shiva/Learning/1.txt", "r")
sys.stdout = open("/home/shiva/Learning/2.txt", "w")
def compute(plus,minus,total,inp):
if plus == 1 and minus == 0:
print(total); return
elif (plus == 1 and minus == 1):
print("Impossible"); return
elif (abs(plus-minus) > total):
plus
temp = total
total += minus
res = []
if int(total/plus) > temp:
print("Impossible"); return
elif int(total%plus) == 0:
res = [int(total/plus) for i in range(0,plus)]
else:
res = [int(total/(plus-1)) for i in range(0,plus-1)]
res.append(total%(plus-1))
j = 0
prev = 0
for i in inp.split():
if j == 0:
print(res[j],end=' ')
j+=1
elif i == '+' or i=='-':
print(i,end=' ')
prev = i
elif i == '?':
if prev == '+':
print(res[j],end=' ')
j+=1
else:
print('1',end=' ')
else:
print(i,end=' ')
inp = input()
plus =1
minus = 0
total = 0
for i in inp.split():
if i=='?' or i=='=':
continue
elif i == '+':
plus+=1
elif i == '-':
minus +=1
else:
total = int(i)
compute(plus,minus,total,inp)
|
flexible
|
{
"blob_id": "d29c8ec737b8e962d381c8fdd0999e7e01847836",
"index": 5274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef compute(plus, minus, total, inp):\n if plus == 1 and minus == 0:\n print(total)\n return\n elif plus == 1 and minus == 1:\n print('Impossible')\n return\n elif abs(plus - minus) > total:\n plus\n temp = total\n total += minus\n res = []\n if int(total / plus) > temp:\n print('Impossible')\n return\n elif int(total % plus) == 0:\n res = [int(total / plus) for i in range(0, plus)]\n else:\n res = [int(total / (plus - 1)) for i in range(0, plus - 1)]\n res.append(total % (plus - 1))\n j = 0\n prev = 0\n for i in inp.split():\n if j == 0:\n print(res[j], end=' ')\n j += 1\n elif i == '+' or i == '-':\n print(i, end=' ')\n prev = i\n elif i == '?':\n if prev == '+':\n print(res[j], end=' ')\n j += 1\n else:\n print('1', end=' ')\n else:\n print(i, end=' ')\n\n\n<mask token>\nfor i in inp.split():\n if i == '?' or i == '=':\n continue\n elif i == '+':\n plus += 1\n elif i == '-':\n minus += 1\n else:\n total = int(i)\ncompute(plus, minus, total, inp)\n",
"step-3": "<mask token>\nsys.stdin = open('/home/shiva/Learning/1.txt', 'r')\nsys.stdout = open('/home/shiva/Learning/2.txt', 'w')\n\n\ndef compute(plus, minus, total, inp):\n if plus == 1 and minus == 0:\n print(total)\n return\n elif plus == 1 and minus == 1:\n print('Impossible')\n return\n elif abs(plus - minus) > total:\n plus\n temp = total\n total += minus\n res = []\n if int(total / plus) > temp:\n print('Impossible')\n return\n elif int(total % plus) == 0:\n res = [int(total / plus) for i in range(0, plus)]\n else:\n res = [int(total / (plus - 1)) for i in range(0, plus - 1)]\n res.append(total % (plus - 1))\n j = 0\n prev = 0\n for i in inp.split():\n if j == 0:\n print(res[j], end=' ')\n j += 1\n elif i == '+' or i == '-':\n print(i, end=' ')\n prev = i\n elif i == '?':\n if prev == '+':\n print(res[j], end=' ')\n j += 1\n else:\n print('1', end=' ')\n else:\n print(i, end=' ')\n\n\ninp = input()\nplus = 1\nminus = 0\ntotal = 0\nfor i in inp.split():\n if i == '?' or i == '=':\n continue\n elif i == '+':\n plus += 1\n elif i == '-':\n minus += 1\n else:\n total = int(i)\ncompute(plus, minus, total, inp)\n",
"step-4": "import sys\nimport psyco\nsys.stdin = open('/home/shiva/Learning/1.txt', 'r')\nsys.stdout = open('/home/shiva/Learning/2.txt', 'w')\n\n\ndef compute(plus, minus, total, inp):\n if plus == 1 and minus == 0:\n print(total)\n return\n elif plus == 1 and minus == 1:\n print('Impossible')\n return\n elif abs(plus - minus) > total:\n plus\n temp = total\n total += minus\n res = []\n if int(total / plus) > temp:\n print('Impossible')\n return\n elif int(total % plus) == 0:\n res = [int(total / plus) for i in range(0, plus)]\n else:\n res = [int(total / (plus - 1)) for i in range(0, plus - 1)]\n res.append(total % (plus - 1))\n j = 0\n prev = 0\n for i in inp.split():\n if j == 0:\n print(res[j], end=' ')\n j += 1\n elif i == '+' or i == '-':\n print(i, end=' ')\n prev = i\n elif i == '?':\n if prev == '+':\n print(res[j], end=' ')\n j += 1\n else:\n print('1', end=' ')\n else:\n print(i, end=' ')\n\n\ninp = input()\nplus = 1\nminus = 0\ntotal = 0\nfor i in inp.split():\n if i == '?' or i == '=':\n continue\n elif i == '+':\n plus += 1\n elif i == '-':\n minus += 1\n else:\n total = int(i)\ncompute(plus, minus, total, inp)\n",
"step-5": "import sys\nimport psyco\nsys.stdin = open(\"/home/shiva/Learning/1.txt\", \"r\")\nsys.stdout = open(\"/home/shiva/Learning/2.txt\", \"w\")\n\ndef compute(plus,minus,total,inp):\n\tif plus == 1 and minus == 0:\n\t\tprint(total); return\n\telif (plus == 1 and minus == 1): \n\t\tprint(\"Impossible\"); return\n\telif (abs(plus-minus) > total):\n\t\tplus\n\n\ttemp = total\n\ttotal += minus\n\tres = []\n\tif int(total/plus) > temp:\n\t\tprint(\"Impossible\"); return\n\telif int(total%plus) == 0:\n\t\tres = [int(total/plus) for i in range(0,plus)]\n\telse:\n\t\tres = [int(total/(plus-1)) for i in range(0,plus-1)]\n\t\tres.append(total%(plus-1))\n\t\n\tj = 0\n\tprev = 0\n\tfor i in inp.split():\n\t\tif j == 0:\n\t\t\tprint(res[j],end=' ')\n\t\t\tj+=1\n\t\telif i == '+' or i=='-':\n\t\t\tprint(i,end=' ')\n\t\t\tprev = i\n\t\telif i == '?':\n\t\t\tif prev == '+':\n\n\t\t\t\tprint(res[j],end=' ')\n\t\t\t\tj+=1\n\t\t\telse:\n\t\t\t\tprint('1',end=' ')\n\t\telse:\n\t\t\tprint(i,end=' ')\n\ninp = input()\nplus =1\nminus = 0\ntotal = 0\nfor i in inp.split():\n\tif i=='?' or i=='=':\n\t\tcontinue\n\telif i == '+':\n\t\tplus+=1\n\telif i == '-':\n\t\tminus +=1\n\telse:\n\t\ttotal = int(i)\n\ncompute(plus,minus,total,inp)\n\n\n\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from core.detector import Detector
from utils.augmentations import *
from torchvision.transforms.transforms import Compose
from config.mask_config import *
from config.train_config import model_info
np.random.seed(3)
colors = np.random.randint(128, 256, (100, 3))
def to_image(det):
size = 512
val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
val_trans = Compose(val_trans)
for i in range(5, 200):
path = f"D:/temp_data/mask/test/{i}.jpg "
print(path)
image = cv2.imread(path)
image = cv2.resize(image, (size, size))
bboxes = det.predict(image.copy(), size, (0.2, 0.2))
for cid, bbox in bboxes[0].items():
cls = "mask" if cid == 1 else "face"
for b in bbox:
prob = b[-1]
b = b[:4].astype(int)
cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)
cv2.putText(image, "{}:{}".format(cls, int(prob*100)), (b[0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)
cv2.imshow("image", image)
cv2.waitKey()
def to_video(det):
size = 512
val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]
cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW) # 参数为0时调用本地摄像头;url连接调取网络摄像头;文件地址获取本地视频
cap.set(3, 1920) # 设置分辨率
cap.set(4, 1080)
cap.set(cv2.CAP_PROP_FPS, 30)
ret, frame = cap.read()
while (True):
ret, frame = cap.read()
frame = frame[:, ::-1]
frame = frame[:, 440: -440]
image = cv2.resize(frame, (size, size))
bboxes = det.predict(image.copy(), size, (0.5, 0.5))
for cid, bbox in bboxes[0].items():
cls = "mask" if cid == 1 else "face"
for b in bbox:
prob = b[-1]
b = b[:4].astype(int)
cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)
cv2.putText(image, "{}:{}".format(cls, int(prob * 100)), (b[0], b[1]), cv2.FONT_ITALIC, 1,
colors[cid].tolist(), 2)
cv2.imshow("image", image)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
if __name__ == '__main__':
det = Detector(classes_info, model_info, "cuda")
det.load_model("checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth")
# to_image(det)
to_video(det)
|
normal
|
{
"blob_id": "97e7ca02d85267492a0dcbbda9d8754a0a3735a5",
"index": 5315,
"step-1": "<mask token>\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(3)\n<mask token>\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\nif __name__ == '__main__':\n det = Detector(classes_info, model_info, 'cuda')\n det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')\n to_video(det)\n",
"step-3": "<mask token>\nnp.random.seed(3)\ncolors = np.random.randint(128, 256, (100, 3))\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\nif __name__ == '__main__':\n det = Detector(classes_info, model_info, 'cuda')\n det.load_model('checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth')\n to_video(det)\n",
"step-4": "from core.detector import Detector\nfrom utils.augmentations import *\nfrom torchvision.transforms.transforms import Compose\nfrom config.mask_config import *\nfrom config.train_config import model_info\nnp.random.seed(3)\ncolors = np.random.randint(128, 256, (100, 3))\n\n\ndef to_image(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n path = f'D:/temp_data/mask/test/{i}.jpg '\n print(path)\n image = cv2.imread(path)\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n cv2.waitKey()\n\n\ndef to_video(det):\n size = 512\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)\n cap.set(3, 1920)\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while True:\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440:-440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n for cid, bbox in bboxes[0].items():\n cls = 'mask' if cid == 1 else 'face'\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid\n ].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, '{}:{}'.format(cls, int(prob * 100)), (b\n [0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow('image', image)\n if cv2.waitKey(1) & 255 == ord('q'):\n break\n\n\nif __name__ == '__main__':\n det = Detector(classes_info, model_info, 'cuda')\n det.load_model('checkpoints/2021-03-08 
00.11.56/epoch=331_4.7689.pth')\n to_video(det)\n",
"step-5": "from core.detector import Detector\nfrom utils.augmentations import *\nfrom torchvision.transforms.transforms import Compose\nfrom config.mask_config import *\nfrom config.train_config import model_info\n\n\nnp.random.seed(3)\ncolors = np.random.randint(128, 256, (100, 3))\n\n\ndef to_image(det):\n size = 512\n\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n val_trans = Compose(val_trans)\n for i in range(5, 200):\n\n path = f\"D:/temp_data/mask/test/{i}.jpg \"\n print(path)\n image = cv2.imread(path)\n\n image = cv2.resize(image, (size, size))\n bboxes = det.predict(image.copy(), size, (0.2, 0.2))\n\n for cid, bbox in bboxes[0].items():\n cls = \"mask\" if cid == 1 else \"face\"\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, \"{}:{}\".format(cls, int(prob*100)), (b[0], b[1]), cv2.FONT_ITALIC, 1, colors[cid].tolist(), 2)\n cv2.imshow(\"image\", image)\n cv2.waitKey()\n\ndef to_video(det):\n size = 512\n\n val_trans = [Normalization([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]\n\n cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW) # 参数为0时调用本地摄像头;url连接调取网络摄像头;文件地址获取本地视频\n cap.set(3, 1920) # 设置分辨率\n cap.set(4, 1080)\n cap.set(cv2.CAP_PROP_FPS, 30)\n ret, frame = cap.read()\n while (True):\n ret, frame = cap.read()\n frame = frame[:, ::-1]\n frame = frame[:, 440: -440]\n image = cv2.resize(frame, (size, size))\n bboxes = det.predict(image.copy(), size, (0.5, 0.5))\n\n for cid, bbox in bboxes[0].items():\n cls = \"mask\" if cid == 1 else \"face\"\n for b in bbox:\n prob = b[-1]\n b = b[:4].astype(int)\n cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), colors[cid].tolist(), 1, cv2.LINE_AA)\n cv2.putText(image, \"{}:{}\".format(cls, int(prob * 100)), (b[0], b[1]), cv2.FONT_ITALIC, 1,\n colors[cid].tolist(), 2)\n cv2.imshow(\"image\", image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nif __name__ == '__main__':\n det = 
Detector(classes_info, model_info, \"cuda\")\n det.load_model(\"checkpoints/2021-03-08 00.11.56/epoch=331_4.7689.pth\")\n # to_image(det)\n to_video(det)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from Store import Store
from MusicProduct import MusicProduct
class MusicStore(Store):
def make_product(self, name):
'''Overides from parent - return a new MusicProduct Object'''
|
normal
|
{
"blob_id": "0a50b31155afce2558ec066267a9fd0c56964759",
"index": 5653,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass MusicStore(Store):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MusicStore(Store):\n\n def make_product(self, name):\n \"\"\"Overides from parent - return a new MusicProduct Object\"\"\"\n",
"step-4": "from Store import Store\nfrom MusicProduct import MusicProduct\n\n\nclass MusicStore(Store):\n\n def make_product(self, name):\n \"\"\"Overides from parent - return a new MusicProduct Object\"\"\"\n",
"step-5": "from Store import Store\nfrom MusicProduct import MusicProduct\n\nclass MusicStore(Store):\n\n def make_product(self, name):\n '''Overides from parent - return a new MusicProduct Object'''\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def analyze_commit(row):
row['conventional'] = row['lax'] = False
m = re.search(STRICT, row['subj'])
if m:
row['conventional'] = True
else:
m = re.search(LAX, row['subj'])
if m:
row['lax'] = True
if m:
row['label'] = m['label']
row['breaking'] = bool(m['breaking'])
row['subjtext'] = m['subjtext']
row['bodylines'] = len(row['body'].splitlines())
<|reserved_special_token_0|>
@click.command(help='Collect stats about commits in local git repos')
@click.option('--db', 'dbfile', default='commits.db', help=
'SQLite database file to write to')
@click.option('--ignore', multiple=True, help='Repos to ignore')
@click.option('--require', help='A file that must exist to process the repo')
@click.argument('repos', nargs=-1)
def main(dbfile, ignore, require, repos):
db = dataset.connect('sqlite:///' + dbfile)
for repo in repos:
if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
print(f'Ignoring {repo}')
continue
if require is not None:
if not os.path.exists(os.path.join(repo, require)):
print(f'Skipping {repo}')
continue
print(repo)
with change_dir(repo) as repo_dir:
repo_name = '/'.join(repo_dir.split('/')[-2:])
load_commits(db, repo_name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_cmd_output(cmd):
"""Run a command in shell, and return the Unicode output."""
try:
data = subprocess.check_output(cmd, shell=True, stderr=subprocess.
STDOUT)
except subprocess.CalledProcessError as ex:
data = ex.output
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
data = data.decode('latin1')
return data
def load_commits(db, repo_name):
"""Load the commits from the current directory repo."""
SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'
GITLOG = (
f"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'"
)
SHORT_LINES = 5
with db:
commit_table = db['commits']
log = get_cmd_output(GITLOG)
for i, commit in enumerate(log.split(SEP + '\n')):
if commit:
lines = commit.split('\n', maxsplit=SHORT_LINES)
row = {'repo': repo_name}
for line in lines[:SHORT_LINES]:
key, val = line.split(': ', maxsplit=1)
row[key] = val
row['body'] = lines[SHORT_LINES].strip()
analyze_commit(row)
commit_table.insert(row)
<|reserved_special_token_0|>
def analyze_commit(row):
row['conventional'] = row['lax'] = False
m = re.search(STRICT, row['subj'])
if m:
row['conventional'] = True
else:
m = re.search(LAX, row['subj'])
if m:
row['lax'] = True
if m:
row['label'] = m['label']
row['breaking'] = bool(m['breaking'])
row['subjtext'] = m['subjtext']
row['bodylines'] = len(row['body'].splitlines())
@contextlib.contextmanager
def change_dir(new_dir):
"""Change directory, and then change back.
Use as a context manager, it will give you the new directory, and later
restore the old one.
"""
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield os.getcwd()
finally:
os.chdir(old_dir)
@click.command(help='Collect stats about commits in local git repos')
@click.option('--db', 'dbfile', default='commits.db', help=
'SQLite database file to write to')
@click.option('--ignore', multiple=True, help='Repos to ignore')
@click.option('--require', help='A file that must exist to process the repo')
@click.argument('repos', nargs=-1)
def main(dbfile, ignore, require, repos):
db = dataset.connect('sqlite:///' + dbfile)
for repo in repos:
if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
print(f'Ignoring {repo}')
continue
if require is not None:
if not os.path.exists(os.path.join(repo, require)):
print(f'Skipping {repo}')
continue
print(repo)
with change_dir(repo) as repo_dir:
repo_name = '/'.join(repo_dir.split('/')[-2:])
load_commits(db, repo_name)
if __name__ == '__main__':
main()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_cmd_output(cmd):
"""Run a command in shell, and return the Unicode output."""
try:
data = subprocess.check_output(cmd, shell=True, stderr=subprocess.
STDOUT)
except subprocess.CalledProcessError as ex:
data = ex.output
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
data = data.decode('latin1')
return data
def load_commits(db, repo_name):
"""Load the commits from the current directory repo."""
SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'
GITLOG = (
f"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'"
)
SHORT_LINES = 5
with db:
commit_table = db['commits']
log = get_cmd_output(GITLOG)
for i, commit in enumerate(log.split(SEP + '\n')):
if commit:
lines = commit.split('\n', maxsplit=SHORT_LINES)
row = {'repo': repo_name}
for line in lines[:SHORT_LINES]:
key, val = line.split(': ', maxsplit=1)
row[key] = val
row['body'] = lines[SHORT_LINES].strip()
analyze_commit(row)
commit_table.insert(row)
STRICT = """(?x)
^
(?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)
(?P<breaking>!?):\\s
(?P<subjtext>.+)
$
"""
LAX = """(?xi)
^
(?P<label>\\w+)
(?:\\(\\w+\\))?
(?P<breaking>!?):\\s
(?P<subjtext>.+)
$
"""
def analyze_commit(row):
row['conventional'] = row['lax'] = False
m = re.search(STRICT, row['subj'])
if m:
row['conventional'] = True
else:
m = re.search(LAX, row['subj'])
if m:
row['lax'] = True
if m:
row['label'] = m['label']
row['breaking'] = bool(m['breaking'])
row['subjtext'] = m['subjtext']
row['bodylines'] = len(row['body'].splitlines())
@contextlib.contextmanager
def change_dir(new_dir):
"""Change directory, and then change back.
Use as a context manager, it will give you the new directory, and later
restore the old one.
"""
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield os.getcwd()
finally:
os.chdir(old_dir)
@click.command(help='Collect stats about commits in local git repos')
@click.option('--db', 'dbfile', default='commits.db', help=
'SQLite database file to write to')
@click.option('--ignore', multiple=True, help='Repos to ignore')
@click.option('--require', help='A file that must exist to process the repo')
@click.argument('repos', nargs=-1)
def main(dbfile, ignore, require, repos):
db = dataset.connect('sqlite:///' + dbfile)
for repo in repos:
if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
print(f'Ignoring {repo}')
continue
if require is not None:
if not os.path.exists(os.path.join(repo, require)):
print(f'Skipping {repo}')
continue
print(repo)
with change_dir(repo) as repo_dir:
repo_name = '/'.join(repo_dir.split('/')[-2:])
load_commits(db, repo_name)
if __name__ == '__main__':
main()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import contextlib
import datetime
import fnmatch
import os
import os.path
import re
import subprocess
import sys
import click
import dataset
def get_cmd_output(cmd):
"""Run a command in shell, and return the Unicode output."""
try:
data = subprocess.check_output(cmd, shell=True, stderr=subprocess.
STDOUT)
except subprocess.CalledProcessError as ex:
data = ex.output
try:
data = data.decode('utf-8')
except UnicodeDecodeError:
data = data.decode('latin1')
return data
def load_commits(db, repo_name):
"""Load the commits from the current directory repo."""
SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'
GITLOG = (
f"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'"
)
SHORT_LINES = 5
with db:
commit_table = db['commits']
log = get_cmd_output(GITLOG)
for i, commit in enumerate(log.split(SEP + '\n')):
if commit:
lines = commit.split('\n', maxsplit=SHORT_LINES)
row = {'repo': repo_name}
for line in lines[:SHORT_LINES]:
key, val = line.split(': ', maxsplit=1)
row[key] = val
row['body'] = lines[SHORT_LINES].strip()
analyze_commit(row)
commit_table.insert(row)
STRICT = """(?x)
^
(?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)
(?P<breaking>!?):\\s
(?P<subjtext>.+)
$
"""
LAX = """(?xi)
^
(?P<label>\\w+)
(?:\\(\\w+\\))?
(?P<breaking>!?):\\s
(?P<subjtext>.+)
$
"""
def analyze_commit(row):
row['conventional'] = row['lax'] = False
m = re.search(STRICT, row['subj'])
if m:
row['conventional'] = True
else:
m = re.search(LAX, row['subj'])
if m:
row['lax'] = True
if m:
row['label'] = m['label']
row['breaking'] = bool(m['breaking'])
row['subjtext'] = m['subjtext']
row['bodylines'] = len(row['body'].splitlines())
@contextlib.contextmanager
def change_dir(new_dir):
"""Change directory, and then change back.
Use as a context manager, it will give you the new directory, and later
restore the old one.
"""
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield os.getcwd()
finally:
os.chdir(old_dir)
@click.command(help='Collect stats about commits in local git repos')
@click.option('--db', 'dbfile', default='commits.db', help=
'SQLite database file to write to')
@click.option('--ignore', multiple=True, help='Repos to ignore')
@click.option('--require', help='A file that must exist to process the repo')
@click.argument('repos', nargs=-1)
def main(dbfile, ignore, require, repos):
db = dataset.connect('sqlite:///' + dbfile)
for repo in repos:
if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
print(f'Ignoring {repo}')
continue
if require is not None:
if not os.path.exists(os.path.join(repo, require)):
print(f'Skipping {repo}')
continue
print(repo)
with change_dir(repo) as repo_dir:
repo_name = '/'.join(repo_dir.split('/')[-2:])
load_commits(db, repo_name)
if __name__ == '__main__':
main()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import contextlib
import datetime
import fnmatch
import os
import os.path
import re
import subprocess
import sys
import click
import dataset
def get_cmd_output(cmd):
"""Run a command in shell, and return the Unicode output."""
try:
data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
data = ex.output
try:
data = data.decode("utf-8")
except UnicodeDecodeError:
data = data.decode("latin1")
return data
def load_commits(db, repo_name):
"""Load the commits from the current directory repo."""
SEP = "-=:=-=:=-=:=-=:=-=:=-=:=-=:=-"
GITLOG = f"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'"
SHORT_LINES = 5
# $ git log --format="format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b"
# ---------------------
# date: 2021-04-21T16:13:23-04:00
# hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6
# auth: julia.eskew@edx.org
# name: Julia Eskew
# subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)
# Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.
# ---------------------
# date: 2021-04-15T21:36:47-04:00
# hash: a1fe3d58dc112bd975f1237baaee787ba22929f1
# auth: astaubin@edx.org
# name: Albert (AJ) St. Aubin
# subj: [bug] Corrected issue where program dash showed incorrect completed count
# [MICROBA-1163]
#
# This change will correct an issue in the Program Dashboard where a user
# would see a course as completed, but not see their Certificate because
# it was not available to them yet.
# ---------------------
with db:
commit_table = db["commits"]
log = get_cmd_output(GITLOG)
for i, commit in enumerate(log.split(SEP + "\n")):
if commit:
lines = commit.split("\n", maxsplit=SHORT_LINES)
row = {"repo": repo_name}
for line in lines[:SHORT_LINES]:
key, val = line.split(": ", maxsplit=1)
row[key] = val
row["body"] = lines[SHORT_LINES].strip()
analyze_commit(row)
commit_table.insert(row)
STRICT = r"""(?x)
^
(?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)
(?P<breaking>!?):\s
(?P<subjtext>.+)
$
"""
LAX = r"""(?xi)
^
(?P<label>\w+)
(?:\(\w+\))?
(?P<breaking>!?):\s
(?P<subjtext>.+)
$
"""
def analyze_commit(row):
row["conventional"] = row["lax"] = False
m = re.search(STRICT, row["subj"])
if m:
row["conventional"] = True
else:
m = re.search(LAX, row["subj"])
if m:
row["lax"] = True
if m:
row["label"] = m["label"]
row["breaking"] = bool(m["breaking"])
row["subjtext"] = m["subjtext"]
row["bodylines"] = len(row["body"].splitlines())
@contextlib.contextmanager
def change_dir(new_dir):
"""Change directory, and then change back.
Use as a context manager, it will give you the new directory, and later
restore the old one.
"""
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield os.getcwd()
finally:
os.chdir(old_dir)
@click.command(help="Collect stats about commits in local git repos")
@click.option("--db", "dbfile", default="commits.db", help="SQLite database file to write to")
@click.option("--ignore", multiple=True, help="Repos to ignore")
@click.option("--require", help="A file that must exist to process the repo")
@click.argument("repos", nargs=-1)
def main(dbfile, ignore, require, repos):
db = dataset.connect("sqlite:///" + dbfile)
for repo in repos:
if any(fnmatch.fnmatch(repo, pat) for pat in ignore):
print(f"Ignoring {repo}")
continue
if require is not None:
if not os.path.exists(os.path.join(repo, require)):
print(f"Skipping {repo}")
continue
print(repo)
with change_dir(repo) as repo_dir:
repo_name = "/".join(repo_dir.split("/")[-2:])
load_commits(db, repo_name)
if __name__ == "__main__":
main()
# then:
# gittreeif nedbat/meta/installed python /src/ghorg/commitstats.py /src/ghorg/commits.db
#
# in sqlite:
# select strftime("%Y%W", date, "weekday 0") as yw, count(*) total, sum(conventional) as con from commits group by yw;
# select yw, total, con, cast((con*100.0)/total as integer) pctcon from (select strftime("%Y%W", date, "weekday 0") as yw, count(*) total, sum(conventional) as con from commits group by yw);
"""
select
weekend, total, con, cast((con*100.0)/total as integer) pctcon, bod, cast((bod*100.0)/total as integer) pctbod
from (
select
strftime("%Y%m%d", date, "weekday 0") as weekend,
count(*) total,
sum(conventional) as con, sum(bodylines > 0) as bod
from commits where repo = "edx/edx-platform" group by weekend
)
where weekend > '202009';
"""
|
flexible
|
{
"blob_id": "16446c2c5612a14d4364cbefb949da0b473f7454",
"index": 7934,
"step-1": "<mask token>\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\n<mask token>\n\n\n@click.command(help='Collect stats about commits in local git repos')\n@click.option('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\n@click.option('--ignore', multiple=True, help='Repos to ignore')\n@click.option('--require', help='A file that must exist to process the repo')\n@click.argument('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.\n STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n data = data.decode('latin1')\n return data\n\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'\n GITLOG = (\n f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n )\n SHORT_LINES = 5\n with db:\n commit_table = db['commits']\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + '\\n')):\n if commit:\n lines = commit.split('\\n', maxsplit=SHORT_LINES)\n row = {'repo': repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(': ', maxsplit=1)\n row[key] = val\n row['body'] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\n\n<mask token>\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\n@contextlib.contextmanager\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n\n@click.command(help='Collect stats about commits in local git repos')\n@click.option('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\n@click.option('--ignore', 
multiple=True, help='Repos to ignore')\n@click.option('--require', help='A file that must exist to process the repo')\n@click.argument('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.\n STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n data = data.decode('latin1')\n return data\n\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'\n GITLOG = (\n f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n )\n SHORT_LINES = 5\n with db:\n commit_table = db['commits']\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + '\\n')):\n if commit:\n lines = commit.split('\\n', maxsplit=SHORT_LINES)\n row = {'repo': repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(': ', maxsplit=1)\n row[key] = val\n row['body'] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\n\nSTRICT = \"\"\"(?x)\n ^\n (?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\nLAX = \"\"\"(?xi)\n ^\n (?P<label>\\\\w+)\n (?:\\\\(\\\\w+\\\\))?\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\n@contextlib.contextmanager\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n 
os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n\n@click.command(help='Collect stats about commits in local git repos')\n@click.option('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\n@click.option('--ignore', multiple=True, help='Repos to ignore')\n@click.option('--require', help='A file that must exist to process the repo')\n@click.argument('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "import contextlib\nimport datetime\nimport fnmatch\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\nimport click\nimport dataset\n\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.\n STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode('utf-8')\n except UnicodeDecodeError:\n data = data.decode('latin1')\n return data\n\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n SEP = '-=:=-=:=-=:=-=:=-=:=-=:=-=:=-'\n GITLOG = (\n f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n )\n SHORT_LINES = 5\n with db:\n commit_table = db['commits']\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + '\\n')):\n if commit:\n lines = commit.split('\\n', maxsplit=SHORT_LINES)\n row = {'repo': repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(': ', maxsplit=1)\n row[key] = val\n row['body'] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\n\nSTRICT = \"\"\"(?x)\n ^\n (?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\nLAX = \"\"\"(?xi)\n ^\n (?P<label>\\\\w+)\n (?:\\\\(\\\\w+\\\\))?\n (?P<breaking>!?):\\\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\n\ndef analyze_commit(row):\n row['conventional'] = row['lax'] = False\n m = re.search(STRICT, row['subj'])\n if m:\n row['conventional'] = True\n else:\n m = re.search(LAX, row['subj'])\n if m:\n row['lax'] = True\n if m:\n row['label'] = m['label']\n row['breaking'] = bool(m['breaking'])\n row['subjtext'] = m['subjtext']\n row['bodylines'] = len(row['body'].splitlines())\n\n\n@contextlib.contextmanager\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a 
context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n\n@click.command(help='Collect stats about commits in local git repos')\n@click.option('--db', 'dbfile', default='commits.db', help=\n 'SQLite database file to write to')\n@click.option('--ignore', multiple=True, help='Repos to ignore')\n@click.option('--require', help='A file that must exist to process the repo')\n@click.argument('repos', nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect('sqlite:///' + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f'Ignoring {repo}')\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f'Skipping {repo}')\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = '/'.join(repo_dir.split('/')[-2:])\n load_commits(db, repo_name)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "import contextlib\nimport datetime\nimport fnmatch\nimport os\nimport os.path\nimport re\nimport subprocess\nimport sys\n\nimport click\nimport dataset\n\ndef get_cmd_output(cmd):\n \"\"\"Run a command in shell, and return the Unicode output.\"\"\"\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode(\"utf-8\")\n except UnicodeDecodeError:\n data = data.decode(\"latin1\")\n return data\n\ndef load_commits(db, repo_name):\n \"\"\"Load the commits from the current directory repo.\"\"\"\n\n SEP = \"-=:=-=:=-=:=-=:=-=:=-=:=-=:=-\"\n GITLOG = f\"git log --no-merges --format='format:date: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b%n{SEP}'\"\n SHORT_LINES = 5\n\n # $ git log --format=\"format:---------------------%ndate: %aI%nhash: %H%nauth: %aE%nname: %aN%nsubj: %s%n%b\"\n # ---------------------\n # date: 2021-04-21T16:13:23-04:00\n # hash: efa13ff1d2fb3d8b2ddee8be0868ae60f9bc35a6\n # auth: julia.eskew@edx.org\n # name: Julia Eskew\n # subj: fix: TNL-8233: Change exception raised at problem creation failure from generic exception to LoncapaProblemError. (#27361)\n # Raising this specific exception will cause the failure to be handled more gracefully by problem rescoring code.\n # ---------------------\n # date: 2021-04-15T21:36:47-04:00\n # hash: a1fe3d58dc112bd975f1237baaee787ba22929f1\n # auth: astaubin@edx.org\n # name: Albert (AJ) St. 
Aubin\n # subj: [bug] Corrected issue where program dash showed incorrect completed count\n # [MICROBA-1163]\n # \n # This change will correct an issue in the Program Dashboard where a user\n # would see a course as completed, but not see their Certificate because\n # it was not available to them yet.\n # ---------------------\n\n with db:\n commit_table = db[\"commits\"]\n\n log = get_cmd_output(GITLOG)\n for i, commit in enumerate(log.split(SEP + \"\\n\")):\n if commit:\n lines = commit.split(\"\\n\", maxsplit=SHORT_LINES)\n row = {\"repo\": repo_name}\n for line in lines[:SHORT_LINES]:\n key, val = line.split(\": \", maxsplit=1)\n row[key] = val\n row[\"body\"] = lines[SHORT_LINES].strip()\n analyze_commit(row)\n commit_table.insert(row)\n\nSTRICT = r\"\"\"(?x)\n ^\n (?P<label>build|chore|docs|feat|fix|perf|refactor|revert|style|test|temp)\n (?P<breaking>!?):\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\nLAX = r\"\"\"(?xi)\n ^\n (?P<label>\\w+)\n (?:\\(\\w+\\))?\n (?P<breaking>!?):\\s\n (?P<subjtext>.+)\n $\n \"\"\"\n\ndef analyze_commit(row):\n row[\"conventional\"] = row[\"lax\"] = False\n m = re.search(STRICT, row[\"subj\"])\n if m:\n row[\"conventional\"] = True\n else:\n m = re.search(LAX, row[\"subj\"])\n if m:\n row[\"lax\"] = True\n if m:\n row[\"label\"] = m[\"label\"]\n row[\"breaking\"] = bool(m[\"breaking\"])\n row[\"subjtext\"] = m[\"subjtext\"]\n row[\"bodylines\"] = len(row[\"body\"].splitlines())\n\n@contextlib.contextmanager\ndef change_dir(new_dir):\n \"\"\"Change directory, and then change back.\n\n Use as a context manager, it will give you the new directory, and later\n restore the old one.\n\n \"\"\"\n old_dir = os.getcwd()\n os.chdir(new_dir)\n try:\n yield os.getcwd()\n finally:\n os.chdir(old_dir)\n\n@click.command(help=\"Collect stats about commits in local git repos\")\n@click.option(\"--db\", \"dbfile\", default=\"commits.db\", help=\"SQLite database file to write to\")\n@click.option(\"--ignore\", multiple=True, help=\"Repos to 
ignore\")\n@click.option(\"--require\", help=\"A file that must exist to process the repo\")\n@click.argument(\"repos\", nargs=-1)\ndef main(dbfile, ignore, require, repos):\n db = dataset.connect(\"sqlite:///\" + dbfile)\n for repo in repos:\n if any(fnmatch.fnmatch(repo, pat) for pat in ignore):\n print(f\"Ignoring {repo}\")\n continue\n if require is not None:\n if not os.path.exists(os.path.join(repo, require)):\n print(f\"Skipping {repo}\")\n continue\n print(repo)\n with change_dir(repo) as repo_dir:\n repo_name = \"/\".join(repo_dir.split(\"/\")[-2:])\n load_commits(db, repo_name)\n\nif __name__ == \"__main__\":\n main()\n\n# then:\n# gittreeif nedbat/meta/installed python /src/ghorg/commitstats.py /src/ghorg/commits.db\n#\n# in sqlite:\n# select strftime(\"%Y%W\", date, \"weekday 0\") as yw, count(*) total, sum(conventional) as con from commits group by yw;\n# select yw, total, con, cast((con*100.0)/total as integer) pctcon from (select strftime(\"%Y%W\", date, \"weekday 0\") as yw, count(*) total, sum(conventional) as con from commits group by yw);\n\n\"\"\"\n select\n weekend, total, con, cast((con*100.0)/total as integer) pctcon, bod, cast((bod*100.0)/total as integer) pctbod\n from (\n select\n strftime(\"%Y%m%d\", date, \"weekday 0\") as weekend,\n count(*) total,\n sum(conventional) as con, sum(bodylines > 0) as bod\n from commits where repo = \"edx/edx-platform\" group by weekend\n )\n where weekend > '202009';\n\"\"\"\n",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(verbose_name='Created on', unique=True, editable=False)),
('payment_no', models.PositiveIntegerField(verbose_name='Payment on', unique=True, editable=False)),
('payment_info', models.CharField(verbose_name='Payment Info', max_length=128, editable=False)),
('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'invoice',
'verbose_name_plural': 'invoices',
},
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
('amount', models.DecimalField(verbose_name='Amount', max_digits=9, decimal_places=2)),
('payment_no', models.PositiveIntegerField(unique=True, verbose_name='Payment no')),
('mode', models.PositiveSmallIntegerField(verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])),
('sys_invs_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_INVS_NO')),
('sys_trans_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_TRANS_NO')),
('sys_trans_date', models.DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')),
('payer_purse', models.CharField(max_length=13, verbose_name='Payer purse')),
('payer_wm', models.CharField(max_length=12, verbose_name='Payer WM')),
('paymer_number', models.CharField(max_length=30, verbose_name='Paymer number', blank=True)),
('paymer_email', models.EmailField(max_length=254, verbose_name='Paymer email', blank=True)),
('telepat_phonenumber', models.CharField(max_length=30, verbose_name='Phone number', blank=True)),
('telepat_orderid', models.CharField(max_length=30, verbose_name='Order id', blank=True)),
('payment_creditdays', models.PositiveIntegerField(null=True, verbose_name='Credit days', blank=True)),
('invoice', models.OneToOneField(related_name='payment', null=True, blank=True, to='webmoney_merchant.Invoice', verbose_name='Invoice')),
],
options={
'verbose_name': 'payment',
'verbose_name_plural': 'payments',
},
),
migrations.CreateModel(
name='Purse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('purse', models.CharField(unique=True, max_length=13, verbose_name='Purse')),
('purse_type', models.CharField(default=b'B', unique=True, max_length=1, verbose_name='Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D', b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R', b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z', b'WMZ')])),
('secret_key', models.CharField(max_length=50, verbose_name='Secret key')),
],
options={
'verbose_name': 'purse',
'verbose_name_plural': 'purses',
},
),
migrations.AddField(
model_name='payment',
name='payee_purse',
field=models.ForeignKey(related_name='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'),
),
]
|
normal
|
{
"blob_id": "deb8ee1d6327a6406244147a819821e8d2b2890e",
"index": 1385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Invoice', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created_on', models.DateTimeField(\n verbose_name='Created on', unique=True, editable=False)), (\n 'payment_no', models.PositiveIntegerField(verbose_name='Payment on',\n unique=True, editable=False)), ('payment_info', models.CharField(\n verbose_name='Payment Info', max_length=128, editable=False)), (\n 'user', models.ForeignKey(editable=False, to=settings.\n AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':\n 'invoice', 'verbose_name_plural': 'invoices'}), migrations.\n CreateModel(name='Payment', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('created_on', models.DateTimeField(auto_now_add=True,\n verbose_name='Created on')), ('amount', models.DecimalField(\n verbose_name='Amount', max_digits=9, decimal_places=2)), (\n 'payment_no', models.PositiveIntegerField(unique=True, verbose_name\n ='Payment no')), ('mode', models.PositiveSmallIntegerField(\n verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (\n 'sys_invs_no', models.PositiveIntegerField(verbose_name=\n b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(\n verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.\n DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',\n models.CharField(max_length=13, verbose_name='Payer purse')), (\n 'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'\n )), ('paymer_number', models.CharField(max_length=30, verbose_name=\n 'Paymer number', blank=True)), ('paymer_email', models.EmailField(\n max_length=254, verbose_name='Paymer email', blank=True)), (\n 'telepat_phonenumber', models.CharField(max_length=30, verbose_name\n 
='Phone number', blank=True)), ('telepat_orderid', models.CharField\n (max_length=30, verbose_name='Order id', blank=True)), (\n 'payment_creditdays', models.PositiveIntegerField(null=True,\n verbose_name='Credit days', blank=True)), ('invoice', models.\n OneToOneField(related_name='payment', null=True, blank=True, to=\n 'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={\n 'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),\n migrations.CreateModel(name='Purse', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('purse', models.CharField(unique=True,\n max_length=13, verbose_name='Purse')), ('purse_type', models.\n CharField(default=b'B', unique=True, max_length=1, verbose_name=\n 'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',\n b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',\n b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',\n b'WMZ')])), ('secret_key', models.CharField(max_length=50,\n verbose_name='Secret key'))], options={'verbose_name': 'purse',\n 'verbose_name_plural': 'purses'}), migrations.AddField(model_name=\n 'payment', name='payee_purse', field=models.ForeignKey(related_name\n ='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))\n ]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Invoice', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('created_on', models.DateTimeField(\n verbose_name='Created on', unique=True, editable=False)), (\n 'payment_no', models.PositiveIntegerField(verbose_name='Payment on',\n unique=True, editable=False)), ('payment_info', models.CharField(\n verbose_name='Payment Info', max_length=128, editable=False)), (\n 'user', models.ForeignKey(editable=False, to=settings.\n AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':\n 'invoice', 'verbose_name_plural': 'invoices'}), migrations.\n CreateModel(name='Payment', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('created_on', models.DateTimeField(auto_now_add=True,\n verbose_name='Created on')), ('amount', models.DecimalField(\n verbose_name='Amount', max_digits=9, decimal_places=2)), (\n 'payment_no', models.PositiveIntegerField(unique=True, verbose_name\n ='Payment no')), ('mode', models.PositiveSmallIntegerField(\n verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (\n 'sys_invs_no', models.PositiveIntegerField(verbose_name=\n b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(\n verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.\n DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',\n models.CharField(max_length=13, verbose_name='Payer purse')), (\n 'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'\n )), ('paymer_number', models.CharField(max_length=30, verbose_name=\n 'Paymer number', blank=True)), ('paymer_email', models.EmailField(\n max_length=254, 
verbose_name='Paymer email', blank=True)), (\n 'telepat_phonenumber', models.CharField(max_length=30, verbose_name\n ='Phone number', blank=True)), ('telepat_orderid', models.CharField\n (max_length=30, verbose_name='Order id', blank=True)), (\n 'payment_creditdays', models.PositiveIntegerField(null=True,\n verbose_name='Credit days', blank=True)), ('invoice', models.\n OneToOneField(related_name='payment', null=True, blank=True, to=\n 'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={\n 'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),\n migrations.CreateModel(name='Purse', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('purse', models.CharField(unique=True,\n max_length=13, verbose_name='Purse')), ('purse_type', models.\n CharField(default=b'B', unique=True, max_length=1, verbose_name=\n 'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',\n b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',\n b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',\n b'WMZ')])), ('secret_key', models.CharField(max_length=50,\n verbose_name='Secret key'))], options={'verbose_name': 'purse',\n 'verbose_name_plural': 'purses'}), migrations.AddField(model_name=\n 'payment', name='payee_purse', field=models.ForeignKey(related_name\n ='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))\n ]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Invoice',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(verbose_name='Created on', unique=True, editable=False)),\n ('payment_no', models.PositiveIntegerField(verbose_name='Payment on', unique=True, editable=False)),\n ('payment_info', models.CharField(verbose_name='Payment Info', max_length=128, editable=False)),\n ('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, verbose_name='User')),\n ],\n options={\n 'verbose_name': 'invoice',\n 'verbose_name_plural': 'invoices',\n },\n ),\n migrations.CreateModel(\n name='Payment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),\n ('amount', models.DecimalField(verbose_name='Amount', max_digits=9, decimal_places=2)),\n ('payment_no', models.PositiveIntegerField(unique=True, verbose_name='Payment no')),\n ('mode', models.PositiveSmallIntegerField(verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])),\n ('sys_invs_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_INVS_NO')),\n ('sys_trans_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_TRANS_NO')),\n ('sys_trans_date', models.DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')),\n ('payer_purse', models.CharField(max_length=13, verbose_name='Payer purse')),\n ('payer_wm', models.CharField(max_length=12, verbose_name='Payer WM')),\n ('paymer_number', models.CharField(max_length=30, verbose_name='Paymer number', blank=True)),\n ('paymer_email', 
models.EmailField(max_length=254, verbose_name='Paymer email', blank=True)),\n ('telepat_phonenumber', models.CharField(max_length=30, verbose_name='Phone number', blank=True)),\n ('telepat_orderid', models.CharField(max_length=30, verbose_name='Order id', blank=True)),\n ('payment_creditdays', models.PositiveIntegerField(null=True, verbose_name='Credit days', blank=True)),\n ('invoice', models.OneToOneField(related_name='payment', null=True, blank=True, to='webmoney_merchant.Invoice', verbose_name='Invoice')),\n ],\n options={\n 'verbose_name': 'payment',\n 'verbose_name_plural': 'payments',\n },\n ),\n migrations.CreateModel(\n name='Purse',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('purse', models.CharField(unique=True, max_length=13, verbose_name='Purse')),\n ('purse_type', models.CharField(default=b'B', unique=True, max_length=1, verbose_name='Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D', b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R', b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z', b'WMZ')])),\n ('secret_key', models.CharField(max_length=50, verbose_name='Secret key')),\n ],\n options={\n 'verbose_name': 'purse',\n 'verbose_name_plural': 'purses',\n },\n ),\n migrations.AddField(\n model_name='payment',\n name='payee_purse',\n field=models.ForeignKey(related_name='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def _regiao(x, y):
    """Return 'dentro' if (x, y) lies inside one of the target regions, else 'fora'."""
    # Central horizontal band: x in [-3, 3], y in [1, 2].
    if 1 <= y <= 2 and -3 <= x <= 3:
        return 'dentro'
    # Upper strips: y in [4, 5] (or x in [6, 7] -- NOTE(review): this second
    # clause tests x inside what looks like a y-band condition and is
    # presumably a typo for 6 <= y <= 7; kept as-is to preserve behaviour)
    # combined with four narrow x columns.
    if (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or
                                         1 <= x <= 2 or 3 <= x <= 4):
        return 'dentro'
    return 'fora'


def main():
    """Read one coordinate pair from stdin and print 'dentro' or 'fora'."""
    x = float(input("Coordenada x: "))
    y = float(input("Coordenada y: "))
    print(_regiao(x, y))


# -----------------------------------------------------
if __name__ == '__main__':  # call the main function
    main()
|
normal
|
{
"blob_id": "06cb832c3adae95fcd1d1d2d0663641d3ac671ef",
"index": 9132,
"step-1": "<mask token>\n",
"step-2": "def main():\n x = float(input('Coordenada x: '))\n y = float(input('Coordenada y: '))\n if 1 <= y <= 2 and -3 <= x <= 3:\n print('dentro')\n elif (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or\n 1 <= x <= 2 or 3 <= x <= 4):\n print('dentro')\n else:\n print('fora')\n\n\n<mask token>\n",
"step-3": "def main():\n x = float(input('Coordenada x: '))\n y = float(input('Coordenada y: '))\n if 1 <= y <= 2 and -3 <= x <= 3:\n print('dentro')\n elif (4 <= y <= 5 or 6 <= x <= 7) and (-4 <= x <= -3 or -2 <= x <= -1 or\n 1 <= x <= 2 or 3 <= x <= 4):\n print('dentro')\n else:\n print('fora')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "def main():\r\n x = float(input(\"Coordenada x: \"))\r\n y = float(input(\"Coordenada y: \"))\r\n \r\n if 1 <= y <= 2 and -3 <= x <= 3:\r\n print(\"dentro\")\r\n \r\n elif (4 <= y <= 5 or 6 <= x <= 7) and ( -4 <= x <= -3 or -2 <= x <= -1 or 1 <= x <= 2 or 3 <= x <= 4):\r\n print(\"dentro\")\r\n \r\n else:\r\n print(\"fora\")\r\n\r\n\r\n\r\n#-----------------------------------------------------\r\nif __name__ == '__main__': # chamada da funcao principal\r\n main()\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class k_box:
def __init__(self, string=0, file=0):
if string != 0:
if not len(string) == 64:
raise Exception('Bad len')
self.__priv_key = bytes.fromhex(string)
else:
self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=
byteorder)
self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,
curve=ecdsa.SECP256k1).verifying_key.to_string()
def get_secret_key(self):
return self.__priv_key
def get_public_key(self) ->bytearray:
return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key
def get_public_key_compresed(self) ->bytearray:
return (b'\x03' if self.__pub_key[-1] % 2 else b'\x02'
) + self.__pub_key[0:32]
def get_address(self) ->str:
e_pub = self.get_encrypted_pub_key()
main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub
check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()
).digest()[:4]
hex_addr = main_net_key + check_sum
return base58_encode(hex_addr)
def get_encrypted_pub_key(self):
sha = hashlib.sha256(self.get_public_key_compresed()).digest()
result = hashlib.new(name='ripemd160', data=sha).digest()
return result
def sign(self, message: bytes=0) ->bytearray:
sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.
SECP256k1)
return sk.sign(message)
def verify(self, signature, message):
vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.
SECP256k1)
return vk.verify(signature, message.encode())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class k_box:
def __init__(self, string=0, file=0):
if string != 0:
if not len(string) == 64:
raise Exception('Bad len')
self.__priv_key = bytes.fromhex(string)
else:
self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=
byteorder)
self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,
curve=ecdsa.SECP256k1).verifying_key.to_string()
def get_secret_key(self):
return self.__priv_key
def get_public_key(self) ->bytearray:
return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key
def get_public_key_compresed(self) ->bytearray:
return (b'\x03' if self.__pub_key[-1] % 2 else b'\x02'
) + self.__pub_key[0:32]
def get_address(self) ->str:
e_pub = self.get_encrypted_pub_key()
main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub
check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()
).digest()[:4]
hex_addr = main_net_key + check_sum
return base58_encode(hex_addr)
def get_encrypted_pub_key(self):
sha = hashlib.sha256(self.get_public_key_compresed()).digest()
result = hashlib.new(name='ripemd160', data=sha).digest()
return result
def sign(self, message: bytes=0) ->bytearray:
sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.
SECP256k1)
return sk.sign(message)
def verify(self, signature, message):
vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.
SECP256k1)
return vk.verify(signature, message.encode())
<|reserved_special_token_0|>
def base58_decode(s):
"""Decode a base58-encoding string, returning bytes"""
if not s:
return b''
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
n = 0
for c in s:
n *= 58
if c not in alphabet:
raise Exception('Character %r is not a valid base58 character' % c)
digit = alphabet.index(c)
n += digit
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = bytearray.fromhex(h)
pad = 0
for c in s[:-1]:
if c == alphabet[0]:
pad += 1
else:
break
return b'\x00' * pad + res
def to_WIF(key: str):
if not len(key) == 64:
raise Exception('Bad key len')
key = '80' + key
key_b = bytes.fromhex(key)
sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()
key_b = key_b + sha_key1[0:4]
return base58_encode(key_b)
<|reserved_special_token_0|>
def from_WIF(wif_key):
if not len(wif_key) == 51:
raise Exception('Bad len of WIF key')
key = base58_decode(wif_key)
checksum = key[-4:]
key = key[1:33]
if hashlib.sha256(hashlib.sha256((128).to_bytes(1, 'big') + key).digest()
).digest()[0:4] != checksum:
raise Exception('Bad checksum')
return key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class k_box:
def __init__(self, string=0, file=0):
if string != 0:
if not len(string) == 64:
raise Exception('Bad len')
self.__priv_key = bytes.fromhex(string)
else:
self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=
byteorder)
self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,
curve=ecdsa.SECP256k1).verifying_key.to_string()
def get_secret_key(self):
return self.__priv_key
def get_public_key(self) ->bytearray:
return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key
def get_public_key_compresed(self) ->bytearray:
return (b'\x03' if self.__pub_key[-1] % 2 else b'\x02'
) + self.__pub_key[0:32]
def get_address(self) ->str:
e_pub = self.get_encrypted_pub_key()
main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub
check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()
).digest()[:4]
hex_addr = main_net_key + check_sum
return base58_encode(hex_addr)
def get_encrypted_pub_key(self):
sha = hashlib.sha256(self.get_public_key_compresed()).digest()
result = hashlib.new(name='ripemd160', data=sha).digest()
return result
def sign(self, message: bytes=0) ->bytearray:
sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.
SECP256k1)
return sk.sign(message)
def verify(self, signature, message):
vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.
SECP256k1)
return vk.verify(signature, message.encode())
def covert_to_address(pub_key: bytes) ->str:
sha = hashlib.sha256(pub_key).digest()
pub_key = hashlib.new(name='ripemd160', data=sha).digest()
main_net_key = (0).to_bytes(1, byteorder=byteorder) + pub_key
check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[
:4]
hex_addr = main_net_key + check_sum
return base58_encode(hex_addr)
def base58_encode(n: bytearray) ->str:
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
b58_string = ''
leading_zeros = len(n.hex()) - len(n.hex().lstrip('0'))
address_int = int.from_bytes(n, byteorder='big')
while address_int > 0:
digit = address_int % 58
digit_char = alphabet[digit]
b58_string = digit_char + b58_string
address_int //= 58
ones = leading_zeros // 2
for one in range(ones):
b58_string = '1' + b58_string
return b58_string
def base58_decode(s):
"""Decode a base58-encoding string, returning bytes"""
if not s:
return b''
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
n = 0
for c in s:
n *= 58
if c not in alphabet:
raise Exception('Character %r is not a valid base58 character' % c)
digit = alphabet.index(c)
n += digit
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = bytearray.fromhex(h)
pad = 0
for c in s[:-1]:
if c == alphabet[0]:
pad += 1
else:
break
return b'\x00' * pad + res
def to_WIF(key: str):
if not len(key) == 64:
raise Exception('Bad key len')
key = '80' + key
key_b = bytes.fromhex(key)
sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()
key_b = key_b + sha_key1[0:4]
return base58_encode(key_b)
<|reserved_special_token_0|>
def from_WIF(wif_key):
if not len(wif_key) == 51:
raise Exception('Bad len of WIF key')
key = base58_decode(wif_key)
checksum = key[-4:]
key = key[1:33]
if hashlib.sha256(hashlib.sha256((128).to_bytes(1, 'big') + key).digest()
).digest()[0:4] != checksum:
raise Exception('Bad checksum')
return key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class k_box:
def __init__(self, string=0, file=0):
if string != 0:
if not len(string) == 64:
raise Exception('Bad len')
self.__priv_key = bytes.fromhex(string)
else:
self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=
byteorder)
self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,
curve=ecdsa.SECP256k1).verifying_key.to_string()
def get_secret_key(self):
return self.__priv_key
def get_public_key(self) ->bytearray:
return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key
def get_public_key_compresed(self) ->bytearray:
return (b'\x03' if self.__pub_key[-1] % 2 else b'\x02'
) + self.__pub_key[0:32]
def get_address(self) ->str:
e_pub = self.get_encrypted_pub_key()
main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub
check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()
).digest()[:4]
hex_addr = main_net_key + check_sum
return base58_encode(hex_addr)
def get_encrypted_pub_key(self):
sha = hashlib.sha256(self.get_public_key_compresed()).digest()
result = hashlib.new(name='ripemd160', data=sha).digest()
return result
def sign(self, message: bytes=0) ->bytearray:
sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.
SECP256k1)
return sk.sign(message)
def verify(self, signature, message):
vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.
SECP256k1)
return vk.verify(signature, message.encode())
def covert_to_address(pub_key: bytes) ->str:
sha = hashlib.sha256(pub_key).digest()
pub_key = hashlib.new(name='ripemd160', data=sha).digest()
main_net_key = (0).to_bytes(1, byteorder=byteorder) + pub_key
check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[
:4]
hex_addr = main_net_key + check_sum
return base58_encode(hex_addr)
def base58_encode(n: bytearray) ->str:
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
b58_string = ''
leading_zeros = len(n.hex()) - len(n.hex().lstrip('0'))
address_int = int.from_bytes(n, byteorder='big')
while address_int > 0:
digit = address_int % 58
digit_char = alphabet[digit]
b58_string = digit_char + b58_string
address_int //= 58
ones = leading_zeros // 2
for one in range(ones):
b58_string = '1' + b58_string
return b58_string
def base58_decode(s):
"""Decode a base58-encoding string, returning bytes"""
if not s:
return b''
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
n = 0
for c in s:
n *= 58
if c not in alphabet:
raise Exception('Character %r is not a valid base58 character' % c)
digit = alphabet.index(c)
n += digit
h = '%x' % n
if len(h) % 2:
h = '0' + h
res = bytearray.fromhex(h)
pad = 0
for c in s[:-1]:
if c == alphabet[0]:
pad += 1
else:
break
return b'\x00' * pad + res
def to_WIF(key: str):
if not len(key) == 64:
raise Exception('Bad key len')
key = '80' + key
key_b = bytes.fromhex(key)
sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()
key_b = key_b + sha_key1[0:4]
return base58_encode(key_b)
def f_import_private(filename):
file = open(filename, 'r')
wif_key = file.read()
file.close()
key = from_WIF(wif_key)
key_pair = k_box(string=key.hex())
return key_pair
def from_WIF(wif_key):
if not len(wif_key) == 51:
raise Exception('Bad len of WIF key')
key = base58_decode(wif_key)
checksum = key[-4:]
key = key[1:33]
if hashlib.sha256(hashlib.sha256((128).to_bytes(1, 'big') + key).digest()
).digest()[0:4] != checksum:
raise Exception('Bad checksum')
return key
def uncompress_key(comp_key: bytearray):
x = int.from_bytes(comp_key[1:], byteorder='big')
is_even = True if comp_key[1] == '2' else False
""" Derive y point from x point """
curve = ecdsa.SECP256k1.curve
a, b, p = curve.a(), curve.b(), curve.p()
alpha = (pow(x, 3, p) + a * x + b) % p
beta = ecdsa.numbertheory.square_root_mod_prime(alpha, p)
if beta % 2 == is_even:
beta = p - beta
return bytearray.fromhex(f'04{x:064x}{beta:064x}')
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
#%%:
import secrets
import hashlib
import base64
import ecdsa
from sys import byteorder
#%%:
class k_box:
    """A secp256k1 key pair with Bitcoin-style address, sign and verify helpers."""

    def __init__(self, string=0, file=0):
        """Create a key pair.

        string -- 64-character hex private key, or 0 (default) to generate
                  a fresh random key.
        file   -- unused; kept only for interface compatibility.
        """
        if string != 0:
            if len(string) != 64:
                raise Exception("Bad len")
            self.__priv_key = bytes.fromhex(string)
        else:
            # 256 random bits serialized with the platform byte order
            # (sys.byteorder) -- NOTE(review): 'big' is the usual convention
            # for keys; kept as written to preserve behaviour.
            self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=byteorder)
        signing_key = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.SECP256k1)
        self.__pub_key = signing_key.verifying_key.to_string()

    def get_secret_key(self):
        """Return the raw 32-byte private key."""
        return self.__priv_key

    def get_public_key(self) -> bytearray:
        """Return the uncompressed SEC public key: 0x04 || X || Y."""
        prefix = (4).to_bytes(1, byteorder=byteorder)
        return prefix + self.__pub_key

    def get_public_key_compresed(self) -> bytearray:
        """Return the compressed SEC public key: 0x02/0x03 prefix || X."""
        y_is_odd = self.__pub_key[-1] % 2
        prefix = b'\x03' if y_is_odd else b'\x02'
        return prefix + self.__pub_key[:32]

    def get_address(self) -> str:
        """Return the Base58Check (version byte 0x00) address for this key."""
        payload = (0).to_bytes(1, byteorder=byteorder) + self.get_encrypted_pub_key()
        # Base58Check checksum: first 4 bytes of double SHA-256 of the payload.
        checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
        return base58_encode(payload + checksum)

    def get_encrypted_pub_key(self):
        """Return RIPEMD160(SHA256(compressed public key)) -- the hash160."""
        sha_digest = hashlib.sha256(self.get_public_key_compresed()).digest()
        return hashlib.new(name='ripemd160', data=sha_digest).digest()

    def sign(self, message: bytes = 0) -> bytearray:
        """Sign *message* (bytes) with the private key; returns the signature."""
        signer = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.SECP256k1)
        return signer.sign(message)

    def verify(self, signature, message):
        """Verify *signature* over *message*.

        NOTE(review): message is encoded here, so this expects a str even
        though sign() takes bytes -- confirm callers rely on this asymmetry.
        """
        verifier = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.SECP256k1)
        return verifier.verify(signature, message.encode())
#%%:
def covert_to_address(pub_key: bytes) -> str:
    """Turn a serialized public key into a Base58Check version-0x00 address.

    NOTE(review): the name keeps the original 'covert' spelling because
    callers depend on it; 'convert_to_address' was presumably intended.
    """
    hash160 = hashlib.new(name='ripemd160',
                          data=hashlib.sha256(pub_key).digest()).digest()
    payload = (0).to_bytes(1, byteorder=byteorder) + hash160
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return base58_encode(payload + checksum)
#%%:
def base58_encode(n: bytearray) -> str:
    """Encode big-endian bytes as Base58, emitting one '1' per leading zero byte.

    Returns '' for empty input.
    """
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    value = int.from_bytes(n, byteorder='big')
    digits = []
    while value > 0:
        value, remainder = divmod(value, 58)
        digits.append(alphabet[remainder])
    # Leading zero bytes carry no weight in the integer, so restore them
    # explicitly as '1' characters (Base58 digit zero).
    pad = 0
    for byte in n:
        if byte:
            break
        pad += 1
    return '1' * pad + ''.join(reversed(digits))
def base58_decode(s):
    """Decode a Base58 string to bytes; leading '1's become 0x00 bytes.

    Returns b'' for an empty string and raises Exception on characters
    outside the Base58 alphabet.
    NOTE(review): the padding scan deliberately skips the final character
    (s[:-1]); behaviour preserved from the original.
    """
    if not s:
        return b''
    alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    value = 0
    for ch in s:
        value *= 58
        if ch not in alphabet:
            raise Exception('Character %r is not a valid base58 character' % ch)
        value += alphabet.index(ch)
    # Render the integer as an even-length hex string, then to bytes.
    digits = '%x' % value
    if len(digits) % 2:
        digits = '0' + digits
    body = bytearray.fromhex(digits)
    # Restore leading zero bytes encoded as '1' characters.
    pad = 0
    for ch in s[:-1]:
        if ch != alphabet[0]:
            break
        pad += 1
    return b'\x00' * pad + body
# def base58_decode(s:str, len):
# alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
# result = 0
# for c in s:
# result = result * 58 + alphabet.index(c)
# return bytearray.fromhex(f"{result:0x}".rjust(len * 2, '0'))
# %%:
def to_WIF(key: str):
    """Encode a 64-character hex private key as a mainnet WIF string.

    Raises Exception when the key is not exactly 64 hex characters.
    """
    if len(key) != 64:
        raise Exception("Bad key len")
    payload = bytes.fromhex('80' + key)  # 0x80 = mainnet private-key prefix
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return base58_encode(payload + checksum)
def f_import_private(filename):
    """Load a WIF-encoded private key from *filename* and return a k_box."""
    with open(filename, 'r') as handle:
        wif_key = handle.read()
    secret = from_WIF(wif_key)
    return k_box(string=secret.hex())
#%%:
def from_WIF(wif_key):
    """Decode a 51-character uncompressed WIF string to the raw 32-byte key.

    Raises Exception on a wrong length or a checksum mismatch.
    """
    if len(wif_key) != 51:
        raise Exception("Bad len of WIF key")
    decoded = base58_decode(wif_key)
    checksum = decoded[-4:]
    secret = decoded[1:33]  # drop the 0x80 version byte, keep 32 key bytes
    expected = hashlib.sha256(hashlib.sha256(b'\x80' + secret).digest()).digest()[:4]
    if expected != checksum:
        raise Exception("Bad checksum")
    return secret
#%%:
def uncompress_key(comp_key: bytearray):
    """Expand a 33-byte compressed SEC public key into the 65-byte form.

    comp_key -- prefix byte 0x02 (even Y) or 0x03 (odd Y) followed by the
                32-byte big-endian X coordinate.
    Returns a bytearray 0x04 || X || Y.

    Bug fix: the parity flag must come from the prefix *byte* comp_key[0]
    (value 2 or 3); the previous code compared the second byte to the
    string '2', which is always False for bytes input.
    """
    x = int.from_bytes(comp_key[1:], byteorder='big')
    is_even = comp_key[0] == 2
    # Solve y^2 = x^3 + a*x + b (mod p) on secp256k1 for the Y coordinate.
    curve = ecdsa.SECP256k1.curve
    a, b, p = curve.a(), curve.b(), curve.p()
    alpha = (pow(x, 3, p) + a * x + b) % p
    beta = ecdsa.numbertheory.square_root_mod_prime(alpha, p)
    # square_root_mod_prime yields one of the two roots; pick the root
    # whose parity matches the prefix, flipping to p - beta otherwise.
    if (beta % 2 == 0) != is_even:
        beta = p - beta
    return bytearray.fromhex(f'04{x:064x}{beta:064x}')
|
flexible
|
{
"blob_id": "1ff2f06349ab1906a1649bdb83828fbdb3cf584f",
"index": 4516,
"step-1": "<mask token>\n\n\nclass k_box:\n\n def __init__(self, string=0, file=0):\n if string != 0:\n if not len(string) == 64:\n raise Exception('Bad len')\n self.__priv_key = bytes.fromhex(string)\n else:\n self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=\n byteorder)\n self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,\n curve=ecdsa.SECP256k1).verifying_key.to_string()\n\n def get_secret_key(self):\n return self.__priv_key\n\n def get_public_key(self) ->bytearray:\n return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key\n\n def get_public_key_compresed(self) ->bytearray:\n return (b'\\x03' if self.__pub_key[-1] % 2 else b'\\x02'\n ) + self.__pub_key[0:32]\n\n def get_address(self) ->str:\n e_pub = self.get_encrypted_pub_key()\n main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()\n ).digest()[:4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n def get_encrypted_pub_key(self):\n sha = hashlib.sha256(self.get_public_key_compresed()).digest()\n result = hashlib.new(name='ripemd160', data=sha).digest()\n return result\n\n def sign(self, message: bytes=0) ->bytearray:\n sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.\n SECP256k1)\n return sk.sign(message)\n\n def verify(self, signature, message):\n vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.\n SECP256k1)\n return vk.verify(signature, message.encode())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass k_box:\n\n def __init__(self, string=0, file=0):\n if string != 0:\n if not len(string) == 64:\n raise Exception('Bad len')\n self.__priv_key = bytes.fromhex(string)\n else:\n self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=\n byteorder)\n self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,\n curve=ecdsa.SECP256k1).verifying_key.to_string()\n\n def get_secret_key(self):\n return self.__priv_key\n\n def get_public_key(self) ->bytearray:\n return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key\n\n def get_public_key_compresed(self) ->bytearray:\n return (b'\\x03' if self.__pub_key[-1] % 2 else b'\\x02'\n ) + self.__pub_key[0:32]\n\n def get_address(self) ->str:\n e_pub = self.get_encrypted_pub_key()\n main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()\n ).digest()[:4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n def get_encrypted_pub_key(self):\n sha = hashlib.sha256(self.get_public_key_compresed()).digest()\n result = hashlib.new(name='ripemd160', data=sha).digest()\n return result\n\n def sign(self, message: bytes=0) ->bytearray:\n sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.\n SECP256k1)\n return sk.sign(message)\n\n def verify(self, signature, message):\n vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.\n SECP256k1)\n return vk.verify(signature, message.encode())\n\n\n<mask token>\n\n\ndef base58_decode(s):\n \"\"\"Decode a base58-encoding string, returning bytes\"\"\"\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = bytearray.fromhex(h)\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]:\n 
pad += 1\n else:\n break\n return b'\\x00' * pad + res\n\n\ndef to_WIF(key: str):\n if not len(key) == 64:\n raise Exception('Bad key len')\n key = '80' + key\n key_b = bytes.fromhex(key)\n sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()\n key_b = key_b + sha_key1[0:4]\n return base58_encode(key_b)\n\n\n<mask token>\n\n\ndef from_WIF(wif_key):\n if not len(wif_key) == 51:\n raise Exception('Bad len of WIF key')\n key = base58_decode(wif_key)\n checksum = key[-4:]\n key = key[1:33]\n if hashlib.sha256(hashlib.sha256((128).to_bytes(1, 'big') + key).digest()\n ).digest()[0:4] != checksum:\n raise Exception('Bad checksum')\n return key\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass k_box:\n\n def __init__(self, string=0, file=0):\n if string != 0:\n if not len(string) == 64:\n raise Exception('Bad len')\n self.__priv_key = bytes.fromhex(string)\n else:\n self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=\n byteorder)\n self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,\n curve=ecdsa.SECP256k1).verifying_key.to_string()\n\n def get_secret_key(self):\n return self.__priv_key\n\n def get_public_key(self) ->bytearray:\n return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key\n\n def get_public_key_compresed(self) ->bytearray:\n return (b'\\x03' if self.__pub_key[-1] % 2 else b'\\x02'\n ) + self.__pub_key[0:32]\n\n def get_address(self) ->str:\n e_pub = self.get_encrypted_pub_key()\n main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()\n ).digest()[:4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n def get_encrypted_pub_key(self):\n sha = hashlib.sha256(self.get_public_key_compresed()).digest()\n result = hashlib.new(name='ripemd160', data=sha).digest()\n return result\n\n def sign(self, message: bytes=0) ->bytearray:\n sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.\n SECP256k1)\n return sk.sign(message)\n\n def verify(self, signature, message):\n vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.\n SECP256k1)\n return vk.verify(signature, message.encode())\n\n\ndef covert_to_address(pub_key: bytes) ->str:\n sha = hashlib.sha256(pub_key).digest()\n pub_key = hashlib.new(name='ripemd160', data=sha).digest()\n main_net_key = (0).to_bytes(1, byteorder=byteorder) + pub_key\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[\n :4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n\ndef base58_encode(n: bytearray) ->str:\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n b58_string = 
''\n leading_zeros = len(n.hex()) - len(n.hex().lstrip('0'))\n address_int = int.from_bytes(n, byteorder='big')\n while address_int > 0:\n digit = address_int % 58\n digit_char = alphabet[digit]\n b58_string = digit_char + b58_string\n address_int //= 58\n ones = leading_zeros // 2\n for one in range(ones):\n b58_string = '1' + b58_string\n return b58_string\n\n\ndef base58_decode(s):\n \"\"\"Decode a base58-encoding string, returning bytes\"\"\"\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = bytearray.fromhex(h)\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]:\n pad += 1\n else:\n break\n return b'\\x00' * pad + res\n\n\ndef to_WIF(key: str):\n if not len(key) == 64:\n raise Exception('Bad key len')\n key = '80' + key\n key_b = bytes.fromhex(key)\n sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()\n key_b = key_b + sha_key1[0:4]\n return base58_encode(key_b)\n\n\n<mask token>\n\n\ndef from_WIF(wif_key):\n if not len(wif_key) == 51:\n raise Exception('Bad len of WIF key')\n key = base58_decode(wif_key)\n checksum = key[-4:]\n key = key[1:33]\n if hashlib.sha256(hashlib.sha256((128).to_bytes(1, 'big') + key).digest()\n ).digest()[0:4] != checksum:\n raise Exception('Bad checksum')\n return key\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass k_box:\n\n def __init__(self, string=0, file=0):\n if string != 0:\n if not len(string) == 64:\n raise Exception('Bad len')\n self.__priv_key = bytes.fromhex(string)\n else:\n self.__priv_key = secrets.randbits(256).to_bytes(32, byteorder=\n byteorder)\n self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key,\n curve=ecdsa.SECP256k1).verifying_key.to_string()\n\n def get_secret_key(self):\n return self.__priv_key\n\n def get_public_key(self) ->bytearray:\n return (4).to_bytes(1, byteorder=byteorder) + self.__pub_key\n\n def get_public_key_compresed(self) ->bytearray:\n return (b'\\x03' if self.__pub_key[-1] % 2 else b'\\x02'\n ) + self.__pub_key[0:32]\n\n def get_address(self) ->str:\n e_pub = self.get_encrypted_pub_key()\n main_net_key = (0).to_bytes(1, byteorder=byteorder) + e_pub\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()\n ).digest()[:4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n def get_encrypted_pub_key(self):\n sha = hashlib.sha256(self.get_public_key_compresed()).digest()\n result = hashlib.new(name='ripemd160', data=sha).digest()\n return result\n\n def sign(self, message: bytes=0) ->bytearray:\n sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.\n SECP256k1)\n return sk.sign(message)\n\n def verify(self, signature, message):\n vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.\n SECP256k1)\n return vk.verify(signature, message.encode())\n\n\ndef covert_to_address(pub_key: bytes) ->str:\n sha = hashlib.sha256(pub_key).digest()\n pub_key = hashlib.new(name='ripemd160', data=sha).digest()\n main_net_key = (0).to_bytes(1, byteorder=byteorder) + pub_key\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[\n :4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n\ndef base58_encode(n: bytearray) ->str:\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n b58_string = 
''\n leading_zeros = len(n.hex()) - len(n.hex().lstrip('0'))\n address_int = int.from_bytes(n, byteorder='big')\n while address_int > 0:\n digit = address_int % 58\n digit_char = alphabet[digit]\n b58_string = digit_char + b58_string\n address_int //= 58\n ones = leading_zeros // 2\n for one in range(ones):\n b58_string = '1' + b58_string\n return b58_string\n\n\ndef base58_decode(s):\n \"\"\"Decode a base58-encoding string, returning bytes\"\"\"\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n res = bytearray.fromhex(h)\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]:\n pad += 1\n else:\n break\n return b'\\x00' * pad + res\n\n\ndef to_WIF(key: str):\n if not len(key) == 64:\n raise Exception('Bad key len')\n key = '80' + key\n key_b = bytes.fromhex(key)\n sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()\n key_b = key_b + sha_key1[0:4]\n return base58_encode(key_b)\n\n\ndef f_import_private(filename):\n file = open(filename, 'r')\n wif_key = file.read()\n file.close()\n key = from_WIF(wif_key)\n key_pair = k_box(string=key.hex())\n return key_pair\n\n\ndef from_WIF(wif_key):\n if not len(wif_key) == 51:\n raise Exception('Bad len of WIF key')\n key = base58_decode(wif_key)\n checksum = key[-4:]\n key = key[1:33]\n if hashlib.sha256(hashlib.sha256((128).to_bytes(1, 'big') + key).digest()\n ).digest()[0:4] != checksum:\n raise Exception('Bad checksum')\n return key\n\n\ndef uncompress_key(comp_key: bytearray):\n x = int.from_bytes(comp_key[1:], byteorder='big')\n is_even = True if comp_key[1] == '2' else False\n \"\"\" Derive y point from x point \"\"\"\n curve = ecdsa.SECP256k1.curve\n a, b, p = curve.a(), curve.b(), curve.p()\n alpha = (pow(x, 3, p) + a * x + b) % p\n beta = 
ecdsa.numbertheory.square_root_mod_prime(alpha, p)\n if beta % 2 == is_even:\n beta = p - beta\n return bytearray.fromhex(f'04{x:064x}{beta:064x}')\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n#%%:\nimport secrets\nimport hashlib\nimport base64\nimport ecdsa\nfrom sys import byteorder\n\n\n#%%:\nclass k_box:\n def __init__(self, string = 0, file = 0):\n if string != 0:\n if not(len(string) == 64):\n raise Exception(\"Bad len\")\n self.__priv_key = bytes.fromhex(string)\n else:\n self.__priv_key = secrets.randbits(256).to_bytes(32,byteorder=byteorder)\n self.__pub_key = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.SECP256k1).verifying_key.to_string()\n\n \n def get_secret_key(self):\n return self.__priv_key\n\n \n def get_public_key(self)->bytearray:\n return (0x04.to_bytes(1,byteorder=byteorder) + self.__pub_key)\n\n\n def get_public_key_compresed(self) -> bytearray:\n return (b'\\x03' if self.__pub_key[-1] % 2 else b'\\x02') + self.__pub_key[0:32]\n \n \n def get_address(self) -> str:\n e_pub = self.get_encrypted_pub_key()\n main_net_key = 0x00.to_bytes(1,byteorder=byteorder) + e_pub\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[:4]\n hex_addr = main_net_key + check_sum\n return base58_encode(hex_addr)\n\n def get_encrypted_pub_key(self):\n sha = hashlib.sha256(self.get_public_key_compresed()).digest()\n result = hashlib.new(name='ripemd160', data=sha).digest() \n return result\n \n \n def sign(self, message:bytes = 0) -> bytearray:\n sk = ecdsa.SigningKey.from_string(self.__priv_key, curve=ecdsa.SECP256k1 )\n return sk.sign(message)\n\n \n def verify(self, signature, message):\n vk = ecdsa.VerifyingKey.from_string(self.__pub_key, curve=ecdsa.SECP256k1)\n return vk.verify(signature, message.encode()) \n \n\n\n#%%:\ndef covert_to_address(pub_key:bytes) -> str:\n sha = hashlib.sha256(pub_key).digest()\n pub_key = hashlib.new(name='ripemd160', data=sha).digest() \n main_net_key = 0x00.to_bytes(1,byteorder=byteorder) + pub_key\n check_sum = hashlib.sha256(hashlib.sha256(main_net_key).digest()).digest()[:4]\n hex_addr = main_net_key + check_sum\n return 
base58_encode(hex_addr)\n\n\n#%%:\ndef base58_encode(n:bytearray)->str:\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n b58_string = \"\"\n leading_zeros = len(n.hex()) - len(n.hex().lstrip('0')) # ! refactor counting zeros\n address_int = int.from_bytes(n,byteorder=\"big\")\n while address_int > 0:\n digit = address_int % 58\n digit_char = alphabet[digit]\n b58_string = digit_char + b58_string\n address_int //= 58\n ones = leading_zeros // 2\n for one in range(ones):\n b58_string = '1' + b58_string\n return b58_string\n\ndef base58_decode(s):\n \"\"\"Decode a base58-encoding string, returning bytes\"\"\"\n if not s:\n return b''\n alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n # Convert the string to an integer\n n = 0\n for c in s:\n n *= 58\n if c not in alphabet:\n raise Exception('Character %r is not a valid base58 character' % c)\n digit = alphabet.index(c)\n n += digit\n\n # Convert the integer to bytes\n h = '%x' % n\n if len(h) % 2:\n h = '0' + h\n # res = \"\"\n res = bytearray.fromhex(h)\n\n # Add padding back.\n pad = 0\n for c in s[:-1]:\n if c == alphabet[0]: pad += 1\n else: break\n return b'\\x00' * pad + res\n\n\n\n# def base58_decode(s:str, len):\n# alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n# result = 0\n# for c in s:\n# result = result * 58 + alphabet.index(c)\n# return bytearray.fromhex(f\"{result:0x}\".rjust(len * 2, '0'))\n\n# %%:\ndef to_WIF(key:str):\n if not(len(key) == 64):\n raise Exception(\"Bad key len\")\n key = \"80\" + key\n key_b = bytes.fromhex(key)\n sha_key1 = hashlib.sha256(hashlib.sha256(key_b).digest()).digest()\n key_b = key_b + sha_key1[0:4]\n return base58_encode(key_b)\n\n\ndef f_import_private(filename):\n file = open(filename, 'r')\n wif_key = file.read()\n file.close()\n key = from_WIF(wif_key)\n key_pair = k_box(string=key.hex())\n return key_pair\n\n\n#%%:\ndef from_WIF(wif_key):\n if not(len(wif_key) == 51):\n raise Exception(\"Bad 
len of WIF key\")\n key = base58_decode(wif_key)\n checksum = key[-4:]\n key = key[1:33]\n if hashlib.sha256(hashlib.sha256(0x80.to_bytes(1,\"big\") + key).digest()).digest()[0:4] != checksum:\n raise Exception(\"Bad checksum\")\n return key\n\n#%%:\n\ndef uncompress_key(comp_key: bytearray):\n x = int.from_bytes(comp_key[1:], byteorder='big')\n is_even = True if comp_key[1] == '2' else False\n \"\"\" Derive y point from x point \"\"\"\n curve = ecdsa.SECP256k1.curve\n # The curve equation over F_p is:\n # y^2 = x^3 + ax + b\n a, b, p = curve.a(), curve.b(), curve.p()\n alpha = (pow(x, 3, p) + a * x + b) % p\n beta = ecdsa.numbertheory.square_root_mod_prime(alpha, p)\n if (beta % 2) == is_even:\n beta = p - beta\n return bytearray.fromhex( f\"04{x:064x}{beta:064x}\")",
"step-ids": [
9,
12,
14,
16,
18
]
}
|
[
9,
12,
14,
16,
18
] |
from typing import List, Callable
#: A list of int
T = List[int]
C = Callable[[int], None] # a generic alias not having a doccomment
|
normal
|
{
"blob_id": "aaee69d339cf1c14e54366633155ee57026e6487",
"index": 2071,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nT = List[int]\nC = Callable[[int], None]\n",
"step-3": "from typing import List, Callable\nT = List[int]\nC = Callable[[int], None]\n",
"step-4": "from typing import List, Callable\n\n#: A list of int\nT = List[int]\n\nC = Callable[[int], None] # a generic alias not having a doccomment\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class BaseHandler(tornado.web.RequestHandler):
<|reserved_special_token_0|>
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([('/packages/(.*)', CondaHandler), (
'/submit', SubmitHandler)])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([('/packages/(.*)', CondaHandler), (
'/submit', SubmitHandler)])
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
<|reserved_special_token_1|>
import tornado.ioloop
import tornado.web
import json
import utils
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header('Access-Control-Allow-Origin', '*')
self.set_header('Access-Control-Allow-Headers', 'x-requested-with')
class CondaHandler(BaseHandler):
def get(self, filePath):
with open('packages/conda/' + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data['packages']
for package in packages:
name = packages[package]['name']
version = packages[package]['version']
try:
if version not in condaPackages[name]['versions']:
condaPackages[name]['versions'].append(version)
except:
condaPackages[name] = {'versions': [version]}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([('/packages/(.*)', CondaHandler), (
'/submit', SubmitHandler)])
if __name__ == '__main__':
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
<|reserved_special_token_1|>
import tornado.ioloop
import tornado.web
import json
import utils
class BaseHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "x-requested-with")
class CondaHandler(BaseHandler):
def get(self, filePath):
with open("packages/conda/" + filePath) as f:
data = json.load(f)
condaPackages = {}
packages = data["packages"]
for package in packages:
name = packages[package]["name"]
version = packages[package]["version"]
try:
if version not in condaPackages[name]["versions"]:
condaPackages[name]["versions"].append(version)
except:
condaPackages[name] = {
"versions": [
version
]
}
self.write(json.dumps(condaPackages))
class SubmitHandler(BaseHandler):
def post(self):
data = tornado.escape.json_decode(self.request.body)
print(data)
folderPath = str(data['id'])
utils.mkdir(folderPath)
self.write('testing')
def make_app():
return tornado.web.Application([
(r"/packages/(.*)", CondaHandler),
(r"/submit", SubmitHandler)
])
if __name__ == "__main__":
app = make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
|
flexible
|
{
"blob_id": "44a9bb4d74d2e694f252d8726647bca13baa4df5",
"index": 853,
"step-1": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n <mask token>\n\n\nclass CondaHandler(BaseHandler):\n\n def get(self, filePath):\n with open('packages/conda/' + filePath) as f:\n data = json.load(f)\n condaPackages = {}\n packages = data['packages']\n for package in packages:\n name = packages[package]['name']\n version = packages[package]['version']\n try:\n if version not in condaPackages[name]['versions']:\n condaPackages[name]['versions'].append(version)\n except:\n condaPackages[name] = {'versions': [version]}\n self.write(json.dumps(condaPackages))\n\n\nclass SubmitHandler(BaseHandler):\n\n def post(self):\n data = tornado.escape.json_decode(self.request.body)\n print(data)\n folderPath = str(data['id'])\n utils.mkdir(folderPath)\n self.write('testing')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'x-requested-with')\n\n\nclass CondaHandler(BaseHandler):\n\n def get(self, filePath):\n with open('packages/conda/' + filePath) as f:\n data = json.load(f)\n condaPackages = {}\n packages = data['packages']\n for package in packages:\n name = packages[package]['name']\n version = packages[package]['version']\n try:\n if version not in condaPackages[name]['versions']:\n condaPackages[name]['versions'].append(version)\n except:\n condaPackages[name] = {'versions': [version]}\n self.write(json.dumps(condaPackages))\n\n\nclass SubmitHandler(BaseHandler):\n\n def post(self):\n data = tornado.escape.json_decode(self.request.body)\n print(data)\n folderPath = str(data['id'])\n utils.mkdir(folderPath)\n self.write('testing')\n\n\ndef make_app():\n return tornado.web.Application([('/packages/(.*)', CondaHandler), (\n '/submit', SubmitHandler)])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'x-requested-with')\n\n\nclass CondaHandler(BaseHandler):\n\n def get(self, filePath):\n with open('packages/conda/' + filePath) as f:\n data = json.load(f)\n condaPackages = {}\n packages = data['packages']\n for package in packages:\n name = packages[package]['name']\n version = packages[package]['version']\n try:\n if version not in condaPackages[name]['versions']:\n condaPackages[name]['versions'].append(version)\n except:\n condaPackages[name] = {'versions': [version]}\n self.write(json.dumps(condaPackages))\n\n\nclass SubmitHandler(BaseHandler):\n\n def post(self):\n data = tornado.escape.json_decode(self.request.body)\n print(data)\n folderPath = str(data['id'])\n utils.mkdir(folderPath)\n self.write('testing')\n\n\ndef make_app():\n return tornado.web.Application([('/packages/(.*)', CondaHandler), (\n '/submit', SubmitHandler)])\n\n\nif __name__ == '__main__':\n app = make_app()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n",
"step-4": "import tornado.ioloop\nimport tornado.web\nimport json\nimport utils\n\n\nclass BaseHandler(tornado.web.RequestHandler):\n\n def set_default_headers(self):\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', 'x-requested-with')\n\n\nclass CondaHandler(BaseHandler):\n\n def get(self, filePath):\n with open('packages/conda/' + filePath) as f:\n data = json.load(f)\n condaPackages = {}\n packages = data['packages']\n for package in packages:\n name = packages[package]['name']\n version = packages[package]['version']\n try:\n if version not in condaPackages[name]['versions']:\n condaPackages[name]['versions'].append(version)\n except:\n condaPackages[name] = {'versions': [version]}\n self.write(json.dumps(condaPackages))\n\n\nclass SubmitHandler(BaseHandler):\n\n def post(self):\n data = tornado.escape.json_decode(self.request.body)\n print(data)\n folderPath = str(data['id'])\n utils.mkdir(folderPath)\n self.write('testing')\n\n\ndef make_app():\n return tornado.web.Application([('/packages/(.*)', CondaHandler), (\n '/submit', SubmitHandler)])\n\n\nif __name__ == '__main__':\n app = make_app()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n",
"step-5": "import tornado.ioloop\nimport tornado.web\nimport json\nimport utils\n\nclass BaseHandler(tornado.web.RequestHandler):\n def set_default_headers(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n\nclass CondaHandler(BaseHandler):\n def get(self, filePath):\n with open(\"packages/conda/\" + filePath) as f:\n data = json.load(f)\n\n condaPackages = {}\n packages = data[\"packages\"]\n\n for package in packages:\n name = packages[package][\"name\"]\n version = packages[package][\"version\"]\n\n try:\n if version not in condaPackages[name][\"versions\"]:\n condaPackages[name][\"versions\"].append(version)\n except:\n condaPackages[name] = {\n \"versions\": [\n version\n ]\n }\n self.write(json.dumps(condaPackages))\n\nclass SubmitHandler(BaseHandler):\n def post(self):\n data = tornado.escape.json_decode(self.request.body)\n print(data)\n folderPath = str(data['id'])\n utils.mkdir(folderPath)\n self.write('testing')\n\ndef make_app():\n return tornado.web.Application([\n (r\"/packages/(.*)\", CondaHandler),\n (r\"/submit\", SubmitHandler)\n ])\n\nif __name__ == \"__main__\":\n app = make_app()\n app.listen(8888)\n tornado.ioloop.IOLoop.current().start()",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append(dir_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')
<|reserved_special_token_0|>
django.setup()
<|reserved_special_token_0|>
for lev1 in row_data:
cat1 = GoodsCategory()
cat1.name = lev1['name']
cat1.code = lev1['code'] if lev1['code'] else ''
cat1.category_type = 1
cat1.save()
for lev2 in lev1['sub_categorys']:
cat2 = GoodsCategory()
cat2.name = lev2['name']
cat2.code = lev2['code'] if lev2['code'] else ''
cat2.category_type = 2
cat2.parent_category = cat1
cat2.save()
for lev3 in lev2['sub_categorys']:
cat3 = GoodsCategory()
cat3.name = lev3['name']
cat3.code = lev3['code'] if lev3['code'] else ''
cat3.category_type = 3
cat3.parent_category = cat2
cat3.save()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
sys.path.append(dir_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')
<|reserved_special_token_0|>
django.setup()
<|reserved_special_token_0|>
for lev1 in row_data:
cat1 = GoodsCategory()
cat1.name = lev1['name']
cat1.code = lev1['code'] if lev1['code'] else ''
cat1.category_type = 1
cat1.save()
for lev2 in lev1['sub_categorys']:
cat2 = GoodsCategory()
cat2.name = lev2['name']
cat2.code = lev2['code'] if lev2['code'] else ''
cat2.category_type = 2
cat2.parent_category = cat1
cat2.save()
for lev3 in lev2['sub_categorys']:
cat3 = GoodsCategory()
cat3.name = lev3['name']
cat3.code = lev3['code'] if lev3['code'] else ''
cat3.category_type = 3
cat3.parent_category = cat2
cat3.save()
<|reserved_special_token_1|>
import os, sys
file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
sys.path.append(dir_path)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')
import django
django.setup()
from goods.models import GoodsCategory
from db_tools.data.category_data import row_data
for lev1 in row_data:
cat1 = GoodsCategory()
cat1.name = lev1['name']
cat1.code = lev1['code'] if lev1['code'] else ''
cat1.category_type = 1
cat1.save()
for lev2 in lev1['sub_categorys']:
cat2 = GoodsCategory()
cat2.name = lev2['name']
cat2.code = lev2['code'] if lev2['code'] else ''
cat2.category_type = 2
cat2.parent_category = cat1
cat2.save()
for lev3 in lev2['sub_categorys']:
cat3 = GoodsCategory()
cat3.name = lev3['name']
cat3.code = lev3['code'] if lev3['code'] else ''
cat3.category_type = 3
cat3.parent_category = cat2
cat3.save()
<|reserved_special_token_1|>
#配置我们文件所在目录的搜寻环境
import os,sys
#第一步先拿到当前文件的路径
file_path = os.path.abspath(__file__)
#第二步 根据这个路径去拿到这个文件所在目录的路径
dir_path = os.path.dirname(file_path)
#第三步:讲这个目录的路径添加到我们的搜寻环境当中
sys.path.append(dir_path)
#第四步,动态设置我们的setting文件
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "gulishop.settings")
#第五步,让设置好的环境初始化生效
import django
django.setup()
#这一行,不能放在上面
from goods.models import GoodsCategory
from db_tools.data.category_data import row_data
for lev1 in row_data:
cat1 = GoodsCategory()
cat1.name = lev1['name']
cat1.code = lev1['code'] if lev1['code'] else ''
cat1.category_type = 1
cat1.save()
for lev2 in lev1['sub_categorys']:
cat2 = GoodsCategory()
cat2.name = lev2['name']
cat2.code = lev2['code'] if lev2['code'] else ''
cat2.category_type = 2
cat2.parent_category = cat1
cat2.save()
for lev3 in lev2['sub_categorys']:
cat3 = GoodsCategory()
cat3.name = lev3['name']
cat3.code = lev3['code'] if lev3['code'] else ''
cat3.category_type = 3
cat3.parent_category = cat2
cat3.save()
|
flexible
|
{
"blob_id": "35ae9c86594b50bbe4a67d2cc6b20efc6f6fdc64",
"index": 295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(dir_path)\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')\n<mask token>\ndjango.setup()\n<mask token>\nfor lev1 in row_data:\n cat1 = GoodsCategory()\n cat1.name = lev1['name']\n cat1.code = lev1['code'] if lev1['code'] else ''\n cat1.category_type = 1\n cat1.save()\n for lev2 in lev1['sub_categorys']:\n cat2 = GoodsCategory()\n cat2.name = lev2['name']\n cat2.code = lev2['code'] if lev2['code'] else ''\n cat2.category_type = 2\n cat2.parent_category = cat1\n cat2.save()\n for lev3 in lev2['sub_categorys']:\n cat3 = GoodsCategory()\n cat3.name = lev3['name']\n cat3.code = lev3['code'] if lev3['code'] else ''\n cat3.category_type = 3\n cat3.parent_category = cat2\n cat3.save()\n",
"step-3": "<mask token>\nfile_path = os.path.abspath(__file__)\ndir_path = os.path.dirname(file_path)\nsys.path.append(dir_path)\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')\n<mask token>\ndjango.setup()\n<mask token>\nfor lev1 in row_data:\n cat1 = GoodsCategory()\n cat1.name = lev1['name']\n cat1.code = lev1['code'] if lev1['code'] else ''\n cat1.category_type = 1\n cat1.save()\n for lev2 in lev1['sub_categorys']:\n cat2 = GoodsCategory()\n cat2.name = lev2['name']\n cat2.code = lev2['code'] if lev2['code'] else ''\n cat2.category_type = 2\n cat2.parent_category = cat1\n cat2.save()\n for lev3 in lev2['sub_categorys']:\n cat3 = GoodsCategory()\n cat3.name = lev3['name']\n cat3.code = lev3['code'] if lev3['code'] else ''\n cat3.category_type = 3\n cat3.parent_category = cat2\n cat3.save()\n",
"step-4": "import os, sys\nfile_path = os.path.abspath(__file__)\ndir_path = os.path.dirname(file_path)\nsys.path.append(dir_path)\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gulishop.settings')\nimport django\ndjango.setup()\nfrom goods.models import GoodsCategory\nfrom db_tools.data.category_data import row_data\nfor lev1 in row_data:\n cat1 = GoodsCategory()\n cat1.name = lev1['name']\n cat1.code = lev1['code'] if lev1['code'] else ''\n cat1.category_type = 1\n cat1.save()\n for lev2 in lev1['sub_categorys']:\n cat2 = GoodsCategory()\n cat2.name = lev2['name']\n cat2.code = lev2['code'] if lev2['code'] else ''\n cat2.category_type = 2\n cat2.parent_category = cat1\n cat2.save()\n for lev3 in lev2['sub_categorys']:\n cat3 = GoodsCategory()\n cat3.name = lev3['name']\n cat3.code = lev3['code'] if lev3['code'] else ''\n cat3.category_type = 3\n cat3.parent_category = cat2\n cat3.save()\n",
"step-5": "#配置我们文件所在目录的搜寻环境\r\nimport os,sys\r\n#第一步先拿到当前文件的路径\r\nfile_path = os.path.abspath(__file__)\r\n#第二步 根据这个路径去拿到这个文件所在目录的路径\r\ndir_path = os.path.dirname(file_path)\r\n#第三步:讲这个目录的路径添加到我们的搜寻环境当中\r\nsys.path.append(dir_path)\r\n#第四步,动态设置我们的setting文件\r\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"gulishop.settings\")\r\n#第五步,让设置好的环境初始化生效\r\nimport django\r\ndjango.setup()\r\n\r\n#这一行,不能放在上面\r\nfrom goods.models import GoodsCategory\r\nfrom db_tools.data.category_data import row_data\r\n\r\nfor lev1 in row_data:\r\n cat1 = GoodsCategory()\r\n cat1.name = lev1['name']\r\n cat1.code = lev1['code'] if lev1['code'] else ''\r\n cat1.category_type = 1\r\n cat1.save()\r\n for lev2 in lev1['sub_categorys']:\r\n cat2 = GoodsCategory()\r\n cat2.name = lev2['name']\r\n cat2.code = lev2['code'] if lev2['code'] else ''\r\n cat2.category_type = 2\r\n cat2.parent_category = cat1\r\n cat2.save()\r\n for lev3 in lev2['sub_categorys']:\r\n cat3 = GoodsCategory()\r\n cat3.name = lev3['name']\r\n cat3.code = lev3['code'] if lev3['code'] else ''\r\n cat3.category_type = 3\r\n cat3.parent_category = cat2\r\n cat3.save()\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#the initial DNA sequence
dnaSequence = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'
#seperating the DNA sequence at the specified location
firstFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT' [0:22]
secondFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT' [23:100]
#finsing the length of the 2 fragments
firstFragmentLen = len(firstFragment)
secondFragmentLen = len(secondFragment)
#printing the original and the split DNA sequence
print("the original DNA sequence is", dnaSequence)
print("the first fragment is", firstFragment, "and is", firstFragmentLen ,"letters long")
print("the second fragment is", secondFragment, "and is", secondFragmentLen,"letters long")
|
normal
|
{
"blob_id": "7dc99d33023dbb13938ac413af7d3e9471fdbc3d",
"index": 126,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('the original DNA sequence is', dnaSequence)\nprint('the first fragment is', firstFragment, 'and is', firstFragmentLen,\n 'letters long')\nprint('the second fragment is', secondFragment, 'and is', secondFragmentLen,\n 'letters long')\n",
"step-3": "dnaSequence = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'\nfirstFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'[0:22]\nsecondFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'[\n 23:100]\nfirstFragmentLen = len(firstFragment)\nsecondFragmentLen = len(secondFragment)\nprint('the original DNA sequence is', dnaSequence)\nprint('the first fragment is', firstFragment, 'and is', firstFragmentLen,\n 'letters long')\nprint('the second fragment is', secondFragment, 'and is', secondFragmentLen,\n 'letters long')\n",
"step-4": "#the initial DNA sequence\ndnaSequence = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT'\n\n#seperating the DNA sequence at the specified location\nfirstFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT' [0:22]\nsecondFragment = 'ACTGATCGATTACGTATAGTAGAATTCTATCATACATATATATCGATGCGTTCAT' [23:100]\n\n#finsing the length of the 2 fragments\nfirstFragmentLen = len(firstFragment)\nsecondFragmentLen = len(secondFragment)\n\n#printing the original and the split DNA sequence\nprint(\"the original DNA sequence is\", dnaSequence)\nprint(\"the first fragment is\", firstFragment, \"and is\", firstFragmentLen ,\"letters long\")\nprint(\"the second fragment is\", secondFragment, \"and is\", secondFragmentLen,\"letters long\")\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def selectionSort(arr, low, high):
for i in range(len(arr)):
mini = i
for j in range(i + 1, len(arr)):
if arr[mini] > arr[j]:
mini = j
arr[i], arr[mini] = arr[mini], arr[i]
return arr
|
flexible
|
{
"blob_id": "c91be6cc332139c5b1e7ee5a3512482d0f8620b1",
"index": 7322,
"step-1": "<mask token>\n",
"step-2": "def selectionSort(arr, low, high):\n for i in range(len(arr)):\n mini = i\n for j in range(i + 1, len(arr)):\n if arr[mini] > arr[j]:\n mini = j\n arr[i], arr[mini] = arr[mini], arr[i]\n return arr\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def project_query_lz_main(question):
txt_line = ReadTxtName(PROJECT_NAMES)
for project_name in txt_line:
if project_name in question:
GV.SHOW = True
return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'
GV.FLAG = 3
GV.SHOW = False
return question
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ReadTxtName(rootdir):
lines = []
with open(rootdir, 'r') as file_to_read:
while True:
line = file_to_read.readline()
if not line:
break
line = line.strip('\n')
lines.append(line)
return lines
def project_query_lz_main(question):
txt_line = ReadTxtName(PROJECT_NAMES)
for project_name in txt_line:
if project_name in question:
GV.SHOW = True
return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'
GV.FLAG = 3
GV.SHOW = False
return question
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from config.path_config import *
import GV
def ReadTxtName(rootdir):
lines = []
with open(rootdir, 'r') as file_to_read:
while True:
line = file_to_read.readline()
if not line:
break
line = line.strip('\n')
lines.append(line)
return lines
def project_query_lz_main(question):
txt_line = ReadTxtName(PROJECT_NAMES)
for project_name in txt_line:
if project_name in question:
GV.SHOW = True
return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'
GV.FLAG = 3
GV.SHOW = False
return question
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:42:09 2019
@author: Administrator
"""
from config.path_config import *
import GV
def ReadTxtName(rootdir):
    """Read a text file and return its lines as a list of strings.

    Each line has its line terminator stripped; empty lines are kept as
    empty strings, and a final line without a trailing newline is kept.

    :param rootdir: path of the text file to read
    :return: list of the file's lines without newlines
    """
    # Iterating the file object yields one line per iteration until EOF,
    # replacing the original manual readline()/break loop.
    with open(rootdir, 'r') as file_to_read:
        return [line.strip('\n') for line in file_to_read]
def project_query_lz_main(question):
    """Answer *question* if it mentions a known project name.

    Scans the question for any project name listed in the PROJECT_NAMES
    file.  On a match, sets ``GV.SHOW`` and returns a canned (Chinese)
    reply about that project; otherwise sets ``GV.FLAG = 3``, clears
    ``GV.SHOW`` and returns the question unchanged so a later module
    can handle it.
    """
    # Load the known project names and look for one inside the question.
    txt_line = ReadTxtName(PROJECT_NAMES)
    for project_name in txt_line:
        if project_name in question:
            GV.SHOW = True
            # Reply (Chinese): "We think you want to query project <name>,
            # but we have not recorded detailed project information yet."
            return ('我们觉得您是想查' + project_name + 
                '项目的信息,但是我们还没有记录项目详细信息')
    # No project matched: flag the question for the other modules.
    GV.FLAG = 3
    GV.SHOW = False
    return question
# Example (manual test): project_query_lz_main('工银天梭项目进度怎么样了')
|
flexible
|
{
"blob_id": "92bbccfbfebf905965c9cb0f1a85ffaa7d0cf6b5",
"index": 3796,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-3": "<mask token>\n\n\ndef ReadTxtName(rootdir):\n lines = []\n with open(rootdir, 'r') as file_to_read:\n while True:\n line = file_to_read.readline()\n if not line:\n break\n line = line.strip('\\n')\n lines.append(line)\n return lines\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-4": "<mask token>\nfrom config.path_config import *\nimport GV\n\n\ndef ReadTxtName(rootdir):\n lines = []\n with open(rootdir, 'r') as file_to_read:\n while True:\n line = file_to_read.readline()\n if not line:\n break\n line = line.strip('\\n')\n lines.append(line)\n return lines\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 19 13:42:09 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom config.path_config import *\r\nimport GV\r\n \r\ndef ReadTxtName(rootdir):\r\n #读取文件中的每一行,转为list\r\n lines = []\r\n with open(rootdir, 'r') as file_to_read:\r\n while True:\r\n line = file_to_read.readline()\r\n if not line:\r\n break\r\n line = line.strip('\\n')\r\n lines.append(line)\r\n return lines\r\n\r\ndef project_query_lz_main(question):\r\n #找语句中是否匹配到了项目名称\r\n txt_line = ReadTxtName(PROJECT_NAMES) \r\n for project_name in txt_line:\r\n if project_name in question:\r\n #print('我们觉得您是想查' + project_name + '项目的信息')\r\n GV.SHOW = True\r\n return ('我们觉得您是想查' + project_name + \r\n '项目的信息,但是我们还没有记录项目详细信息')\r\n GV.FLAG = 3\r\n GV.SHOW = False\r\n #state = False\r\n #print('与项目无关,此处跳出,接其他模块')\r\n return question\r\n\r\n#project_query_lz_main('工银天梭项目进度怎么样了',2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#TODO: allow workers to pull this from cache

# --- Local site endpoints -------------------------------------------------
RABBITMQ_IP = '172.23.105.82'
OBJECT_CACHE_IP = "172.23.105.69"
OBJECT_CACHE_PORT = "11911"
SERIESLY_IP = ''  # NOTE(review): left empty -- confirm whether seriesly is used
COUCHBASE_IP = '172.23.105.54'
COUCHBASE_PORT = '8091'
COUCHBASE_USER = "Administrator"
COUCHBASE_PWD = "password"
SSH_USER = "root"
SSH_PASSWORD = "password"

# --- Worker layout --------------------------------------------------------
WORKERS = ['127.0.0.1']
WORKER_CONFIGS = ["all"]
CB_CLUSTER_TAG = "default"  # vhost tag watched by this site's workers

CLUSTER_IPS = ["172.23.105.54", "172.23.105.57", "172.23.105.62", "172.23.105.55"]

# xdcr config
"""
" pointer information to remote sites
" remote1 = name for remote site
" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)
" this should equal RABBITMQ_IP of remote site
" CB_CLUSTER_TAG = represents vhost watched by workers remote site.
" this should equal CB_CLUSTER_TAG of remote site
" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site
"""
REMOTE_SITES = {"remote1" : {"RABBITMQ_IP" : "172.23.105.99",
                             "CB_CLUSTER_TAG" : "default",
                             "COUCHBASE_IP" : "172.23.105.58",
                             "COUCHBASE_PORT" : "8091"}}

LOGDIR="logs" # relative to current dir

# --- Backup Config --------------------------------------------------------
ENABLE_BACKUPS = False
BACKUP_DIR = "/tmp/backup"
BACKUP_NODE_IP = "127.0.0.1"
BACKUP_NODE_SSH_USER = "root"
BACKUP_NODE_SSH_PWD = "password"
|
normal
|
{
"blob_id": "e70ebd9bb9cd7027772ec117cb91349afba7ab10",
"index": 6390,
"step-1": "<mask token>\n",
"step-2": "RABBITMQ_IP = '172.23.105.82'\nOBJECT_CACHE_IP = '172.23.105.69'\nOBJECT_CACHE_PORT = '11911'\nSERIESLY_IP = ''\nCOUCHBASE_IP = '172.23.105.54'\nCOUCHBASE_PORT = '8091'\nCOUCHBASE_USER = 'Administrator'\nCOUCHBASE_PWD = 'password'\nSSH_USER = 'root'\nSSH_PASSWORD = 'password'\nWORKERS = ['127.0.0.1']\nWORKER_CONFIGS = ['all']\nCB_CLUSTER_TAG = 'default'\nCLUSTER_IPS = ['172.23.105.54', '172.23.105.57', '172.23.105.62',\n '172.23.105.55']\n<mask token>\nREMOTE_SITES = {'remote1': {'RABBITMQ_IP': '172.23.105.99',\n 'CB_CLUSTER_TAG': 'default', 'COUCHBASE_IP': '172.23.105.58',\n 'COUCHBASE_PORT': '8091'}}\nLOGDIR = 'logs'\nENABLE_BACKUPS = False\nBACKUP_DIR = '/tmp/backup'\nBACKUP_NODE_IP = '127.0.0.1'\nBACKUP_NODE_SSH_USER = 'root'\nBACKUP_NODE_SSH_PWD = 'password'\n",
"step-3": "#TODO: allow workers to pull this from cache\n\nRABBITMQ_IP = '172.23.105.82'\nOBJECT_CACHE_IP = \"172.23.105.69\"\nOBJECT_CACHE_PORT = \"11911\"\nSERIESLY_IP = ''\nCOUCHBASE_IP = '172.23.105.54'\nCOUCHBASE_PORT = '8091'\nCOUCHBASE_USER = \"Administrator\"\nCOUCHBASE_PWD = \"password\"\nSSH_USER = \"root\"\nSSH_PASSWORD = \"password\"\nWORKERS = ['127.0.0.1']\nWORKER_CONFIGS = [\"all\"]\nCB_CLUSTER_TAG = \"default\"\n\nCLUSTER_IPS = [\"172.23.105.54\", \"172.23.105.57\", \"172.23.105.62\", \"172.23.105.55\"]\n\n# xdcr config\n\"\"\"\n\" pointer information to remote sites\n\" remote1 = name for remote site\n\" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)\n\" this should equal RABBITMQ_IP of remote site\n\" CB_CLUSTER_TAG = represents vhost watched by workers remote site.\n\" this should equal CB_CLUSTER_TAG of remote site\n\" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site\n\"\"\"\nREMOTE_SITES = {\"remote1\" : {\"RABBITMQ_IP\" : \"172.23.105.99\",\n \"CB_CLUSTER_TAG\" : \"default\",\n \"COUCHBASE_IP\" : \"172.23.105.58\",\n \"COUCHBASE_PORT\" : \"8091\"}}\n\nLOGDIR=\"logs\" # relative to current dir\n\n\n#Backup Config\nENABLE_BACKUPS = False\nBACKUP_DIR = \"/tmp/backup\"\nBACKUP_NODE_IP = \"127.0.0.1\"\nBACKUP_NODE_SSH_USER = \"root\"\nBACKUP_NODE_SSH_PWD = \"password\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.
# Alex Duan <alex.duan@elico-corp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
import openerp.addons.decimal_precision as dp
class product_product(orm.Model):
    """Extend product.product with stock quantities computed for "kits".

    A kit is a product whose bill of materials (BoM) is of type
    'phantom': its available/incoming/outgoing/forecast quantities are
    derived from the stock of its BoM components rather than from its
    own stock moves.
    """
    _inherit = 'product.product'

    def get_kits_product_available(self, cr, uid, ids, context=None):
        # Stub kept for API compatibility; no behaviour implemented yet.
        pass

    def _kits_product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):
        """Compute the kit-aware quantity fields for the products in *ids*.

        Returns ``{product_id: {field_name: qty}}``.  Non-kit products get
        zeros for the kit fields (only the sale-quotation qty is filled);
        kit products get quantities derived from the components of their
        first BoM.
        """
        res = {}
        field_names = field_names or []
        context = context or {}
        # Pre-fill every requested field with 0.0 so the result dict is
        # complete for every id even before the computation below.
        for id in ids:
            res[id] = {}.fromkeys(field_names, 0.0)
        # Maps each kit field to the standard product field it derives from.
        field_map = {
            'kits_qty_available': 'qty_available',
            'kits_incoming_qty': 'incoming_qty',
            'kits_outgoing_qty': 'outgoing_qty',
            'kits_virtual_available': 'virtual_available'
        }
        for product_record in self.browse(cr, uid, ids, context=context):
            # Quantity already promised in draft sale quotations.
            so_qty = self._get_sale_quotation_qty(cr, uid, product_record.id, context=context)
            # Not a kit: kit quantities do not apply, report zeros.
            if not self._is_kit(
                cr, uid,
                [product_record.id],
                context=context).get(product_record.id):
                res[product_record.id] = {
                    'kits_qty_available': 0,
                    'kits_incoming_qty': 0,
                    'kits_virtual_available': 0,
                    'kits_outgoing_qty': 0,
                    'kits_sale_quotation_qty': so_qty
                }
            #TODO how to deal with multi-bom products?  Currently only the
            # first BoM is considered (see the ``break`` below).
            else:
                for bom in product_record.bom_ids:
                    #TODO take care of the valid date of the components
                    if bom.type == 'phantom':
                        # Kit: record each component's required qty per kit.
                        child_product_res = {}
                        for line in bom.bom_lines:
                            child_product_res[line.product_id.id] = {'product_qty': line.product_qty or 0.0}
                        # Standard stock figures for all components, one call.
                        child_product_qtys = self._product_available(cr, uid, child_product_res.keys(), field_map.values(), context=context)
                        # A kit's qty is limited by its scarcest component;
                        # the forecast is further reduced by the qty already
                        # promised in draft quotations.
                        res[product_record.id] = {
                            'kits_qty_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'qty_available'),
                            'kits_incoming_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'incoming_qty'),
                            'kits_virtual_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'virtual_available') - so_qty,
                            'kits_outgoing_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'outgoing_qty'),
                            'kits_sale_quotation_qty': so_qty
                        }
                    else:
                        # Non-phantom BoM: fall back to the standard figures.
                        raw_res = self._product_available(cr, uid, ids, field_map.values(), arg, context)
                        for key, val in field_map.items():
                            res[product_record.id][key] = raw_res[product_record.id].get(val)
                    # Only the first BoM is considered.
                    break
        return res

    def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):
        '''get all qty of the product in all sale quotations (draft, sent)'''
        sol_obj = self.pool.get('sale.order.line')
        # States 'draft'/False/None = quotations not confirmed yet.
        domain = [('state', 'in', ('draft', False, None)), ('product_id', '=', product_id)]
        #TODO take care of the uom.
        sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty', 'product_id'], groupby=['product_id'])
        return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0

    def _get_qty_from_children(self, child_product_qtys, child_product_res, field_name):
        '''Return how many kits the components allow for *field_name*.

        Each component's available qty is divided by the qty required per
        kit; the minimum over all components is the number of buildable
        kits.
        '''
        def qty_div(product_total_qty, component_qty):
            # Both arguments are (product_id, values-dict) pairs.
            return product_total_qty[1].get(field_name) / component_qty[1].get('product_qty')
        # NOTE(review): this pairs the two dicts positionally via
        # iteritems(), relying on both iterating in the same key order --
        # looks fragile; verify the dicts share insertion/key order.
        return min(map(qty_div, child_product_qtys.iteritems(), child_product_res.iteritems()))

    def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):
        '''see if this product is Kit or not'''
        # A product is a kit when at least one of its BoMs is 'phantom'.
        res = {}
        for product_record in self.browse(cr, uid, ids, context=context):
            res[product_record.id] = False
            if product_record.bom_ids:
                for bom in product_record.bom_ids:
                    if bom.type == 'phantom':
                        res[product_record.id] = True
        return res

    def _get_product_from_bom(self, cr, uid, ids, context=None):
        '''Store-trigger helper: map changed mrp.bom ids to product ids.

        Used by the ``store`` trigger of ``is_kit`` so the flag is
        recomputed when a BoM's ``type`` changes.
        '''
        res = {}
        bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context)
        for bom in bom_ids:
            res[bom.product_id.id] = True
        return res.keys()

    # Function fields: is_kit is stored and re-triggered on BoM type
    # changes; the quantity fields are computed together (multi) by
    # _kits_product_available.
    _columns = {
        'is_kit': fields.function(
            _is_kit,
            readonly=True,
            type='boolean',
            string='Is Kit',
            store={
                'mrp.bom': (_get_product_from_bom, ['type'], 10)
            }),
        'kits_qty_available': fields.function(
            _kits_product_available,
            multi='kits_qty_available',
            type='float',
            digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Quantity On Hand (Kits)',
            help=""),
        'kits_incoming_qty': fields.function(
            _kits_product_available,
            multi='kits_qty_available',
            type='float',
            digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Incoming (Kits)',
            help=""),
        'kits_outgoing_qty': fields.function(
            _kits_product_available,
            multi='kits_qty_available',
            type='float',
            digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Outgoing (Kits)',
            help=""),
        'kits_sale_quotation_qty': fields.function(
            _kits_product_available,
            multi='kits_qty_available',
            type='float',
            digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Sales Quotation Allocated',
            help=""),
        'kits_virtual_available': fields.function(
            _kits_product_available,
            multi='kits_qty_available',
            type='float',
            digits_compute=dp.get_precision('Product Unit of Measure'),
            string='Forecasted Quantity (Kits)',
            help=""),
    }
|
normal
|
{
"blob_id": "19d86c64876575ed9b3f5e33dd44e7633c96e696",
"index": 2401,
"step-1": "<mask token>\n\n\nclass product_product(orm.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):\n \"\"\"get all qty of the product in all sale quotations (draft, sent)\"\"\"\n sol_obj = self.pool.get('sale.order.line')\n domain = [('state', 'in', ('draft', False, None)), ('product_id',\n '=', product_id)]\n sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',\n 'product_id'], groupby=['product_id'])\n return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass product_product(orm.Model):\n <mask token>\n\n def get_kits_product_available(self, cr, uid, ids, context=None):\n pass\n\n def _kits_product_available(self, cr, uid, ids, field_names=None, arg=\n False, context=None):\n res = {}\n field_names = field_names or []\n context = context or {}\n for id in ids:\n res[id] = {}.fromkeys(field_names, 0.0)\n field_map = {'kits_qty_available': 'qty_available',\n 'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':\n 'outgoing_qty', 'kits_virtual_available': 'virtual_available'}\n for product_record in self.browse(cr, uid, ids, context=context):\n so_qty = self._get_sale_quotation_qty(cr, uid, product_record.\n id, context=context)\n if not self._is_kit(cr, uid, [product_record.id], context=context\n ).get(product_record.id):\n res[product_record.id] = {'kits_qty_available': 0,\n 'kits_incoming_qty': 0, 'kits_virtual_available': 0,\n 'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}\n else:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n child_product_res = {}\n for line in bom.bom_lines:\n child_product_res[line.product_id.id] = {\n 'product_qty': line.product_qty or 0.0}\n child_product_qtys = self._product_available(cr,\n uid, child_product_res.keys(), field_map.values\n (), context=context)\n res[product_record.id] = {'kits_qty_available':\n self._get_qty_from_children(child_product_qtys,\n child_product_res, 'qty_available'),\n 'kits_incoming_qty': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'incoming_qty'),\n 'kits_virtual_available': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'virtual_available') -\n so_qty, 'kits_outgoing_qty': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'outgoing_qty'),\n 'kits_sale_quotation_qty': so_qty}\n else:\n raw_res = self._product_available(cr, uid, ids,\n field_map.values(), arg, context)\n for key, val in field_map.items():\n 
res[product_record.id][key] = raw_res[\n product_record.id].get(val)\n break\n return res\n\n def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):\n \"\"\"get all qty of the product in all sale quotations (draft, sent)\"\"\"\n sol_obj = self.pool.get('sale.order.line')\n domain = [('state', 'in', ('draft', False, None)), ('product_id',\n '=', product_id)]\n sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',\n 'product_id'], groupby=['product_id'])\n return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0\n <mask token>\n\n def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):\n \"\"\"see if this product is Kit or not\"\"\"\n res = {}\n for product_record in self.browse(cr, uid, ids, context=context):\n res[product_record.id] = False\n if product_record.bom_ids:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n res[product_record.id] = True\n return res\n\n def _get_product_from_bom(self, cr, uid, ids, context=None):\n res = {}\n bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context\n )\n for bom in bom_ids:\n res[bom.product_id.id] = True\n return res.keys()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass product_product(orm.Model):\n _inherit = 'product.product'\n\n def get_kits_product_available(self, cr, uid, ids, context=None):\n pass\n\n def _kits_product_available(self, cr, uid, ids, field_names=None, arg=\n False, context=None):\n res = {}\n field_names = field_names or []\n context = context or {}\n for id in ids:\n res[id] = {}.fromkeys(field_names, 0.0)\n field_map = {'kits_qty_available': 'qty_available',\n 'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':\n 'outgoing_qty', 'kits_virtual_available': 'virtual_available'}\n for product_record in self.browse(cr, uid, ids, context=context):\n so_qty = self._get_sale_quotation_qty(cr, uid, product_record.\n id, context=context)\n if not self._is_kit(cr, uid, [product_record.id], context=context\n ).get(product_record.id):\n res[product_record.id] = {'kits_qty_available': 0,\n 'kits_incoming_qty': 0, 'kits_virtual_available': 0,\n 'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}\n else:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n child_product_res = {}\n for line in bom.bom_lines:\n child_product_res[line.product_id.id] = {\n 'product_qty': line.product_qty or 0.0}\n child_product_qtys = self._product_available(cr,\n uid, child_product_res.keys(), field_map.values\n (), context=context)\n res[product_record.id] = {'kits_qty_available':\n self._get_qty_from_children(child_product_qtys,\n child_product_res, 'qty_available'),\n 'kits_incoming_qty': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'incoming_qty'),\n 'kits_virtual_available': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'virtual_available') -\n so_qty, 'kits_outgoing_qty': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'outgoing_qty'),\n 'kits_sale_quotation_qty': so_qty}\n else:\n raw_res = self._product_available(cr, uid, ids,\n field_map.values(), arg, context)\n for key, val in 
field_map.items():\n res[product_record.id][key] = raw_res[\n product_record.id].get(val)\n break\n return res\n\n def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):\n \"\"\"get all qty of the product in all sale quotations (draft, sent)\"\"\"\n sol_obj = self.pool.get('sale.order.line')\n domain = [('state', 'in', ('draft', False, None)), ('product_id',\n '=', product_id)]\n sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',\n 'product_id'], groupby=['product_id'])\n return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0\n\n def _get_qty_from_children(self, child_product_qtys, child_product_res,\n field_name):\n\n def qty_div(product_total_qty, component_qty):\n return product_total_qty[1].get(field_name) / component_qty[1].get(\n 'product_qty')\n return min(map(qty_div, child_product_qtys.iteritems(),\n child_product_res.iteritems()))\n\n def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):\n \"\"\"see if this product is Kit or not\"\"\"\n res = {}\n for product_record in self.browse(cr, uid, ids, context=context):\n res[product_record.id] = False\n if product_record.bom_ids:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n res[product_record.id] = True\n return res\n\n def _get_product_from_bom(self, cr, uid, ids, context=None):\n res = {}\n bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context\n )\n for bom in bom_ids:\n res[bom.product_id.id] = True\n return res.keys()\n _columns = {'is_kit': fields.function(_is_kit, readonly=True, type=\n 'boolean', string='Is Kit', store={'mrp.bom': (\n _get_product_from_bom, ['type'], 10)}), 'kits_qty_available':\n fields.function(_kits_product_available, multi='kits_qty_available',\n type='float', digits_compute=dp.get_precision(\n 'Product Unit of Measure'), string='Quantity On Hand (Kits)', help=\n ''), 'kits_incoming_qty': fields.function(_kits_product_available,\n multi='kits_qty_available', type='float', digits_compute=dp.\n 
get_precision('Product Unit of Measure'), string='Incoming (Kits)',\n help=''), 'kits_outgoing_qty': fields.function(\n _kits_product_available, multi='kits_qty_available', type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'), string=\n 'Outgoing (Kits)', help=''), 'kits_sale_quotation_qty': fields.\n function(_kits_product_available, multi='kits_qty_available', type=\n 'float', digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Sales Quotation Allocated', help=''),\n 'kits_virtual_available': fields.function(_kits_product_available,\n multi='kits_qty_available', type='float', digits_compute=dp.\n get_precision('Product Unit of Measure'), string=\n 'Forecasted Quantity (Kits)', help='')}\n",
"step-4": "from openerp.osv import orm, fields\nimport openerp.addons.decimal_precision as dp\n\n\nclass product_product(orm.Model):\n _inherit = 'product.product'\n\n def get_kits_product_available(self, cr, uid, ids, context=None):\n pass\n\n def _kits_product_available(self, cr, uid, ids, field_names=None, arg=\n False, context=None):\n res = {}\n field_names = field_names or []\n context = context or {}\n for id in ids:\n res[id] = {}.fromkeys(field_names, 0.0)\n field_map = {'kits_qty_available': 'qty_available',\n 'kits_incoming_qty': 'incoming_qty', 'kits_outgoing_qty':\n 'outgoing_qty', 'kits_virtual_available': 'virtual_available'}\n for product_record in self.browse(cr, uid, ids, context=context):\n so_qty = self._get_sale_quotation_qty(cr, uid, product_record.\n id, context=context)\n if not self._is_kit(cr, uid, [product_record.id], context=context\n ).get(product_record.id):\n res[product_record.id] = {'kits_qty_available': 0,\n 'kits_incoming_qty': 0, 'kits_virtual_available': 0,\n 'kits_outgoing_qty': 0, 'kits_sale_quotation_qty': so_qty}\n else:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n child_product_res = {}\n for line in bom.bom_lines:\n child_product_res[line.product_id.id] = {\n 'product_qty': line.product_qty or 0.0}\n child_product_qtys = self._product_available(cr,\n uid, child_product_res.keys(), field_map.values\n (), context=context)\n res[product_record.id] = {'kits_qty_available':\n self._get_qty_from_children(child_product_qtys,\n child_product_res, 'qty_available'),\n 'kits_incoming_qty': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'incoming_qty'),\n 'kits_virtual_available': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'virtual_available') -\n so_qty, 'kits_outgoing_qty': self.\n _get_qty_from_children(child_product_qtys,\n child_product_res, 'outgoing_qty'),\n 'kits_sale_quotation_qty': so_qty}\n else:\n raw_res = self._product_available(cr, uid, ids,\n 
field_map.values(), arg, context)\n for key, val in field_map.items():\n res[product_record.id][key] = raw_res[\n product_record.id].get(val)\n break\n return res\n\n def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):\n \"\"\"get all qty of the product in all sale quotations (draft, sent)\"\"\"\n sol_obj = self.pool.get('sale.order.line')\n domain = [('state', 'in', ('draft', False, None)), ('product_id',\n '=', product_id)]\n sol_ids = sol_obj.read_group(cr, uid, domain, ['product_uom_qty',\n 'product_id'], groupby=['product_id'])\n return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0\n\n def _get_qty_from_children(self, child_product_qtys, child_product_res,\n field_name):\n\n def qty_div(product_total_qty, component_qty):\n return product_total_qty[1].get(field_name) / component_qty[1].get(\n 'product_qty')\n return min(map(qty_div, child_product_qtys.iteritems(),\n child_product_res.iteritems()))\n\n def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):\n \"\"\"see if this product is Kit or not\"\"\"\n res = {}\n for product_record in self.browse(cr, uid, ids, context=context):\n res[product_record.id] = False\n if product_record.bom_ids:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n res[product_record.id] = True\n return res\n\n def _get_product_from_bom(self, cr, uid, ids, context=None):\n res = {}\n bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context\n )\n for bom in bom_ids:\n res[bom.product_id.id] = True\n return res.keys()\n _columns = {'is_kit': fields.function(_is_kit, readonly=True, type=\n 'boolean', string='Is Kit', store={'mrp.bom': (\n _get_product_from_bom, ['type'], 10)}), 'kits_qty_available':\n fields.function(_kits_product_available, multi='kits_qty_available',\n type='float', digits_compute=dp.get_precision(\n 'Product Unit of Measure'), string='Quantity On Hand (Kits)', help=\n ''), 'kits_incoming_qty': fields.function(_kits_product_available,\n 
multi='kits_qty_available', type='float', digits_compute=dp.\n get_precision('Product Unit of Measure'), string='Incoming (Kits)',\n help=''), 'kits_outgoing_qty': fields.function(\n _kits_product_available, multi='kits_qty_available', type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'), string=\n 'Outgoing (Kits)', help=''), 'kits_sale_quotation_qty': fields.\n function(_kits_product_available, multi='kits_qty_available', type=\n 'float', digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Sales Quotation Allocated', help=''),\n 'kits_virtual_available': fields.function(_kits_product_available,\n multi='kits_qty_available', type='float', digits_compute=dp.\n get_precision('Product Unit of Measure'), string=\n 'Forecasted Quantity (Kits)', help='')}\n",
"step-5": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (c) 2010-2014 Elico Corp. All Rights Reserved.\n# Alex Duan <alex.duan@elico-corp.com>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\nfrom openerp.osv import orm, fields\nimport openerp.addons.decimal_precision as dp\n\n\nclass product_product(orm.Model):\n _inherit = 'product.product'\n\n def get_kits_product_available(self, cr, uid, ids, context=None):\n pass\n\n def _kits_product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):\n res = {}\n field_names = field_names or []\n context = context or {}\n for id in ids:\n res[id] = {}.fromkeys(field_names, 0.0)\n field_map = {\n 'kits_qty_available': 'qty_available',\n 'kits_incoming_qty': 'incoming_qty',\n 'kits_outgoing_qty': 'outgoing_qty',\n 'kits_virtual_available': 'virtual_available'\n }\n for product_record in self.browse(cr, uid, ids, context=context):\n #check if is a kit product.\n so_qty = self._get_sale_quotation_qty(cr, uid, product_record.id, context=context)\n if not self._is_kit(\n cr, uid,\n [product_record.id],\n context=context).get(product_record.id):\n\n res[product_record.id] = {\n 'kits_qty_available': 0,\n 
'kits_incoming_qty': 0,\n 'kits_virtual_available': 0,\n 'kits_outgoing_qty': 0,\n 'kits_sale_quotation_qty': so_qty\n }\n #product with no bom\n # if not product_record.bom_ids:\n # raw_res = self._product_available(cr, uid, [product_record.id], field_map.values(), arg, context)\n # for key, val in field_map.items():\n # res[product_record.id][key] = raw_res[product_record.id].get(val)\n\n #TODO how to deal with multi-bom products.\n #now get always get the first bom.\n #product with bom\n else:\n for bom in product_record.bom_ids:\n #bom type is phantom\n #TODO take care of the valid date of the components\n if bom.type == 'phantom':\n child_product_res = {}\n for line in bom.bom_lines:\n child_product_res[line.product_id.id] = {'product_qty': line.product_qty or 0.0}\n child_product_qtys = self._product_available(cr, uid, child_product_res.keys(), field_map.values(), context=context)\n res[product_record.id] = {\n 'kits_qty_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'qty_available'),\n 'kits_incoming_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'incoming_qty'),\n 'kits_virtual_available': self._get_qty_from_children(child_product_qtys, child_product_res, 'virtual_available') - so_qty,\n 'kits_outgoing_qty': self._get_qty_from_children(child_product_qtys, child_product_res, 'outgoing_qty'),\n 'kits_sale_quotation_qty': so_qty\n }\n\n else:\n raw_res = self._product_available(cr, uid, ids, field_map.values(), arg, context)\n for key, val in field_map.items():\n res[product_record.id][key] = raw_res[product_record.id].get(val)\n\n #only get the first bom.\n break\n return res\n\n def _get_sale_quotation_qty(self, cr, uid, product_id, context=None):\n '''get all qty of the product in all sale quotations (draft, sent)'''\n sol_obj = self.pool.get('sale.order.line')\n domain = [('state', 'in', ('draft', False, None)), ('product_id', '=', product_id)]\n #TODO take care of the uom.\n sol_ids = 
sol_obj.read_group(cr, uid, domain, ['product_uom_qty', 'product_id'], groupby=['product_id'])\n return sol_ids and sol_ids[0].get('product_uom_qty') or 0.0\n\n def _get_qty_from_children(self, child_product_qtys, child_product_res, field_name):\n def qty_div(product_total_qty, component_qty):\n return product_total_qty[1].get(field_name) / component_qty[1].get('product_qty')\n # import pdb\n # pdb.set_trace()\n return min(map(qty_div, child_product_qtys.iteritems(), child_product_res.iteritems()))\n\n def _is_kit(self, cr, uid, ids, fields=None, args=False, context=None):\n '''see if this product is Kit or not'''\n res = {}\n for product_record in self.browse(cr, uid, ids, context=context):\n res[product_record.id] = False\n if product_record.bom_ids:\n for bom in product_record.bom_ids:\n if bom.type == 'phantom':\n res[product_record.id] = True\n return res\n\n def _get_product_from_bom(self, cr, uid, ids, context=None):\n res = {}\n bom_ids = self.pool.get('mrp.bom').browse(cr, uid, ids, context=context)\n for bom in bom_ids:\n res[bom.product_id.id] = True\n return res.keys()\n\n _columns = {\n 'is_kit': fields.function(\n _is_kit,\n readonly=True,\n type='boolean',\n string='Is Kit',\n store={\n 'mrp.bom': (_get_product_from_bom, ['type'], 10)\n }),\n 'kits_qty_available': fields.function(\n _kits_product_available,\n multi='kits_qty_available',\n type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Quantity On Hand (Kits)',\n help=\"\"),\n 'kits_incoming_qty': fields.function(\n _kits_product_available,\n multi='kits_qty_available',\n type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Incoming (Kits)',\n help=\"\"),\n 'kits_outgoing_qty': fields.function(\n _kits_product_available,\n multi='kits_qty_available',\n type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Outgoing (Kits)',\n help=\"\"),\n 'kits_sale_quotation_qty': fields.function(\n 
_kits_product_available,\n multi='kits_qty_available',\n type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Sales Quotation Allocated',\n help=\"\"),\n 'kits_virtual_available': fields.function(\n _kits_product_available,\n multi='kits_qty_available',\n type='float',\n digits_compute=dp.get_precision('Product Unit of Measure'),\n string='Forecasted Quantity (Kits)',\n help=\"\"),\n }\n",
"step-ids": [
2,
6,
8,
9,
10
]
}
|
[
2,
6,
8,
9,
10
] |
from sqlitedict import SqliteDict
import sys
import socket
import urllib
import argparse
import zlib, pickle, sqlite3
import random
from datetime import datetime
import time
from urllib.parse import urlparse
import hashlib
import subprocess
import requests
from multiprocessing import Pool
def gz_encode(obj):
    """Serialize *obj* into a zlib-compressed sqlite3 BLOB value."""
    pickled = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    compressed = zlib.compress(pickled)
    return sqlite3.Binary(compressed)
def gz_decode(obj):
    """Inverse of gz_encode: decompress a BLOB and unpickle the payload."""
    raw = zlib.decompress(bytes(obj))
    return pickle.loads(raw)
if __name__=="__main__":
    # Inspect the crawler's on-disk stores: print a preview of every
    # downloaded page and every remembered 404/failure status.
    parser = argparse.ArgumentParser()
    # NOTE(review): --dnscache is parsed but never used below -- presumably
    # kept for CLI compatibility with the crawler; confirm before removing.
    parser.add_argument('--dnscache', default="dnscache.sqld", help='IP address cache default: %(default)s')
    parser.add_argument('--download', default="pages.sqld", help='Here is where the downloaded pages go: %(default)s')
    parser.add_argument('--r404', default="404.sqld", help='Here is where we remember pages that gave 404 etc: %(default)s')
    args = parser.parse_args()
    # Downloaded pages: values are stored zlib+pickle compressed, so the
    # store needs the matching encode/decode pair to read them back.
    result_store = SqliteDict(args.download, encode=gz_encode, decode=gz_decode, autocommit=True)
    for url,cont in result_store.items():
        # show only the first 30 characters of each page as a preview
        print(url,cont[:30])
    # Failure store: plain (uncompressed) url -> status mapping.
    r404 = SqliteDict(args.r404, autocommit=True)
    for url,status in r404.items():
        print(url,status)
|
normal
|
{
"blob_id": "295d6a66335491b406f47212064da9fd5fca6eb6",
"index": 6812,
"step-1": "<mask token>\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.\n HIGHEST_PROTOCOL)))\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.\n HIGHEST_PROTOCOL)))\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dnscache', default='dnscache.sqld', help=\n 'IP address cache default: %(default)s')\n parser.add_argument('--download', default='pages.sqld', help=\n 'Here is where the downloaded pages go: %(default)s')\n parser.add_argument('--r404', default='404.sqld', help=\n 'Here is where we remember pages that gave 404 etc: %(default)s')\n args = parser.parse_args()\n result_store = SqliteDict(args.download, encode=gz_encode, decode=\n gz_decode, autocommit=True)\n for url, cont in result_store.items():\n print(url, cont[:30])\n r404 = SqliteDict(args.r404, autocommit=True)\n for url, status in r404.items():\n print(url, status)\n",
"step-4": "from sqlitedict import SqliteDict\nimport sys\nimport socket\nimport urllib\nimport argparse\nimport zlib, pickle, sqlite3\nimport random\nfrom datetime import datetime\nimport time\nfrom urllib.parse import urlparse\nimport hashlib\nimport subprocess\nimport requests\nfrom multiprocessing import Pool\n\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.\n HIGHEST_PROTOCOL)))\n\n\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dnscache', default='dnscache.sqld', help=\n 'IP address cache default: %(default)s')\n parser.add_argument('--download', default='pages.sqld', help=\n 'Here is where the downloaded pages go: %(default)s')\n parser.add_argument('--r404', default='404.sqld', help=\n 'Here is where we remember pages that gave 404 etc: %(default)s')\n args = parser.parse_args()\n result_store = SqliteDict(args.download, encode=gz_encode, decode=\n gz_decode, autocommit=True)\n for url, cont in result_store.items():\n print(url, cont[:30])\n r404 = SqliteDict(args.r404, autocommit=True)\n for url, status in r404.items():\n print(url, status)\n",
"step-5": "from sqlitedict import SqliteDict\nimport sys\nimport socket\nimport urllib\nimport argparse\nimport zlib, pickle, sqlite3\nimport random\nfrom datetime import datetime\nimport time\nfrom urllib.parse import urlparse\nimport hashlib\nimport subprocess\nimport requests\nfrom multiprocessing import Pool\n\ndef gz_encode(obj):\n return sqlite3.Binary(zlib.compress(pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)))\ndef gz_decode(obj):\n return pickle.loads(zlib.decompress(bytes(obj)))\n\n\nif __name__==\"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--dnscache', default=\"dnscache.sqld\", help='IP address cache default: %(default)s')\n parser.add_argument('--download', default=\"pages.sqld\", help='Here is where the downloaded pages go: %(default)s')\n parser.add_argument('--r404', default=\"404.sqld\", help='Here is where we remember pages that gave 404 etc: %(default)s')\n args = parser.parse_args()\n\n #2) Results setup\n result_store = SqliteDict(args.download, encode=gz_encode, decode=gz_decode, autocommit=True)\n\n for url,cont in result_store.items():\n print(url,cont[:30])\n \n #3) 404 setup\n r404 = SqliteDict(args.r404, autocommit=True)\n for url,status in r404.items():\n print(url,status)\n \n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
    """Return the taxids of all sister taxa of the taxon named *string*.

    Resolves *string* to a taxid, walks up to its parent node in
    *nodes_file*, lists every child of that parent and removes the query
    taxon itself, leaving only the sisters.  Returns [] when any lookup
    step fails.
    """
    print(string)
    interest_taxid = Str_To_Taxid(string, names_file)
    print(interest_taxid)
    if interest_taxid == 'NA':
        # name lookup failed -- there is no node to find sisters for
        return []
    up_taxid = Return_Parent(interest_taxid, nodes_file)
    if up_taxid == 'NA':
        # no parent found (root or lookup failure) -> no sisters
        return []
    up_taxid = up_taxid.strip()
    interest_taxid = interest_taxid.strip()
    # (taxid, rank) tuples for every child of the parent, query included
    sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)
    sister_and_self = [tup[0] for tup in sis_self_tuples]
    print(sister_and_self)
    print(interest_taxid)
    # BUG FIX: the original unconditional remove() raised ValueError when
    # the query taxid was missing from the child list (e.g. whitespace
    # mismatch between the dump files); guard the removal instead.
    if interest_taxid in sister_and_self:
        sister_and_self.remove(interest_taxid)
    sisterlist = sister_and_self
    print(sisterlist)
    return sisterlist
<|reserved_special_token_0|>
def write_spc_list(spc_list, spcname):
    """Write one species name per line to *spcname* and return the path.

    Names containing underscores are truncated to their first two
    underscore-separated tokens (genus_species form).
    """
    lines = []
    for name in spc_list:
        if '_' in name:
            genus, species = name.split('_')[:2]
            name = genus + '_' + species
        lines.append(name + '\n')
    with open(spcname, 'w') as out:
        out.writelines(lines)
    return spcname
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Str_To_Taxid(string, names_file):
    """Look up the NCBI taxid for the taxon name *string* in a names.dmp file.

    Underscores in *string* are treated as spaces (the dump uses spaces).
    Returns the taxid as a string (possibly with the line's trailing
    newline still attached -- callers strip it), or 'NA' when not found.
    """
    found = False
    string = string.replace('_', ' ')
    with open(names_file) as names:
        for line in names:
            # cheap substring pre-filter before running the regex
            if '\t' + string + '\t' in line:
                # BUG FIX: escape the name so regex metacharacters inside
                # it (e.g. parentheses in strain names) cannot break or
                # corrupt the match.
                taxid_int = re.sub('(\\d*)(\t\\|\t)(' + re.escape(string) +
                    ')(\t)(.*)', '\\1', line)
                found = True
                break
    if found is False:
        print('Error finding string: ' + string + ' in file: ' + names_file)
        taxid_int = 'NA'
    return taxid_int
<|reserved_special_token_0|>
def Get_Taxid_Rank(taxid, nodes_file):
    """Return the rank ('species', 'genus', ...) of *taxid* from nodes.dmp.

    Returns 'NA' when the taxid has no matching line in the file.
    """
    taxid = taxid.strip()
    # match only lines whose first tab-delimited field is exactly this id
    len_tax_t = len(taxid) + 1
    with open(nodes_file) as nodes:
        for line in nodes:
            if line[:len_tax_t] == taxid + '\t':
                apparent_rank = re.sub('(' + taxid +
                    ')(\t\\|\t)(\\d*)(\t\\|\t)([a-z]*)(.*)', '\\5', line)
                apparent_rank = apparent_rank.strip()
                if '\t' in apparent_rank:
                    # regex did not collapse the line cleanly; keep scanning
                    pass
                else:
                    return apparent_rank
    return 'NA'
<|reserved_special_token_0|>
def Return_Parent(taxid, nodes_file):
    """Return the parent taxid of *taxid* according to nodes.dmp, or 'NA'."""
    target = taxid.strip()
    prefix = target + '\t'
    plen = len(prefix)
    with open(nodes_file) as nodes:
        for record in nodes:
            # only consider lines whose first field is exactly this taxid
            if record[:plen] != prefix:
                continue
            parent_taxid = re.sub('(' + target +
                ')(\t\\|\t)(\\d*)(\t\\|\t)([a-z]*)(.*)', '\\3', record)
            if '\t' not in parent_taxid:
                return parent_taxid
    print('error finding parent taxa')
    return 'NA'
def Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,
    names_file, acc_list):
    """Breadth-first search below *taxid* for a usable species name.

    Explores the child subtrees of *taxid* one top-level branch at a
    time; the first species whose name starts with an uppercase letter
    and appears on *acc_list* (i.e. has sequence data) is returned.
    Returns 'NA' when no branch yields a valid species.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    # (taxid, rank) tuples for the direct children of the query node
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    done = False
    # remember the top-level branches so they can be explored one by one
    saved_top_level = []
    for itema in child_list_atup:
        saved_top_level.append(itema)
    maxi = len(saved_top_level)
    # NOTE(review): raises IndexError when *taxid* has no children at all
    atup = saved_top_level[0]
    saved_top_level.remove(atup)
    child_list_atup = [atup]
    for item in child_list_atup:
        child_list_a.append(item[0])
    # i counts how many top-level branches have been started
    i = 1
    while done is False:
        # check every node at the current frontier
        for item in child_list_atup:
            if item[1] == 'species':
                children.append(item[0])
                sis_spec_name = Taxid_To_Name(item[0], names_file)
                # lowercase first letter = informal/unclassified name; skip
                if sis_spec_name[0].islower() is False:
                    in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,
                        acc_list)
                    if in_blast is True:
                        return sis_spec_name
                list_ch_remove.append(item)
        # drop the species nodes that were just consumed from the frontier
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        if child_list_a == []:
            if i == maxi:
                # last top-level branch exhausted with no hit
                return 'NA'
            done = True
        else:
            # NOTE(review): this branch switches to the NEXT top-level
            # branch while the current frontier is non-empty, apparently
            # abandoning its unexplored descendants -- confirm intent.
            i += 1
            list_ch_remove = []
            atup = saved_top_level[0]
            saved_top_level.remove(atup)
            child_list_atup = [atup]
            for item in child_list_atup:
                child_list_a.append(item[0])
            continue
        # expand the surviving frontier nodes one level deeper
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    return 'NA'
<|reserved_special_token_0|>
def Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):
    """Return the taxids of every species-rank descendant of *taxid*.

    Level-by-level breadth-first walk of the subtree rooted at *taxid*:
    species nodes are harvested from the frontier, all remaining nodes
    are expanded to their children, and the walk stops when the frontier
    is empty.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    # (taxid, rank) tuples for the direct children of the query node
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    for item in child_list_atup:
        child_list_a.append(item[0])
    done = False
    while done is False:
        # harvest species from the current frontier
        for item in child_list_atup:
            if item[1] == 'species':
                children.append(item[0])
                list_ch_remove.append(item)
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        if child_list_a == []:
            # nothing left to expand; loop body below is then a no-op
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # expand the remaining (non-species) frontier one level deeper
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    return children
<|reserved_special_token_0|>
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
print(string)
interest_taxid = Str_To_Taxid(string, names_file)
print(interest_taxid)
up_taxid = Return_Parent(interest_taxid, nodes_file)
up_taxid = up_taxid.strip()
interest_taxid = interest_taxid.strip()
sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)
sister_and_self = []
for tup in sis_self_tuples:
sister_and_self.append(tup[0])
print(sister_and_self)
print(interest_taxid)
sister_and_self.remove(interest_taxid)
sisterlist = sister_and_self
print(sisterlist)
return sisterlist
<|reserved_special_token_0|>
def Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):
    """Pick one outgroup species name for the clade named *string*.

    Walks outward through the taxonomy: first the sisters of *string*
    itself, then the sisters of successively higher ancestors, until one
    sister subtree contains a species that is on *acc_list* (i.e. has
    usable sequence data).  Returns that species name, or 'NA' when the
    walk reaches the root without success.

    NOTE(review): *species_list* is currently unused; kept for interface
    compatibility with existing callers.
    """
    print('one og sequence choser initiating')
    if '_' in string:
        string = string.replace('_', ' ')
    sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)
    print('Sisterlist')
    print(sislist)
    my_taxid = Str_To_Taxid(string, names_file)
    # climb until we reach a level that actually has sister taxa
    go = sislist == []
    while go is True:
        parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
        if parent_of_me_taxid == 'NA':
            # cannot climb any further; give up cleanly
            return 'NA'
        parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
        sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
        my_taxid = parent_of_me_taxid
        if sislist != []:
            go = False
    # BUG FIX: 'test' could be referenced before assignment when the first
    # sister list turned out empty; initialise it explicitly.
    test = 'NA'
    for item in sislist:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list
            )
        if test != 'NA':
            print(test)
            return test
    while test == 'NA':
        go = True
        # BUG FIX: the original compared the taxid STRING to the int 1,
        # which is never true, so the root was never detected.
        if str(my_taxid).strip() == '1':
            break
        while go is True:
            parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
            if parent_of_me_taxid == 'NA':
                return 'NA'
            parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
            sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file
                )
            my_taxid = parent_of_me_taxid
            if sislist != []:
                go = False
        for item in sislist:
            test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,
                acc_list)
            # BUG FIX: this condition was inverted in the original -- it
            # returned 'NA' as soon as one sister subtree lacked a valid
            # species and silently discarded valid hits.  Return the first
            # valid species instead, matching the first search loop above.
            if test != 'NA':
                return test
    return test
<|reserved_special_token_0|>
def Check_Spec_Name_Acceptable_List(ssp_name, acc_list):
    """Return True when *ssp_name* is, or is contained in, an *acc_list* entry.

    An exact match is silent; a substring-only hit is accepted but logged,
    since it usually indicates a name-normalisation mismatch upstream.
    """
    if ssp_name in acc_list:
        return True
    for entry in acc_list:
        if ssp_name in entry:
            # BUG FIX: the original printed the generator-expression-local
            # variable 'item' here, which is out of scope at this point and
            # raised NameError on every substring match.
            print('Err in match spec name - gen list: ' + ssp_name + ' ' +
                entry)
            return True
    return False
<|reserved_special_token_0|>
def gen_acceptable_species_list(list_raw_gene_fastas, acc_name):
    """Build the 'acceptable species' list from a set of raw gene fastas.

    A species is acceptable when it occurs in more than half of the input
    fasta objects.  The surviving names (in first-seen order) are written
    to *acc_name* via write_acc_list; returns that path.

    Each element of *list_raw_gene_fastas* must provide
    gen_species_lists() and a .species_names list (project type).
    """
    # first-seen-ordered occurrence counts; a dict lookup replaces the
    # original O(n) list.index scan per species (accidental O(n^2))
    counts = {}
    for raw in list_raw_gene_fastas:
        raw.gen_species_lists()
        raw_sl = raw.species_names
        if raw_sl:
            # preserve the original debug print of the first name
            print(raw_sl[0])
        for rawsp in raw_sl:
            counts[rawsp] = counts.get(rawsp, 0) + 1
    # must appear in MORE than half of the inputs to survive
    cutoff_num = len(list_raw_gene_fastas) / 2
    print(cutoff_num)
    names_list_acc = [name for name, n in counts.items() if n > cutoff_num]
    # same diagnostic as before: how many names were rejected
    print(len(counts) - len(names_list_acc))
    return write_acc_list(names_list_acc, acc_name)
def write_acc_list(acc_list, acc_name):
    """Write each accepted name on its own line to *acc_name*; return the path."""
    lines = [entry + '\n' for entry in acc_list]
    with open(acc_name, 'w') as handle:
        handle.writelines(lines)
    return acc_name
def write_spc_list(spc_list, spcname):
with open(spcname, 'w') as spc_list_file:
for item in spc_list:
if '_' in item:
dash_sep = item.split('_')
item = dash_sep[0] + '_' + dash_sep[1]
spc_list_file.write(item + '\n')
return spcname
def Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):
    """Ship files to the cluster, submit the SLURM script, and poll for results.

    Copies *all_files* into a remote ~/Taxonomy directory, sbatch-submits
    *script_name* there, then loops -- scp-ing expected result files back
    and sleeping 10 minutes between checks -- until every file in
    *all_result_files* exists locally.  Blocking; may run for hours.

    NOTE(review): relies on the module-level globals ``ssh_inst`` and
    ``clus_head`` (ssh command prefix and scp destination) -- confirm
    they are defined wherever this module is loaded.
    """
    os.system(ssh_inst + " 'mkdir Taxonomy'")
    sb_script = script_name
    print(all_files)
    # push every input file to the cluster working directory
    for item in all_files:
        os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')
    os.system(ssh_inst + " 'cd ~/Taxonomy; sbatch " + sb_script + "'")
    direct = os.getcwd()
    exists = False
    # movehome tracks result files not yet retrieved successfully
    movehome = []
    finished = 'start'
    for i in all_result_files:
        movehome.append(i)
    while finished is not True:
        # try to pull back whatever is still missing
        for filename in movehome:
            os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +
                direct)
        # a single missing file resets the whole check
        for item in all_result_files:
            exists = os.path.isfile(item)
            if exists is True:
                if item in movehome:
                    movehome.remove(item)
                finished = 'yes'
            else:
                finished = False
                # NOTE(review): message says 5 minutes but the sleep is 600s
                print('Tax not done yet. could not locate : ' + item +
                    'checking again in 5 minutes')
                break
        if finished == 'yes':
            print('Should be done!')
            finished = True
        else:
            time.sleep(600)
            finished = 'yes'
    print('Taxonomy parsing complete')
def Get_OG_LOSS_DATA(list_of_clades, projectname):
    """Drive outgroup/loss (OGL) taxonomy parsing for a batch of clades.

    Builds (or reuses) per-query 'acceptable species' lists, writes a
    per-clade species file, generates the SLURM index + array script,
    submits only clades whose result file is missing, then parses every
    clade's result file back onto the clade objects.

    Side effects: sets item.loss_species_list, item.root_species and
    item.species_list_plus_og_loss on each clade, and creates several
    files in the working directory.  Returns the list of expected
    result-file names for the newly submitted clades.
    """
    # collect the distinct query 'cat' fasta files and, for each, the raw
    # blast files needed to build its acceptable-species list
    list_catfiles = []
    list_of_lists_of_raw_blast_files = []
    for item in list_of_clades:
        catfile = item.cat_file
        list_of_raw_blast_files = item.blast_raw
        if catfile in list_catfiles:
            pass
        else:
            list_catfiles.append(catfile)
            list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)
    # cat file -> acceptable-species list file (built once, then cached)
    cat_acc_dict = {}
    for i in range(len(list_catfiles)):
        item = list_catfiles[i]
        list_raws = list_of_lists_of_raw_blast_files[i]
        # derive '<basename>_Acc_List.txt' from the cat file path
        gsflist = item.split('.')
        gsf_a = gsflist[0]
        gsf_b = gsf_a.split('/')[-1]
        acc_file = gsf_b + '_Acc_List.txt'
        acc_exists = os.path.isfile(acc_file)
        if acc_exists is True:
            pass
        else:
            print(
                '....initializing all_acceptables from gene_seq_query file: ' +
                gsf_b + '. this should only happen once...')
            acc_file = gen_acceptable_species_list(list_raws, acc_file)
        cat_acc_dict[item] = acc_file
    # write one species list file per clade (also sets item.result)
    list_of_species_files = Gen_Species_File(list_of_clades, projectname)
    # only clades with no existing result file need a cluster run
    list_to_tax_clades = []
    for item in list_of_clades:
        exists_result = os.path.isfile(item.result)
        if exists_result is False:
            list_to_tax_clades.append(item)
    corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,
        cat_acc_dict, projectname)
    n = len(list_to_tax_clades)
    script_name = projectname + '_OGLScript.sh'
    scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)
    # everything that must be copied to the cluster
    all_files = []
    for item in cat_acc_dict.values():
        all_files.append(item)
    for item in list_of_species_files:
        all_files.append(item)
    all_files.append(scriptfile)
    all_files.append(corr_file_name)
    # NOTE(review): 'is 0' relies on CPython small-int interning; '== 0'
    # is what is meant here.
    if len(results_list) is 0:
        pass
    else:
        Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)
    # parse every clade's result file: line 0 = '~'-joined loss species,
    # line 1 = root species, line 2 = timing info
    for item in list_of_clades:
        results_file = item.result
        loss_species = []
        print(item.string_name)
        with open(results_file) as res:
            a = 0
            for line in res:
                if a == 0:
                    loss_species = line.strip()
                    loss_species = loss_species.split('~')
                    print('loss candidates')
                    if '' in loss_species:
                        loss_species.remove('')
                    if '\n' in loss_species:
                        loss_species.remove('\n')
                    item.loss_species_list = loss_species
                    print(loss_species)
                if a == 1:
                    root_species = line.strip()
                    item.root_species = root_species
                    print('root: ' + root_species)
                if a == 2:
                    print('time:')
                    print(line)
                a += 1
        # NOTE(review): root_species is unbound if a result file has
        # fewer than two lines -- confirm the result format guarantees it
        item.species_list_plus_og_loss = []
        for thing in item.species_list_original:
            item.species_list_plus_og_loss.append(thing)
        if loss_species == []:
            pass
        else:
            for ls in loss_species:
                item.species_list_plus_og_loss.append(ls)
        if root_species == '':
            pass
        else:
            item.species_list_plus_og_loss.append(root_species)
    return results_list
<|reserved_special_token_0|>
def Generate_Script_File_OGLOSS(n, indexname, scriptname):
    """Write a SLURM array-job script that runs Online_Taxon_Parse.py.

    *n* is the number of array tasks (one per row of the index file
    *indexname*); each task greps its own row out of the index to
    recover its species file, clade name, acceptable-species list and
    result-file name.  Returns *scriptname* after writing it.
    """
    n = str(n)
    # The script text must stay byte-exact -- it is executed by bash/SLURM
    # on the cluster -- so do not reformat the embedded here-doc.
    a = """#!/bin/bash
#SBATCH -p sched_mit_g4nier
#SBATCH -t 2-00:00:00
#SBATCH -J Tax
#SBATCH --array=1-""" + n + """
. /etc/profile.d/modules.sh
module add engaging/openmpi/1.8.8
MY_ARRAY_ID=$SLURM_ARRAY_TASK_ID
THE_INDEX=""" + indexname + """
SPECIES_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $2}' )
STRING_NAME=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $3}' )
ACC_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $4}' )
RESULT=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $5}' )
echo $SPECIES_FILE
echo $STRING_NAME
echo $ACC_FILE
mpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT
exit"""
    with open(scriptname, 'w') as script:
        script.write(a)
    return scriptname
def Gen_Species_File(list_of_clades, projectname):
    """Write one '<prefix>_Species_List.txt' file per clade.

    Strips surrounding double quotes from each species name before
    writing via write_spc_list.  Also records on each clade object its
    species file name (item.species_file) and the expected OGL result
    file name (item.result).  Returns the list of species file names.

    NOTE(review): *projectname* is currently unused in this function.
    """
    list_sp_files = []
    for item in list_of_clades:
        species_list = item.species_list_original
        species_file_name = item.prefix + '_Species_List.txt'
        species_list2 = []
        for sl2 in species_list:
            # drop quoting artifacts from upstream parsing
            sl2 = sl2.strip('"')
            species_list2.append(sl2)
        spc_file = write_spc_list(species_list2, species_file_name)
        item.species_file = species_file_name
        list_sp_files.append(species_file_name)
        item.result = item.prefix + '_OGL_Result.txt'
    return list_sp_files
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Str_To_Taxid(string, names_file):
found = False
string = string.replace('_', ' ')
with open(names_file) as names:
for line in names:
if '\t' + string + '\t' in line:
taxid_int = re.sub('(\\d*)(\t\\|\t)(' + string +
')(\t)(.*)', '\\1', line)
found = True
break
if found is False:
print('Error finding string: ' + string + ' in file: ' + names_file)
taxid_int = 'NA'
return taxid_int
<|reserved_special_token_0|>
def Get_Taxid_Rank(taxid, nodes_file):
taxid = taxid.strip()
ranklist = []
len_tax = len(taxid)
len_tax_t = len_tax + 1
with open(nodes_file) as nodes:
for line in nodes:
if line[:len_tax_t] == taxid + '\t':
apparent_rank = re.sub('(' + taxid +
')(\t\\|\t)(\\d*)(\t\\|\t)([a-z]*)(.*)', '\\5', line)
apparent_rank = apparent_rank.strip()
if '\t' in apparent_rank:
pass
else:
return apparent_rank
return 'NA'
<|reserved_special_token_0|>
def Return_Parent(taxid, nodes_file):
len_tax = len(taxid.strip())
len_tax_t = len_tax + 1
with open(nodes_file) as nodes:
for line in nodes:
if line[:len_tax_t] == taxid.strip() + '\t':
parent_taxid = re.sub('(' + taxid.strip() +
')(\t\\|\t)(\\d*)(\t\\|\t)([a-z]*)(.*)', '\\3', line)
if '\t' in parent_taxid:
pass
else:
return parent_taxid
print('error finding parent taxa')
return 'NA'
def Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,
names_file, acc_list):
children = []
list_ch_remove = []
child_list_a = []
child_list_atup = Taxid_To_Children(taxid, nodes_file)
done = False
saved_top_level = []
for itema in child_list_atup:
saved_top_level.append(itema)
maxi = len(saved_top_level)
atup = saved_top_level[0]
saved_top_level.remove(atup)
child_list_atup = [atup]
for item in child_list_atup:
child_list_a.append(item[0])
i = 1
while done is False:
for item in child_list_atup:
if item[1] == 'species':
children.append(item[0])
sis_spec_name = Taxid_To_Name(item[0], names_file)
if sis_spec_name[0].islower() is False:
in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,
acc_list)
if in_blast is True:
return sis_spec_name
list_ch_remove.append(item)
for rem in list_ch_remove:
child_list_atup.remove(rem)
child_list_a.remove(rem[0])
if child_list_a == []:
if i == maxi:
return 'NA'
done = True
else:
i += 1
list_ch_remove = []
atup = saved_top_level[0]
saved_top_level.remove(atup)
child_list_atup = [atup]
for item in child_list_atup:
child_list_a.append(item[0])
continue
list_ch_remove = []
child_list_b = []
child_list_c = []
for parent in child_list_a:
child_list_btup = Taxid_To_Children(parent, nodes_file)
for item in child_list_btup:
child_list_b.append(item[0])
if child_list_btup == []:
pass
else:
for bitem in child_list_btup:
child_list_c.append(bitem)
child_list_atup = child_list_c
child_list_a = []
for itup in child_list_atup:
child_list_a.append(itup[0])
return 'NA'
<|reserved_special_token_0|>
def Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):
children = []
list_ch_remove = []
child_list_a = []
child_list_atup = Taxid_To_Children(taxid, nodes_file)
for item in child_list_atup:
child_list_a.append(item[0])
done = False
while done is False:
for item in child_list_atup:
if item[1] == 'species':
children.append(item[0])
list_ch_remove.append(item)
for rem in list_ch_remove:
child_list_atup.remove(rem)
child_list_a.remove(rem[0])
if child_list_a == []:
done = True
list_ch_remove = []
child_list_b = []
child_list_c = []
for parent in child_list_a:
child_list_btup = Taxid_To_Children(parent, nodes_file)
for item in child_list_btup:
child_list_b.append(item[0])
if child_list_btup == []:
pass
else:
for bitem in child_list_btup:
child_list_c.append(bitem)
child_list_atup = child_list_c
child_list_a = []
for itup in child_list_atup:
child_list_a.append(itup[0])
return children
def Ret_All_Groups_One_Rank_Below(taxid, nodes_file):
    """Return the taxids of every descendant exactly one major rank below *taxid*.

    E.g. for a family taxid, returns all genus-level descendants.
    Intermediate no-rank nodes are traversed; descendants that have
    already dropped BELOW the target rank are pruned.  Returns 'NA' when
    the target rank cannot be determined.
    """
    taxid = taxid.strip()
    print('looking for taxid:' + str(taxid))
    rank = Get_Taxid_Rank(taxid, nodes_file)
    print(rank)
    target_rank = One_Rank_Lower(rank)
    if target_rank == 'NA':
        return 'NA'
    removal_ranks = (
        'superkingdom kingdom phylum class order family genus species')
    # everything to the RIGHT of the target rank is too deep; nodes at
    # those ranks are pruned from the search frontier
    garbage, remove_string = removal_ranks.split(target_rank)
    remove_rank_list = remove_string.split()
    children = []
    list_ch_remove = []
    # frontier of (taxid, rank) tuples, starting with direct children
    child_list_a = Taxid_To_Children(taxid, nodes_file)
    done = False
    while done is False:
        for item in child_list_a:
            if item[1] == target_rank:
                # found a node at the wanted rank; harvest and stop descending
                children.append(item[0])
                list_ch_remove.append(item)
            if item[1] in remove_rank_list:
                # already below the target rank; prune
                list_ch_remove.append(item)
        for rem in list_ch_remove:
            child_list_a.remove(rem)
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # expand the surviving (unranked/intermediate) nodes one level
        for parent in child_list_a:
            child_list_b = Taxid_To_Children(parent[0], nodes_file)
            if child_list_b == []:
                pass
            else:
                for bitem in child_list_b:
                    child_list_c.append(bitem)
        child_list_a = child_list_c
    return children
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
print(string)
interest_taxid = Str_To_Taxid(string, names_file)
print(interest_taxid)
up_taxid = Return_Parent(interest_taxid, nodes_file)
up_taxid = up_taxid.strip()
interest_taxid = interest_taxid.strip()
sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)
sister_and_self = []
for tup in sis_self_tuples:
sister_and_self.append(tup[0])
print(sister_and_self)
print(interest_taxid)
sister_and_self.remove(interest_taxid)
sisterlist = sister_and_self
print(sisterlist)
return sisterlist
def Taxid_To_Name(taxid, names_file):
    """Return the scientific name recorded for *taxid* in names.dmp, or 'NA'."""
    wanted = taxid.strip()
    prefix = wanted + '\t'
    plen = len(prefix)
    with open(names_file) as handle:
        for record in handle:
            # consider only lines whose first field is exactly this taxid
            if record[:plen] != prefix:
                continue
            candidate = re.sub(
                '(\\d*)(\t\\|\t)([^\t]*)(\t\\|\t)(.*)(\t\\|\t)(scientific name)(.*)'
                , '\\3', record)
            # a leftover tab means the regex did not match this name class
            if '\t' not in candidate:
                return candidate.strip()
    print('Error finding name for: ' + wanted + ' in file: ' + names_file)
    return 'NA'
def Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):
print('one og sequence choser initiating')
if '_' in string:
string = string.replace('_', ' ')
sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)
print('Sisterlist')
print(sislist)
if sislist == []:
go = True
else:
go = False
my_taxid = Str_To_Taxid(string, names_file)
while go is True:
parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
my_taxid = parent_of_me_taxid
if sislist == []:
pass
else:
go = False
for item in sislist:
test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list
)
if test == 'NA':
pass
else:
print(test)
return test
while test == 'NA':
sislist = []
go = True
if my_taxid == 1:
break
while go is True:
parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file
)
my_taxid = parent_of_me_taxid
if sislist == []:
pass
else:
go = False
for item in sislist:
test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,
acc_list)
if test != 'NA':
pass
else:
return test
return test
def Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):
    """Return the members of *tid_list* already represented by *species_list*.

    For each species, walks up the taxonomy until an ancestor of rank
    *rank* is found (or the walk overshoots / fails, recorded as 'NA'),
    then returns the taxids in *tid_list* that appear among those
    ancestors.

    NOTE(review): reads the globals ``names_file`` and ``nodes_file``
    instead of taking them as parameters -- confirm they are defined at
    module level wherever this is called.
    """
    print('Checking for reps... target rank is: ' + rank)
    list_of_correct_rank = []
    found = []
    removal_ranks = (
        'superkingdom kingdom phylum class order family genus species')
    # ranks to the LEFT of *rank* are too high; reaching one means the
    # walk overshot and this species has no ancestor at the target rank
    remove_string, garbage = removal_ranks.split(rank)
    remove_rank_list = remove_string.split()
    for species in species_list:
        nid = Str_To_Taxid(species, names_file)
        go = True
        while go is True:
            rp = Return_Parent(nid, nodes_file)
            if rp == 'NA':
                list_of_correct_rank.append(rp)
                go = False
            # NOTE(review): rp is a string, so comparing to the int 1 can
            # never be true -- root detection is dead code ('1' intended?)
            if rp.strip() == 1:
                rp = 'NA'
                list_of_correct_rank.append(rp)
                go = False
            par_rank = Get_Taxid_Rank(rp, nodes_file)
            if par_rank == rank:
                rp = rp.strip()
                list_of_correct_rank.append(rp)
                go = False
            elif par_rank in remove_rank_list:
                rp = 'NA'
                list_of_correct_rank.append(rp)
                go = False
            else:
                nid = rp
    print(tid_list)
    print(list_of_correct_rank)
    for item in tid_list:
        if item in list_of_correct_rank:
            a = tid_list.index(item)
            found.append(tid_list[a])
    return found
<|reserved_special_token_0|>
def Check_Spec_Name_Acceptable_List(ssp_name, acc_list):
if ssp_name in acc_list:
return True
else:
result = next((True for item in acc_list if ssp_name in item), False)
if result is True:
print('Err in match spec name - gen list: ' + ssp_name + ' ' + item
)
return result
<|reserved_special_token_0|>
def gen_acceptable_species_list(list_raw_gene_fastas, acc_name):
names_list_acc = []
numbers_list_acc = []
for raw in list_raw_gene_fastas:
raw.gen_species_lists()
raw_sl = raw.species_names
print(raw_sl[0])
for rawsp in raw_sl:
if rawsp in names_list_acc:
ind = names_list_acc.index(rawsp)
numbers_list_acc[ind] = numbers_list_acc[ind] + 1
else:
names_list_acc.append(rawsp)
numbers_list_acc.append(1)
cutoff_num = len(list_raw_gene_fastas) / 2
print(cutoff_num)
list_of_rem = []
index = 0
for n in numbers_list_acc:
if n > cutoff_num:
pass
else:
list_of_rem.append(names_list_acc[index])
index += 1
print(len(list_of_rem))
list_of_rem.sort(reverse=True)
for remove_me in list_of_rem:
names_list_acc.remove(remove_me)
a = write_acc_list(names_list_acc, acc_name)
return a
def write_acc_list(acc_list, acc_name):
with open(acc_name, 'w') as acc_list_file:
for item in acc_list:
acc_list_file.write(item + '\n')
return acc_name
def write_spc_list(spc_list, spcname):
with open(spcname, 'w') as spc_list_file:
for item in spc_list:
if '_' in item:
dash_sep = item.split('_')
item = dash_sep[0] + '_' + dash_sep[1]
spc_list_file.write(item + '\n')
return spcname
def Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):
os.system(ssh_inst + " 'mkdir Taxonomy'")
sb_script = script_name
print(all_files)
for item in all_files:
os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')
os.system(ssh_inst + " 'cd ~/Taxonomy; sbatch " + sb_script + "'")
direct = os.getcwd()
exists = False
movehome = []
finished = 'start'
for i in all_result_files:
movehome.append(i)
while finished is not True:
for filename in movehome:
os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +
direct)
for item in all_result_files:
exists = os.path.isfile(item)
if exists is True:
if item in movehome:
movehome.remove(item)
finished = 'yes'
else:
finished = False
print('Tax not done yet. could not locate : ' + item +
'checking again in 5 minutes')
break
if finished == 'yes':
print('Should be done!')
finished = True
else:
time.sleep(600)
finished = 'yes'
print('Taxonomy parsing complete')
def Get_OG_LOSS_DATA(list_of_clades, projectname):
list_catfiles = []
list_of_lists_of_raw_blast_files = []
for item in list_of_clades:
catfile = item.cat_file
list_of_raw_blast_files = item.blast_raw
if catfile in list_catfiles:
pass
else:
list_catfiles.append(catfile)
list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)
cat_acc_dict = {}
for i in range(len(list_catfiles)):
item = list_catfiles[i]
list_raws = list_of_lists_of_raw_blast_files[i]
gsflist = item.split('.')
gsf_a = gsflist[0]
gsf_b = gsf_a.split('/')[-1]
acc_file = gsf_b + '_Acc_List.txt'
acc_exists = os.path.isfile(acc_file)
if acc_exists is True:
pass
else:
print(
'....initializing all_acceptables from gene_seq_query file: ' +
gsf_b + '. this should only happen once...')
acc_file = gen_acceptable_species_list(list_raws, acc_file)
cat_acc_dict[item] = acc_file
list_of_species_files = Gen_Species_File(list_of_clades, projectname)
list_to_tax_clades = []
for item in list_of_clades:
exists_result = os.path.isfile(item.result)
if exists_result is False:
list_to_tax_clades.append(item)
corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,
cat_acc_dict, projectname)
n = len(list_to_tax_clades)
script_name = projectname + '_OGLScript.sh'
scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)
all_files = []
for item in cat_acc_dict.values():
all_files.append(item)
for item in list_of_species_files:
all_files.append(item)
all_files.append(scriptfile)
all_files.append(corr_file_name)
if len(results_list) is 0:
pass
else:
Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)
for item in list_of_clades:
results_file = item.result
loss_species = []
print(item.string_name)
with open(results_file) as res:
a = 0
for line in res:
if a == 0:
loss_species = line.strip()
loss_species = loss_species.split('~')
print('loss candidates')
if '' in loss_species:
loss_species.remove('')
if '\n' in loss_species:
loss_species.remove('\n')
item.loss_species_list = loss_species
print(loss_species)
if a == 1:
root_species = line.strip()
item.root_species = root_species
print('root: ' + root_species)
if a == 2:
print('time:')
print(line)
a += 1
item.species_list_plus_og_loss = []
for thing in item.species_list_original:
item.species_list_plus_og_loss.append(thing)
if loss_species == []:
pass
else:
for ls in loss_species:
item.species_list_plus_og_loss.append(ls)
if root_species == '':
pass
else:
item.species_list_plus_og_loss.append(root_species)
return results_list
<|reserved_special_token_0|>
def Generate_Script_File_OGLOSS(n, indexname, scriptname):
n = str(n)
a = """#!/bin/bash
#SBATCH -p sched_mit_g4nier
#SBATCH -t 2-00:00:00
#SBATCH -J Tax
#SBATCH --array=1-""" + n + """
. /etc/profile.d/modules.sh
module add engaging/openmpi/1.8.8
MY_ARRAY_ID=$SLURM_ARRAY_TASK_ID
THE_INDEX=""" + indexname + """
SPECIES_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $2}' )
STRING_NAME=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $3}' )
ACC_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $4}' )
RESULT=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $5}' )
echo $SPECIES_FILE
echo $STRING_NAME
echo $ACC_FILE
mpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT
exit"""
with open(scriptname, 'w') as script:
script.write(a)
return scriptname
def Gen_Species_File(list_of_clades, projectname):
    """Write one species-list file per clade and return the filenames.

    For each clade the quoted species names are unquoted and written to
    '<prefix>_Species_List.txt' via write_spc_list().  As a side effect
    each clade's .species_file and .result attributes are set.
    *projectname* is accepted for interface compatibility but unused here.
    """
    written = []
    for clade in list_of_clades:
        out_name = clade.prefix + '_Species_List.txt'
        cleaned = [name.strip('"') for name in clade.species_list_original]
        write_spc_list(cleaned, out_name)
        clade.species_file = out_name
        clade.result = clade.prefix + '_OGL_Result.txt'
        written.append(out_name)
    return written
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Str_To_Taxid(string, names_file):
    """Look up the NCBI taxid for a taxon name in names.dmp.

    Underscores in *string* are treated as spaces.  Returns the taxid as
    a string (possibly with a trailing newline -- callers strip it), or
    'NA' after printing a warning when the name is absent.
    """
    query = string.replace('_', ' ')
    pattern = '(\\d*)(\t\\|\t)(' + query + ')(\t)(.*)'
    with open(names_file) as names:
        for line in names:
            # names.dmp fields are '\t|\t'-separated; the cheap substring
            # test filters lines before the regex runs.
            if '\t' + query + '\t' in line:
                return re.sub(pattern, '\\1', line)
    print('Error finding string: ' + query + ' in file: ' + names_file)
    return 'NA'
<|reserved_special_token_0|>
def Get_Taxid_Rank(taxid, nodes_file):
    """Return the rank (e.g. 'species', 'genus') recorded for *taxid* in
    nodes.dmp, or 'NA' when no matching row is found."""
    taxid = taxid.strip()
    prefix = taxid + '\t'
    rank_re = '(' + taxid + ')(\t\\|\t)(\\d*)(\t\\|\t)([a-z]*)(.*)'
    with open(nodes_file) as nodes:
        for line in nodes:
            if not line.startswith(prefix):
                continue
            rank = re.sub(rank_re, '\\5', line).strip()
            # A leftover tab means the substitution did not apply cleanly.
            if '\t' not in rank:
                return rank
    return 'NA'
def One_Rank_Lower(rank):
    """Return the rank one step below *rank* on the standard Linnaean
    ladder, or 'NA' for 'species', 'NA', or unrecognised ranks."""
    print('looking one level lower than' + rank)
    if rank == 'species':
        print('is species!')
        return 'NA'
    if rank == 'NA':
        return 'NA'
    ladder = ('superkingdom kingdom phylum class order family genus species'
        .split())
    if rank not in ladder:
        print(rank + ' is weird')
        return 'NA'
    return ladder[ladder.index(rank) + 1]
def Return_Parent(taxid, nodes_file):
    """Return the parent taxid of *taxid* from nodes.dmp (possibly with a
    trailing newline -- callers strip it), or 'NA' after printing an
    error when no matching row exists."""
    tid = taxid.strip()
    prefix = tid + '\t'
    parent_re = '(' + tid + ')(\t\\|\t)(\\d*)(\t\\|\t)([a-z]*)(.*)'
    with open(nodes_file) as nodes:
        for line in nodes:
            if line.startswith(prefix):
                parent = re.sub(parent_re, '\\3', line)
                # A leftover tab means the substitution failed; keep going.
                if '\t' not in parent:
                    return parent
    print('error finding parent taxa')
    return 'NA'
def Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,
    names_file, acc_list):
    """Search below *taxid* for one species whose name is in *acc_list*.

    Candidate-by-candidate variant: each direct child of *taxid* is
    examined in turn.  Returns the first acceptable species name found,
    or 'NA'.  NOTE(review): superseded by Ret_A_Valid_Species_Below; the
    else-branch below moves on to the next top-level candidate whenever
    the current one still has non-species children, so deeper levels
    appear never to be explored -- confirm before relying on this variant.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    # (child_taxid, child_rank) tuples for the level being examined.
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    done = False
    saved_top_level = []
    # Queue of direct children of *taxid*, loaded one at a time.
    for itema in child_list_atup:
        saved_top_level.append(itema)
    maxi = len(saved_top_level)
    atup = saved_top_level[0]
    saved_top_level.remove(atup)
    child_list_atup = [atup]
    for item in child_list_atup:
        child_list_a.append(item[0])
    i = 1
    while done is False:
        for item in child_list_atup:
            if item[1] == 'species':
                children.append(item[0])
                sis_spec_name = Taxid_To_Name(item[0], names_file)
                # Genuine binomials start upper-case; lower-case names
                # (e.g. 'environmental samples') are skipped.
                if sis_spec_name[0].islower() is False:
                    in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,
                        acc_list)
                    if in_blast is True:
                        return sis_spec_name
                list_ch_remove.append(item)
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        if child_list_a == []:
            # Current candidate exhausted; 'NA' once all have been tried.
            if i == maxi:
                return 'NA'
            done = True
        else:
            # Load the next saved top-level candidate.
            i += 1
            list_ch_remove = []
            atup = saved_top_level[0]
            saved_top_level.remove(atup)
            child_list_atup = [atup]
            for item in child_list_atup:
                child_list_a.append(item[0])
            continue
        # Breadth-first expansion of the remaining (non-species) taxids.
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    return 'NA'
<|reserved_special_token_0|>
def Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):
    """Return the taxids of every species-level descendant of *taxid*.

    Breadth-first walk over nodes.dmp: at each level, species-rank
    entries are harvested into the result and the remaining taxids are
    expanded one more level, until nothing non-species is left.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    # (taxid, rank) tuples for the level currently being scanned.
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    for item in child_list_atup:
        child_list_a.append(item[0])
    done = False
    while done is False:
        # Harvest species-level entries from the current level.
        for item in child_list_atup:
            if item[1] == 'species':
                children.append(item[0])
                list_ch_remove.append(item)
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        # All tips terminated at species level: done after this pass.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # Expand the remaining non-species taxids one level deeper.
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    return children
def Ret_All_Groups_One_Rank_Below(taxid, nodes_file):
    """Return the taxids of all groups exactly one Linnaean rank below
    *taxid* (e.g. class -> its orders), or 'NA' when *taxid*'s rank has
    no lower rank (species / unknown)."""
    taxid = taxid.strip()
    print('looking for taxid:' + str(taxid))
    rank = Get_Taxid_Rank(taxid, nodes_file)
    print(rank)
    target_rank = One_Rank_Lower(rank)
    if target_rank == 'NA':
        return 'NA'
    removal_ranks = (
        'superkingdom kingdom phylum class order family genus species')
    # Ranks BELOW the target: descending into one means we overshot.
    garbage, remove_string = removal_ranks.split(target_rank)
    remove_rank_list = remove_string.split()
    children = []
    list_ch_remove = []
    # (taxid, rank) tuples for the level currently being scanned.
    child_list_a = Taxid_To_Children(taxid, nodes_file)
    done = False
    while done is False:
        for item in child_list_a:
            # Harvest entries at the target rank; drop anything deeper.
            if item[1] == target_rank:
                children.append(item[0])
                list_ch_remove.append(item)
            if item[1] in remove_rank_list:
                list_ch_remove.append(item)
        for rem in list_ch_remove:
            child_list_a.remove(rem)
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # Expand remaining un-ranked/intermediate taxids one level deeper.
        for parent in child_list_a:
            child_list_b = Taxid_To_Children(parent[0], nodes_file)
            if child_list_b == []:
                pass
            else:
                for bitem in child_list_b:
                    child_list_c.append(bitem)
        child_list_a = child_list_c
    return children
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
    """Return the taxids of the sister taxa of the taxon named *string*.

    Resolves *string* to a taxid, walks up to its parent, lists the
    parent's children, and removes *string* itself.  Sisters are by tree
    level, not necessarily by Linnaean rank.  May return an empty list.
    """
    print(string)
    interest_taxid = Str_To_Taxid(string, names_file)
    print(interest_taxid)
    up_taxid = Return_Parent(interest_taxid, nodes_file)
    up_taxid = up_taxid.strip()
    interest_taxid = interest_taxid.strip()
    sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)
    # Keep only the taxids from the (taxid, rank) tuples.
    sister_and_self = []
    for tup in sis_self_tuples:
        sister_and_self.append(tup[0])
    print(sister_and_self)
    print(interest_taxid)
    sister_and_self.remove(interest_taxid)
    sisterlist = sister_and_self
    print(sisterlist)
    return sisterlist
def Taxid_To_Name(taxid, names_file):
    """Return the scientific name recorded for *taxid* in names.dmp, or
    'NA' (after printing a warning) when no such row exists."""
    taxid = taxid.strip()
    prefix = taxid + '\t'
    name_re = ('(\\d*)(\t\\|\t)([^\t]*)(\t\\|\t)(.*)(\t\\|\t)'
        '(scientific name)(.*)')
    with open(names_file) as names:
        for line in names:
            if not line.startswith(prefix):
                continue
            candidate = re.sub(name_re, '\\3', line)
            # A surviving tab means this row was not a 'scientific name'
            # entry (synonyms etc.); keep scanning.
            if '\t' not in candidate:
                return candidate.strip()
    print('Error finding name for: ' + taxid + ' in file: ' + names_file)
    return 'NA'
def Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):
    """Pick a single outgroup species for the clade named *string*.

    Climbs the NCBI taxonomy from *string* toward the root; at each level
    the sister taxa are scanned for the first species present in
    *acc_list* (via Ret_A_Valid_Species_Below).  Returns that species
    name, or 'NA' when the root is reached without a hit.

    string -- clade name; underscores are read as spaces.
    species_list -- unused here; kept for interface compatibility.
    names_file / nodes_file -- NCBI names.dmp / nodes.dmp paths.
    acc_list -- species names we actually have sequence data for.
    """
    print('one og sequence choser initiating')
    if '_' in string:
        string = string.replace('_', ' ')
    sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)
    print('Sisterlist')
    print(sislist)
    my_taxid = Str_To_Taxid(string, names_file)
    # Climb until some level actually has sister taxa.
    go = sislist == []
    while go is True:
        parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
        parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
        sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
        my_taxid = parent_of_me_taxid
        if sislist != []:
            go = False
    # First pass over the immediate sisters.
    test = 'NA'
    for item in sislist:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,
            acc_list)
        if test != 'NA':
            print(test)
            return test
    # No usable sister at this level: keep climbing until one is found
    # or we hit the root.
    while test == 'NA':
        sislist = []
        go = True
        # BUG FIX: was "my_taxid == 1"; taxids are strings here, so the
        # int comparison never fired and the root check was dead.
        if str(my_taxid).strip() == '1':
            break
        while go is True:
            parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
            parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
            sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file,
                names_file)
            my_taxid = parent_of_me_taxid
            if sislist != []:
                go = False
        for item in sislist:
            test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,
                acc_list)
            # BUG FIX: the original returned when test == 'NA' (and kept
            # looping on a hit), so one bad sister aborted the whole
            # search and valid candidates were discarded.
            if test != 'NA':
                return test
    return test
def Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):
    """Report which taxids in *tid_list* already have a representative.

    For each species already in the tree, walks up the taxonomy (via the
    module-level names_file / nodes_file paths) to its ancestor of rank
    *rank*; any candidate taxid matching one of those ancestors is
    considered represented.

    species_list -- species names currently in the tree.
    tid_list -- candidate taxids (strings) at rank *rank*.
    rank -- the Linnaean rank the candidates belong to.
    Returns the subset of *tid_list* that is already represented.
    """
    print('Checking for reps... target rank is: ' + rank)
    list_of_correct_rank = []
    removal_ranks = (
        'superkingdom kingdom phylum class order family genus species')
    # Ranks ABOVE the target: reaching one of these means we overshot.
    remove_rank_list = removal_ranks.split(rank)[0].split()
    for species in species_list:
        nid = Str_To_Taxid(species, names_file)
        go = True
        while go is True:
            rp = Return_Parent(nid, nodes_file)
            if rp == 'NA':
                list_of_correct_rank.append(rp)
                go = False
                continue
            # BUG FIX: the original compared the string to the int 1
            # ("rp.strip() == 1"), which could never be true, so the
            # root of the taxonomy was never detected here.
            if rp.strip() == '1':
                list_of_correct_rank.append('NA')
                go = False
                continue
            par_rank = Get_Taxid_Rank(rp, nodes_file)
            if par_rank == rank:
                list_of_correct_rank.append(rp.strip())
                go = False
            elif par_rank in remove_rank_list:
                list_of_correct_rank.append('NA')
                go = False
            else:
                # Not there yet: climb one more level.
                nid = rp
    print(tid_list)
    print(list_of_correct_rank)
    found = []
    for item in tid_list:
        if item in list_of_correct_rank:
            found.append(item)
    return found
def Choose_Loss_Candidates(string, species_list, names_file, acc_list,
    nodes_file):
    """Pick one representative species for each subgroup one rank below
    the clade named *string* that is not yet represented in
    *species_list*.

    Returns the list of chosen species names (subgroups for which no
    acceptable species was found are skipped, with a count printed).
    Returns [] when the rank below *string* cannot be determined.
    """
    print('loss search initiating')
    if '_' in string:
        print(string)
        string = string.replace('_', ' ')
        print(string)
    taxid = Str_To_Taxid(string, names_file)
    # All groups one Linnaean rank below the clade of interest.
    sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)
    if sub_taxids == 'NA':
        print('Error getting loss candidates for string:' + string)
        return []
    subgroup_names = []
    for item in sub_taxids:
        subgroup_names.append(Taxid_To_Name(item, names_file))
    b = Get_Taxid_Rank(taxid, nodes_file)
    a = One_Rank_Lower(b)
    # Drop subgroups that already have a representative in the tree.
    found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)
    print('Representatives already exist for:')
    found_names = []
    for foundtid in found:
        foundtid = foundtid.strip()
        index1 = sub_taxids.index(foundtid)
        found_names.append(subgroup_names.pop(index1))
        del sub_taxids[index1]
    print(found_names)
    print('Looking for one representative from each of the following:')
    print(subgroup_names)
    loss_list = []
    ite = 0
    for item in sub_taxids:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list
            )
        print(subgroup_names[ite] + ' : ' + test)
        ite += 1
        loss_list.append(test)
        continue
    print('Loss candidates will be added:')
    # Count and then drop the subgroups with no usable species ('NA').
    na = 0
    for item in loss_list:
        if item == 'NA':
            na += 1
    while 'NA' in loss_list:
        loss_list.remove('NA')
    print(loss_list)
    print('there were ' + str(na) + ' ' + a +
        's that no suitable loss candidate was found for.')
    return loss_list
def Check_Spec_Name_Acceptable_List(ssp_name, acc_list):
    """Return True when *ssp_name* is usable: it appears in *acc_list*
    either exactly or as a substring of an entry.

    A substring-only match is reported, since it usually means the two
    name lists disagree on formatting.
    """
    if ssp_name in acc_list:
        return True
    for item in acc_list:
        if ssp_name in item:
            # BUG FIX: the original referenced the generator-expression
            # variable `item` after the expression had finished, which
            # raises NameError in Python 3 whenever this branch fired.
            print('Err in match spec name - gen list: ' + ssp_name + ' ' + item
                )
            return True
    return False
<|reserved_special_token_0|>
def gen_acceptable_species_list(list_raw_gene_fastas, acc_name):
    """Build the 'acceptable species' list and write it to *acc_name*.

    A species is acceptable when it occurs in a strict majority of the
    raw gene fastas.  Each element of *list_raw_gene_fastas* is expected
    to expose .gen_species_lists() and a .species_names list -- TODO
    confirm against the raw-BLAST Fasta class.  Returns the filename
    written by write_acc_list().
    """
    names_list_acc = []
    numbers_list_acc = []
    # Count, per species, how many gene files it appears in (the two
    # parallel lists share indices).
    for raw in list_raw_gene_fastas:
        raw.gen_species_lists()
        raw_sl = raw.species_names
        print(raw_sl[0])
        for rawsp in raw_sl:
            if rawsp in names_list_acc:
                ind = names_list_acc.index(rawsp)
                numbers_list_acc[ind] = numbers_list_acc[ind] + 1
            else:
                names_list_acc.append(rawsp)
                numbers_list_acc.append(1)
    # Strict-majority cutoff (float division is intentional).
    cutoff_num = len(list_raw_gene_fastas) / 2
    print(cutoff_num)
    list_of_rem = []
    index = 0
    for n in numbers_list_acc:
        if n > cutoff_num:
            pass
        else:
            list_of_rem.append(names_list_acc[index])
        index += 1
    print(len(list_of_rem))
    list_of_rem.sort(reverse=True)
    for remove_me in list_of_rem:
        names_list_acc.remove(remove_me)
    a = write_acc_list(names_list_acc, acc_name)
    return a
def write_acc_list(acc_list, acc_name):
    """Write one acceptable species name per line to *acc_name* and
    return the filename."""
    lines = [name + '\n' for name in acc_list]
    with open(acc_name, 'w') as handle:
        handle.writelines(lines)
    return acc_name
def write_spc_list(spc_list, spcname):
    """Write species names one per line to *spcname*, truncating each
    underscore-joined name to its first two tokens ('Genus_species');
    return the filename."""
    with open(spcname, 'w') as handle:
        for name in spc_list:
            if '_' in name:
                name = '_'.join(name.split('_')[:2])
            handle.write(name + '\n')
    return spcname
def Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):
    """Ship the taxonomy job to the cluster, submit it, and poll until
    every expected result file has been copied back locally.

    script_name -- the sbatch array script to submit.
    all_files -- every file the job needs (index, species lists, script).
    all_result_files -- filenames the job is expected to produce.
    Uses the module-level ssh_inst / clus_head connection settings and
    blocks, sleeping 600 s between polls (the printed message says '5
    minutes' but the actual interval is 10).
    """
    os.system(ssh_inst + " 'mkdir Taxonomy'")
    sb_script = script_name
    print(all_files)
    for item in all_files:
        os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')
    os.system(ssh_inst + " 'cd ~/Taxonomy; sbatch " + sb_script + "'")
    direct = os.getcwd()
    exists = False
    movehome = []
    finished = 'start'
    # movehome holds the result files still waiting to be fetched.
    for i in all_result_files:
        movehome.append(i)
    while finished is not True:
        for filename in movehome:
            os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +
                direct)
        for item in all_result_files:
            exists = os.path.isfile(item)
            if exists is True:
                if item in movehome:
                    movehome.remove(item)
                finished = 'yes'
            else:
                # At least one result missing: wait and poll again.
                finished = False
                print('Tax not done yet. could not locate : ' + item +
                    'checking again in 5 minutes')
                break
        if finished == 'yes':
            print('Should be done!')
            finished = True
        else:
            time.sleep(600)
            finished = 'yes'
    print('Taxonomy parsing complete')
def Get_OG_LOSS_DATA(list_of_clades, projectname):
    """Run the outgroup/loss-candidate taxonomy search for every clade.

    Builds (or reuses) one acceptable-species list per unique cat file,
    writes per-clade species lists, generates the SLURM index and script
    files, ships everything to the cluster for the clades that do not yet
    have a result file, then parses each clade's result file into its
    .loss_species_list, .root_species and .species_list_plus_og_loss
    attributes.  Returns the list of result filenames (re)computed.
    """
    # Collect each unique cat file together with its raw BLAST fastas.
    list_catfiles = []
    list_of_lists_of_raw_blast_files = []
    for item in list_of_clades:
        if item.cat_file not in list_catfiles:
            list_catfiles.append(item.cat_file)
            list_of_lists_of_raw_blast_files.append(item.blast_raw)
    # One acceptable-species file per cat file; build it only once.
    cat_acc_dict = {}
    for catfile, list_raws in zip(list_catfiles,
                                  list_of_lists_of_raw_blast_files):
        # '<basename-without-extension>_Acc_List.txt'
        gsf_b = catfile.split('.')[0].split('/')[-1]
        acc_file = gsf_b + '_Acc_List.txt'
        if not os.path.isfile(acc_file):
            print(
                '....initializing all_acceptables from gene_seq_query file: '
                + gsf_b + '. this should only happen once...')
            acc_file = gen_acceptable_species_list(list_raws, acc_file)
        cat_acc_dict[catfile] = acc_file
    list_of_species_files = Gen_Species_File(list_of_clades, projectname)
    # Only clades without an existing result file go to the cluster.
    list_to_tax_clades = [item for item in list_of_clades
                          if not os.path.isfile(item.result)]
    corr_file_name, results_list = Generate_Cat_File_OGLOSS(
        list_to_tax_clades, cat_acc_dict, projectname)
    n = len(list_to_tax_clades)
    script_name = projectname + '_OGLScript.sh'
    scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)
    all_files = []
    for item in cat_acc_dict.values():
        all_files.append(item)
    for item in list_of_species_files:
        all_files.append(item)
    all_files.append(scriptfile)
    all_files.append(corr_file_name)
    # BUG FIX: was "len(results_list) is 0" -- identity comparison with
    # an int literal, which is implementation-dependent.
    if len(results_list) != 0:
        Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)
    # Parse every clade's result file: line 0 holds the '~'-separated
    # loss species, line 1 the root species, line 2 timing info.
    for item in list_of_clades:
        results_file = item.result
        loss_species = []
        # BUG FIX: root_species was unbound (or leaked from the previous
        # clade) when a result file had fewer than two lines.
        root_species = ''
        print(item.string_name)
        with open(results_file) as res:
            a = 0
            for line in res:
                if a == 0:
                    loss_species = line.strip().split('~')
                    print('loss candidates')
                    if '' in loss_species:
                        loss_species.remove('')
                    if '\n' in loss_species:
                        loss_species.remove('\n')
                    item.loss_species_list = loss_species
                    print(loss_species)
                if a == 1:
                    root_species = line.strip()
                    item.root_species = root_species
                    print('root: ' + root_species)
                if a == 2:
                    print('time:')
                    print(line)
                a += 1
        # Final species list: originals + loss candidates + root species.
        item.species_list_plus_og_loss = []
        for thing in item.species_list_original:
            item.species_list_plus_og_loss.append(thing)
        for ls in loss_species:
            item.species_list_plus_og_loss.append(ls)
        if root_species != '':
            item.species_list_plus_og_loss.append(root_species)
    return results_list
def Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):
    """Write the array-job index file 'Corr_<projectname>.txt': one
    space-separated row per clade (row number, species file, clade name,
    acceptable-species file, result file).  Returns the index filename
    and the list of expected result files."""
    corr_file_name = 'Corr_' + projectname + '.txt'
    results_list = []
    with open(corr_file_name, 'w') as corr:
        for row, clade in enumerate(list_of_clades, start=1):
            fields = [str(row), clade.species_file, clade.string_name,
                cat_acc_dict[clade.cat_file], clade.result]
            corr.write(' '.join(fields) + '\n')
            results_list.append(clade.result)
    return corr_file_name, results_list
def Generate_Script_File_OGLOSS(n, indexname, scriptname):
    """Write a SLURM array-job shell script (one task per row of the
    index file *indexname*) that runs Online_Taxon_Parse.py, and return
    *scriptname*."""
    header = """#!/bin/bash
#SBATCH -p sched_mit_g4nier
#SBATCH -t 2-00:00:00
#SBATCH -J Tax
#SBATCH --array=1-"""
    middle = """
. /etc/profile.d/modules.sh
module add engaging/openmpi/1.8.8
MY_ARRAY_ID=$SLURM_ARRAY_TASK_ID
THE_INDEX="""
    tail = """
SPECIES_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $2}' )
STRING_NAME=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $3}' )
ACC_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $4}' )
RESULT=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $5}' )
echo $SPECIES_FILE
echo $STRING_NAME
echo $ACC_FILE
mpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT
exit"""
    script_text = header + str(n) + middle + indexname + tail
    with open(scriptname, 'w') as handle:
        handle.write(script_text)
    return scriptname
def Gen_Species_File(list_of_clades, projectname):
    """Write one species-list file per clade and return the filenames.

    Quoted species names are unquoted and written to
    '<prefix>_Species_List.txt' via write_spc_list().  Also sets each
    clade's .species_file and .result attributes as a side effect.
    *projectname* is accepted for interface compatibility but unused.
    """
    list_sp_files = []
    for item in list_of_clades:
        species_list = item.species_list_original
        species_file_name = item.prefix + '_Species_List.txt'
        species_list2 = []
        # Strip surrounding double quotes from each name.
        for sl2 in species_list:
            sl2 = sl2.strip('"')
            species_list2.append(sl2)
        spc_file = write_spc_list(species_list2, species_file_name)
        item.species_file = species_file_name
        list_sp_files.append(species_file_name)
        item.result = item.prefix + '_OGL_Result.txt'
    return list_sp_files
<|reserved_special_token_1|>
# #!/usr/bin/python
# last edit abigailc@Actaeon on jan 27 2017
# Taxonomy helpers pulled out of makespeciestree for speed: everything in
# this module walks the flat-file NCBI taxonomy dump (names.dmp/nodes.dmp).
# Local paths to the unpacked NCBI taxdump files.
names_file = "/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/names.dmp"
nodes_file = "/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/nodes.dmp"
######### PERSONAL_SETTINGS #########
# Cluster login command and scp destination prefix used by
# Run_OG_LOSS_ON_CLUSTER; adjust per user/site.
ssh_inst = "ssh -l abigailc -i ~/.ssh/id_rsa eofe4.mit.edu"
clus_head = "abigailc@eofe4.mit.edu:/home/abigailc/"
Path_Blast = "/Users/abigailc/blast/"
import os
import re
import time
import sys
#from oxy_mods.Classes_DTL_Detector import Fasta
#BASIC OPERATIONS
def Str_To_Taxid(string, names_file):
    """Look up the NCBI taxid for a taxon name in names.dmp.

    Underscores in *string* are read as spaces.  Returns the taxid as a
    string (possibly with a trailing newline -- callers strip it), or
    "NA" after printing a warning when the name is absent.
    """
    found = False
    string = string.replace("_", " ")
    with open (names_file) as names:
        for line in names:
            # names.dmp fields are '\t|\t'-separated; the cheap substring
            # test filters lines before the regex runs.
            if "\t"+string+"\t" in line:
                taxid_int = re.sub ("(\d*)(\t\|\t)("+string+")(\t)(.*)", "\\1", line)
                found = True
                break
    if found is False:
        print("Error finding string: "+string+" in file: "+names_file)
        taxid_int = "NA"
    return taxid_int
def Taxid_To_Children(taxid, nodes_file):
    """Return the direct children of *taxid* in nodes.dmp as a list of
    (child_taxid, child_rank) string tuples."""
    children = []
    marker = "\t" + taxid + "\t"
    child_re = "(\d*)(\t\|\t)(" + taxid + ")(\t\|\t)([a-z]*)(.*)"
    with open(nodes_file) as nodes:
        for line in nodes:
            # Quick substring filter; the regex then confirms *taxid*
            # actually sits in the parent column.
            if marker not in line:
                continue
            pair = re.sub(child_re, "\\1~\\5", line)
            if "\t" in pair:
                # Substitution failed: *taxid* matched elsewhere in the row.
                continue
            child_id, child_rank = pair.split("~")
            children.append((child_id.strip(), child_rank.strip()))
    return children
def Get_Taxid_Rank(taxid, nodes_file):
    """Return the rank (e.g. "species", "genus") recorded for *taxid* in
    nodes.dmp, or "NA" when no matching row exists."""
    taxid = taxid.strip()
    ranklist = []
    len_tax = len(taxid)
    len_tax_t = len_tax+1
    # e.g. taxid = "100": len_tax = 3, len_tax_t = 4 (room for the tab)
    with open (nodes_file) as nodes:
        for line in nodes:
            if line[:len_tax_t] == taxid+"\t":
                # The row starts with our taxid: pull out the rank column.
                # The trailing tab stops taxid 12 matching 123, 12345, etc.
                apparent_rank = re.sub("("+taxid+")(\t\|\t)(\d*)(\t\|\t)([a-z]*)(.*)", "\\5", line)
                apparent_rank = apparent_rank.strip()
                if "\t" in apparent_rank:
                    # A surviving tab means the substitution failed.
                    pass
                else:
                    return apparent_rank
    return "NA"
#returns the rank (eg "order") of a taxid
def One_Rank_Lower(rank):
    """Return the rank one step below *rank* on the standard Linnaean
    ladder, or "NA" for "species", "NA", or unrecognised ranks."""
    print("looking one level lower than"+rank)
    if rank == "species":
        print("is species!")
        return "NA"
    ordered_str = "superkingdom kingdom phylum class order family genus species"
    ordered_list = ordered_str.split()
    if rank in ordered_list:
        pass
    elif rank == "NA":
        return "NA"
    else:
        print(rank+" is weird")
        return "NA"
    current = ordered_list.index(rank)
    lowindex = current + 1
    one_lower = ordered_list[lowindex]
    return one_lower
#given phylum, returns class. given class, returns order. etc.
# rank = "class"
# string = "cyanobacteria"
# taxid = "12345"
def Return_Parent(taxid, nodes_file):
    """Return the parent taxid of *taxid* from nodes.dmp (possibly with a
    trailing newline -- callers strip it), or "NA" after printing an
    error when no matching row exists."""
    len_tax = len(taxid.strip())
    len_tax_t = len_tax+1
    # e.g. taxid = "100": len_tax = 3, len_tax_t = 4 (room for the tab)
    with open (nodes_file) as nodes:
        for line in nodes:
            if line[:len_tax_t] == taxid.strip()+"\t":
                # The row starts with our taxid: pull out the parent column.
                # The trailing tab stops taxid 12 matching 123, 12345, etc.
                parent_taxid = re.sub("("+taxid.strip()+")(\t\|\t)(\d*)(\t\|\t)([a-z]*)(.*)", "\\3", line)
                if "\t" in parent_taxid:
                    # A surviving tab means the substitution failed.
                    pass
                else:
                    return parent_taxid
    print("error finding parent taxa")
    return("NA")
#COMPLEX OPERATIONS
def Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file, names_file, acc_list):
    """Search below *taxid* for one species whose name is in *acc_list*.

    Candidate-by-candidate variant: each direct child of *taxid* is
    examined in turn.  Returns the first acceptable species name found,
    or "NA".  NOTE(review): superseded by Ret_A_Valid_Species_Below; the
    else-branch below moves on to the next top-level candidate whenever
    the current one still has non-species children, so deeper levels
    appear never to be explored -- confirm before relying on this variant.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    # (child_taxid, child_rank) tuples for the level being examined.
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    done = False
    saved_top_level = []
    # Queue of direct children of *taxid*, loaded one at a time.
    for itema in child_list_atup:
        saved_top_level.append(itema)
    maxi = len(saved_top_level)
    atup = saved_top_level[0]
    saved_top_level.remove(atup)
    child_list_atup = [atup]
    for item in child_list_atup:
        child_list_a.append(item[0])
    i = 1
    while done is False:
        for item in child_list_atup:
            if item[1] == "species":
                children.append(item[0])
                sis_spec_name = Taxid_To_Name(item[0], names_file)
                # Genuine binomials start upper-case; lower-case names
                # (e.g. 'environmental samples') are skipped.
                if sis_spec_name[0].islower() is False:
                    in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name, acc_list)
                    if in_blast is True:
                        return sis_spec_name
                list_ch_remove.append(item)
        # Remove taxids harvested at the species level.
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        if child_list_a == []:
            # Current candidate exhausted; "NA" once all have been tried.
            if i == maxi:
                return "NA"
            done = True
        else:
            # Load the next saved top-level candidate.
            i += 1
            list_ch_remove = []
            atup = saved_top_level[0]
            saved_top_level.remove(atup)
            child_list_atup = [atup]
            for item in child_list_atup:
                child_list_a.append(item[0])
            continue
        # Breadth-first expansion of the remaining (non-species) taxids.
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    return "NA"
#NOTE: duplicate implementations -- Ret_A_Valid_Species_Below (below) is the
#variant in active use; consider deleting this LESS_EFFICIENTLY version.
def Ret_A_Valid_Species_Below(taxid, nodes_file, names_file, acc_list):
    """Depth-first search below *taxid* for the first species whose name
    passes Check_Spec_Name_Acceptable_List(*acc_list*).

    A stack of child lists (masterlist) drives the walk: the head of the
    most recent list is inspected, species are tested, and non-species
    are expanded onto the stack.  Returns the species name, or "NA" when
    the whole subtree is exhausted.
    """
    masterlist = []
    # Stack of [(taxid, rank), ...] lists; deepest level is last.
    complete = False
    masterlist.append([(taxid, "starter")])
    while complete is False:
        if masterlist == []:
            return("NA")
        # Work on the last (deepest) list in the stack.
        now_list = masterlist[-1]
        if now_list == []:
            # Drop exhausted levels; done if nothing is left at all.
            while [] in masterlist:
                masterlist.remove([])
            if masterlist == []:
                return("NA")
            now_list = masterlist[-1]
        # Inspect (and consume) the first member of that list.
        now_tup = now_list[0]
        now_taxid, now_rank = now_tup[0], now_tup[1]
        if now_rank == "species":
            now_list.remove(now_tup)
            now_name = Taxid_To_Name(now_taxid, names_file)
            # Genuine binomials start upper-case; lower-case names
            # (e.g. 'environmental samples') are skipped.
            if now_name[0].islower() is False:
                in_blast = Check_Spec_Name_Acceptable_List(now_name,acc_list)
                if in_blast is True:
                    return now_name
        else:
            now_list.remove(now_tup)
            # Expand the non-species taxid one level deeper.
            newlist = Taxid_To_Children(now_taxid, nodes_file)
            # NOTE(review): Taxid_To_Children returns a list, never "NA";
            # this guard looks like dead code.
            if newlist == "NA":
                pass
            else:
                masterlist.append(newlist)
    # Unreachable: the while loop only exits via return.
    return("Uh, what?")
def Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):
    """Return the taxids of every species-level descendant of *taxid*.

    Breadth-first walk over nodes.dmp: species-rank entries at each
    level are harvested and the remainder expanded one level deeper,
    until every tip has terminated at species rank.
    """
    children = []
    list_ch_remove = []
    child_list_a = []
    # (child_taxid, child_rank) tuples for the level being scanned.
    child_list_atup = Taxid_To_Children(taxid, nodes_file)
    # child_list_a mirrors child_list_atup but holds taxids only.
    for item in child_list_atup:
        child_list_a.append(item[0])
    done = False
    while done is False:
        for item in child_list_atup:
            if item[1] == "species":
                # Harvest the species-level taxid.
                children.append(item[0])
                list_ch_remove.append(item)
        # Remove taxids harvested at the species level.
        for rem in list_ch_remove:
            child_list_atup.remove(rem)
            child_list_a.remove(rem[0])
        # All tips terminated at species level: done after this pass.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # For the remaining non-species taxids: gather their children
        # into child_list_c and make that the next level to scan.
        for parent in child_list_a:
            child_list_btup = Taxid_To_Children(parent, nodes_file)
            for item in child_list_btup:
                child_list_b.append(item[0])
            if child_list_btup == []:
                pass
            else:
                for bitem in child_list_btup:
                    child_list_c.append(bitem)
        child_list_atup = child_list_c
        child_list_a = []
        for itup in child_list_atup:
            child_list_a.append(itup[0])
    # children: all species-level taxids belonging to the given group.
    return children
def Ret_All_Groups_One_Rank_Below(taxid, nodes_file):
    """Return the taxids of all groups exactly one Linnaean rank below
    *taxid* (e.g. class -> its orders), or "NA" when *taxid*'s rank has
    no lower rank (species / unknown)."""
    taxid = taxid.strip()
    print("looking for taxid:"+str(taxid))
    rank = Get_Taxid_Rank(taxid, nodes_file)
    print(rank)
    target_rank = One_Rank_Lower(rank)
    if target_rank == "NA":
        return("NA")
    removal_ranks = "superkingdom kingdom phylum class order family genus species"
    # Ranks BELOW the target: descending into one means we overshot.
    garbage, remove_string = removal_ranks.split(target_rank)
    remove_rank_list = remove_string.split()
    children = []
    list_ch_remove = []
    # (child_taxid, child_rank) tuples for the level being scanned.
    child_list_a = Taxid_To_Children(taxid, nodes_file)
    done = False
    while done is False:
        for item in child_list_a:
            if item[1] == target_rank:
                # Harvest entries at the target rank.
                children.append(item[0])
                list_ch_remove.append(item)
            if item[1] in remove_rank_list:
                # Deeper than the target rank: drop without harvesting.
                list_ch_remove.append(item)
        for rem in list_ch_remove:
            child_list_a.remove(rem)
        # Every tip resolved (harvested or dropped): done after this pass.
        if child_list_a == []:
            done = True
        list_ch_remove = []
        child_list_b = []
        child_list_c = []
        # Expand the remaining intermediate taxids one level deeper.
        for parent in child_list_a:
            child_list_b = Taxid_To_Children(parent[0], nodes_file)
            if child_list_b == []:
                pass
            else:
                for bitem in child_list_b:
                    child_list_c.append(bitem)
        child_list_a = child_list_c
    # children: all taxids one rank below the given group.
    return children
#runs until all children are found of one rank below, eg CLASS -> [order1, order2, ...].
#for loss candidates: 1) run this, 2) list species below each member of the output,
#3) choose one member per subgroup for the species tree, checking we have data for it.
def Ret_Sister_Same_Rank(string, nodes_file, names_file):
    """Return the taxids of the sister taxa of the taxon named *string*.

    Resolves *string* to a taxid, goes up one level, lists the parent's
    children, and removes *string*'s own taxid.  Sisters are by tree
    level, not necessarily by Linnaean rank.  May return an empty list
    (TODO: callers currently re-climb when that happens).
    """
    print(string)
    interest_taxid = Str_To_Taxid(string, names_file)
    print(interest_taxid)
    up_taxid = Return_Parent(interest_taxid, nodes_file)
    up_taxid = up_taxid.strip()
    interest_taxid = interest_taxid.strip()
    sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)
    sister_and_self = []
    for tup in sis_self_tuples:
        sister_and_self.append(tup[0])
    # sister_and_self is a list of TAXIDS ONLY.
    print(sister_and_self)
    print(interest_taxid)
    sister_and_self.remove(interest_taxid)
    sisterlist = sister_and_self
    print(sisterlist)
    return sisterlist
def Taxid_To_Name(taxid, names_file):
    """Return the scientific name recorded for *taxid* in names.dmp, or
    "NA" (after printing a warning) when no such row exists.  Inverse of
    Str_To_Taxid."""
    found = False
    taxid = taxid.strip()
    len_tax = len(taxid)
    len_tax_t = len_tax+1
    with open (names_file) as names:
        for line in names:
            if line[:len_tax_t] == taxid+"\t":
                # Pull the name column from rows whose class is
                # 'scientific name'; other rows (synonyms etc.) fail the
                # substitution and keep a tab.
                name_wanted = re.sub ("(\d*)(\t\|\t)([^\t]*)(\t\|\t)(.*)(\t\|\t)(scientific name)(.*)", "\\3", line)
                if "\t" in name_wanted:
                    pass
                else:
                    found = True
                    break
    if found is False:
        print("Error finding name for: "+taxid+" in file: "+names_file)
        name_wanted = "NA"
    if found is True:
        name_wanted = name_wanted.strip()
    return name_wanted
def Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):
    """Pick a single outgroup species for the clade named *string*.

    Climbs the NCBI taxonomy from *string* toward the root; at each level
    the sister taxa are scanned for the first species present in
    *acc_list* (via Ret_A_Valid_Species_Below).  Returns that species
    name, or "NA" when the root is reached without a hit.

    string -- clade name; underscores are read as spaces.
    species_list -- unused here; kept for interface compatibility.
    names_file / nodes_file -- NCBI names.dmp / nodes.dmp paths.
    acc_list -- species names we actually have sequence data for.
    """
    print("one og sequence choser initiating")
    if "_" in string:
        string = string.replace("_", " ")
    sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)
    print("Sisterlist")
    print(sislist)
    my_taxid = Str_To_Taxid(string, names_file)
    #climb until some level actually has sister taxa
    go = sislist == []
    while go is True:
        parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
        parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
        sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
        my_taxid = parent_of_me_taxid
        if sislist != []:
            go = False
    #first pass over the immediate sisters
    test = "NA"
    for item in sislist:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
        if test != "NA":
            print(test)
            return test
    #no usable sister at this level: keep climbing until one is found
    #or we hit the root
    while test == "NA":
        sislist = []
        go = True
        #BUG FIX: was "my_taxid == 1"; taxids are strings here, so the
        #int comparison never fired and the root check was dead.
        if str(my_taxid).strip() == "1":
            break
        while go is True:
            parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)
            parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)
            sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)
            my_taxid = parent_of_me_taxid
            if sislist != []:
                go = False
        for item in sislist:
            test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
            #BUG FIX: the original returned when test == "NA" (and kept
            #looping on a hit), so one bad sister aborted the whole
            #search and valid candidates were discarded.
            if test != "NA":
                return test
    return test
def Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):
    """For each species, walk up the taxonomy to its ancestor at `rank`,
    then return the subset of `tid_list` that already has such an ancestor
    represented (i.e. groups that need no extra loss candidate).

    NOTE(review): reads module-level `names_file` / `nodes_file` globals —
    confirm they are defined wherever this is called.  `rank` must be one of
    the words in `removal_ranks` below or the split() unpack will fail.
    """
    print("Checking for reps... target rank is: "+rank)
    list_of_correct_rank = []
    found = []
    # Everything left of `rank` in this string is a HIGHER rank; hitting one
    # while climbing means we walked past the target without finding it.
    removal_ranks = "superkingdom kingdom phylum class order family genus species"
    remove_string, garbage = removal_ranks.split(rank)
    remove_rank_list = remove_string.split()
    for species in species_list:
        nid = Str_To_Taxid(species, names_file)
        # Climb the tree one parent at a time.
        go = True
        while go is True:
            rp = Return_Parent(nid, nodes_file)
            if rp == "NA":
                # Lookup failed; record NA and move to the next species.
                list_of_correct_rank.append(rp)
                go = False
                continue
            # Taxids are strings: the taxonomy root is "1".  (The original
            # compared to the int 1, which never matched and let the walk
            # loop forever once it hit the root.)
            if rp.strip() == "1":
                rp = "NA"
                list_of_correct_rank.append(rp)
                go = False
                continue
            par_rank = Get_Taxid_Rank(rp, nodes_file)
            if par_rank == rank:
                # Found the ancestor at the target rank.
                rp = rp.strip()
                list_of_correct_rank.append(rp)
                go = False
            elif par_rank in remove_rank_list:
                # Climbed above the target rank without passing through it.
                rp = "NA"
                list_of_correct_rank.append(rp)
                go = False
            else:
                # Intermediate / unranked node: keep climbing.
                nid = rp
    print(tid_list)
    print(list_of_correct_rank)
    for item in tid_list:
        if item in list_of_correct_rank:
            a = tid_list.index(item)
            found.append(tid_list[a])
    return found
#@blast_file should actually be a list of raw_blast_FASTA objects
#@blast_file should actually be a list of raw_blast_FASTA objects
def Choose_Loss_Candidates(string, species_list, names_file, acc_list, nodes_file):
    """Return a list of species to add as gene-loss candidates for clade `string`.

    Finds every taxonomic group one rank below the clade, drops the groups
    that already have a representative in `species_list`, and picks one valid
    species (via Ret_A_Valid_Species_Below) from each remaining group.
    Groups with no suitable species are counted and skipped.  Returns []
    on lookup failure.
    """
    print("loss search initiating")
    # Tree labels use underscores; taxonomy names use spaces.
    if "_" in string:
        print(string)
        string = string.replace("_", " ")
        print(string)
    taxid = Str_To_Taxid(string, names_file)
    #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.
    sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)
    if sub_taxids == "NA":
        print("Error getting loss candidates for string:"+string)
        return([])
    subgroup_names = []
    for item in sub_taxids:
        subgroup_names.append(Taxid_To_Name(item, names_file))
    # b = rank of the clade itself; a = the rank one step below it, which is
    # the rank of the subgroups we are choosing candidates for.
    b = Get_Taxid_Rank(taxid, nodes_file)
    a = One_Rank_Lower(b)
    found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)
    print("Representatives already exist for:")
    found_names = []
    # Remove already-represented subgroups from both parallel lists.
    # NOTE(review): index() assumes each entry of `found` matches an element
    # of sub_taxids exactly after strip(); if sub_taxids entries carry
    # whitespace this raises ValueError -- TODO confirm upstream stripping.
    for foundtid in found:
        foundtid = foundtid.strip()
        index1 = sub_taxids.index(foundtid)
        found_names.append(subgroup_names.pop(index1))
        del sub_taxids[index1]
    print(found_names)
    print("Looking for one representative from each of the following:")
    print(subgroup_names)
    loss_list = []
    ite = 0
    # #first check if it is in the output loss list.
    # for item in sub_taxids:
    #     with open(saved_loss_candidates) as saved:
    #         for line in saved:
    #             if item in line:
    #                 #newthing will be a species name.
    #                 newthing = re.sub("("item")(\t)(.*)", "\\3", line))
    #                 loss_list.append(newthing)
    #                 found2.append(item)
    #                 break
    #remove those found from file from the search list.
    # for item in found2:
    #     sub_taxids.pop(item)
    # Pick one valid species per remaining subgroup ("NA" when none found).
    for item in sub_taxids:
        test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)
        #print(test)
        print(subgroup_names[ite]+" : "+test)
        ite+=1
        loss_list.append(test)
        continue
    print("Loss candidates will be added:")
    # Count, then strip out, the subgroups that yielded no candidate.
    na = 0
    for item in loss_list:
        if item == "NA":
            na +=1
    while "NA" in loss_list: loss_list.remove("NA")
    print(loss_list)
    print("there were "+str(na)+" "+a+"s that no suitable loss candidate was found for.")
    return loss_list
#either one per next-level-down
#or one per next-rank-down
def Check_Spec_Name_Acceptable_List(ssp_name, acc_list):
    """Return True if `ssp_name` is an entry of `acc_list`, or appears as a
    substring of some entry (the loose match is logged); False otherwise."""
    if ssp_name in acc_list:
        return True
    # Fall back to substring matching, keeping the matching entry so it can
    # be reported.  (The original read the generator's loop variable after
    # next(), which raises NameError in Python 3 -- genexp variables do not
    # leak into the enclosing scope.)
    match = next((entry for entry in acc_list if ssp_name in entry), None)
    if match is None:
        return False
    print("Err in match spec name - gen list: " + ssp_name + " " + match)
    return True
def Check_Spec_Name_Blast_File(ssp_name, blast_fasta_list):
    """Majority vote: True iff `ssp_name` (spaces -> underscores, stripped)
    occurs in the species_names of more than half of `blast_fasta_list`.

    Bails out early with False once more than six records have been checked
    and fewer than a third of them contained the name.
    """
    target = ssp_name.replace(" ", "_").strip()
    majority = len(blast_fasta_list) / 2
    hits = 0
    for checked, record in enumerate(blast_fasta_list, start=1):
        # Early-exit heuristic: clearly failing after enough samples.
        if checked > 6 and hits < checked / 3:
            return False
        names = record.species_names
        # Exact membership, or target contained within some longer entry
        # (e.g. a strain-suffixed name).
        if target in names or any(target in candidate for candidate in names):
            hits += 1
    return hits > majority
def gen_acceptable_species_list(list_raw_gene_fastas, acc_name):
    """Build the "acceptable species" file for a gene.

    A species is acceptable when it appears in MORE than half of the raw
    blast-fasta objects.  Writes the surviving names (first-seen order) to
    `acc_name` via write_acc_list and returns that filename.
    """
    # Tally occurrences across all raw fastas, preserving first-seen order
    # (dicts keep insertion order).
    counts = {}
    for raw in list_raw_gene_fastas:
        raw.gen_species_lists()
        species = raw.species_names
        print(species[0])
        for name in species:
            counts[name] = counts.get(name, 0) + 1
    cutoff = len(list_raw_gene_fastas) / 2
    print(cutoff)
    # Names at or below the cutoff are dropped; strictly-above survive.
    dropped = [name for name, tally in counts.items() if tally <= cutoff]
    print(len(dropped))
    dropped_set = set(dropped)
    kept = [name for name in counts if name not in dropped_set]
    return write_acc_list(kept, acc_name)
def write_acc_list(acc_list, acc_name):
    """Write one accepted species name per line to `acc_name`; return it."""
    with open(acc_name, "w") as handle:
        handle.writelines(name + "\n" for name in acc_list)
    return acc_name
def write_spc_list(spc_list, spcname):
    """Write one species per line to `spcname`, trimming each underscore-
    separated name to its first two tokens (drops strain data); return the
    filename."""
    with open(spcname, "w") as out:
        for entry in spc_list:
            if "_" in entry:
                # Keep only "Genus_species" from e.g. "Genus_species_strain".
                pieces = entry.split("_")
                entry = "_".join(pieces[:2])
            out.write(entry + "\n")
    return spcname
#parser stuff
def Run_OG_LOSS_ON_CLUSTER(script_name,all_files, all_result_files):
    """Ship the taxonomy job files to the cluster, submit the sbatch script,
    then poll until every expected result file has been scp'd back locally.

    Relies on module-level `ssh_inst` (ssh command prefix) and `clus_head`
    (scp destination prefix) -- NOTE(review): confirm both are defined
    before this is called.  Blocks until all of `all_result_files` exist
    in the current working directory.
    """
    #here acc list is the name of the acc_list_current_file
    #auto gen an sbatch script
    os.system(ssh_inst+" \'mkdir Taxonomy\'")
    sb_script = script_name
    #scp it over
    print(all_files)
    for item in all_files:
        os.system("scp "+item+" "+clus_head+"Taxonomy")
    #run it
    #edit the script on the cluster to deal with my mistakes
    os.system(ssh_inst+" 'cd ~/Taxonomy; sbatch "+sb_script+"'")
    #scp it back and verify
    direct = os.getcwd()
    exists = False
    #now it should exist locally
    movehome = []
    # `finished` is a small state machine: "start" enters the loop; "yes"
    # means the last sweep found every file; False means one was missing;
    # True (the object) terminates the loop.
    finished = "start"
    #bring home the d
    for i in all_result_files:
        movehome.append(i)
    while finished is not True:
        # Pull whatever is still outstanding; scp failures are silent and
        # caught by the existence check below.
        for filename in movehome:
            os.system("scp "+clus_head+"Taxonomy/"+filename+" "+direct)
        for item in all_result_files:
            #see if it got moved home.
            exists = os.path.isfile(item)
            if exists is True:
                if item in movehome:
                    movehome.remove(item)
                finished = "yes"
            else:
                finished = False
                # NOTE(review): message says 5 minutes but the sleep below
                # is 600 seconds (10 minutes).
                print("Tax not done yet. could not locate : "+item+"checking again in 5 minutes")
                break
        if finished == "yes":
            print("Should be done!")
            finished = True
        else:
            #wait ten minutes and then try again.
            time.sleep(600)
            finished = "yes"
    #TEMPORARILY REMOVED result file deletion from the cluster to make testing progress faster.
    #for item in all_result_files:
    #    os.system(ssh_inst+" 'cd ~/Taxonomy; rm "+item+"'")
    #for item in all_files:
    #    os.system(ssh_inst+" 'cd ~/Taxonomy; rm "+item+"'")
    print("Taxonomy parsing complete")
    #remove the script and the og loss file from cluster
def Get_OG_LOSS_DATA(list_of_clades, projectname):
    """Run the cluster-side taxonomy/outgroup-loss job for every clade and
    fold the results back onto the clade objects.

    Side effects per clade: sets species_file and result (via
    Gen_Species_File), loss_species_list, root_species, and
    species_list_plus_og_loss.  Returns the list of result-file names
    produced for this run.
    """
    # Collect each unique gene cat_file with its raw blast objects; several
    # clades can share one cat_file.
    list_catfiles = []
    list_of_lists_of_raw_blast_files = []
    for item in list_of_clades:
        catfile = item.cat_file
        list_of_raw_blast_files = item.blast_raw
        if catfile in list_catfiles:
            pass
        else:
            list_catfiles.append(catfile)
            list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)
    # Map each cat_file to its "acceptable species" list file, generating the
    # file once if it does not already exist on disk.
    cat_acc_dict = {}
    for i in range(len(list_catfiles)):
        item = list_catfiles[i]
        list_raws = list_of_lists_of_raw_blast_files[i]
        gsflist = item.split(".")
        gsf_a = gsflist[0]
        gsf_b = gsf_a.split("/")[-1]
        acc_file = gsf_b+"_Acc_List.txt"
        acc_exists = os.path.isfile(acc_file)
        if acc_exists is True:
            pass
        else:
            print("....initializing all_acceptables from gene_seq_query file: "+gsf_b+". this should only happen once...")
            acc_file = gen_acceptable_species_list(list_raws, acc_file)
        cat_acc_dict[item] = acc_file
    # Writes one species-list file per clade and sets clade.species_file /
    # clade.result as a side effect.
    list_of_species_files = Gen_Species_File(list_of_clades, projectname)
    # Only clades whose result file is not already on disk need re-running
    # (avoids repeating the slow taxonomy parse after a crash).
    list_to_tax_clades = []
    for item in list_of_clades:
        exists_result = os.path.isfile(item.result)
        if exists_result is False:
            list_to_tax_clades.append(item)
    # The correlation file maps array-task index -> (species file, clade
    # name, acceptable list, result file) for the sbatch array job.
    corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades, cat_acc_dict, projectname)
    n = len(list_to_tax_clades)
    script_name = projectname+"_OGLScript.sh"
    scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)
    all_files = []
    for item in cat_acc_dict.values():
        all_files.append(item)
    for item in list_of_species_files:
        all_files.append(item)
    all_files.append(scriptfile)
    all_files.append(corr_file_name)
    # Fixed: original used `len(results_list) is 0` (identity comparison on
    # an int), which is unreliable and a SyntaxWarning in modern Python.
    if len(results_list) == 0:
        pass
    else:
        Run_OG_LOSS_ON_CLUSTER(scriptfile,all_files, results_list)
    # Parse each clade's result file.  Line 1: "~"-separated loss
    # candidates; line 2: chosen root/outgroup species; line 3: timing.
    for item in list_of_clades:
        results_file = item.result
        loss_species = []
        print(item.string_name)
        with open(results_file) as res:
            a=0
            for line in res:
                if a == 0:
                    loss_species = line.strip()
                    loss_species = loss_species.split("~")
                    print("loss candidates")
                    if "" in loss_species:
                        loss_species.remove("")
                    if "\n" in loss_species:
                        loss_species.remove("\n")
                    item.loss_species_list = loss_species
                    print(loss_species)
                if a == 1:
                    root_species = line.strip()
                    item.root_species = root_species
                    print("root: "+root_species)
                if a == 2:
                    print("time:")
                    print(line)
                a += 1
        # NOTE(review): if a result file has fewer than two lines,
        # root_species is unbound (or stale from the previous clade) below
        # -- confirm the cluster job always writes both lines.
        # Extended species list = originals + loss candidates + root.
        item.species_list_plus_og_loss = []
        for thing in item.species_list_original:
            item.species_list_plus_og_loss.append(thing)
        if loss_species == []:
            pass
        else:
            for ls in loss_species:
                item.species_list_plus_og_loss.append(ls)
        if root_species == "":
            pass
        else:
            item.species_list_plus_og_loss.append(root_species)
    return results_list
def Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):
    """Write the correlation file used by the sbatch array job: one line per
    clade -- "index species_file string_name acc_file result".  Returns the
    correlation filename and the list of per-clade result filenames."""
    corr_file_name = "Corr_" + projectname + ".txt"
    results_list = []
    with open(corr_file_name, "w") as corr:
        for idx, clade in enumerate(list_of_clades, start=1):
            fields = [str(idx), clade.species_file, clade.string_name,
                      cat_acc_dict[clade.cat_file], clade.result]
            corr.write(" ".join(fields) + "\n")
            results_list.append(clade.result)
    return corr_file_name, results_list
def Generate_Script_File_OGLOSS(n, indexname, scriptname):
    """Write the sbatch array script (tasks 1..n) that runs
    Online_Taxon_Parse.py on each line of the correlation file `indexname`;
    return `scriptname`."""
    array_size = str(n)
    # The shell template is reproduced verbatim; only the array size and
    # index filename are spliced in.
    script_text = """#!/bin/bash
#SBATCH -p sched_mit_g4nier
#SBATCH -t 2-00:00:00
#SBATCH -J Tax
#SBATCH --array=1-""" + array_size + """
. /etc/profile.d/modules.sh
module add engaging/openmpi/1.8.8
MY_ARRAY_ID=$SLURM_ARRAY_TASK_ID
THE_INDEX=""" + indexname + """
SPECIES_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $2}' )
STRING_NAME=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $3}' )
ACC_FILE=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $4}' )
RESULT=$( cat $THE_INDEX | grep "^$MY_ARRAY_ID " | awk '{print $5}' )
echo $SPECIES_FILE
echo $STRING_NAME
echo $ACC_FILE
mpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT
exit"""
    with open(scriptname, "w") as handle:
        handle.write(script_text)
    return scriptname
def Gen_Species_File(list_of_clades, projectname):
    """Write one species-list file per clade and record it on the clade.

    For each clade, strips surrounding double quotes from the names in
    ``species_list_original``, writes them to ``<prefix>_Species_List.txt``
    via write_spc_list, and sets the clade's ``species_file`` and ``result``
    (``<prefix>_OGL_Result.txt``) attributes.

    ``projectname`` is accepted for interface consistency but unused here.

    Returns the list of species-list filenames, in clade order.
    """
    species_files = []
    for clade in list_of_clades:
        outfile = clade.prefix + "_Species_List.txt"
        # Species names may arrive wrapped in double quotes; drop them.
        cleaned = [name.strip('"') for name in clade.species_list_original]
        write_spc_list(cleaned, outfile)
        clade.species_file = outfile
        clade.result = clade.prefix + "_OGL_Result.txt"
        species_files.append(outfile)
    return species_files
|
flexible
|
{
"blob_id": "5c1324207e24f2d723be33175101102bd97fe7a2",
"index": 4860,
"step-1": "<mask token>\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\n<mask token>\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Str_To_Taxid(string, names_file):\n found = False\n string = string.replace('_', ' ')\n with open(names_file) as names:\n for line in names:\n if '\\t' + string + '\\t' in line:\n taxid_int = re.sub('(\\\\d*)(\\t\\\\|\\t)(' + string +\n ')(\\t)(.*)', '\\\\1', line)\n found = True\n break\n if found is False:\n print('Error finding string: ' + string + ' in file: ' + names_file)\n taxid_int = 'NA'\n return taxid_int\n\n\n<mask token>\n\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid + '\\t':\n apparent_rank = re.sub('(' + taxid +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\5', line)\n apparent_rank = apparent_rank.strip()\n if '\\t' in apparent_rank:\n pass\n else:\n return apparent_rank\n return 'NA'\n\n\n<mask token>\n\n\ndef Return_Parent(taxid, nodes_file):\n len_tax = len(taxid.strip())\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid.strip() + '\\t':\n parent_taxid = re.sub('(' + taxid.strip() +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\3', line)\n if '\\t' in parent_taxid:\n pass\n else:\n return parent_taxid\n print('error finding parent taxa')\n return 'NA'\n\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,\n names_file, acc_list):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n done = False\n saved_top_level = []\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n sis_spec_name 
= Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,\n acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n if i == maxi:\n return 'NA'\n done = True\n else:\n i += 1\n list_ch_remove = []\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return 'NA'\n\n\n<mask token>\n\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n for item in child_list_atup:\n child_list_a.append(item[0])\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n 
child_list_a.append(itup[0])\n return children\n\n\n<mask token>\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\n<mask token>\n\n\ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print('one og sequence choser initiating')\n if '_' in string:\n string = string.replace('_', ' ')\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print('Sisterlist')\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False\n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n if test == 'NA':\n pass\n else:\n print(test)\n return test\n while test == 'NA':\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file\n )\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,\n acc_list)\n if test 
!= 'NA':\n pass\n else:\n return test\n return test\n\n\n<mask token>\n\n\ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n result = next((True for item in acc_list if ssp_name in item), False)\n if result is True:\n print('Err in match spec name - gen list: ' + ssp_name + ' ' + item\n )\n return result\n\n\n<mask token>\n\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n names_list_acc = []\n numbers_list_acc = []\n for raw in list_raw_gene_fastas:\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind] + 1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n cutoff_num = len(list_raw_gene_fastas) / 2\n print(cutoff_num)\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n index += 1\n print(len(list_of_rem))\n list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name)\n return a\n\n\ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, 'w') as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item + '\\n')\n return acc_name\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):\n os.system(ssh_inst + \" 'mkdir Taxonomy'\")\n sb_script = script_name\n print(all_files)\n for item in all_files:\n os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')\n os.system(ssh_inst + \" 'cd ~/Taxonomy; sbatch \" + sb_script + \"'\")\n direct = os.getcwd()\n exists = 
False\n movehome = []\n finished = 'start'\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +\n direct)\n for item in all_result_files:\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = 'yes'\n else:\n finished = False\n print('Tax not done yet. could not locate : ' + item +\n 'checking again in 5 minutes')\n break\n if finished == 'yes':\n print('Should be done!')\n finished = True\n else:\n time.sleep(600)\n finished = 'yes'\n print('Taxonomy parsing complete')\n\n\ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split('.')\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split('/')[-1]\n acc_file = gsf_b + '_Acc_List.txt'\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n else:\n print(\n '....initializing all_acceptables from gene_seq_query file: ' +\n gsf_b + '. 
this should only happen once...')\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n cat_acc_dict[item] = acc_file\n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,\n cat_acc_dict, projectname)\n n = len(list_to_tax_clades)\n script_name = projectname + '_OGLScript.sh'\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n with open(results_file) as res:\n a = 0\n for line in res:\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split('~')\n print('loss candidates')\n if '' in loss_species:\n loss_species.remove('')\n if '\\n' in loss_species:\n loss_species.remove('\\n')\n item.loss_species_list = loss_species\n print(loss_species)\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print('root: ' + root_species)\n if a == 2:\n print('time:')\n print(line)\n a += 1\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n if root_species == '':\n pass\n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n\n\n<mask token>\n\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a 
= \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\" + n + \"\"\" \n\n. /etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\" + indexname + \"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, 'w') as script:\n script.write(a)\n return scriptname\n\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix + '_Species_List.txt'\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip('\"')\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix + '_OGL_Result.txt'\n return list_sp_files\n",
"step-3": "<mask token>\n\n\ndef Str_To_Taxid(string, names_file):\n found = False\n string = string.replace('_', ' ')\n with open(names_file) as names:\n for line in names:\n if '\\t' + string + '\\t' in line:\n taxid_int = re.sub('(\\\\d*)(\\t\\\\|\\t)(' + string +\n ')(\\t)(.*)', '\\\\1', line)\n found = True\n break\n if found is False:\n print('Error finding string: ' + string + ' in file: ' + names_file)\n taxid_int = 'NA'\n return taxid_int\n\n\n<mask token>\n\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid + '\\t':\n apparent_rank = re.sub('(' + taxid +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\5', line)\n apparent_rank = apparent_rank.strip()\n if '\\t' in apparent_rank:\n pass\n else:\n return apparent_rank\n return 'NA'\n\n\n<mask token>\n\n\ndef Return_Parent(taxid, nodes_file):\n len_tax = len(taxid.strip())\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid.strip() + '\\t':\n parent_taxid = re.sub('(' + taxid.strip() +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\3', line)\n if '\\t' in parent_taxid:\n pass\n else:\n return parent_taxid\n print('error finding parent taxa')\n return 'NA'\n\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,\n names_file, acc_list):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n done = False\n saved_top_level = []\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n sis_spec_name 
= Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,\n acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n if i == maxi:\n return 'NA'\n done = True\n else:\n i += 1\n list_ch_remove = []\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return 'NA'\n\n\n<mask token>\n\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n for item in child_list_atup:\n child_list_a.append(item[0])\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n 
child_list_a.append(itup[0])\n return children\n\n\ndef Ret_All_Groups_One_Rank_Below(taxid, nodes_file):\n taxid = taxid.strip()\n print('looking for taxid:' + str(taxid))\n rank = Get_Taxid_Rank(taxid, nodes_file)\n print(rank)\n target_rank = One_Rank_Lower(rank)\n if target_rank == 'NA':\n return 'NA'\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n garbage, remove_string = removal_ranks.split(target_rank)\n remove_rank_list = remove_string.split()\n children = []\n list_ch_remove = []\n child_list_a = Taxid_To_Children(taxid, nodes_file)\n done = False\n while done is False:\n for item in child_list_a:\n if item[1] == target_rank:\n children.append(item[0])\n list_ch_remove.append(item)\n if item[1] in remove_rank_list:\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_a.remove(rem)\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_b = Taxid_To_Children(parent[0], nodes_file)\n if child_list_b == []:\n pass\n else:\n for bitem in child_list_b:\n child_list_c.append(bitem)\n child_list_a = child_list_c\n return children\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\ndef Taxid_To_Name(taxid, names_file):\n found = False\n taxid = taxid.strip()\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(names_file) as names:\n for line in names:\n if line[:len_tax_t] == 
taxid + '\\t':\n name_wanted = re.sub(\n '(\\\\d*)(\\t\\\\|\\t)([^\\t]*)(\\t\\\\|\\t)(.*)(\\t\\\\|\\t)(scientific name)(.*)'\n , '\\\\3', line)\n if '\\t' in name_wanted:\n pass\n else:\n found = True\n break\n if found is False:\n print('Error finding name for: ' + taxid + ' in file: ' + names_file)\n name_wanted = 'NA'\n if found is True:\n name_wanted = name_wanted.strip()\n return name_wanted\n\n\ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print('one og sequence choser initiating')\n if '_' in string:\n string = string.replace('_', ' ')\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print('Sisterlist')\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False\n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n if test == 'NA':\n pass\n else:\n print(test)\n return test\n while test == 'NA':\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file\n )\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,\n acc_list)\n if test != 'NA':\n pass\n else:\n return test\n return test\n\n\ndef Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):\n print('Checking for reps... 
target rank is: ' + rank)\n list_of_correct_rank = []\n found = []\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n remove_string, garbage = removal_ranks.split(rank)\n remove_rank_list = remove_string.split()\n for species in species_list:\n nid = Str_To_Taxid(species, names_file)\n go = True\n while go is True:\n rp = Return_Parent(nid, nodes_file)\n if rp == 'NA':\n list_of_correct_rank.append(rp)\n go = False\n if rp.strip() == 1:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n par_rank = Get_Taxid_Rank(rp, nodes_file)\n if par_rank == rank:\n rp = rp.strip()\n list_of_correct_rank.append(rp)\n go = False\n elif par_rank in remove_rank_list:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n else:\n nid = rp\n print(tid_list)\n print(list_of_correct_rank)\n for item in tid_list:\n if item in list_of_correct_rank:\n a = tid_list.index(item)\n found.append(tid_list[a])\n return found\n\n\n<mask token>\n\n\ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n result = next((True for item in acc_list if ssp_name in item), False)\n if result is True:\n print('Err in match spec name - gen list: ' + ssp_name + ' ' + item\n )\n return result\n\n\n<mask token>\n\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n names_list_acc = []\n numbers_list_acc = []\n for raw in list_raw_gene_fastas:\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind] + 1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n cutoff_num = len(list_raw_gene_fastas) / 2\n print(cutoff_num)\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n index += 1\n print(len(list_of_rem))\n 
list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name)\n return a\n\n\ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, 'w') as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item + '\\n')\n return acc_name\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):\n os.system(ssh_inst + \" 'mkdir Taxonomy'\")\n sb_script = script_name\n print(all_files)\n for item in all_files:\n os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')\n os.system(ssh_inst + \" 'cd ~/Taxonomy; sbatch \" + sb_script + \"'\")\n direct = os.getcwd()\n exists = False\n movehome = []\n finished = 'start'\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +\n direct)\n for item in all_result_files:\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = 'yes'\n else:\n finished = False\n print('Tax not done yet. 
could not locate : ' + item +\n 'checking again in 5 minutes')\n break\n if finished == 'yes':\n print('Should be done!')\n finished = True\n else:\n time.sleep(600)\n finished = 'yes'\n print('Taxonomy parsing complete')\n\n\ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split('.')\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split('/')[-1]\n acc_file = gsf_b + '_Acc_List.txt'\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n else:\n print(\n '....initializing all_acceptables from gene_seq_query file: ' +\n gsf_b + '. 
this should only happen once...')\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n cat_acc_dict[item] = acc_file\n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,\n cat_acc_dict, projectname)\n n = len(list_to_tax_clades)\n script_name = projectname + '_OGLScript.sh'\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n with open(results_file) as res:\n a = 0\n for line in res:\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split('~')\n print('loss candidates')\n if '' in loss_species:\n loss_species.remove('')\n if '\\n' in loss_species:\n loss_species.remove('\\n')\n item.loss_species_list = loss_species\n print(loss_species)\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print('root: ' + root_species)\n if a == 2:\n print('time:')\n print(line)\n a += 1\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n if root_species == '':\n pass\n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n\n\n<mask token>\n\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a 
= \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\" + n + \"\"\" \n\n. /etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\" + indexname + \"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, 'w') as script:\n script.write(a)\n return scriptname\n\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix + '_Species_List.txt'\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip('\"')\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix + '_OGL_Result.txt'\n return list_sp_files\n",
"step-4": "<mask token>\n\n\ndef Str_To_Taxid(string, names_file):\n found = False\n string = string.replace('_', ' ')\n with open(names_file) as names:\n for line in names:\n if '\\t' + string + '\\t' in line:\n taxid_int = re.sub('(\\\\d*)(\\t\\\\|\\t)(' + string +\n ')(\\t)(.*)', '\\\\1', line)\n found = True\n break\n if found is False:\n print('Error finding string: ' + string + ' in file: ' + names_file)\n taxid_int = 'NA'\n return taxid_int\n\n\n<mask token>\n\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid + '\\t':\n apparent_rank = re.sub('(' + taxid +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\5', line)\n apparent_rank = apparent_rank.strip()\n if '\\t' in apparent_rank:\n pass\n else:\n return apparent_rank\n return 'NA'\n\n\ndef One_Rank_Lower(rank):\n print('looking one level lower than' + rank)\n if rank == 'species':\n print('is species!')\n return 'NA'\n ordered_str = (\n 'superkingdom kingdom phylum class order family genus species')\n ordered_list = ordered_str.split()\n if rank in ordered_list:\n pass\n elif rank == 'NA':\n return 'NA'\n else:\n print(rank + ' is weird')\n return 'NA'\n current = ordered_list.index(rank)\n lowindex = current + 1\n one_lower = ordered_list[lowindex]\n return one_lower\n\n\ndef Return_Parent(taxid, nodes_file):\n len_tax = len(taxid.strip())\n len_tax_t = len_tax + 1\n with open(nodes_file) as nodes:\n for line in nodes:\n if line[:len_tax_t] == taxid.strip() + '\\t':\n parent_taxid = re.sub('(' + taxid.strip() +\n ')(\\t\\\\|\\t)(\\\\d*)(\\t\\\\|\\t)([a-z]*)(.*)', '\\\\3', line)\n if '\\t' in parent_taxid:\n pass\n else:\n return parent_taxid\n print('error finding parent taxa')\n return 'NA'\n\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file,\n names_file, acc_list):\n children = []\n list_ch_remove = []\n 
child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n done = False\n saved_top_level = []\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n sis_spec_name = Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name,\n acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n if child_list_a == []:\n if i == maxi:\n return 'NA'\n done = True\n else:\n i += 1\n list_ch_remove = []\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return 'NA'\n\n\n<mask token>\n\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n for item in child_list_atup:\n child_list_a.append(item[0])\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == 'species':\n children.append(item[0])\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n 
child_list_a.remove(rem[0])\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n return children\n\n\ndef Ret_All_Groups_One_Rank_Below(taxid, nodes_file):\n taxid = taxid.strip()\n print('looking for taxid:' + str(taxid))\n rank = Get_Taxid_Rank(taxid, nodes_file)\n print(rank)\n target_rank = One_Rank_Lower(rank)\n if target_rank == 'NA':\n return 'NA'\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n garbage, remove_string = removal_ranks.split(target_rank)\n remove_rank_list = remove_string.split()\n children = []\n list_ch_remove = []\n child_list_a = Taxid_To_Children(taxid, nodes_file)\n done = False\n while done is False:\n for item in child_list_a:\n if item[1] == target_rank:\n children.append(item[0])\n list_ch_remove.append(item)\n if item[1] in remove_rank_list:\n list_ch_remove.append(item)\n for rem in list_ch_remove:\n child_list_a.remove(rem)\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_b = Taxid_To_Children(parent[0], nodes_file)\n if child_list_b == []:\n pass\n else:\n for bitem in child_list_b:\n child_list_c.append(bitem)\n child_list_a = child_list_c\n return children\n\n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, 
nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n\n\ndef Taxid_To_Name(taxid, names_file):\n found = False\n taxid = taxid.strip()\n len_tax = len(taxid)\n len_tax_t = len_tax + 1\n with open(names_file) as names:\n for line in names:\n if line[:len_tax_t] == taxid + '\\t':\n name_wanted = re.sub(\n '(\\\\d*)(\\t\\\\|\\t)([^\\t]*)(\\t\\\\|\\t)(.*)(\\t\\\\|\\t)(scientific name)(.*)'\n , '\\\\3', line)\n if '\\t' in name_wanted:\n pass\n else:\n found = True\n break\n if found is False:\n print('Error finding name for: ' + taxid + ' in file: ' + names_file)\n name_wanted = 'NA'\n if found is True:\n name_wanted = name_wanted.strip()\n return name_wanted\n\n\ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print('one og sequence choser initiating')\n if '_' in string:\n string = string.replace('_', ' ')\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print('Sisterlist')\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False\n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n if test == 'NA':\n pass\n else:\n print(test)\n return test\n while test == 'NA':\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, 
names_file\n )\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file,\n acc_list)\n if test != 'NA':\n pass\n else:\n return test\n return test\n\n\ndef Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):\n print('Checking for reps... target rank is: ' + rank)\n list_of_correct_rank = []\n found = []\n removal_ranks = (\n 'superkingdom kingdom phylum class order family genus species')\n remove_string, garbage = removal_ranks.split(rank)\n remove_rank_list = remove_string.split()\n for species in species_list:\n nid = Str_To_Taxid(species, names_file)\n go = True\n while go is True:\n rp = Return_Parent(nid, nodes_file)\n if rp == 'NA':\n list_of_correct_rank.append(rp)\n go = False\n if rp.strip() == 1:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n par_rank = Get_Taxid_Rank(rp, nodes_file)\n if par_rank == rank:\n rp = rp.strip()\n list_of_correct_rank.append(rp)\n go = False\n elif par_rank in remove_rank_list:\n rp = 'NA'\n list_of_correct_rank.append(rp)\n go = False\n else:\n nid = rp\n print(tid_list)\n print(list_of_correct_rank)\n for item in tid_list:\n if item in list_of_correct_rank:\n a = tid_list.index(item)\n found.append(tid_list[a])\n return found\n\n\ndef Choose_Loss_Candidates(string, species_list, names_file, acc_list,\n nodes_file):\n print('loss search initiating')\n if '_' in string:\n print(string)\n string = string.replace('_', ' ')\n print(string)\n taxid = Str_To_Taxid(string, names_file)\n sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)\n if sub_taxids == 'NA':\n print('Error getting loss candidates for string:' + string)\n return []\n subgroup_names = []\n for item in sub_taxids:\n subgroup_names.append(Taxid_To_Name(item, names_file))\n b = Get_Taxid_Rank(taxid, nodes_file)\n a = One_Rank_Lower(b)\n found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)\n 
print('Representatives already exist for:')\n found_names = []\n for foundtid in found:\n foundtid = foundtid.strip()\n index1 = sub_taxids.index(foundtid)\n found_names.append(subgroup_names.pop(index1))\n del sub_taxids[index1]\n print(found_names)\n print('Looking for one representative from each of the following:')\n print(subgroup_names)\n loss_list = []\n ite = 0\n for item in sub_taxids:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list\n )\n print(subgroup_names[ite] + ' : ' + test)\n ite += 1\n loss_list.append(test)\n continue\n print('Loss candidates will be added:')\n na = 0\n for item in loss_list:\n if item == 'NA':\n na += 1\n while 'NA' in loss_list:\n loss_list.remove('NA')\n print(loss_list)\n print('there were ' + str(na) + ' ' + a +\n 's that no suitable loss candidate was found for.')\n return loss_list\n\n\ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n result = next((True for item in acc_list if ssp_name in item), False)\n if result is True:\n print('Err in match spec name - gen list: ' + ssp_name + ' ' + item\n )\n return result\n\n\n<mask token>\n\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n names_list_acc = []\n numbers_list_acc = []\n for raw in list_raw_gene_fastas:\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind] + 1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n cutoff_num = len(list_raw_gene_fastas) / 2\n print(cutoff_num)\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n index += 1\n print(len(list_of_rem))\n list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name)\n 
return a\n\n\ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, 'w') as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item + '\\n')\n return acc_name\n\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, 'w') as spc_list_file:\n for item in spc_list:\n if '_' in item:\n dash_sep = item.split('_')\n item = dash_sep[0] + '_' + dash_sep[1]\n spc_list_file.write(item + '\\n')\n return spcname\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name, all_files, all_result_files):\n os.system(ssh_inst + \" 'mkdir Taxonomy'\")\n sb_script = script_name\n print(all_files)\n for item in all_files:\n os.system('scp ' + item + ' ' + clus_head + 'Taxonomy')\n os.system(ssh_inst + \" 'cd ~/Taxonomy; sbatch \" + sb_script + \"'\")\n direct = os.getcwd()\n exists = False\n movehome = []\n finished = 'start'\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system('scp ' + clus_head + 'Taxonomy/' + filename + ' ' +\n direct)\n for item in all_result_files:\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = 'yes'\n else:\n finished = False\n print('Tax not done yet. 
could not locate : ' + item +\n 'checking again in 5 minutes')\n break\n if finished == 'yes':\n print('Should be done!')\n finished = True\n else:\n time.sleep(600)\n finished = 'yes'\n print('Taxonomy parsing complete')\n\n\ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split('.')\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split('/')[-1]\n acc_file = gsf_b + '_Acc_List.txt'\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n else:\n print(\n '....initializing all_acceptables from gene_seq_query file: ' +\n gsf_b + '. 
this should only happen once...')\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n cat_acc_dict[item] = acc_file\n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades,\n cat_acc_dict, projectname)\n n = len(list_to_tax_clades)\n script_name = projectname + '_OGLScript.sh'\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile, all_files, results_list)\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n with open(results_file) as res:\n a = 0\n for line in res:\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split('~')\n print('loss candidates')\n if '' in loss_species:\n loss_species.remove('')\n if '\\n' in loss_species:\n loss_species.remove('\\n')\n item.loss_species_list = loss_species\n print(loss_species)\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print('root: ' + root_species)\n if a == 2:\n print('time:')\n print(line)\n a += 1\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n if root_species == '':\n pass\n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n\n\ndef Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):\n corr_file_name = 
'Corr_' + projectname + '.txt'\n results_list = []\n with open(corr_file_name, 'w') as corr:\n for n in range(len(list_of_clades)):\n corr.write(str(n + 1) + ' ' + list_of_clades[n].species_file +\n ' ' + list_of_clades[n].string_name + ' ' + cat_acc_dict[\n list_of_clades[n].cat_file] + ' ' + list_of_clades[n].\n result + '\\n')\n results_list.append(list_of_clades[n].result)\n return corr_file_name, results_list\n\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a = \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\" + n + \"\"\" \n\n. /etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\" + indexname + \"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, 'w') as script:\n script.write(a)\n return scriptname\n\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix + '_Species_List.txt'\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip('\"')\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix + '_OGL_Result.txt'\n return list_sp_files\n",
"step-5": "# #!/usr/bin/python\n\n# last edit abigailc@Actaeon on jan 27 2017\n\n#pulling the taxonomy functions out of makespeciestree because I need to make them faster...\n#insects is running for literally >20 hours.\n\n\nnames_file = \"/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/names.dmp\"\nnodes_file = \"/Users/abigailc/Documents/Taxonomy_Stuff/taxdump/nodes.dmp\"\n\n\n\n######### PERSONAL_SETTINGS #########\nssh_inst = \"ssh -l abigailc -i ~/.ssh/id_rsa eofe4.mit.edu\"\nclus_head = \"abigailc@eofe4.mit.edu:/home/abigailc/\"\nPath_Blast = \"/Users/abigailc/blast/\"\n\nimport os\nimport re\nimport time\nimport sys\n#from oxy_mods.Classes_DTL_Detector import Fasta\n\n#BASIC OPERATIONS\ndef Str_To_Taxid(string, names_file):\n #init done\n #turns a string to its taxon id NCBI\n #this is easier than expected. just open names.dmp and find the first hit. format:\n found = False\n #print(\"strtotaxid\")\n #print(string+\" str to taxid\")\n string = string.replace(\"_\", \" \")\n #print(string)\n with open (names_file) as names:\n for line in names:\n \n if \"\\t\"+string+\"\\t\" in line:\n #print(\"got:\"+line)\n taxid_int = re.sub (\"(\\d*)(\\t\\|\\t)(\"+string+\")(\\t)(.*)\", \"\\\\1\", line)\n found = True\n break\n if found is False:\n print(\"Error finding string: \"+string+\" in file: \"+names_file)\n taxid_int = \"NA\"\n return taxid_int\n\ndef Taxid_To_Children(taxid, nodes_file):\n\n #goes one level deeper. 
finds all taxids that list the given taxid as \"parent\", returns as a list\n childlist = []\n child_rank_list = []\n with open (nodes_file) as nodes:\n for line in nodes:\n if \"\\t\"+taxid+\"\\t\" in line:\n #print(\"gotcha\")\n #print(line)\n #the thing matches, do the re.sub.\n #includes the tab bc otherwise taxid 12 would match 12, 123, 12345355, etc\n baby_taxid_rank = re.sub(\"(\\d*)(\\t\\|\\t)(\"+taxid+\")(\\t\\|\\t)([a-z]*)(.*)\", \"\\\\1~\\\\5\", line)\n if \"\\t\" in baby_taxid_rank:\n #this happens if the re.sub does not occur - eg if \\ttaxid\\t occured somewhere in the line other than where it should've. \n pass\n else:\n baby_taxid, baby_rank = baby_taxid_rank.split(\"~\")\n #add to list of bbys\n baby_taxid = baby_taxid.strip()\n baby_rank = baby_rank.strip()\n childlist.append(baby_taxid)\n child_rank_list.append((baby_taxid, baby_rank))\n return child_rank_list\n\ndef Get_Taxid_Rank(taxid, nodes_file):\n taxid = taxid.strip()\n ranklist = []\n len_tax = len(taxid)\n len_tax_t = len_tax+1\n #given taxid = 100, len_tax = 3, len_tax_t = 5\n with open (nodes_file) as nodes:\n for line in nodes:\n #print(line[:len_tax_t])\n #print(taxid+\"\\t\")\n if line[:len_tax_t] == taxid+\"\\t\":\n #the thing matches, do the re.sub.\n #includes the tab bc otherwise taxid 12 would match 12, 123, 12345355, etc\n apparent_rank = re.sub(\"(\"+taxid+\")(\\t\\|\\t)(\\d*)(\\t\\|\\t)([a-z]*)(.*)\", \"\\\\5\", line)\n apparent_rank = apparent_rank.strip()\n if \"\\t\" in apparent_rank:\n pass\n else:\n return apparent_rank\n return \"NA\"\n #returns the rank (eg, \"order\" of a taxid\")\n\ndef One_Rank_Lower(rank):\n print(\"looking one level lower than\"+rank)\n if rank == \"species\":\n print(\"is species!\")\n return \"NA\"\n ordered_str = \"superkingdom kingdom phylum class order family genus species\"\n ordered_list = ordered_str.split()\n if rank in ordered_list:\n pass\n elif rank == \"NA\":\n return \"NA\"\n else:\n print(rank+\" is weird\")\n return \"NA\"\n 
current = ordered_list.index(rank)\n lowindex = current + 1\n one_lower = ordered_list[lowindex]\n return one_lower\n \n #given phylum, returns class. given class, return order. etc.\n\n# rank = \"class\"\n# string = \"cyanobacteria\"\n# taxid = \"12345\"\n\ndef Return_Parent(taxid, nodes_file):\n #eg for a given rank taxid, find it's up-one-level (not rank) taxid, and return it.\n len_tax = len(taxid.strip())\n len_tax_t = len_tax+1\n #given taxid = 100, len_tax = 3, len_tax_t = 5\n #print(\"searching for one level above taxid:\"+str(taxid))\n #print(\"tiud: \"+taxid)\n with open (nodes_file) as nodes:\n for line in nodes:\n #print(taxid.strip()+\"\\t\")\n #print(line[:len_tax_t])\n if line[:len_tax_t] == taxid.strip()+\"\\t\":\n \n # print(\"got: \"+line)\n #the thing matches, do the re.sub.\n #includes the tab bc otherwise taxid 12 would match 12, 123, 12345355, etc\n parent_taxid = re.sub(\"(\"+taxid.strip()+\")(\\t\\|\\t)(\\d*)(\\t\\|\\t)([a-z]*)(.*)\", \"\\\\3\", line)\n #print(parent_taxid)\n if \"\\t\" in parent_taxid:\n pass\n else:\n return parent_taxid\n print(\"error finding parent taxa\")\n return(\"NA\")\n\n\n#COMPLEX OPERATIONS\n\ndef Ret_A_Valid_Species_Below_LESS_EFFICIENTLY(taxid, nodes_file, names_file, acc_list):\n children = []\n list_ch_remove = []\n child_list_a = []\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n #this is a list of children TAXIDS ONLY\n\n #print(\"initial pass\")\n #print(child_list_atup)\n #print(child_list_a)\n done = False\n saved_top_level = []\n #we're going to do one at a time, so save all, and load them one-by-one.\n for itema in child_list_atup:\n saved_top_level.append(itema)\n maxi = len(saved_top_level)\n # print(\"maxi: \"+str(maxi))\n atup = saved_top_level[0]\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n for item in child_list_atup:\n child_list_a.append(item[0])\n i = 1\n #also lets implement a 
saved second level... for further spe.\n while done is False:\n for item in child_list_atup:\n if item[1] == \"species\":\n #add the taxid to the list of species_level_children\n children.append(item[0])\n sis_spec_name = Taxid_To_Name(item[0], names_file)\n if sis_spec_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(sis_spec_name, acc_list)\n if in_blast is True:\n return sis_spec_name\n list_ch_remove.append(item)\n #remove taxids that were saved at the species level\n #print(list_ch_remove)\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n #if all tips have terminated at the species level: you are done.\n if child_list_a == []:\n if i == maxi:\n #print(\"found none\")\n return \"NA\"\n done = True\n else:\n i += 1\n #print(i)\n list_ch_remove = []\n \n atup = saved_top_level[0]\n #print(atup)\n saved_top_level.remove(atup)\n child_list_atup = [atup]\n #print(child_list_atup)\n for item in child_list_atup:\n child_list_a.append(item[0])\n continue\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n #print(\"New parent list:\")\n #print(child_list_atup)\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n #print(child_list_a)\n #children is a list of all species-level TAXIDS that belong to the given group. 
\n return \"NA\"\n\n#WHY ARE THERE TWO OF THESE???????\ndef Ret_A_Valid_Species_Below(taxid, nodes_file, names_file, acc_list):\n masterlist = []\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n complete = False\n masterlist.append([(taxid, \"starter\")])\n while complete is False:\n #print(masterlist)\n if masterlist == []:\n return(\"NA\")\n #now lookat is the last member of the last list in masterlist.\n now_list = masterlist[-1]\n if now_list == []:\n while [] in masterlist: \n masterlist.remove([])\n if masterlist == []:\n return(\"NA\")\n now_list = masterlist[-1]\n #lookat first member of that list.\n now_tup = now_list[0]\n now_taxid, now_rank = now_tup[0], now_tup[1]\n #see if its a species\n if now_rank == \"species\":\n now_list.remove(now_tup)\n now_name = Taxid_To_Name(now_taxid, names_file)\n if now_name[0].islower() is False:\n in_blast = Check_Spec_Name_Acceptable_List(now_name,acc_list)\n if in_blast is True:\n #now_name is a species_name\n return now_name\n #check if now_tup is valid. 
if so, return.\n else:\n now_list.remove(now_tup)\n #generate a new list - of the descendents of this one.\n newlist = Taxid_To_Children(now_taxid, nodes_file)\n #print(newlist)\n if newlist == \"NA\":\n pass\n else:\n #add it to masterlist.\n masterlist.append(newlist)\n return(\"Uh, what?\")\n\ndef Ret_All_Species_Below_Less_Efficiently(taxid, nodes_file):\n children = []\n list_ch_remove = []\n child_list_a = []\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n child_list_atup = Taxid_To_Children(taxid, nodes_file)\n #this is a list of children TAXIDS ONLY\n for item in child_list_atup:\n child_list_a.append(item[0])\n #print(\"initial pass\")\n #print(child_list_atup)\n #print(child_list_a)\n done = False\n while done is False:\n for item in child_list_atup:\n if item[1] == \"species\":\n #add the taxid to the list of species_level_children\n children.append(item[0])\n list_ch_remove.append(item)\n #remove taxids that were saved at the species level\n for rem in list_ch_remove:\n child_list_atup.remove(rem)\n child_list_a.remove(rem[0])\n #if all tips have terminated at the species level: you are done.\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n #for remaining non-species level taxids in lista:\n # -get their children (listb)\n # -add their children to a persistant list(listc)\n # -then set lista(the list to check and remove species-level-entries) to be == listc.\n for parent in child_list_a:\n child_list_btup = Taxid_To_Children(parent, nodes_file)\n for item in child_list_btup:\n child_list_b.append(item[0])\n if child_list_btup == []:\n pass\n else:\n for bitem in child_list_btup:\n child_list_c.append(bitem)\n child_list_atup = child_list_c\n #print(\"New parent list:\")\n #print(child_list_atup)\n child_list_a = []\n for itup in child_list_atup:\n child_list_a.append(itup[0])\n #print(child_list_a)\n #children is a list of all species-level TAXIDS that 
belong to the given group. \n return children\n\n\ndef Ret_All_Groups_One_Rank_Below(taxid, nodes_file):\n taxid = taxid.strip()\n print(\"looking for taxid:\"+str(taxid))\n rank = Get_Taxid_Rank(taxid, nodes_file)\n print(rank)\n #raise SystemExit\n target_rank = One_Rank_Lower(rank)\n if target_rank == \"NA\":\n return(\"NA\")\n removal_ranks = \"superkingdom kingdom phylum class order family genus species\"\n garbage, remove_string = removal_ranks.split(target_rank)\n remove_rank_list = remove_string.split()\n children = []\n list_ch_remove = []\n #print(remove_rank_list)\n #this is a list of children : [ [child_taxid, child_rank], [child2_taxid, child2_rank] ] \n child_list_a = Taxid_To_Children(taxid, nodes_file)\n done = False\n while done is False:\n for item in child_list_a:\n if item[1] == target_rank:\n #add the taxid to the list of species_level_children\n children.append(item[0])\n list_ch_remove.append(item)\n if item[1] in remove_rank_list:\n list_ch_remove.append(item)\n #remove taxids that were saved at the species level\n for rem in list_ch_remove:\n child_list_a.remove(rem)\n #if all tips have terminated at the target species level: you are done.\n if child_list_a == []:\n done = True\n list_ch_remove = []\n child_list_b = []\n child_list_c = []\n #for remaining non-species level taxids in lista:\n # -get their children (listb)\n # -add their children to a persistant list(listc)\n # -then set lista(the list to check and remove species-level-entries) to be == listc.\n for parent in child_list_a:\n child_list_b = Taxid_To_Children(parent[0], nodes_file)\n if child_list_b == []:\n pass\n else:\n for bitem in child_list_b:\n child_list_c.append(bitem)\n child_list_a = child_list_c\n #print(child_list_a)\n #children is a list of all ONE-RANK-BELOW level TAXIDS that belong to the given group. \n return children\n #runs until all children are found of one rank below. 
eg (CLASS -> [order1, order 2, order3, order 4)\n #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. hopefully checking that we have data for the chosen species.\n\n \n\ndef Ret_Sister_Same_Rank(string, nodes_file, names_file):\n #from str rank - get current taxid, go up one level, then get all descendents in a list, remove the current taxid, and return the resulting sister list\n print(string)\n interest_taxid = Str_To_Taxid(string, names_file)\n print(interest_taxid)\n up_taxid = Return_Parent(interest_taxid, nodes_file)\n\n up_taxid = up_taxid.strip()\n interest_taxid = interest_taxid.strip()\n sis_self_tuples = Taxid_To_Children(up_taxid, nodes_file)\n sister_and_self = []\n for tup in sis_self_tuples:\n sister_and_self.append(tup[0])\n #sis_and_self is a list of TAXIDS ONLY\n print(sister_and_self)\n print(interest_taxid)\n sister_and_self.remove(interest_taxid)\n sisterlist = sister_and_self\n print(sisterlist)\n return sisterlist\n#sisterlist will be a list of taxids for the sister clades to the current thing. 
by level, not by rank.\n#todo = implement something to redo if sisterlist is empty.\n\ndef Taxid_To_Name(taxid, names_file):\n #this needs to be the backwards version of Str to Taxid.\n found = False\n taxid = taxid.strip()\n len_tax = len(taxid)\n len_tax_t = len_tax+1\n with open (names_file) as names:\n for line in names:\n if line[:len_tax_t] == taxid+\"\\t\":\n # print(\"got here\")\n name_wanted = re.sub (\"(\\d*)(\\t\\|\\t)([^\\t]*)(\\t\\|\\t)(.*)(\\t\\|\\t)(scientific name)(.*)\", \"\\\\3\", line)\n if \"\\t\" in name_wanted:\n pass\n else:\n found = True\n break\n if found is False:\n print(\"Error finding name for: \"+taxid+\" in file: \"+names_file)\n name_wanted = \"NA\"\n if found is True:\n #print(name_wanted)\n name_wanted = name_wanted.strip()\n return name_wanted\n \ndef Choose_One_OG_Seq(string, species_list, names_file, acc_list, nodes_file):\n print(\"one og sequence choser initiating\")\n if \"_\" in string:\n string = string.replace(\"_\", \" \")\n sislist = Ret_Sister_Same_Rank(string, nodes_file, names_file)\n print(\"Sisterlist\")\n print(sislist)\n if sislist == []:\n go = True\n else:\n go = False \n my_taxid = Str_To_Taxid(string, names_file)\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n #spec_sis_list = Ret_All_Species_Below(item, nodes_file)\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)\n if test == \"NA\":\n pass\n else:\n print(test)\n return test\n #if test == \"None\":\n # return \"None\"\n #if nothing in the first level sister list is a valid hit, keep moving up the tree until you get one.\n\n while test == \"NA\":\n sislist = []\n go = True\n if my_taxid == 1:\n break\n while go is True:\n parent_of_me_taxid = Return_Parent(my_taxid, 
nodes_file)\n parent_of_me = Taxid_To_Name(parent_of_me_taxid, names_file)\n sislist = Ret_Sister_Same_Rank(parent_of_me, nodes_file, names_file)\n my_taxid = parent_of_me_taxid\n if sislist == []:\n pass\n else:\n go = False\n for item in sislist:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)\n if test != \"NA\":\n pass\n else:\n return test\n return test\n \n #print (spec_sis_list)\n #for sis_spec_taxid in spec_sis_list:\n # sis_spec_name = Taxid_To_Name(sis_spec_taxid, names_file)\n # in_blast = Check_Spec_Name_Blast_File(sis_spec_name, blast_file)\n # if in_blast is True:\n # print(\"Outgroup sequence chosen:\"+sis_spec_name)\n # return sis_spec_name\n\n \n\n #double break so we only keep ONE sequence.\n #go all the way down the first one until you get a species-level entry.\n #check if the species-level entry is found in your .blast file (if that is where we are implementing this??? )\n #if not, continue... check each species-level thing you find.\n #this would then need to be included in make_species_trees... and only called if the request is sent directly from Parser_blah_master.\ndef Check_If_We_Have_A_Rep_Already(species_list, tid_list, rank):\n print(\"Checking for reps... 
target rank is: \"+rank)\n list_of_correct_rank = []\n found = []\n removal_ranks = \"superkingdom kingdom phylum class order family genus species\"\n remove_string, garbage = removal_ranks.split(rank)\n remove_rank_list = remove_string.split()\n for species in species_list:\n nid = Str_To_Taxid(species, names_file)\n #go up the ladder\n go = True\n while go is True:\n #get parent taxid\n rp = Return_Parent(nid, nodes_file)\n #if its 1, we're done.\n if rp == \"NA\":\n list_of_correct_rank.append(rp)\n go = False\n if rp.strip() == 1:\n rp = \"NA\"\n list_of_correct_rank.append(rp)\n go = False\n #get rank for that new taxid\n par_rank = Get_Taxid_Rank(rp, nodes_file)\n #if it's what we want it to be, add to list.\n if par_rank == rank:\n rp = rp.strip()\n list_of_correct_rank.append(rp)\n go = False\n #if its a step too high, terminate - we went too far somehow\n elif par_rank in remove_rank_list:\n rp = \"NA\"\n list_of_correct_rank.append(rp)\n go = False\n #else, go up another level and test that one!\n else:\n nid = rp\n print(tid_list)\n print(list_of_correct_rank)\n for item in tid_list:\n if item in list_of_correct_rank:\n a = tid_list.index(item)\n found.append(tid_list[a])\n return found\n\n#@blast_file should actually be a list of raw_blast_FASTA objects\ndef Choose_Loss_Candidates(string, species_list, names_file, acc_list, nodes_file):\n print(\"loss search initiating\")\n if \"_\" in string:\n print(string)\n string = string.replace(\"_\", \" \")\n print(string)\n taxid = Str_To_Taxid(string, names_file)\n #for checking loss candidates, i will want to 1) run this 2) run a species_level_children generation for each member of the output list. 3) chose one member of each of those output lists to go in the species tree. 
hopefully checking that we have data for the chosen species.\n sub_taxids = Ret_All_Groups_One_Rank_Below(taxid, nodes_file)\n if sub_taxids == \"NA\":\n print(\"Error getting loss candidates for string:\"+string)\n return([])\n subgroup_names = []\n for item in sub_taxids:\n subgroup_names.append(Taxid_To_Name(item, names_file))\n b = Get_Taxid_Rank(taxid, nodes_file)\n a = One_Rank_Lower(b) \n found = Check_If_We_Have_A_Rep_Already(species_list, sub_taxids, a)\n print(\"Representatives already exist for:\")\n found_names = []\n for foundtid in found:\n foundtid = foundtid.strip()\n index1 = sub_taxids.index(foundtid)\n found_names.append(subgroup_names.pop(index1))\n del sub_taxids[index1]\n print(found_names)\n print(\"Looking for one representative from each of the following:\")\n print(subgroup_names)\n loss_list = []\n ite = 0\n # #first check if it is in the output loss list.\n # for item in sub_taxids:\n # with open(saved_loss_candidates) as saved:\n # for line in saved:\n # if item in line:\n # #newthing will be a species name.\n # newthing = re.sub(\"(\"item\")(\\t)(.*)\", \"\\\\3\", line))\n # loss_list.append(newthing)\n # found2.append(item)\n # break\n #remove those found from file from the search list.\n # for item in found2:\n # sub_taxids.pop(item)\n for item in sub_taxids:\n test = Ret_A_Valid_Species_Below(item, nodes_file, names_file, acc_list)\n #print(test)\n print(subgroup_names[ite]+\" : \"+test)\n ite+=1\n loss_list.append(test)\n continue\n print(\"Loss candidates will be added:\")\n na = 0\n for item in loss_list:\n if item == \"NA\":\n na +=1\n while \"NA\" in loss_list: loss_list.remove(\"NA\")\n \n print(loss_list)\n print(\"there were \"+str(na)+\" \"+a+\"s that no suitable loss candidate was found for.\")\n return loss_list\n #either one per next-level-down\n #or one per next-rank-down\n \ndef Check_Spec_Name_Acceptable_List(ssp_name, acc_list):\n if ssp_name in acc_list:\n return True\n else:\n \n result = next((True for item in 
acc_list if ssp_name in item), False)\n if result is True:\n print(\"Err in match spec name - gen list: \"+ ssp_name +\" \"+ item)\n return result\n\n \n \ndef Check_Spec_Name_Blast_File(ssp_name, blast_fasta_list):\n lf = (len(blast_fasta_list))\n half = lf/2\n yes = 0\n att = 0\n #print(\"Checking :\"+ssp_name)\n ssp_name = ssp_name.replace(\" \", \"_\")\n ssp_name = ssp_name.strip()\n for current_blast in blast_fasta_list:\n att += 1\n if att > 6:\n if yes < att/3:\n return False\n if ssp_name in current_blast.species_names:\n \n yes += 1\n continue\n else:\n \n #print(ssp_name)\n #print(current_blast.species_names[0])\n for spec in current_blast.species_names:\n if ssp_name in spec:\n yes +=1\n break\n continue\n #print(yes)\n #print(half)\n if yes > half:\n #print(\"validated: \"+ssp_name)\n return True\n else:\n return False\n\ndef gen_acceptable_species_list(list_raw_gene_fastas, acc_name):\n #this is printing an empty file. why?\n names_list_acc = []\n numbers_list_acc = []\n \n for raw in list_raw_gene_fastas:\n #do they have species lists?\n raw.gen_species_lists()\n raw_sl = raw.species_names\n print(raw_sl[0])\n for rawsp in raw_sl:\n if rawsp in names_list_acc:\n ind = names_list_acc.index(rawsp)\n numbers_list_acc[ind] = numbers_list_acc[ind]+1\n else:\n names_list_acc.append(rawsp)\n numbers_list_acc.append(1)\n #the numbers list can specify a cut off that is necesary for the thing being acceptable\n #for now let's be consistant and use 1/2 of lsit of raw fastas?\n cutoff_num = (len(list_raw_gene_fastas)/2)\n print(cutoff_num)\n #this will be 15 currently. might be .5 sometimes.\n list_of_rem = []\n index = 0\n for n in numbers_list_acc:\n if n > cutoff_num:\n #means that we dont care if its a decimal or not. 1 will pass .5\n pass\n else:\n list_of_rem.append(names_list_acc[index])\n #add the index to be removed to a list. 
index into names and numbers should be identicle\n index +=1\n print(len(list_of_rem))\n list_of_rem.sort(reverse=True)\n for remove_me in list_of_rem:\n #uhhhhh i think we need to sort the numbers so removal of the largest number happens first so as to not fuck up list order.\n #sorting now. should be good.\n names_list_acc.remove(remove_me)\n a = write_acc_list(names_list_acc, acc_name) \n return a\n \ndef write_acc_list(acc_list, acc_name):\n with open(acc_name, \"w\") as acc_list_file:\n for item in acc_list:\n acc_list_file.write(item+\"\\n\")\n return acc_name\n\ndef write_spc_list(spc_list, spcname):\n with open(spcname, \"w\") as spc_list_file:\n for item in spc_list:\n #stripiing strain data from this version of the species_list such that it will \n if \"_\" in item:\n dash_sep = item.split(\"_\")\n item = dash_sep[0]+\"_\"+dash_sep[1]\n spc_list_file.write(item+\"\\n\")\n return spcname\n\n#parser stuff\n\n\ndef Run_OG_LOSS_ON_CLUSTER(script_name,all_files, all_result_files):\n #here acc list is the name of the acc_list_current_file\n #auto gen an sbatch script\n os.system(ssh_inst+\" \\'mkdir Taxonomy\\'\")\n sb_script = script_name\n #scp it over\n \n print(all_files)\n for item in all_files:\n os.system(\"scp \"+item+\" \"+clus_head+\"Taxonomy\")\n #run it\n\n #edit the script on the cluster to deal with my mistakes\n\n os.system(ssh_inst+\" 'cd ~/Taxonomy; sbatch \"+sb_script+\"'\")\n #scp it back and verify\n direct = os.getcwd()\n exists = False\n #now it should exist locally\n movehome = []\n finished = \"start\"\n #bring home the d\n for i in all_result_files:\n movehome.append(i)\n while finished is not True:\n for filename in movehome:\n os.system(\"scp \"+clus_head+\"Taxonomy/\"+filename+\" \"+direct)\n for item in all_result_files:\n #see if it got moved home.\n exists = os.path.isfile(item)\n if exists is True:\n if item in movehome:\n movehome.remove(item)\n finished = \"yes\"\n else:\n finished = False\n print(\"Tax not done yet. 
could not locate : \"+item+\"checking again in 5 minutes\")\n break\n if finished == \"yes\":\n print(\"Should be done!\")\n finished = True\n else:\n #wait ten minutes and then try again.\n time.sleep(600)\n finished = \"yes\"\n#TEMPORARILY REMOVED result file deletion from the cluster to make testing progress faster.\n #for item in all_result_files:\n # os.system(ssh_inst+\" 'cd ~/Taxonomy; rm \"+item+\"'\")\n #for item in all_files:\n # os.system(ssh_inst+\" 'cd ~/Taxonomy; rm \"+item+\"'\")\n print(\"Taxonomy parsing complete\")\n #remove the script and the og loss file from cluster\n\n\n\n\n \ndef Get_OG_LOSS_DATA(list_of_clades, projectname):\n #the acceptable list should be a list of taxa that are present in at least 50% (?) of the blast hit files for the genes given.\n\n #get all gene-query-files to look at\n list_catfiles = []\n list_of_lists_of_raw_blast_files = []\n for item in list_of_clades:\n catfile = item.cat_file\n list_of_raw_blast_files = item.blast_raw\n if catfile in list_catfiles:\n pass\n else:\n list_catfiles.append(catfile)\n list_of_lists_of_raw_blast_files.append(list_of_raw_blast_files)\n cat_acc_dict = {}\n\n #for each, create an acceptable list output name\n for i in range(len(list_catfiles)):\n item = list_catfiles[i]\n list_raws = list_of_lists_of_raw_blast_files[i]\n gsflist = item.split(\".\")\n gsf_a = gsflist[0]\n gsf_b = gsf_a.split(\"/\")[-1]\n acc_file = gsf_b+\"_Acc_List.txt\"\n #print(\"Looking for loss-candidates and a rooting sequence to add....\")\n acc_exists = os.path.isfile(acc_file)\n if acc_exists is True:\n pass\n #if not already done, actually make the output acceptable list.\n else:\n print(\"....initializing all_acceptables from gene_seq_query file: \"+gsf_b+\". this should only happen once...\")\n #generate it\n #should be passing in A LIST OF ALL THE BLAST_FILES ASSOCIATED WITH THE GENE. eg the things in Raw_Blasts that were consulted.\n #are these stored in each subtree? 
should pass a list of fasta objects.\n #ist_raw_objects = []\n #rint(list_raws)\n #or raw in list_raws:\n # print(raw.name)\n\n acc_file = gen_acceptable_species_list(list_raws, acc_file)\n #this is returning \"NONE\" which is super not okay.\n cat_acc_dict[item] = acc_file\n \n list_of_species_files = Gen_Species_File(list_of_clades, projectname)\n\n #check if we already ran the taxonomy and have data downloaded. (this is mostly for while fixing errors; i keep getting stuck at this point & ity is a waste of time to re-run the taxonomy parser.\n list_to_tax_clades = []\n for item in list_of_clades:\n exists_result = os.path.isfile(item.result)\n if exists_result is False:\n list_to_tax_clades.append(item)\n #sets species_file and result to each subtree.\n corr_file_name, results_list = Generate_Cat_File_OGLOSS(list_to_tax_clades, cat_acc_dict, projectname)\n #makes the correlation file.\n #for each clade, generate a species_list, result name, acc_file_name, string_name and print them all to a corr.file\n n = len(list_to_tax_clades)\n #gen the script\n script_name = projectname+\"_OGLScript.sh\"\n scriptfile = Generate_Script_File_OGLOSS(n, corr_file_name, script_name)\n all_files = []\n for item in cat_acc_dict.values():\n all_files.append(item)\n for item in list_of_species_files:\n all_files.append(item)\n all_files.append(scriptfile)\n all_files.append(corr_file_name)\n if len(results_list) is 0:\n pass\n else:\n Run_OG_LOSS_ON_CLUSTER(scriptfile,all_files, results_list)\n \n #run the script\n\n #add loss_species, root_species to each subtree as a value and also add them to the species_list going forward.\n for item in list_of_clades:\n results_file = item.result\n loss_species = []\n print(item.string_name)\n #open the file and get loss and species results.\n with open(results_file) as res:\n # print(\"opened\")\n a=0\n for line in res:\n #get loss results\n if a == 0:\n loss_species = line.strip()\n loss_species = loss_species.split(\"~\")\n print(\"loss 
candidates\")\n if \"\" in loss_species:\n loss_species.remove (\"\")\n if \"\\n\" in loss_species:\n loss_species.remove(\"\\n\")\n item.loss_species_list = loss_species \n print(loss_species)\n #get root results\n if a == 1:\n root_species = line.strip()\n item.root_species = root_species\n print(\"root: \"+root_species)\n #get how long it took\n if a == 2:\n print(\"time:\")\n print(line)\n a += 1\n #if no loss, do nothing\n\n item.species_list_plus_og_loss = []\n for thing in item.species_list_original:\n item.species_list_plus_og_loss.append(thing)\n if loss_species == []:\n pass\n #else, add them to the species list, and also track them(?)\n else:\n for ls in loss_species:\n item.species_list_plus_og_loss.append(ls)\n \n \n if root_species == \"\":\n pass\n \n else:\n item.species_list_plus_og_loss.append(root_species)\n return results_list\n# os.system(\"rm \"+results_file)\n\n #done\n\ndef Generate_Cat_File_OGLOSS(list_of_clades, cat_acc_dict, projectname):\n \n corr_file_name = \"Corr_\"+projectname+\".txt\"\n results_list = []\n with open(corr_file_name, \"w\") as corr:\n for n in range(len(list_of_clades)):\n corr.write(str(n+1)+\" \"+list_of_clades[n].species_file+\" \"+list_of_clades[n].string_name+\" \"+cat_acc_dict[list_of_clades[n].cat_file]+\" \"+list_of_clades[n].result+\"\\n\")\n results_list.append(list_of_clades[n].result)\n return corr_file_name, results_list\n\ndef Generate_Script_File_OGLOSS(n, indexname, scriptname):\n n = str(n)\n a = \"\"\"#!/bin/bash \n#SBATCH -p sched_mit_g4nier \n#SBATCH -t 2-00:00:00 \n#SBATCH -J Tax\n \n#SBATCH --array=1-\"\"\"+n+\"\"\" \n\n. 
/etc/profile.d/modules.sh\nmodule add engaging/openmpi/1.8.8\n\nMY_ARRAY_ID=$SLURM_ARRAY_TASK_ID\nTHE_INDEX=\"\"\"+indexname+\"\"\"\nSPECIES_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $2}' )\nSTRING_NAME=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $3}' )\nACC_FILE=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $4}' )\nRESULT=$( cat $THE_INDEX | grep \"^$MY_ARRAY_ID \" | awk '{print $5}' )\n\necho $SPECIES_FILE\necho $STRING_NAME\necho $ACC_FILE\n\nmpirun python Online_Taxon_Parse.py -s $SPECIES_FILE -g $STRING_NAME -b $ACC_FILE -n $RESULT\n\nexit\"\"\"\n with open(scriptname, \"w\") as script:\n script.write(a)\n return scriptname\n\ndef Gen_Species_File(list_of_clades, projectname):\n list_sp_files = []\n for item in list_of_clades:\n species_list = item.species_list_original\n species_file_name = item.prefix+\"_Species_List.txt\"\n species_list2 = []\n for sl2 in species_list:\n sl2 = sl2.strip(\"\\\"\")\n species_list2.append(sl2)\n spc_file = write_spc_list(species_list2, species_file_name)\n item.species_file = species_file_name\n list_sp_files.append(species_file_name)\n item.result = item.prefix+\"_OGL_Result.txt\"\n return list_sp_files\n\n",
"step-ids": [
2,
15,
18,
21,
27
]
}
|
[
2,
15,
18,
21,
27
] |
<|reserved_special_token_0|>
@app.route('/unlock')
def web_unlock():
if not (request.args.get('token') and request.args.get('state')):
return 'Error'
else:
with shelve.open('Settings.conf') as settings:
if 'token' in settings:
token = settings['token']
else:
return 'System not setup !'
if request.args.get('token') != token:
return 'Invalid Token'
if request.args.get('state') == 'open':
lock.unlock()
elif request.args.get('state') == 'close':
lock.lock()
elif request.args.get('state') == 'switch':
lock.switch()
else:
return 'Invalid State'
return 'Done'
@app.route('/state')
def web_state():
return str(lock.state())
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/unlock')
def web_unlock():
if not (request.args.get('token') and request.args.get('state')):
return 'Error'
else:
with shelve.open('Settings.conf') as settings:
if 'token' in settings:
token = settings['token']
else:
return 'System not setup !'
if request.args.get('token') != token:
return 'Invalid Token'
if request.args.get('state') == 'open':
lock.unlock()
elif request.args.get('state') == 'close':
lock.lock()
elif request.args.get('state') == 'switch':
lock.switch()
else:
return 'Invalid State'
return 'Done'
@app.route('/state')
def web_state():
return str(lock.state())
if __name__ == '__main__':
app.run(debug=True, port=5000, host='0.0.0.0')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/unlock')
def web_unlock():
if not (request.args.get('token') and request.args.get('state')):
return 'Error'
else:
with shelve.open('Settings.conf') as settings:
if 'token' in settings:
token = settings['token']
else:
return 'System not setup !'
if request.args.get('token') != token:
return 'Invalid Token'
if request.args.get('state') == 'open':
lock.unlock()
elif request.args.get('state') == 'close':
lock.lock()
elif request.args.get('state') == 'switch':
lock.switch()
else:
return 'Invalid State'
return 'Done'
@app.route('/state')
def web_state():
return str(lock.state())
if __name__ == '__main__':
app.run(debug=True, port=5000, host='0.0.0.0')
<|reserved_special_token_1|>
from flask import request, Flask
import lock, shelve
app = Flask(__name__)
@app.route('/unlock')
def web_unlock():
if not (request.args.get('token') and request.args.get('state')):
return 'Error'
else:
with shelve.open('Settings.conf') as settings:
if 'token' in settings:
token = settings['token']
else:
return 'System not setup !'
if request.args.get('token') != token:
return 'Invalid Token'
if request.args.get('state') == 'open':
lock.unlock()
elif request.args.get('state') == 'close':
lock.lock()
elif request.args.get('state') == 'switch':
lock.switch()
else:
return 'Invalid State'
return 'Done'
@app.route('/state')
def web_state():
return str(lock.state())
if __name__ == '__main__':
app.run(debug=True, port=5000, host='0.0.0.0')
<|reserved_special_token_1|>
from flask import request, Flask
import lock, shelve
app = Flask(__name__)
@app.route("/unlock")
def web_unlock():
if not (request.args.get("token") and request.args.get("state")):
return "Error"
else:
with shelve.open("Settings.conf") as settings:
if "token" in settings:
token = settings["token"]
else:
return "System not setup !"
if request.args.get("token") != token:
return "Invalid Token"
if request.args.get("state") == "open":
lock.unlock()
elif request.args.get("state") == "close":
lock.lock()
elif request.args.get("state") == "switch":
lock.switch()
else:
return "Invalid State"
return "Done"
@app.route("/state")
def web_state():
return str(lock.state())
if __name__ == "__main__":
app.run(debug=True, port=5000, host="0.0.0.0")
|
flexible
|
{
"blob_id": "ee0f90b84df73ae5783ca0b8a52fe6fe9c979f15",
"index": 2576,
"step-1": "<mask token>\n\n\n@app.route('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\n@app.route('/state')\ndef web_state():\n return str(lock.state())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\n@app.route('/state')\ndef web_state():\n return str(lock.state())\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\n@app.route('/state')\ndef web_state():\n return str(lock.state())\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n",
"step-4": "from flask import request, Flask\nimport lock, shelve\napp = Flask(__name__)\n\n\n@app.route('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\n@app.route('/state')\ndef web_state():\n return str(lock.state())\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n",
"step-5": "from flask import request, Flask\nimport lock, shelve\n\napp = Flask(__name__)\n@app.route(\"/unlock\")\ndef web_unlock():\n if not (request.args.get(\"token\") and request.args.get(\"state\")):\n return \"Error\"\n else:\n with shelve.open(\"Settings.conf\") as settings:\n if \"token\" in settings:\n token = settings[\"token\"]\n else:\n return \"System not setup !\"\n if request.args.get(\"token\") != token:\n return \"Invalid Token\"\n if request.args.get(\"state\") == \"open\":\n lock.unlock()\n elif request.args.get(\"state\") == \"close\":\n lock.lock()\n elif request.args.get(\"state\") == \"switch\":\n lock.switch()\n else:\n return \"Invalid State\"\n return \"Done\"\n\n@app.route(\"/state\")\ndef web_state():\n return str(lock.state())\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=5000, host=\"0.0.0.0\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#! /usr/bin/python
# -*- coding: utf8 -*-
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import unittest
from pyama.filereader import FileReader,Segment
class TestFileReader(unittest.TestCase):
def test_reads_file(self):
reader = FileReader(
"sample_reader_test.txt",
regexes=[(r'name="(\w+)"', 'END SEGMENT'),
(r'\s*\*\s*START\s*(\w+)', 'END SEGMENT'),
(r"PYTHON\s+SEGMENT\s+(\w[\w\d_]*)", None)]
)
file = reader.read()
self.assertEqual(7, len(file.segments))
self.assertEqual('0', file.segments[0].name)
self.assertEqual(2, len(file.segments[0].text))
self.assertEqual('segmentOne', file.segments[1].name)
self.assertEqual(3, len(file.segments[1].text))
self.assertEqual('1', file.segments[2].name)
self.assertEqual(1, len(file.segments[2].text))
self.assertEqual('anotherSegment', file.segments[3].name)
self.assertEqual(6, len(file.segments[3].text))
self.assertEqual('2', file.segments[4].name)
self.assertEqual(2, len(file.segments[4].text))
self.assertEqual('python_segment', file.segments[5].name)
self.assertEqual(4, len(file.segments[5].text))
self.assertEqual('python_segment', file.segments[6].name)
self.assertEqual(3, len(file.segments[6].text))
def test_analyses_parameters(self):
segment = Segment("name","file name")
line = """ SNIPPET START A=B B=13 K='ha mi' ZIG="ZA G" WITH hami -> "mami" """
FileReader("whatnot",["onces"]).analyze_parameters(line,segment)
self.assertEqual(segment.parameters["A"],"B")
self.assertEqual(segment.parameters["B"],"13")
self.assertEqual(segment.parameters["K"],"ha mi")
self.assertEqual(segment.parameters["ZIG"],"ZA G")
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "92dc0bd3cfcddd98f99d8152d0221f047beb4fb0",
"index": 9492,
"step-1": "<mask token>\n\n\nclass TestFileReader(unittest.TestCase):\n\n def test_reads_file(self):\n reader = FileReader('sample_reader_test.txt', regexes=[(\n 'name=\"(\\\\w+)\"', 'END SEGMENT'), ('\\\\s*\\\\*\\\\s*START\\\\s*(\\\\w+)',\n 'END SEGMENT'), ('PYTHON\\\\s+SEGMENT\\\\s+(\\\\w[\\\\w\\\\d_]*)', None)])\n file = reader.read()\n self.assertEqual(7, len(file.segments))\n self.assertEqual('0', file.segments[0].name)\n self.assertEqual(2, len(file.segments[0].text))\n self.assertEqual('segmentOne', file.segments[1].name)\n self.assertEqual(3, len(file.segments[1].text))\n self.assertEqual('1', file.segments[2].name)\n self.assertEqual(1, len(file.segments[2].text))\n self.assertEqual('anotherSegment', file.segments[3].name)\n self.assertEqual(6, len(file.segments[3].text))\n self.assertEqual('2', file.segments[4].name)\n self.assertEqual(2, len(file.segments[4].text))\n self.assertEqual('python_segment', file.segments[5].name)\n self.assertEqual(4, len(file.segments[5].text))\n self.assertEqual('python_segment', file.segments[6].name)\n self.assertEqual(3, len(file.segments[6].text))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestFileReader(unittest.TestCase):\n\n def test_reads_file(self):\n reader = FileReader('sample_reader_test.txt', regexes=[(\n 'name=\"(\\\\w+)\"', 'END SEGMENT'), ('\\\\s*\\\\*\\\\s*START\\\\s*(\\\\w+)',\n 'END SEGMENT'), ('PYTHON\\\\s+SEGMENT\\\\s+(\\\\w[\\\\w\\\\d_]*)', None)])\n file = reader.read()\n self.assertEqual(7, len(file.segments))\n self.assertEqual('0', file.segments[0].name)\n self.assertEqual(2, len(file.segments[0].text))\n self.assertEqual('segmentOne', file.segments[1].name)\n self.assertEqual(3, len(file.segments[1].text))\n self.assertEqual('1', file.segments[2].name)\n self.assertEqual(1, len(file.segments[2].text))\n self.assertEqual('anotherSegment', file.segments[3].name)\n self.assertEqual(6, len(file.segments[3].text))\n self.assertEqual('2', file.segments[4].name)\n self.assertEqual(2, len(file.segments[4].text))\n self.assertEqual('python_segment', file.segments[5].name)\n self.assertEqual(4, len(file.segments[5].text))\n self.assertEqual('python_segment', file.segments[6].name)\n self.assertEqual(3, len(file.segments[6].text))\n\n def test_analyses_parameters(self):\n segment = Segment('name', 'file name')\n line = (\n ' SNIPPET START A=B B=13 K=\\'ha mi\\' ZIG=\"ZA G\" WITH hami -> \"mami\" '\n )\n FileReader('whatnot', ['onces']).analyze_parameters(line, segment)\n self.assertEqual(segment.parameters['A'], 'B')\n self.assertEqual(segment.parameters['B'], '13')\n self.assertEqual(segment.parameters['K'], 'ha mi')\n self.assertEqual(segment.parameters['ZIG'], 'ZA G')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestFileReader(unittest.TestCase):\n\n def test_reads_file(self):\n reader = FileReader('sample_reader_test.txt', regexes=[(\n 'name=\"(\\\\w+)\"', 'END SEGMENT'), ('\\\\s*\\\\*\\\\s*START\\\\s*(\\\\w+)',\n 'END SEGMENT'), ('PYTHON\\\\s+SEGMENT\\\\s+(\\\\w[\\\\w\\\\d_]*)', None)])\n file = reader.read()\n self.assertEqual(7, len(file.segments))\n self.assertEqual('0', file.segments[0].name)\n self.assertEqual(2, len(file.segments[0].text))\n self.assertEqual('segmentOne', file.segments[1].name)\n self.assertEqual(3, len(file.segments[1].text))\n self.assertEqual('1', file.segments[2].name)\n self.assertEqual(1, len(file.segments[2].text))\n self.assertEqual('anotherSegment', file.segments[3].name)\n self.assertEqual(6, len(file.segments[3].text))\n self.assertEqual('2', file.segments[4].name)\n self.assertEqual(2, len(file.segments[4].text))\n self.assertEqual('python_segment', file.segments[5].name)\n self.assertEqual(4, len(file.segments[5].text))\n self.assertEqual('python_segment', file.segments[6].name)\n self.assertEqual(3, len(file.segments[6].text))\n\n def test_analyses_parameters(self):\n segment = Segment('name', 'file name')\n line = (\n ' SNIPPET START A=B B=13 K=\\'ha mi\\' ZIG=\"ZA G\" WITH hami -> \"mami\" '\n )\n FileReader('whatnot', ['onces']).analyze_parameters(line, segment)\n self.assertEqual(segment.parameters['A'], 'B')\n self.assertEqual(segment.parameters['B'], '13')\n self.assertEqual(segment.parameters['K'], 'ha mi')\n self.assertEqual(segment.parameters['ZIG'], 'ZA G')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom pyama.filereader import FileReader, Segment\n\n\nclass TestFileReader(unittest.TestCase):\n\n def test_reads_file(self):\n reader = FileReader('sample_reader_test.txt', regexes=[(\n 'name=\"(\\\\w+)\"', 'END SEGMENT'), ('\\\\s*\\\\*\\\\s*START\\\\s*(\\\\w+)',\n 'END SEGMENT'), ('PYTHON\\\\s+SEGMENT\\\\s+(\\\\w[\\\\w\\\\d_]*)', None)])\n file = reader.read()\n self.assertEqual(7, len(file.segments))\n self.assertEqual('0', file.segments[0].name)\n self.assertEqual(2, len(file.segments[0].text))\n self.assertEqual('segmentOne', file.segments[1].name)\n self.assertEqual(3, len(file.segments[1].text))\n self.assertEqual('1', file.segments[2].name)\n self.assertEqual(1, len(file.segments[2].text))\n self.assertEqual('anotherSegment', file.segments[3].name)\n self.assertEqual(6, len(file.segments[3].text))\n self.assertEqual('2', file.segments[4].name)\n self.assertEqual(2, len(file.segments[4].text))\n self.assertEqual('python_segment', file.segments[5].name)\n self.assertEqual(4, len(file.segments[5].text))\n self.assertEqual('python_segment', file.segments[6].name)\n self.assertEqual(3, len(file.segments[6].text))\n\n def test_analyses_parameters(self):\n segment = Segment('name', 'file name')\n line = (\n ' SNIPPET START A=B B=13 K=\\'ha mi\\' ZIG=\"ZA G\" WITH hami -> \"mami\" '\n )\n FileReader('whatnot', ['onces']).analyze_parameters(line, segment)\n self.assertEqual(segment.parameters['A'], 'B')\n self.assertEqual(segment.parameters['B'], '13')\n self.assertEqual(segment.parameters['K'], 'ha mi')\n self.assertEqual(segment.parameters['ZIG'], 'ZA G')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#! /usr/bin/python\n# -*- coding: utf8 -*-\n# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4\nimport unittest\n\nfrom pyama.filereader import FileReader,Segment\n\n\nclass TestFileReader(unittest.TestCase):\n def test_reads_file(self):\n reader = FileReader(\n \"sample_reader_test.txt\",\n regexes=[(r'name=\"(\\w+)\"', 'END SEGMENT'),\n (r'\\s*\\*\\s*START\\s*(\\w+)', 'END SEGMENT'),\n (r\"PYTHON\\s+SEGMENT\\s+(\\w[\\w\\d_]*)\", None)]\n )\n file = reader.read()\n self.assertEqual(7, len(file.segments))\n self.assertEqual('0', file.segments[0].name)\n self.assertEqual(2, len(file.segments[0].text))\n self.assertEqual('segmentOne', file.segments[1].name)\n self.assertEqual(3, len(file.segments[1].text))\n self.assertEqual('1', file.segments[2].name)\n self.assertEqual(1, len(file.segments[2].text))\n self.assertEqual('anotherSegment', file.segments[3].name)\n self.assertEqual(6, len(file.segments[3].text))\n self.assertEqual('2', file.segments[4].name)\n self.assertEqual(2, len(file.segments[4].text))\n self.assertEqual('python_segment', file.segments[5].name)\n self.assertEqual(4, len(file.segments[5].text))\n self.assertEqual('python_segment', file.segments[6].name)\n self.assertEqual(3, len(file.segments[6].text))\n\n def test_analyses_parameters(self):\n segment = Segment(\"name\",\"file name\")\n line = \"\"\" SNIPPET START A=B B=13 K='ha mi' ZIG=\"ZA G\" WITH hami -> \"mami\" \"\"\"\n FileReader(\"whatnot\",[\"onces\"]).analyze_parameters(line,segment)\n self.assertEqual(segment.parameters[\"A\"],\"B\")\n self.assertEqual(segment.parameters[\"B\"],\"13\")\n self.assertEqual(segment.parameters[\"K\"],\"ha mi\")\n self.assertEqual(segment.parameters[\"ZIG\"],\"ZA G\")\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from mf_app import db
from mf_app.models import User
db.create_all()
#test input data
admin = User('admin', 'admin@admin.com', 'admin')
guest = User('guest', 'guest@guest.com', 'guest')
db.session.add(admin)
db.session.add(guest)
db.session.commit()
users = User.query.all()
print(users)
|
normal
|
{
"blob_id": "99c2bd56deccc327faf659e91fc1fd0f6ff7a219",
"index": 3932,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.create_all()\n<mask token>\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\n<mask token>\nprint(users)\n",
"step-3": "<mask token>\ndb.create_all()\nadmin = User('admin', 'admin@admin.com', 'admin')\nguest = User('guest', 'guest@guest.com', 'guest')\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\nusers = User.query.all()\nprint(users)\n",
"step-4": "from mf_app import db\nfrom mf_app.models import User\ndb.create_all()\nadmin = User('admin', 'admin@admin.com', 'admin')\nguest = User('guest', 'guest@guest.com', 'guest')\ndb.session.add(admin)\ndb.session.add(guest)\ndb.session.commit()\nusers = User.query.all()\nprint(users)\n",
"step-5": "from mf_app import db\nfrom mf_app.models import User\n\ndb.create_all()\n\n#test input data\nadmin = User('admin', 'admin@admin.com', 'admin')\nguest = User('guest', 'guest@guest.com', 'guest')\n\ndb.session.add(admin)\ndb.session.add(guest)\n\ndb.session.commit()\n\nusers = User.query.all()\nprint(users)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in range(int(input())):
n = int(input())
xco, yco = [], []
flagx, flagy, xans, yans = 1, 1, 0, 0
for x in range(4 * n - 1):
x, y = input().split()
xco.append(int(x))
yco.append(int(y))
xco.sort(), yco.sort()
xco.append(xco[-1] + 1), yco.append(yco[-1] + 1)
countx, county, i = 1, 1, 0
while i < len(xco) - 1:
if flagx == 1:
if xco[i] == xco[i + 1]:
countx += 1
else:
if countx % 2 != 0:
xans = xco[i]
flagx = 0
countx = 1
if flagy == 1:
if yco[i] == yco[i + 1]:
county += 1
else:
if county % 2 != 0:
yans = yco[i]
flagy = 0
county = 1
if flagx == 0 and flagy == 0:
break
i = i + 1
print(xans, yans, end=' ')
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 3 18:27:30 2020
@author: PREET MODH
"""
for _ in range(int(input())):
n=int(input())
xco,yco=[],[]
flagx,flagy,xans,yans=1,1,0,0
for x in range(4*n-1):
x,y=input().split()
xco.append(int(x))
yco.append(int(y))
xco.sort(),yco.sort()
xco.append(xco[-1]+1),yco.append(yco[-1]+1)
countx,county,i=1,1,0
while(i<len(xco)-1):
if flagx==1:
if xco[i]==xco[i+1]:
countx+=1
else:
if countx%2!=0:
xans=xco[i]
flagx=0
countx=1
if flagy==1:
if yco[i]==yco[i+1]:
county+=1
else:
if county%2!=0:
yans=yco[i]
flagy=0
county=1
if flagx==0 and flagy==0:
break
i=i+1
print(xans,yans,end=' ')
|
flexible
|
{
"blob_id": "d3b0a1d8b9f800c5d34732f4701ea2183405e5b4",
"index": 9523,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(int(input())):\n n = int(input())\n xco, yco = [], []\n flagx, flagy, xans, yans = 1, 1, 0, 0\n for x in range(4 * n - 1):\n x, y = input().split()\n xco.append(int(x))\n yco.append(int(y))\n xco.sort(), yco.sort()\n xco.append(xco[-1] + 1), yco.append(yco[-1] + 1)\n countx, county, i = 1, 1, 0\n while i < len(xco) - 1:\n if flagx == 1:\n if xco[i] == xco[i + 1]:\n countx += 1\n else:\n if countx % 2 != 0:\n xans = xco[i]\n flagx = 0\n countx = 1\n if flagy == 1:\n if yco[i] == yco[i + 1]:\n county += 1\n else:\n if county % 2 != 0:\n yans = yco[i]\n flagy = 0\n county = 1\n if flagx == 0 and flagy == 0:\n break\n i = i + 1\n print(xans, yans, end=' ')\n",
"step-3": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 3 18:27:30 2020\r\n\r\n@author: PREET MODH\r\n\"\"\"\r\n\r\n\r\nfor _ in range(int(input())):\r\n n=int(input())\r\n xco,yco=[],[]\r\n flagx,flagy,xans,yans=1,1,0,0\r\n for x in range(4*n-1):\r\n x,y=input().split()\r\n xco.append(int(x))\r\n yco.append(int(y))\r\n \r\n xco.sort(),yco.sort()\r\n xco.append(xco[-1]+1),yco.append(yco[-1]+1)\r\n \r\n countx,county,i=1,1,0\r\n while(i<len(xco)-1):\r\n if flagx==1:\r\n if xco[i]==xco[i+1]:\r\n countx+=1\r\n else:\r\n if countx%2!=0:\r\n xans=xco[i]\r\n flagx=0\r\n countx=1\r\n if flagy==1:\r\n if yco[i]==yco[i+1]:\r\n county+=1\r\n else:\r\n if county%2!=0:\r\n yans=yco[i]\r\n flagy=0\r\n county=1\r\n if flagx==0 and flagy==0:\r\n break\r\n i=i+1\r\n print(xans,yans,end=' ')\r\n ",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/songsong/image_transport_ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic".split(';') if "/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic" != "" else []
|
normal
|
{
"blob_id": "86ca94820c05b3f63f4a733b6d1fa7eb9dea6a5d",
"index": 325,
"step-1": "<mask token>\n",
"step-2": "source_root_dir = '/home/songsong/image_transport_ws/src'\nwhitelisted_packages = ''.split(';') if '' != '' else []\nblacklisted_packages = ''.split(';') if '' != '' else []\nunderlay_workspaces = (\n '/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic'\n .split(';') if \n '/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic'\n != '' else [])\n",
"step-3": "# generated from catkin/cmake/template/order_packages.context.py.in\nsource_root_dir = \"/home/songsong/image_transport_ws/src\"\nwhitelisted_packages = \"\".split(';') if \"\" != \"\" else []\nblacklisted_packages = \"\".split(';') if \"\" != \"\" else []\nunderlay_workspaces = \"/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic\".split(';') if \"/home/songsong/image_transport_ws/devel;/home/songsong/pibot_ros/ros_ws/devel;/home/songsong/catkin_ws/devel;/opt/ros/kinetic\" != \"\" else []\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from guet.commands.strategies.strategy import CommandStrategy
class TooManyArgsStrategy(CommandStrategy):
def apply(self):
print('Too many arguments.')
|
normal
|
{
"blob_id": "afd72ce2d9598f92937f3038eb0ef49b740b9977",
"index": 6846,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TooManyArgsStrategy(CommandStrategy):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TooManyArgsStrategy(CommandStrategy):\n\n def apply(self):\n print('Too many arguments.')\n",
"step-4": "from guet.commands.strategies.strategy import CommandStrategy\n\n\nclass TooManyArgsStrategy(CommandStrategy):\n\n def apply(self):\n print('Too many arguments.')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def shiftZeroesToEnd(myArray):
zeroCounter = 0
shiftedArray = []
for item in myArray:
if (str(item) == '0' or str(item) == '0.0') and type(item) is not str:
zeroCounter += 1
else:
shiftedArray.append(item)
zeroStore = [(0) for i in range(zeroCounter)]
shiftedArray.extend(zeroStore)
return shiftedArray
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def shiftZeroesToEnd(myArray):
zeroCounter = 0
shiftedArray = []
for item in myArray:
if (str(item) == '0' or str(item) == '0.0') and type(item) is not str:
zeroCounter += 1
else:
shiftedArray.append(item)
zeroStore = [(0) for i in range(zeroCounter)]
shiftedArray.extend(zeroStore)
return shiftedArray
print(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, 'b', 'a']))
print(shiftZeroesToEnd([0.0, 23, -3, False, 'xxx', 0, 112, True, 9]))
<|reserved_special_token_1|>
#1.25.2019 - shashi
#Program that accepts an array of different elements
#And moves all the integer 0s to the end of it. String 0s like "0" or "0.0" remain untouched.
def shiftZeroesToEnd(myArray): #function starts here
zeroCounter = 0 #counter to keep track of how many 0s exist.
shiftedArray = [] #array to hold final output
for item in myArray: #loop through each item in array
if (str(item) == "0" or str(item) == "0.0") and type(item) is not str:
zeroCounter += 1 #if numeric string found, incremenet zero counter
else:
shiftedArray.append(item) #else add item from original list as is (same position)
#end of loop
zeroStore = [0 for i in range(zeroCounter)] #declare an array of 0s of the size of zeroCounter
shiftedArray.extend(zeroStore) #append it to final output list (adds it to the end)
return shiftedArray #return final output back
#testing function
print(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, "b", "a"]))
print(shiftZeroesToEnd([0.0, 23, -3, False, "xxx", 0, 112, True , 9]))
|
flexible
|
{
"blob_id": "4a9c42727a28e19cf1eebcf72784b85bbae695bf",
"index": 3429,
"step-1": "<mask token>\n",
"step-2": "def shiftZeroesToEnd(myArray):\n zeroCounter = 0\n shiftedArray = []\n for item in myArray:\n if (str(item) == '0' or str(item) == '0.0') and type(item) is not str:\n zeroCounter += 1\n else:\n shiftedArray.append(item)\n zeroStore = [(0) for i in range(zeroCounter)]\n shiftedArray.extend(zeroStore)\n return shiftedArray\n\n\n<mask token>\n",
"step-3": "def shiftZeroesToEnd(myArray):\n zeroCounter = 0\n shiftedArray = []\n for item in myArray:\n if (str(item) == '0' or str(item) == '0.0') and type(item) is not str:\n zeroCounter += 1\n else:\n shiftedArray.append(item)\n zeroStore = [(0) for i in range(zeroCounter)]\n shiftedArray.extend(zeroStore)\n return shiftedArray\n\n\nprint(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, 'b', 'a']))\nprint(shiftZeroesToEnd([0.0, 23, -3, False, 'xxx', 0, 112, True, 9]))\n",
"step-4": "#1.25.2019 - shashi\n#Program that accepts an array of different elements\n#And moves all the integer 0s to the end of it. String 0s like \"0\" or \"0.0\" remain untouched.\n\n\ndef shiftZeroesToEnd(myArray): #function starts here\n zeroCounter = 0 #counter to keep track of how many 0s exist.\n shiftedArray = [] #array to hold final output\n \n for item in myArray: #loop through each item in array\n if (str(item) == \"0\" or str(item) == \"0.0\") and type(item) is not str:\n zeroCounter += 1 #if numeric string found, incremenet zero counter\n else:\n shiftedArray.append(item) #else add item from original list as is (same position)\n #end of loop \n zeroStore = [0 for i in range(zeroCounter)] #declare an array of 0s of the size of zeroCounter\n shiftedArray.extend(zeroStore) #append it to final output list (adds it to the end)\n \n return shiftedArray #return final output back\n\n#testing function\nprint(shiftZeroesToEnd([True, 3, 3, 0, 23, 0, 112, \"b\", \"a\"]))\nprint(shiftZeroesToEnd([0.0, 23, -3, False, \"xxx\", 0, 112, True , 9]))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def parse_detail_for_one_course(page, course, no_info_course):
print(f'{course["name"]} is processing**: {course["url"]}')
map = {"Locatie": "location",
"Location": "location",
"Startdatum": "effective_start_date",
"Start date": "effective_start_date",
"Duur": "duration_desc",
"Wekelijkse studie": "duration_desc",
"Expensive": "duration_desc",
"Colleges": "consecutive_desc",
"Languages": "languages",
"Languages ": "languages",
"Talen": "languages",
"Fee": "price",
"Fee ": "price",
"Fairy ": "price",
"Weekly study": "second_duration_desc",
"Accreditations ": "third_duration_desc",
"Investering": "price"}
info = {"location": "",
"effective_start_date": "",
"duration_desc": "",
"consecutive_desc": "",
"languages": "",
"price": "",
"second_duration_desc": "",
"third_duration_desc": ""}
info_div = page.find('div', attrs={"class": "program-general-info"})
info_sessions = None
if info_div:
info_sessions = info_div.find_all('div', attrs={"class": "info-item"})
if not info_sessions:
print(f'-------{course["url"]} not div')
no_info_course.append(course)
elif info_sessions:
for info_session in info_sessions:
try:
label = info_session.find('label')
label_text = label.text.strip()
info_attr = map.get(label_text, '')
if "Wekeli" in label_text:
info_attr = "duration_desc"
elif "Permanente educatie" in label_text:
continue
elif "Accreditaties" in label_text:
continue
elif "Deadline voor aanmelding" in label_text:
continue
res = info_session.find('div')
res_text = res.text.strip()
info[info_attr] = res_text
except Exception as e:
print(f'{course["url"]} has problem of {e}')
continue
# print(title)
detail = {**course, **info}
# pprint(detail)
return detail
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/collegereeks-excellent-leiderschap")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/behavioral-and-cultural-governance")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/advanced-management-program")
# page = requests.get("https://www.nyenrode.nl/opleidingen/p/mba-thesis")
# course = {"name": "",
# "url": ""}
# page = page.text
# page = bs4.BeautifulSoup(page, 'html.parser')
#
# detail = get_detail_for_one_course(page, course, [])
# pprint(detail)
|
normal
|
{
"blob_id": "0f4fa9f8835ae22032af9faa6c7cb10af3facd79",
"index": 5389,
"step-1": "<mask token>\n",
"step-2": "def parse_detail_for_one_course(page, course, no_info_course):\n print(f\"{course['name']} is processing**: {course['url']}\")\n map = {'Locatie': 'location', 'Location': 'location', 'Startdatum':\n 'effective_start_date', 'Start date': 'effective_start_date',\n 'Duur': 'duration_desc', 'Wekelijkse studie': 'duration_desc',\n 'Expensive': 'duration_desc', 'Colleges': 'consecutive_desc',\n 'Languages': 'languages', 'Languages ': 'languages', 'Talen':\n 'languages', 'Fee': 'price', 'Fee ': 'price', 'Fairy ': 'price',\n 'Weekly study': 'second_duration_desc', 'Accreditations ':\n 'third_duration_desc', 'Investering': 'price'}\n info = {'location': '', 'effective_start_date': '', 'duration_desc': '',\n 'consecutive_desc': '', 'languages': '', 'price': '',\n 'second_duration_desc': '', 'third_duration_desc': ''}\n info_div = page.find('div', attrs={'class': 'program-general-info'})\n info_sessions = None\n if info_div:\n info_sessions = info_div.find_all('div', attrs={'class': 'info-item'})\n if not info_sessions:\n print(f\"-------{course['url']} not div\")\n no_info_course.append(course)\n elif info_sessions:\n for info_session in info_sessions:\n try:\n label = info_session.find('label')\n label_text = label.text.strip()\n info_attr = map.get(label_text, '')\n if 'Wekeli' in label_text:\n info_attr = 'duration_desc'\n elif 'Permanente educatie' in label_text:\n continue\n elif 'Accreditaties' in label_text:\n continue\n elif 'Deadline voor aanmelding' in label_text:\n continue\n res = info_session.find('div')\n res_text = res.text.strip()\n info[info_attr] = res_text\n except Exception as e:\n print(f\"{course['url']} has problem of {e}\")\n continue\n detail = {**course, **info}\n return detail\n",
"step-3": "def parse_detail_for_one_course(page, course, no_info_course):\n print(f'{course[\"name\"]} is processing**: {course[\"url\"]}')\n map = {\"Locatie\": \"location\",\n \"Location\": \"location\",\n \"Startdatum\": \"effective_start_date\",\n \"Start date\": \"effective_start_date\",\n \"Duur\": \"duration_desc\",\n \"Wekelijkse studie\": \"duration_desc\",\n \"Expensive\": \"duration_desc\",\n \"Colleges\": \"consecutive_desc\",\n \"Languages\": \"languages\",\n \"Languages \": \"languages\",\n \"Talen\": \"languages\",\n \"Fee\": \"price\",\n \"Fee \": \"price\",\n \"Fairy \": \"price\",\n \"Weekly study\": \"second_duration_desc\",\n \"Accreditations \": \"third_duration_desc\",\n \"Investering\": \"price\"}\n\n info = {\"location\": \"\",\n \"effective_start_date\": \"\",\n \"duration_desc\": \"\",\n \"consecutive_desc\": \"\",\n \"languages\": \"\",\n \"price\": \"\",\n \"second_duration_desc\": \"\",\n \"third_duration_desc\": \"\"}\n\n info_div = page.find('div', attrs={\"class\": \"program-general-info\"})\n info_sessions = None\n if info_div:\n info_sessions = info_div.find_all('div', attrs={\"class\": \"info-item\"})\n\n if not info_sessions:\n print(f'-------{course[\"url\"]} not div')\n no_info_course.append(course)\n elif info_sessions:\n for info_session in info_sessions:\n try:\n label = info_session.find('label')\n label_text = label.text.strip()\n info_attr = map.get(label_text, '')\n if \"Wekeli\" in label_text:\n info_attr = \"duration_desc\"\n elif \"Permanente educatie\" in label_text:\n continue\n elif \"Accreditaties\" in label_text:\n continue\n elif \"Deadline voor aanmelding\" in label_text:\n continue\n res = info_session.find('div')\n res_text = res.text.strip()\n info[info_attr] = res_text\n except Exception as e:\n print(f'{course[\"url\"]} has problem of {e}')\n continue\n # print(title)\n detail = {**course, **info}\n # pprint(detail)\n return detail\n\n\n# page = 
requests.get(\"https://www.nyenrode.nl/opleidingen/p/collegereeks-excellent-leiderschap\")\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/behavioral-and-cultural-governance\")\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/advanced-management-program\")\n# page = requests.get(\"https://www.nyenrode.nl/opleidingen/p/mba-thesis\")\n# course = {\"name\": \"\",\n# \"url\": \"\"}\n# page = page.text\n# page = bs4.BeautifulSoup(page, 'html.parser')\n#\n# detail = get_detail_for_one_course(page, course, [])\n# pprint(detail)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def clear_firefox_driver_session(firefox_driver):
firefox_driver.delete_all_cookies()
# Note this only works if the browser is set to a location.
firefox_driver.execute_script('window.localStorage.clear();')
firefox_driver.execute_script('window.sessionStorage.clear();')
class LocationNotSet(Exception):
pass
|
normal
|
{
"blob_id": "6d0b9523668bd0b302fdbc196d3d7ff25be10b23",
"index": 5045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-3": "def clear_firefox_driver_session(firefox_driver):\n firefox_driver.delete_all_cookies()\n firefox_driver.execute_script('window.localStorage.clear();')\n firefox_driver.execute_script('window.sessionStorage.clear();')\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-4": "def clear_firefox_driver_session(firefox_driver):\n firefox_driver.delete_all_cookies()\n # Note this only works if the browser is set to a location.\n firefox_driver.execute_script('window.localStorage.clear();')\n firefox_driver.execute_script('window.sessionStorage.clear();')\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Create your views here.
from django.shortcuts import render_to_response, Http404, render
from django.template import RequestContext
from books.models import Book
from django.http import HttpResponse, HttpResponseRedirect
import urllib, urllib2
import json
def incr_reads(request, book_id):
if request.POST:
try:
readers = Book.objects.get(id=book_id).incr_reads()
return HttpResponse(readers)
except Book.DoesNotExist:
pass
return HttpResponse('FAILED')
def index(request):
'''
No processing, should use direct to template.
'''
return render_to_response('index.html', {}, context_instance=RequestContext(request))
def search(request):
if request.GET and 'q' in request.GET:
b = Book.search.query(request.GET['q'])
return render_to_response('books/book_list.html', {'object_list':b}, context_instance=RequestContext(request))
def suggest_image(request, book_id):
'''
So this is a helper view for staff to update the picture.
'''
b = Book.objects.get(id=book_id)
_img = b.get_image_suggestions(first=False)
return render_to_response('books/image_suggestor.html', {'images':_img, 'book':b}, context_instance=RequestContext(request))
|
normal
|
{
"blob_id": "bcbcb4ea3a3b8b5c11e9b107103418ae79a3921c",
"index": 3628,
"step-1": "<mask token>\n\n\ndef incr_reads(request, book_id):\n if request.POST:\n try:\n readers = Book.objects.get(id=book_id).incr_reads()\n return HttpResponse(readers)\n except Book.DoesNotExist:\n pass\n return HttpResponse('FAILED')\n\n\ndef index(request):\n \"\"\"\n No processing, should use direct to template.\n \"\"\"\n return render_to_response('index.html', {}, context_instance=\n RequestContext(request))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef incr_reads(request, book_id):\n if request.POST:\n try:\n readers = Book.objects.get(id=book_id).incr_reads()\n return HttpResponse(readers)\n except Book.DoesNotExist:\n pass\n return HttpResponse('FAILED')\n\n\ndef index(request):\n \"\"\"\n No processing, should use direct to template.\n \"\"\"\n return render_to_response('index.html', {}, context_instance=\n RequestContext(request))\n\n\n<mask token>\n\n\ndef suggest_image(request, book_id):\n \"\"\"\n So this is a helper view for staff to update the picture.\n \"\"\"\n b = Book.objects.get(id=book_id)\n _img = b.get_image_suggestions(first=False)\n return render_to_response('books/image_suggestor.html', {'images': _img,\n 'book': b}, context_instance=RequestContext(request))\n",
"step-3": "<mask token>\n\n\ndef incr_reads(request, book_id):\n if request.POST:\n try:\n readers = Book.objects.get(id=book_id).incr_reads()\n return HttpResponse(readers)\n except Book.DoesNotExist:\n pass\n return HttpResponse('FAILED')\n\n\ndef index(request):\n \"\"\"\n No processing, should use direct to template.\n \"\"\"\n return render_to_response('index.html', {}, context_instance=\n RequestContext(request))\n\n\ndef search(request):\n if request.GET and 'q' in request.GET:\n b = Book.search.query(request.GET['q'])\n return render_to_response('books/book_list.html', {'object_list': b},\n context_instance=RequestContext(request))\n\n\ndef suggest_image(request, book_id):\n \"\"\"\n So this is a helper view for staff to update the picture.\n \"\"\"\n b = Book.objects.get(id=book_id)\n _img = b.get_image_suggestions(first=False)\n return render_to_response('books/image_suggestor.html', {'images': _img,\n 'book': b}, context_instance=RequestContext(request))\n",
"step-4": "from django.shortcuts import render_to_response, Http404, render\nfrom django.template import RequestContext\nfrom books.models import Book\nfrom django.http import HttpResponse, HttpResponseRedirect\nimport urllib, urllib2\nimport json\n\n\ndef incr_reads(request, book_id):\n if request.POST:\n try:\n readers = Book.objects.get(id=book_id).incr_reads()\n return HttpResponse(readers)\n except Book.DoesNotExist:\n pass\n return HttpResponse('FAILED')\n\n\ndef index(request):\n \"\"\"\n No processing, should use direct to template.\n \"\"\"\n return render_to_response('index.html', {}, context_instance=\n RequestContext(request))\n\n\ndef search(request):\n if request.GET and 'q' in request.GET:\n b = Book.search.query(request.GET['q'])\n return render_to_response('books/book_list.html', {'object_list': b},\n context_instance=RequestContext(request))\n\n\ndef suggest_image(request, book_id):\n \"\"\"\n So this is a helper view for staff to update the picture.\n \"\"\"\n b = Book.objects.get(id=book_id)\n _img = b.get_image_suggestions(first=False)\n return render_to_response('books/image_suggestor.html', {'images': _img,\n 'book': b}, context_instance=RequestContext(request))\n",
"step-5": "# Create your views here.\nfrom django.shortcuts import render_to_response, Http404, render\nfrom django.template import RequestContext\nfrom books.models import Book\nfrom django.http import HttpResponse, HttpResponseRedirect\nimport urllib, urllib2\nimport json \n\ndef incr_reads(request, book_id):\n if request.POST:\n try:\n readers = Book.objects.get(id=book_id).incr_reads()\n return HttpResponse(readers)\n except Book.DoesNotExist:\n pass\n return HttpResponse('FAILED')\n\ndef index(request):\n '''\n No processing, should use direct to template.\n '''\n return render_to_response('index.html', {}, context_instance=RequestContext(request))\n\ndef search(request):\n if request.GET and 'q' in request.GET:\n b = Book.search.query(request.GET['q'])\n return render_to_response('books/book_list.html', {'object_list':b}, context_instance=RequestContext(request))\n\ndef suggest_image(request, book_id):\n '''\n So this is a helper view for staff to update the picture.\n '''\n b = Book.objects.get(id=book_id)\n _img = b.get_image_suggestions(first=False)\n return render_to_response('books/image_suggestor.html', {'images':_img, 'book':b}, context_instance=RequestContext(request))\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Core Packages
import difflib
import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.scrolledtext import *
import tkinter.filedialog
import PyPDF2
from tkinter import filedialog
import torch
import json
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
# NLP Pkgs
from spacy_summarization import text_summarizer
from gensim.summarization import summarize
from nltk_summarization import nltk_summarizer
# Web Scraping Pkg
from bs4 import BeautifulSoup
from urllib.request import urlopen
# Structure and Layout
window = Tk()
window.title("Summaryzer GUI")
window.geometry("700x400")
window.config(background='black')
style = ttk.Style(window)
style.configure('lefttab.TNotebook', tabposition='wn', )
# TAB LAYOUT
tab_control = ttk.Notebook(window, style='lefttab.TNotebook')
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
# ADD TABS TO NOTEBOOK
tab_control.add(tab3, text=f'{"Extractive":^20s}')
tab_control.add(tab2, text=f'{"Abstractive":^20s}')
label1 = Label(tab3, text='Extractive Summrize', padx=5, pady=5)
label1.grid(column=1, row=0)
label2 = Label(tab2, text='Abstractive Summrize',padx=5, pady=5)
label2.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
def get_summary():
model = T5ForConditionalGeneration.from_pretrained ('t5-small')
tokenizer = T5Tokenizer.from_pretrained ('t5-small')
device = torch.device ('cpu')
text = str(url_display1.get('1.0', tk.END))
preprocess_text = text.strip ().replace ("\n", "")
t5_prepared_Text = "summarize: " + preprocess_text
tokenized_text = tokenizer.encode (t5_prepared_Text, return_tensors="pt").to (device)
summary_ids = model.generate (tokenized_text,
num_beams=4,
no_repeat_ngram_size=2,
min_length=30,
max_length=100,
early_stopping=True)
output = tokenizer.decode (summary_ids[0], skip_special_tokens=True)
Str1 = text
str2 = output
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
edited = len(text)-len(output)
Precision = (len(text)+len(output)+edited)/2
Precisioncalc = Precision / 100
result =("\n\nSummarized text: \n", output)," Precision = " , Precisioncalc , " similarity = " , printt
tab2_display_text.insert(tk.END, result)
def open_pdf():
open_file = filedialog.askopenfilename(
initialdir="C:/gui/",
title="Open PDF File",
filetypes=(
("PDF Files", "*.pdf"),
("All Files", ".")))
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display.insert(3.0, io)
def open_pdf1():
open_file = filedialog.askopenfilename(
initialdir="C:/gui/",
title="Open PDF File",
filetypes=(
("PDF Files", "*.pdf"),
("All Files", ".")))
if open_file:
pdf_file = PyPDF2.PdfFileReader(open_file)
page = pdf_file.getPage(0)
page_stuff = page.extractText()
io = page_stuff.split()
url_display1.insert(3.0, io)
def clear_display_result():
tab3_display_text.delete('1.0', END)
# Clear For URL
def clear_url_entry():
url_entry.delete(0, END)
# Open File to Read and Process
def openfiles():
file1 = tkinter.filedialog.askopenfilename(filetypes=(("Text Files", ".txt"), ("All files", "*")))
read_text = open(file1).read()
url_display.insert(tk.END, read_text)
def get_text():
raw_text = str(url_entry.get())
page = urlopen(raw_text)
soup = BeautifulSoup(page)
fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
url_display.insert(tk.END, fetched_text)
def get_url_summary():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
result = '\nSummary:{}'.format(final_text)
tab3_display_text.insert(tk.END, result)
def use_spacy ():
raw_text = url_display.get('1.0', tk.END)
final_text = text_summarizer(raw_text)
print(final_text)
Str1 = raw_text
str2 = text_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nSpacy Summary:{}\n'.format(final_text)," Precision = " , Precisioncalc , " similarity = " , printt
tab3_display_text.insert(tk.END, result)
def use_nltk():
raw_text = url_display.get ('1.0', tk.END)
final_text = nltk_summarizer (raw_text)
print (final_text)
Str1 = raw_text
str2 = nltk_summarizer(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result = '\nNLTK Summary:{}\n'.format(final_text)," Precision = " , Precisioncalc , " similarity = " , printt
tab3_display_text.insert(tk.END, result)
def use_gensim():
raw_text = url_display.get ('1.0', tk.END)
final_text = summarize(raw_text)
print (final_text)
Str1 = raw_text
str2 = summarize(raw_text)
printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100
Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)
Precisioncalc = Precision / 100
result ='\nGensim Summary:{}\n'.format(final_text)," Precision = " , Precisioncalc , " similarity = " , printt
tab3_display_text.insert(tk.END, result)
# URL TAB
l1 = Label(tab3, text="Enter URL To Summarize")
l1.grid(row=1, column=0)
raw_entry = StringVar()
url_entry = Entry(tab3, textvariable=raw_entry, width=50)
url_entry.grid(row=1, column=1)
# BUTTONS
button1 = Button(tab3, text="Reset", command=clear_url_entry, width=12, bg='#03A9F4', fg='#fff')
button1.grid(row=4, column=0, padx=10, pady=10)
button2 = Button(tab3, text="Get Text", command=get_text, width=12, bg='#03A9F4', fg='#fff')
button2.grid(row=4, column=1, padx=10, pady=10)
button3 = Button(tab3, text="Open File", width=12, command=openfiles, bg='#c5cae9')
button3.grid(row=5, column=0, padx=10, pady=10)
button4 = Button(tab3, text="Open PDF", width=12, command=open_pdf, bg='#c5cae9')
button4.grid(row=5, column=1, padx=10, pady=10)
button5 = Button(tab3, text="SpaCy", command=use_spacy, width=12, bg='red', fg='#fff')
button5.grid(row=8, column=0, padx=10, pady=10)
button6 = Button(tab3, text="Clear Result", command=clear_display_result, width=12, bg='#03A9F4', fg='#fff')
button6.grid(row=9, column=1, padx=10, pady=10)
button7 = Button(tab3, text="NLTK", command=use_nltk, width=12, bg='#03A9F4', fg='#fff')
button7.grid(row=8, column=1, padx=10, pady=10)
button8 = Button(tab3, text="Gensim", command=use_gensim, width=12, bg='#03A9F4', fg='#fff')
button8.grid(row=9, column=0, padx=10, pady=10)
# Display Screen For Result
url_display = ScrolledText(tab3, height=10)
url_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab3_display_text = ScrolledText(tab3, height=10)
tab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
l1 = Label(tab2, text="Enter URL To Summarize")
l1.grid(row=1, column=0)
raw_entry1 = StringVar()
url_entry1 = Entry(tab2, textvariable=raw_entry, width=50)
url_entry1.grid(row=1, column=1)
# BUTTONS
button9 = Button(tab2, text="Reset", command=clear_url_entry, width=12, bg='#03A9F4', fg='#fff')
button9.grid(row=4, column=0, padx=10, pady=10)
button10 = Button(tab2, text="Get Text", command=get_text, width=12, bg='#03A9F4', fg='#fff')
button10.grid(row=4, column=1, padx=10, pady=10)
button11 = Button(tab2, text="Open File", width=12, command=openfiles, bg='#c5cae9')
button11.grid(row=5, column=0, padx=10, pady=10)
button12 = Button(tab2, text="Open PDF", width=12, command=open_pdf1, bg='#c5cae9')
button12.grid(row=5, column=1, padx=10, pady=10)
button13 = Button(tab2, text="Clear Result", command=clear_display_result, width=12, bg='#03A9F4', fg='#fff')
button13.grid(row=9, column=1, padx=10, pady=10)
button14 = Button(tab2, text="Abstract", command=get_summary, width=12, bg='#03A9F4', fg='#fff')
button14.grid(row=9, column=0, padx=10, pady=10)
url_display1 = ScrolledText(tab2, height=10)
url_display1.grid(row=7, column=0, columnspan=3, padx=5, pady=5)
tab2_display_text = ScrolledText(tab2, height=10)
tab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)
window.mainloop()
|
normal
|
{
"blob_id": "e3dece36ba3e5b3df763e7119c485f6ed2155098",
"index": 795,
"step-1": "<mask token>\n\n\ndef get_summary():\n model = T5ForConditionalGeneration.from_pretrained('t5-small')\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n device = torch.device('cpu')\n text = str(url_display1.get('1.0', tk.END))\n preprocess_text = text.strip().replace('\\n', '')\n t5_prepared_Text = 'summarize: ' + preprocess_text\n tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'\n ).to(device)\n summary_ids = model.generate(tokenized_text, num_beams=4,\n no_repeat_ngram_size=2, min_length=30, max_length=100,\n early_stopping=True)\n output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)\n Str1 = text\n str2 = output\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n edited = len(text) - len(output)\n Precision = (len(text) + len(output) + edited) / 2\n Precisioncalc = Precision / 100\n result = ('\\n\\nSummarized text: \\n', output\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab2_display_text.insert(tk.END, result)\n\n\ndef open_pdf():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display.insert(3.0, io)\n\n\ndef open_pdf1():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display1.insert(3.0, io)\n\n\n<mask token>\n\n\ndef clear_url_entry():\n url_entry.delete(0, END)\n\n\n<mask token>\n\n\ndef get_text():\n raw_text = str(url_entry.get())\n page = urlopen(raw_text)\n soup = BeautifulSoup(page)\n fetched_text = ' '.join(map(lambda p: p.text, 
soup.find_all('p')))\n url_display.insert(tk.END, fetched_text)\n\n\ndef get_url_summary():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n result = '\\nSummary:{}'.format(final_text)\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_spacy():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = text_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nSpacy Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_nltk():\n raw_text = url_display.get('1.0', tk.END)\n final_text = nltk_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = nltk_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nNLTK Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_summary():\n model = T5ForConditionalGeneration.from_pretrained('t5-small')\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n device = torch.device('cpu')\n text = str(url_display1.get('1.0', tk.END))\n preprocess_text = text.strip().replace('\\n', '')\n t5_prepared_Text = 'summarize: ' + preprocess_text\n tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'\n ).to(device)\n summary_ids = model.generate(tokenized_text, num_beams=4,\n no_repeat_ngram_size=2, min_length=30, max_length=100,\n early_stopping=True)\n output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)\n Str1 = text\n str2 = output\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n edited = len(text) - len(output)\n Precision = (len(text) + len(output) + edited) / 2\n Precisioncalc = Precision / 100\n result = ('\\n\\nSummarized text: \\n', output\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab2_display_text.insert(tk.END, result)\n\n\ndef open_pdf():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display.insert(3.0, io)\n\n\ndef open_pdf1():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display1.insert(3.0, io)\n\n\ndef clear_display_result():\n tab3_display_text.delete('1.0', END)\n\n\ndef clear_url_entry():\n url_entry.delete(0, END)\n\n\n<mask token>\n\n\ndef get_text():\n raw_text = str(url_entry.get())\n page = urlopen(raw_text)\n soup = BeautifulSoup(page)\n fetched_text 
= ' '.join(map(lambda p: p.text, soup.find_all('p')))\n url_display.insert(tk.END, fetched_text)\n\n\ndef get_url_summary():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n result = '\\nSummary:{}'.format(final_text)\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_spacy():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = text_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nSpacy Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_nltk():\n raw_text = url_display.get('1.0', tk.END)\n final_text = nltk_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = nltk_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nNLTK Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\n<mask token>\n",
"step-3": "<mask token>\nwindow = Tk()\nwindow.title('Summaryzer GUI')\nwindow.geometry('700x400')\nwindow.config(background='black')\nstyle = ttk.Style(window)\nstyle.configure('lefttab.TNotebook', tabposition='wn')\ntab_control = ttk.Notebook(window, style='lefttab.TNotebook')\ntab2 = ttk.Frame(tab_control)\ntab3 = ttk.Frame(tab_control)\ntab_control.add(tab3, text=f\"{'Extractive':^20s}\")\ntab_control.add(tab2, text=f\"{'Abstractive':^20s}\")\nlabel1 = Label(tab3, text='Extractive Summrize', padx=5, pady=5)\nlabel1.grid(column=1, row=0)\nlabel2 = Label(tab2, text='Abstractive Summrize', padx=5, pady=5)\nlabel2.grid(column=0, row=0)\ntab_control.pack(expand=1, fill='both')\n\n\ndef get_summary():\n model = T5ForConditionalGeneration.from_pretrained('t5-small')\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n device = torch.device('cpu')\n text = str(url_display1.get('1.0', tk.END))\n preprocess_text = text.strip().replace('\\n', '')\n t5_prepared_Text = 'summarize: ' + preprocess_text\n tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'\n ).to(device)\n summary_ids = model.generate(tokenized_text, num_beams=4,\n no_repeat_ngram_size=2, min_length=30, max_length=100,\n early_stopping=True)\n output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)\n Str1 = text\n str2 = output\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n edited = len(text) - len(output)\n Precision = (len(text) + len(output) + edited) / 2\n Precisioncalc = Precision / 100\n result = ('\\n\\nSummarized text: \\n', output\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab2_display_text.insert(tk.END, result)\n\n\ndef open_pdf():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = 
page_stuff.split()\n url_display.insert(3.0, io)\n\n\ndef open_pdf1():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display1.insert(3.0, io)\n\n\ndef clear_display_result():\n tab3_display_text.delete('1.0', END)\n\n\ndef clear_url_entry():\n url_entry.delete(0, END)\n\n\ndef openfiles():\n file1 = tkinter.filedialog.askopenfilename(filetypes=(('Text Files',\n '.txt'), ('All files', '*')))\n read_text = open(file1).read()\n url_display.insert(tk.END, read_text)\n\n\ndef get_text():\n raw_text = str(url_entry.get())\n page = urlopen(raw_text)\n soup = BeautifulSoup(page)\n fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))\n url_display.insert(tk.END, fetched_text)\n\n\ndef get_url_summary():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n result = '\\nSummary:{}'.format(final_text)\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_spacy():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = text_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nSpacy Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_nltk():\n raw_text = url_display.get('1.0', tk.END)\n final_text = nltk_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = nltk_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / 
len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nNLTK Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_gensim():\n raw_text = url_display.get('1.0', tk.END)\n final_text = summarize(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = summarize(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nGensim Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\nl1 = Label(tab3, text='Enter URL To Summarize')\nl1.grid(row=1, column=0)\nraw_entry = StringVar()\nurl_entry = Entry(tab3, textvariable=raw_entry, width=50)\nurl_entry.grid(row=1, column=1)\nbutton1 = Button(tab3, text='Reset', command=clear_url_entry, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton1.grid(row=4, column=0, padx=10, pady=10)\nbutton2 = Button(tab3, text='Get Text', command=get_text, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton2.grid(row=4, column=1, padx=10, pady=10)\nbutton3 = Button(tab3, text='Open File', width=12, command=openfiles, bg=\n '#c5cae9')\nbutton3.grid(row=5, column=0, padx=10, pady=10)\nbutton4 = Button(tab3, text='Open PDF', width=12, command=open_pdf, bg=\n '#c5cae9')\nbutton4.grid(row=5, column=1, padx=10, pady=10)\nbutton5 = Button(tab3, text='SpaCy', command=use_spacy, width=12, bg='red',\n fg='#fff')\nbutton5.grid(row=8, column=0, padx=10, pady=10)\nbutton6 = Button(tab3, text='Clear Result', command=clear_display_result,\n width=12, bg='#03A9F4', fg='#fff')\nbutton6.grid(row=9, column=1, padx=10, pady=10)\nbutton7 = Button(tab3, text='NLTK', command=use_nltk, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton7.grid(row=8, column=1, padx=10, pady=10)\nbutton8 = Button(tab3, text='Gensim', command=use_gensim, width=12, 
bg=\n '#03A9F4', fg='#fff')\nbutton8.grid(row=9, column=0, padx=10, pady=10)\nurl_display = ScrolledText(tab3, height=10)\nurl_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)\ntab3_display_text = ScrolledText(tab3, height=10)\ntab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)\nl1 = Label(tab2, text='Enter URL To Summarize')\nl1.grid(row=1, column=0)\nraw_entry1 = StringVar()\nurl_entry1 = Entry(tab2, textvariable=raw_entry, width=50)\nurl_entry1.grid(row=1, column=1)\nbutton9 = Button(tab2, text='Reset', command=clear_url_entry, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton9.grid(row=4, column=0, padx=10, pady=10)\nbutton10 = Button(tab2, text='Get Text', command=get_text, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton10.grid(row=4, column=1, padx=10, pady=10)\nbutton11 = Button(tab2, text='Open File', width=12, command=openfiles, bg=\n '#c5cae9')\nbutton11.grid(row=5, column=0, padx=10, pady=10)\nbutton12 = Button(tab2, text='Open PDF', width=12, command=open_pdf1, bg=\n '#c5cae9')\nbutton12.grid(row=5, column=1, padx=10, pady=10)\nbutton13 = Button(tab2, text='Clear Result', command=clear_display_result,\n width=12, bg='#03A9F4', fg='#fff')\nbutton13.grid(row=9, column=1, padx=10, pady=10)\nbutton14 = Button(tab2, text='Abstract', command=get_summary, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton14.grid(row=9, column=0, padx=10, pady=10)\nurl_display1 = ScrolledText(tab2, height=10)\nurl_display1.grid(row=7, column=0, columnspan=3, padx=5, pady=5)\ntab2_display_text = ScrolledText(tab2, height=10)\ntab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)\nwindow.mainloop()\n",
"step-4": "import difflib\nimport tkinter as tk\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter.scrolledtext import *\nimport tkinter.filedialog\nimport PyPDF2\nfrom tkinter import filedialog\nimport torch\nimport json\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config\nfrom spacy_summarization import text_summarizer\nfrom gensim.summarization import summarize\nfrom nltk_summarization import nltk_summarizer\nfrom bs4 import BeautifulSoup\nfrom urllib.request import urlopen\nwindow = Tk()\nwindow.title('Summaryzer GUI')\nwindow.geometry('700x400')\nwindow.config(background='black')\nstyle = ttk.Style(window)\nstyle.configure('lefttab.TNotebook', tabposition='wn')\ntab_control = ttk.Notebook(window, style='lefttab.TNotebook')\ntab2 = ttk.Frame(tab_control)\ntab3 = ttk.Frame(tab_control)\ntab_control.add(tab3, text=f\"{'Extractive':^20s}\")\ntab_control.add(tab2, text=f\"{'Abstractive':^20s}\")\nlabel1 = Label(tab3, text='Extractive Summrize', padx=5, pady=5)\nlabel1.grid(column=1, row=0)\nlabel2 = Label(tab2, text='Abstractive Summrize', padx=5, pady=5)\nlabel2.grid(column=0, row=0)\ntab_control.pack(expand=1, fill='both')\n\n\ndef get_summary():\n model = T5ForConditionalGeneration.from_pretrained('t5-small')\n tokenizer = T5Tokenizer.from_pretrained('t5-small')\n device = torch.device('cpu')\n text = str(url_display1.get('1.0', tk.END))\n preprocess_text = text.strip().replace('\\n', '')\n t5_prepared_Text = 'summarize: ' + preprocess_text\n tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors='pt'\n ).to(device)\n summary_ids = model.generate(tokenized_text, num_beams=4,\n no_repeat_ngram_size=2, min_length=30, max_length=100,\n early_stopping=True)\n output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)\n Str1 = text\n str2 = output\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n edited = len(text) - len(output)\n Precision = (len(text) + len(output) + edited) / 2\n 
Precisioncalc = Precision / 100\n result = ('\\n\\nSummarized text: \\n', output\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab2_display_text.insert(tk.END, result)\n\n\ndef open_pdf():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display.insert(3.0, io)\n\n\ndef open_pdf1():\n open_file = filedialog.askopenfilename(initialdir='C:/gui/', title=\n 'Open PDF File', filetypes=(('PDF Files', '*.pdf'), ('All Files', '.'))\n )\n if open_file:\n pdf_file = PyPDF2.PdfFileReader(open_file)\n page = pdf_file.getPage(0)\n page_stuff = page.extractText()\n io = page_stuff.split()\n url_display1.insert(3.0, io)\n\n\ndef clear_display_result():\n tab3_display_text.delete('1.0', END)\n\n\ndef clear_url_entry():\n url_entry.delete(0, END)\n\n\ndef openfiles():\n file1 = tkinter.filedialog.askopenfilename(filetypes=(('Text Files',\n '.txt'), ('All files', '*')))\n read_text = open(file1).read()\n url_display.insert(tk.END, read_text)\n\n\ndef get_text():\n raw_text = str(url_entry.get())\n page = urlopen(raw_text)\n soup = BeautifulSoup(page)\n fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))\n url_display.insert(tk.END, fetched_text)\n\n\ndef get_url_summary():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n result = '\\nSummary:{}'.format(final_text)\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_spacy():\n raw_text = url_display.get('1.0', tk.END)\n final_text = text_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = text_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 
100\n result = '\\nSpacy Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_nltk():\n raw_text = url_display.get('1.0', tk.END)\n final_text = nltk_summarizer(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = nltk_summarizer(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nNLTK Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\ndef use_gensim():\n raw_text = url_display.get('1.0', tk.END)\n final_text = summarize(raw_text)\n print(final_text)\n Str1 = raw_text\n str2 = summarize(raw_text)\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\n Precisioncalc = Precision / 100\n result = '\\nGensim Summary:{}\\n'.format(final_text\n ), ' Precision = ', Precisioncalc, ' similarity = ', printt\n tab3_display_text.insert(tk.END, result)\n\n\nl1 = Label(tab3, text='Enter URL To Summarize')\nl1.grid(row=1, column=0)\nraw_entry = StringVar()\nurl_entry = Entry(tab3, textvariable=raw_entry, width=50)\nurl_entry.grid(row=1, column=1)\nbutton1 = Button(tab3, text='Reset', command=clear_url_entry, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton1.grid(row=4, column=0, padx=10, pady=10)\nbutton2 = Button(tab3, text='Get Text', command=get_text, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton2.grid(row=4, column=1, padx=10, pady=10)\nbutton3 = Button(tab3, text='Open File', width=12, command=openfiles, bg=\n '#c5cae9')\nbutton3.grid(row=5, column=0, padx=10, pady=10)\nbutton4 = Button(tab3, text='Open PDF', width=12, command=open_pdf, bg=\n '#c5cae9')\nbutton4.grid(row=5, column=1, padx=10, pady=10)\nbutton5 = Button(tab3, 
text='SpaCy', command=use_spacy, width=12, bg='red',\n fg='#fff')\nbutton5.grid(row=8, column=0, padx=10, pady=10)\nbutton6 = Button(tab3, text='Clear Result', command=clear_display_result,\n width=12, bg='#03A9F4', fg='#fff')\nbutton6.grid(row=9, column=1, padx=10, pady=10)\nbutton7 = Button(tab3, text='NLTK', command=use_nltk, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton7.grid(row=8, column=1, padx=10, pady=10)\nbutton8 = Button(tab3, text='Gensim', command=use_gensim, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton8.grid(row=9, column=0, padx=10, pady=10)\nurl_display = ScrolledText(tab3, height=10)\nurl_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)\ntab3_display_text = ScrolledText(tab3, height=10)\ntab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)\nl1 = Label(tab2, text='Enter URL To Summarize')\nl1.grid(row=1, column=0)\nraw_entry1 = StringVar()\nurl_entry1 = Entry(tab2, textvariable=raw_entry, width=50)\nurl_entry1.grid(row=1, column=1)\nbutton9 = Button(tab2, text='Reset', command=clear_url_entry, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton9.grid(row=4, column=0, padx=10, pady=10)\nbutton10 = Button(tab2, text='Get Text', command=get_text, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton10.grid(row=4, column=1, padx=10, pady=10)\nbutton11 = Button(tab2, text='Open File', width=12, command=openfiles, bg=\n '#c5cae9')\nbutton11.grid(row=5, column=0, padx=10, pady=10)\nbutton12 = Button(tab2, text='Open PDF', width=12, command=open_pdf1, bg=\n '#c5cae9')\nbutton12.grid(row=5, column=1, padx=10, pady=10)\nbutton13 = Button(tab2, text='Clear Result', command=clear_display_result,\n width=12, bg='#03A9F4', fg='#fff')\nbutton13.grid(row=9, column=1, padx=10, pady=10)\nbutton14 = Button(tab2, text='Abstract', command=get_summary, width=12, bg=\n '#03A9F4', fg='#fff')\nbutton14.grid(row=9, column=0, padx=10, pady=10)\nurl_display1 = ScrolledText(tab2, height=10)\nurl_display1.grid(row=7, column=0, columnspan=3, padx=5, 
pady=5)\ntab2_display_text = ScrolledText(tab2, height=10)\ntab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)\nwindow.mainloop()\n",
"step-5": "# Core Packages\r\nimport difflib\r\nimport tkinter as tk\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter.scrolledtext import *\r\nimport tkinter.filedialog\r\nimport PyPDF2\r\nfrom tkinter import filedialog\r\nimport torch\r\nimport json\r\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config\r\n\r\n# NLP Pkgs\r\nfrom spacy_summarization import text_summarizer\r\nfrom gensim.summarization import summarize\r\nfrom nltk_summarization import nltk_summarizer\r\n\r\n# Web Scraping Pkg\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlopen\r\n\r\n# Structure and Layout\r\nwindow = Tk()\r\nwindow.title(\"Summaryzer GUI\")\r\nwindow.geometry(\"700x400\")\r\nwindow.config(background='black')\r\n\r\nstyle = ttk.Style(window)\r\nstyle.configure('lefttab.TNotebook', tabposition='wn', )\r\n\r\n# TAB LAYOUT\r\ntab_control = ttk.Notebook(window, style='lefttab.TNotebook')\r\n\r\ntab2 = ttk.Frame(tab_control)\r\ntab3 = ttk.Frame(tab_control)\r\n\r\n# ADD TABS TO NOTEBOOK\r\ntab_control.add(tab3, text=f'{\"Extractive\":^20s}')\r\ntab_control.add(tab2, text=f'{\"Abstractive\":^20s}')\r\n\r\nlabel1 = Label(tab3, text='Extractive Summrize', padx=5, pady=5)\r\nlabel1.grid(column=1, row=0)\r\n\r\n\r\nlabel2 = Label(tab2, text='Abstractive Summrize',padx=5, pady=5)\r\nlabel2.grid(column=0, row=0)\r\n\r\ntab_control.pack(expand=1, fill='both')\r\n\r\ndef get_summary():\r\n model = T5ForConditionalGeneration.from_pretrained ('t5-small')\r\n tokenizer = T5Tokenizer.from_pretrained ('t5-small')\r\n device = torch.device ('cpu')\r\n text = str(url_display1.get('1.0', tk.END))\r\n preprocess_text = text.strip ().replace (\"\\n\", \"\")\r\n t5_prepared_Text = \"summarize: \" + preprocess_text\r\n tokenized_text = tokenizer.encode (t5_prepared_Text, return_tensors=\"pt\").to (device)\r\n\r\n summary_ids = model.generate (tokenized_text,\r\n num_beams=4,\r\n no_repeat_ngram_size=2,\r\n min_length=30,\r\n max_length=100,\r\n 
early_stopping=True)\r\n\r\n output = tokenizer.decode (summary_ids[0], skip_special_tokens=True)\r\n\r\n Str1 = text\r\n str2 = output\r\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\r\n\r\n edited = len(text)-len(output)\r\n Precision = (len(text)+len(output)+edited)/2\r\n Precisioncalc = Precision / 100\r\n\r\n result =(\"\\n\\nSummarized text: \\n\", output),\" Precision = \" , Precisioncalc , \" similarity = \" , printt\r\n\r\n tab2_display_text.insert(tk.END, result)\r\n\r\ndef open_pdf():\r\n open_file = filedialog.askopenfilename(\r\n initialdir=\"C:/gui/\",\r\n title=\"Open PDF File\",\r\n filetypes=(\r\n (\"PDF Files\", \"*.pdf\"),\r\n (\"All Files\", \".\")))\r\n\r\n if open_file:\r\n pdf_file = PyPDF2.PdfFileReader(open_file)\r\n page = pdf_file.getPage(0)\r\n page_stuff = page.extractText()\r\n io = page_stuff.split()\r\n url_display.insert(3.0, io)\r\n\r\n\r\ndef open_pdf1():\r\n open_file = filedialog.askopenfilename(\r\n initialdir=\"C:/gui/\",\r\n title=\"Open PDF File\",\r\n filetypes=(\r\n (\"PDF Files\", \"*.pdf\"),\r\n (\"All Files\", \".\")))\r\n\r\n if open_file:\r\n pdf_file = PyPDF2.PdfFileReader(open_file)\r\n page = pdf_file.getPage(0)\r\n page_stuff = page.extractText()\r\n io = page_stuff.split()\r\n url_display1.insert(3.0, io)\r\n\r\n\r\ndef clear_display_result():\r\n tab3_display_text.delete('1.0', END)\r\n\r\n# Clear For URL\r\ndef clear_url_entry():\r\n url_entry.delete(0, END)\r\n\r\n\r\n# Open File to Read and Process\r\ndef openfiles():\r\n file1 = tkinter.filedialog.askopenfilename(filetypes=((\"Text Files\", \".txt\"), (\"All files\", \"*\")))\r\n read_text = open(file1).read()\r\n url_display.insert(tk.END, read_text)\r\n\r\n\r\ndef get_text():\r\n raw_text = str(url_entry.get())\r\n page = urlopen(raw_text)\r\n soup = BeautifulSoup(page)\r\n fetched_text = ' '.join(map(lambda p: p.text, soup.find_all('p')))\r\n url_display.insert(tk.END, fetched_text)\r\n\r\n\r\ndef get_url_summary():\r\n 
raw_text = url_display.get('1.0', tk.END)\r\n final_text = text_summarizer(raw_text)\r\n result = '\\nSummary:{}'.format(final_text)\r\n tab3_display_text.insert(tk.END, result)\r\n\r\n\r\ndef use_spacy ():\r\n\r\n raw_text = url_display.get('1.0', tk.END)\r\n final_text = text_summarizer(raw_text)\r\n print(final_text)\r\n\r\n Str1 = raw_text\r\n str2 = text_summarizer(raw_text)\r\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\r\n\r\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\r\n Precisioncalc = Precision / 100\r\n result = '\\nSpacy Summary:{}\\n'.format(final_text),\" Precision = \" , Precisioncalc , \" similarity = \" , printt\r\n tab3_display_text.insert(tk.END, result)\r\n\r\n\r\ndef use_nltk():\r\n raw_text = url_display.get ('1.0', tk.END)\r\n final_text = nltk_summarizer (raw_text)\r\n print (final_text)\r\n\r\n Str1 = raw_text\r\n str2 = nltk_summarizer(raw_text)\r\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\r\n\r\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\r\n Precisioncalc = Precision / 100\r\n result = '\\nNLTK Summary:{}\\n'.format(final_text),\" Precision = \" , Precisioncalc , \" similarity = \" , printt\r\n tab3_display_text.insert(tk.END, result)\r\n\r\ndef use_gensim():\r\n raw_text = url_display.get ('1.0', tk.END)\r\n final_text = summarize(raw_text)\r\n print (final_text)\r\n Str1 = raw_text\r\n str2 = summarize(raw_text)\r\n printt = difflib.SequenceMatcher(None, Str1, str2, False).ratio() * 100\r\n\r\n Precision = len(raw_text) + len(nltk_summarizer(raw_text)) / len(raw_text)\r\n Precisioncalc = Precision / 100\r\n result ='\\nGensim Summary:{}\\n'.format(final_text),\" Precision = \" , Precisioncalc , \" similarity = \" , printt\r\n tab3_display_text.insert(tk.END, result)\r\n\r\n\r\n# URL TAB\r\nl1 = Label(tab3, text=\"Enter URL To Summarize\")\r\nl1.grid(row=1, column=0)\r\n\r\nraw_entry = StringVar()\r\nurl_entry = 
Entry(tab3, textvariable=raw_entry, width=50)\r\nurl_entry.grid(row=1, column=1)\r\n\r\n# BUTTONS\r\nbutton1 = Button(tab3, text=\"Reset\", command=clear_url_entry, width=12, bg='#03A9F4', fg='#fff')\r\nbutton1.grid(row=4, column=0, padx=10, pady=10)\r\n\r\nbutton2 = Button(tab3, text=\"Get Text\", command=get_text, width=12, bg='#03A9F4', fg='#fff')\r\nbutton2.grid(row=4, column=1, padx=10, pady=10)\r\n\r\nbutton3 = Button(tab3, text=\"Open File\", width=12, command=openfiles, bg='#c5cae9')\r\nbutton3.grid(row=5, column=0, padx=10, pady=10)\r\n\r\nbutton4 = Button(tab3, text=\"Open PDF\", width=12, command=open_pdf, bg='#c5cae9')\r\nbutton4.grid(row=5, column=1, padx=10, pady=10)\r\n\r\nbutton5 = Button(tab3, text=\"SpaCy\", command=use_spacy, width=12, bg='red', fg='#fff')\r\nbutton5.grid(row=8, column=0, padx=10, pady=10)\r\n\r\nbutton6 = Button(tab3, text=\"Clear Result\", command=clear_display_result, width=12, bg='#03A9F4', fg='#fff')\r\nbutton6.grid(row=9, column=1, padx=10, pady=10)\r\n\r\nbutton7 = Button(tab3, text=\"NLTK\", command=use_nltk, width=12, bg='#03A9F4', fg='#fff')\r\nbutton7.grid(row=8, column=1, padx=10, pady=10)\r\n\r\nbutton8 = Button(tab3, text=\"Gensim\", command=use_gensim, width=12, bg='#03A9F4', fg='#fff')\r\nbutton8.grid(row=9, column=0, padx=10, pady=10)\r\n# Display Screen For Result\r\nurl_display = ScrolledText(tab3, height=10)\r\nurl_display.grid(row=7, column=0, columnspan=3, padx=5, pady=5)\r\n\r\ntab3_display_text = ScrolledText(tab3, height=10)\r\ntab3_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)\r\n\r\n\r\n\r\nl1 = Label(tab2, text=\"Enter URL To Summarize\")\r\nl1.grid(row=1, column=0)\r\n\r\nraw_entry1 = StringVar()\r\nurl_entry1 = Entry(tab2, textvariable=raw_entry, width=50)\r\nurl_entry1.grid(row=1, column=1)\r\n\r\n# BUTTONS\r\n\r\nbutton9 = Button(tab2, text=\"Reset\", command=clear_url_entry, width=12, bg='#03A9F4', fg='#fff')\r\nbutton9.grid(row=4, column=0, padx=10, pady=10)\r\n\r\nbutton10 = 
Button(tab2, text=\"Get Text\", command=get_text, width=12, bg='#03A9F4', fg='#fff')\r\nbutton10.grid(row=4, column=1, padx=10, pady=10)\r\n\r\nbutton11 = Button(tab2, text=\"Open File\", width=12, command=openfiles, bg='#c5cae9')\r\nbutton11.grid(row=5, column=0, padx=10, pady=10)\r\n\r\nbutton12 = Button(tab2, text=\"Open PDF\", width=12, command=open_pdf1, bg='#c5cae9')\r\nbutton12.grid(row=5, column=1, padx=10, pady=10)\r\n\r\nbutton13 = Button(tab2, text=\"Clear Result\", command=clear_display_result, width=12, bg='#03A9F4', fg='#fff')\r\nbutton13.grid(row=9, column=1, padx=10, pady=10)\r\n\r\nbutton14 = Button(tab2, text=\"Abstract\", command=get_summary, width=12, bg='#03A9F4', fg='#fff')\r\nbutton14.grid(row=9, column=0, padx=10, pady=10)\r\n\r\nurl_display1 = ScrolledText(tab2, height=10)\r\nurl_display1.grid(row=7, column=0, columnspan=3, padx=5, pady=5)\r\n\r\ntab2_display_text = ScrolledText(tab2, height=10)\r\ntab2_display_text.grid(row=11, column=0, columnspan=3, padx=5, pady=5)\r\n\r\nwindow.mainloop()\r\n\r\n",
"step-ids": [
8,
9,
13,
14,
15
]
}
|
[
8,
9,
13,
14,
15
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-03 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('productores', '0002_auto_20170327_0841'),
]
operations = [
migrations.AddField(
model_name='productor',
name='edad',
field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')], default=1, editable=False),
preserve_default=False,
),
]
|
normal
|
{
"blob_id": "2f7be68f08716d5d04d064d81eecb53eb9b80174",
"index": 7635,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('productores', '0002_auto_20170327_0841')]\n operations = [migrations.AddField(model_name='productor', name='edad',\n field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')\n ], default=1, editable=False), preserve_default=False)]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('productores', '0002_auto_20170327_0841')]\n operations = [migrations.AddField(model_name='productor', name='edad',\n field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')\n ], default=1, editable=False), preserve_default=False)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.6 on 2017-04-03 14:45\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('productores', '0002_auto_20170327_0841'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='productor',\n name='edad',\n field=models.IntegerField(choices=[(1, 'Menor 35'), (2, 'Mayor 35')], default=1, editable=False),\n preserve_default=False,\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(kamus)
print(kamus['ayah'])
print(
"""
Data ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi"""
)
<|reserved_special_token_0|>
print(data_server_gojek)
print(f"Driver di sekitar sini {data_server_gojek['driver_list']}")
print(f"Driver #1 {data_server_gojek['driver_list'][0]}")
print(f"Driver #3 {data_server_gojek['driver_list'][2]}")
print("""
Cara mengambil data jarak terdekat""")
print(
f"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters"
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
kamus = {}
kamus['anak'] = 'son'
kamus['istri'] = 'wife'
kamus['ayah'] = 'father'
print(kamus)
print(kamus['ayah'])
print(
"""
Data ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi"""
)
data_server_gojek = {'tanggal': '2020-10-27', 'driver_list': [{'nama':
'Eko', 'jarak': 10}, {'nama': 'Dwi', 'jarak': 100}, {'nama': 'Tri',
'jarak': 1000}]}
print(data_server_gojek)
print(f"Driver di sekitar sini {data_server_gojek['driver_list']}")
print(f"Driver #1 {data_server_gojek['driver_list'][0]}")
print(f"Driver #3 {data_server_gojek['driver_list'][2]}")
print("""
Cara mengambil data jarak terdekat""")
print(
f"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters"
)
<|reserved_special_token_1|>
"""
Type data Dictionary hanya sekedar menghubungkan KEY dan VALUE
KVP = KEY VALUE PAIR
"""
kamus = {}
kamus['anak'] = 'son'
kamus['istri'] = 'wife'
kamus['ayah'] = 'father'
print(kamus)
print(kamus['ayah'])
print('\nData ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi')
data_server_gojek = {
'tanggal': '2020-10-27',
'driver_list': [ # diver_list merupakan array yang bertipe dictionary krna memiliki beberapa atribut
{'nama': 'Eko', 'jarak': 10},
{'nama': 'Dwi', 'jarak': 100},
{'nama': 'Tri', 'jarak': 1000}
]
}
print(data_server_gojek)
print(f"Driver di sekitar sini {data_server_gojek['driver_list']}")
print(f"Driver #1 {data_server_gojek['driver_list'][0]}")
print(f"Driver #3 {data_server_gojek['driver_list'][2]}")
print('\nCara mengambil data jarak terdekat')
print(f"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters")
|
flexible
|
{
"blob_id": "67b101df690bbe9629db2cabf0060c0f2aad9722",
"index": 2389,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(kamus)\nprint(kamus['ayah'])\nprint(\n \"\"\"\nData ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi\"\"\"\n )\n<mask token>\nprint(data_server_gojek)\nprint(f\"Driver di sekitar sini {data_server_gojek['driver_list']}\")\nprint(f\"Driver #1 {data_server_gojek['driver_list'][0]}\")\nprint(f\"Driver #3 {data_server_gojek['driver_list'][2]}\")\nprint(\"\"\"\nCara mengambil data jarak terdekat\"\"\")\nprint(\n f\"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters\"\n )\n",
"step-3": "<mask token>\nkamus = {}\nkamus['anak'] = 'son'\nkamus['istri'] = 'wife'\nkamus['ayah'] = 'father'\nprint(kamus)\nprint(kamus['ayah'])\nprint(\n \"\"\"\nData ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi\"\"\"\n )\ndata_server_gojek = {'tanggal': '2020-10-27', 'driver_list': [{'nama':\n 'Eko', 'jarak': 10}, {'nama': 'Dwi', 'jarak': 100}, {'nama': 'Tri',\n 'jarak': 1000}]}\nprint(data_server_gojek)\nprint(f\"Driver di sekitar sini {data_server_gojek['driver_list']}\")\nprint(f\"Driver #1 {data_server_gojek['driver_list'][0]}\")\nprint(f\"Driver #3 {data_server_gojek['driver_list'][2]}\")\nprint(\"\"\"\nCara mengambil data jarak terdekat\"\"\")\nprint(\n f\"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters\"\n )\n",
"step-4": "\"\"\"\nType data Dictionary hanya sekedar menghubungkan KEY dan VALUE\nKVP = KEY VALUE PAIR\n\"\"\"\n\nkamus = {}\nkamus['anak'] = 'son'\nkamus['istri'] = 'wife'\nkamus['ayah'] = 'father'\n\nprint(kamus)\nprint(kamus['ayah'])\n\nprint('\\nData ini dikirimkan server gojek, memberikan info driver di sekitar pemakai aplikasi')\ndata_server_gojek = {\n 'tanggal': '2020-10-27',\n 'driver_list': [ # diver_list merupakan array yang bertipe dictionary krna memiliki beberapa atribut\n {'nama': 'Eko', 'jarak': 10},\n {'nama': 'Dwi', 'jarak': 100},\n {'nama': 'Tri', 'jarak': 1000}\n ]\n}\nprint(data_server_gojek)\nprint(f\"Driver di sekitar sini {data_server_gojek['driver_list']}\")\nprint(f\"Driver #1 {data_server_gojek['driver_list'][0]}\")\nprint(f\"Driver #3 {data_server_gojek['driver_list'][2]}\")\n\nprint('\\nCara mengambil data jarak terdekat')\nprint(f\"jarak driver terdekat {data_server_gojek['driver_list'][0]['jarak']} meters\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Scraper:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Entry:
folder = None
date = None
sunspots = -1
image_path = None
counted_sunspots = 0
sections = [0, 0, 0, 0]
def nothing(self, *arg):
pass
def __init__(self, folder, date, sunspots, image_path):
self.folder = folder
self.date = date
self.sunspots = sunspots
self.image_path = image_path
def process(self):
frame = cv2.imread(self.image_path)
height, width, channels = frame.shape
frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)
hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
colorLow = np.array([0, 90, 80])
colorHigh = np.array([10, 255, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
result = cv2.bitwise_and(frame, frame, mask=mask)
image_edged = cv2.Canny(mask, 50, 100)
image_edged = cv2.dilate(image_edged, None, iterations=1)
image_edged = cv2.erode(image_edged, None, iterations=1)
cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
image_contours = cv2.bitwise_not(result)
self.counted_sunspots = 0
self.sections = [0, 0, 0, 0]
section_1_start, section_1_end = 0, height / 4
section_2_start, section_2_end = height / 4, height / 4 * 2
section_3_start, section_3_end = height / 4 * 2, height / 4 * 3
section_4_start, section_4_end = height / 4 * 3, height / 4 * 4
cv2.line(image_contours, (0, section_1_end), (width, section_1_end),
(0, 0, 0), 5)
cv2.line(image_contours, (0, section_2_end), (width, section_2_end),
(0, 0, 0), 10)
cv2.line(image_contours, (0, section_3_end), (width, section_3_end),
(0, 0, 0), 5)
cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0,
0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50
), font, 2, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(
self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
for c in cnts:
if cv2.contourArea(c) < 5:
continue
(x, y), radius = cv2.minEnclosingCircle(c)
x = int(x)
y = int(y)
radius = int(radius)
cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)
self.counted_sunspots = self.counted_sunspots + 1
if y >= section_1_start and y <= section_1_end:
self.sections[0] = self.sections[0] + 1
elif y >= section_2_start and y <= section_2_end:
self.sections[1] = self.sections[1] + 1
elif y >= section_3_start and y <= section_3_end:
self.sections[2] = self.sections[2] + 1
elif y >= section_4_start and y <= section_4_end:
self.sections[3] = self.sections[3] + 1
print('Counted sunspots: {}'.format(self.counted_sunspots))
print(self.sections)
cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]
), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]
), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]
), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]
), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
colorLow = np.array([0, 0, 90])
colorHigh = np.array([0, 0, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
image_contours[mask > 0] = 0, 0, 0
vis = np.concatenate((frame, image_contours), axis=1)
cv2.imwrite('out/images/{}.png'.format(self.folder), vis)
class Processor:
entries = []
def load(self):
folders = os.listdir('data')
for folder in folders:
year = int(folder[:4])
month = int(folder[4:6])
day = int(folder[6:8])
date = datetime(year, month, day)
image_name = 'data/{}/image.png'.format(folder)
txt_name = 'data/{}/data.txt'.format(folder)
txt_file = open(txt_name, 'r')
content = txt_file.readlines()
txt_file.close()
number = int(content[0])
print(folder)
entry = Entry(folder, date, number, image_name)
entry.process()
self.entries.append(entry)
self.entries.sort(key=lambda x: x.date, reverse=False)
def compute(self):
for section in range(0, 4):
total = 0
for entry in self.entries:
total += entry.sections[section]
average = float(total) / float(len(self.entries))
print('-------[Section {}]-------'.format(section + 1))
print('Total: {}'.format(total))
print('Average: {}'.format(average))
total = 0
sections_data = [['date', 'section_1', 'section_2', 'section_3',
'section_4']]
numbers_data = [['date', 'reported', 'visible']]
for entry in self.entries:
total += entry.counted_sunspots
sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.
sections)
numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.
sunspots, entry.counted_sunspots])
average = float(total) / float(len(self.entries))
print('---------[TOTAL]---------')
print('Total: {}'.format(total))
print('Average: {}'.format(average))
csv_file = open('out/sections.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(sections_data)
csv_file.close()
csv_file = open('out/numbers.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(numbers_data)
csv_file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scraper:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_days(self):
days = []
for i in range(0, 8):
base = self.start_date + timedelta(days=7 * i)
first = base
second = base + timedelta(days=2)
third = base + timedelta(days=4)
days.append(first)
days.append(second)
days.append(third)
return days
class Entry:
folder = None
date = None
sunspots = -1
image_path = None
counted_sunspots = 0
sections = [0, 0, 0, 0]
def nothing(self, *arg):
pass
def __init__(self, folder, date, sunspots, image_path):
self.folder = folder
self.date = date
self.sunspots = sunspots
self.image_path = image_path
def process(self):
frame = cv2.imread(self.image_path)
height, width, channels = frame.shape
frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)
hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
colorLow = np.array([0, 90, 80])
colorHigh = np.array([10, 255, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
result = cv2.bitwise_and(frame, frame, mask=mask)
image_edged = cv2.Canny(mask, 50, 100)
image_edged = cv2.dilate(image_edged, None, iterations=1)
image_edged = cv2.erode(image_edged, None, iterations=1)
cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
image_contours = cv2.bitwise_not(result)
self.counted_sunspots = 0
self.sections = [0, 0, 0, 0]
section_1_start, section_1_end = 0, height / 4
section_2_start, section_2_end = height / 4, height / 4 * 2
section_3_start, section_3_end = height / 4 * 2, height / 4 * 3
section_4_start, section_4_end = height / 4 * 3, height / 4 * 4
cv2.line(image_contours, (0, section_1_end), (width, section_1_end),
(0, 0, 0), 5)
cv2.line(image_contours, (0, section_2_end), (width, section_2_end),
(0, 0, 0), 10)
cv2.line(image_contours, (0, section_3_end), (width, section_3_end),
(0, 0, 0), 5)
cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0,
0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50
), font, 2, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(
self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
for c in cnts:
if cv2.contourArea(c) < 5:
continue
(x, y), radius = cv2.minEnclosingCircle(c)
x = int(x)
y = int(y)
radius = int(radius)
cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)
self.counted_sunspots = self.counted_sunspots + 1
if y >= section_1_start and y <= section_1_end:
self.sections[0] = self.sections[0] + 1
elif y >= section_2_start and y <= section_2_end:
self.sections[1] = self.sections[1] + 1
elif y >= section_3_start and y <= section_3_end:
self.sections[2] = self.sections[2] + 1
elif y >= section_4_start and y <= section_4_end:
self.sections[3] = self.sections[3] + 1
print('Counted sunspots: {}'.format(self.counted_sunspots))
print(self.sections)
cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]
), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]
), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]
), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]
), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
colorLow = np.array([0, 0, 90])
colorHigh = np.array([0, 0, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
image_contours[mask > 0] = 0, 0, 0
vis = np.concatenate((frame, image_contours), axis=1)
cv2.imwrite('out/images/{}.png'.format(self.folder), vis)
class Processor:
entries = []
def load(self):
folders = os.listdir('data')
for folder in folders:
year = int(folder[:4])
month = int(folder[4:6])
day = int(folder[6:8])
date = datetime(year, month, day)
image_name = 'data/{}/image.png'.format(folder)
txt_name = 'data/{}/data.txt'.format(folder)
txt_file = open(txt_name, 'r')
content = txt_file.readlines()
txt_file.close()
number = int(content[0])
print(folder)
entry = Entry(folder, date, number, image_name)
entry.process()
self.entries.append(entry)
self.entries.sort(key=lambda x: x.date, reverse=False)
def compute(self):
for section in range(0, 4):
total = 0
for entry in self.entries:
total += entry.sections[section]
average = float(total) / float(len(self.entries))
print('-------[Section {}]-------'.format(section + 1))
print('Total: {}'.format(total))
print('Average: {}'.format(average))
total = 0
sections_data = [['date', 'section_1', 'section_2', 'section_3',
'section_4']]
numbers_data = [['date', 'reported', 'visible']]
for entry in self.entries:
total += entry.counted_sunspots
sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.
sections)
numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.
sunspots, entry.counted_sunspots])
average = float(total) / float(len(self.entries))
print('---------[TOTAL]---------')
print('Total: {}'.format(total))
print('Average: {}'.format(average))
csv_file = open('out/sections.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(sections_data)
csv_file.close()
csv_file = open('out/numbers.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(numbers_data)
csv_file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Scraper:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def scrape_day(self, day):
self.browser.select('month', day.strftime('%m'))
self.browser.select('day', day.strftime('%d'))
self.browser.select('year', day.strftime('%Y'))
button = self.browser.find_by_name('view')
button.click()
text = self.browser.find_by_css('.solarWindText')[4].text
number = int(text.split(' ')[2].strip())
link = self.browser.find_link_by_partial_href('images{}/'.format(
day.strftime('%Y')))['href']
folder_name = 'data/{}{}{}'.format(day.strftime('%Y'), day.strftime
('%m'), day.strftime('%d'))
image_name = '{}/image.gif'.format(folder_name)
txt_name = '{}/data.txt'.format(folder_name)
os.mkdir(folder_name)
urllib.urlretrieve(link, image_name)
img = Image.open(image_name)
img.save('{}/image.png'.format(folder_name), 'png', optimize=True,
quality=70)
txt_file = open(txt_name, 'w')
txt_file.write(str(number))
txt_file.close()
print('Downloaded data for {}, sunspots: {}'.format(day.strftime(
'%m/%d/%Y'), number))
def get_days(self):
days = []
for i in range(0, 8):
base = self.start_date + timedelta(days=7 * i)
first = base
second = base + timedelta(days=2)
third = base + timedelta(days=4)
days.append(first)
days.append(second)
days.append(third)
return days
class Entry:
folder = None
date = None
sunspots = -1
image_path = None
counted_sunspots = 0
sections = [0, 0, 0, 0]
def nothing(self, *arg):
pass
def __init__(self, folder, date, sunspots, image_path):
self.folder = folder
self.date = date
self.sunspots = sunspots
self.image_path = image_path
def process(self):
frame = cv2.imread(self.image_path)
height, width, channels = frame.shape
frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)
hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
colorLow = np.array([0, 90, 80])
colorHigh = np.array([10, 255, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
result = cv2.bitwise_and(frame, frame, mask=mask)
image_edged = cv2.Canny(mask, 50, 100)
image_edged = cv2.dilate(image_edged, None, iterations=1)
image_edged = cv2.erode(image_edged, None, iterations=1)
cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
image_contours = cv2.bitwise_not(result)
self.counted_sunspots = 0
self.sections = [0, 0, 0, 0]
section_1_start, section_1_end = 0, height / 4
section_2_start, section_2_end = height / 4, height / 4 * 2
section_3_start, section_3_end = height / 4 * 2, height / 4 * 3
section_4_start, section_4_end = height / 4 * 3, height / 4 * 4
cv2.line(image_contours, (0, section_1_end), (width, section_1_end),
(0, 0, 0), 5)
cv2.line(image_contours, (0, section_2_end), (width, section_2_end),
(0, 0, 0), 10)
cv2.line(image_contours, (0, section_3_end), (width, section_3_end),
(0, 0, 0), 5)
cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0,
0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50
), font, 2, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(
self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
for c in cnts:
if cv2.contourArea(c) < 5:
continue
(x, y), radius = cv2.minEnclosingCircle(c)
x = int(x)
y = int(y)
radius = int(radius)
cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)
self.counted_sunspots = self.counted_sunspots + 1
if y >= section_1_start and y <= section_1_end:
self.sections[0] = self.sections[0] + 1
elif y >= section_2_start and y <= section_2_end:
self.sections[1] = self.sections[1] + 1
elif y >= section_3_start and y <= section_3_end:
self.sections[2] = self.sections[2] + 1
elif y >= section_4_start and y <= section_4_end:
self.sections[3] = self.sections[3] + 1
print('Counted sunspots: {}'.format(self.counted_sunspots))
print(self.sections)
cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]
), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]
), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]
), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]
), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
colorLow = np.array([0, 0, 90])
colorHigh = np.array([0, 0, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
image_contours[mask > 0] = 0, 0, 0
vis = np.concatenate((frame, image_contours), axis=1)
cv2.imwrite('out/images/{}.png'.format(self.folder), vis)
class Processor:
entries = []
def load(self):
folders = os.listdir('data')
for folder in folders:
year = int(folder[:4])
month = int(folder[4:6])
day = int(folder[6:8])
date = datetime(year, month, day)
image_name = 'data/{}/image.png'.format(folder)
txt_name = 'data/{}/data.txt'.format(folder)
txt_file = open(txt_name, 'r')
content = txt_file.readlines()
txt_file.close()
number = int(content[0])
print(folder)
entry = Entry(folder, date, number, image_name)
entry.process()
self.entries.append(entry)
self.entries.sort(key=lambda x: x.date, reverse=False)
def compute(self):
for section in range(0, 4):
total = 0
for entry in self.entries:
total += entry.sections[section]
average = float(total) / float(len(self.entries))
print('-------[Section {}]-------'.format(section + 1))
print('Total: {}'.format(total))
print('Average: {}'.format(average))
total = 0
sections_data = [['date', 'section_1', 'section_2', 'section_3',
'section_4']]
numbers_data = [['date', 'reported', 'visible']]
for entry in self.entries:
total += entry.counted_sunspots
sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.
sections)
numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.
sunspots, entry.counted_sunspots])
average = float(total) / float(len(self.entries))
print('---------[TOTAL]---------')
print('Total: {}'.format(total))
print('Average: {}'.format(average))
csv_file = open('out/sections.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(sections_data)
csv_file.close()
csv_file = open('out/numbers.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(numbers_data)
csv_file.close()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from splinter import Browser
from time import sleep
from datetime import datetime, timedelta
import os, sys
import urllib
import cv2
import numpy as np
from PIL import Image
import imutils
import csv
class Scraper:
start_date = datetime(2018, 1, 8)
url = 'http://spaceweather.com/'
def scrape(self):
self.browser = Browser('firefox')
self.browser.driver.set_page_load_timeout(60)
self.browser.visit(self.url)
for day in self.get_days():
self.scrape_day(day)
def scrape_day(self, day):
self.browser.select('month', day.strftime('%m'))
self.browser.select('day', day.strftime('%d'))
self.browser.select('year', day.strftime('%Y'))
button = self.browser.find_by_name('view')
button.click()
text = self.browser.find_by_css('.solarWindText')[4].text
number = int(text.split(' ')[2].strip())
link = self.browser.find_link_by_partial_href('images{}/'.format(
day.strftime('%Y')))['href']
folder_name = 'data/{}{}{}'.format(day.strftime('%Y'), day.strftime
('%m'), day.strftime('%d'))
image_name = '{}/image.gif'.format(folder_name)
txt_name = '{}/data.txt'.format(folder_name)
os.mkdir(folder_name)
urllib.urlretrieve(link, image_name)
img = Image.open(image_name)
img.save('{}/image.png'.format(folder_name), 'png', optimize=True,
quality=70)
txt_file = open(txt_name, 'w')
txt_file.write(str(number))
txt_file.close()
print('Downloaded data for {}, sunspots: {}'.format(day.strftime(
'%m/%d/%Y'), number))
def get_days(self):
days = []
for i in range(0, 8):
base = self.start_date + timedelta(days=7 * i)
first = base
second = base + timedelta(days=2)
third = base + timedelta(days=4)
days.append(first)
days.append(second)
days.append(third)
return days
class Entry:
folder = None
date = None
sunspots = -1
image_path = None
counted_sunspots = 0
sections = [0, 0, 0, 0]
def nothing(self, *arg):
pass
def __init__(self, folder, date, sunspots, image_path):
self.folder = folder
self.date = date
self.sunspots = sunspots
self.image_path = image_path
def process(self):
frame = cv2.imread(self.image_path)
height, width, channels = frame.shape
frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)
hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
colorLow = np.array([0, 90, 80])
colorHigh = np.array([10, 255, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
result = cv2.bitwise_and(frame, frame, mask=mask)
image_edged = cv2.Canny(mask, 50, 100)
image_edged = cv2.dilate(image_edged, None, iterations=1)
image_edged = cv2.erode(image_edged, None, iterations=1)
cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.
CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
image_contours = cv2.bitwise_not(result)
self.counted_sunspots = 0
self.sections = [0, 0, 0, 0]
section_1_start, section_1_end = 0, height / 4
section_2_start, section_2_end = height / 4, height / 4 * 2
section_3_start, section_3_end = height / 4 * 2, height / 4 * 3
section_4_start, section_4_end = height / 4 * 3, height / 4 * 4
cv2.line(image_contours, (0, section_1_end), (width, section_1_end),
(0, 0, 0), 5)
cv2.line(image_contours, (0, section_2_end), (width, section_2_end),
(0, 0, 0), 10)
cv2.line(image_contours, (0, section_3_end), (width, section_3_end),
(0, 0, 0), 5)
cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0,
0, 0), 5)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50
), font, 2, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(
self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
for c in cnts:
if cv2.contourArea(c) < 5:
continue
(x, y), radius = cv2.minEnclosingCircle(c)
x = int(x)
y = int(y)
radius = int(radius)
cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)
self.counted_sunspots = self.counted_sunspots + 1
if y >= section_1_start and y <= section_1_end:
self.sections[0] = self.sections[0] + 1
elif y >= section_2_start and y <= section_2_end:
self.sections[1] = self.sections[1] + 1
elif y >= section_3_start and y <= section_3_end:
self.sections[2] = self.sections[2] + 1
elif y >= section_4_start and y <= section_4_end:
self.sections[3] = self.sections[3] + 1
print('Counted sunspots: {}'.format(self.counted_sunspots))
print(self.sections)
cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]
), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]
), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]
), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]
), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
colorLow = np.array([0, 0, 90])
colorHigh = np.array([0, 0, 255])
mask = cv2.inRange(hsv, colorLow, colorHigh)
image_contours[mask > 0] = 0, 0, 0
vis = np.concatenate((frame, image_contours), axis=1)
cv2.imwrite('out/images/{}.png'.format(self.folder), vis)
class Processor:
entries = []
def load(self):
folders = os.listdir('data')
for folder in folders:
year = int(folder[:4])
month = int(folder[4:6])
day = int(folder[6:8])
date = datetime(year, month, day)
image_name = 'data/{}/image.png'.format(folder)
txt_name = 'data/{}/data.txt'.format(folder)
txt_file = open(txt_name, 'r')
content = txt_file.readlines()
txt_file.close()
number = int(content[0])
print(folder)
entry = Entry(folder, date, number, image_name)
entry.process()
self.entries.append(entry)
self.entries.sort(key=lambda x: x.date, reverse=False)
def compute(self):
for section in range(0, 4):
total = 0
for entry in self.entries:
total += entry.sections[section]
average = float(total) / float(len(self.entries))
print('-------[Section {}]-------'.format(section + 1))
print('Total: {}'.format(total))
print('Average: {}'.format(average))
total = 0
sections_data = [['date', 'section_1', 'section_2', 'section_3',
'section_4']]
numbers_data = [['date', 'reported', 'visible']]
for entry in self.entries:
total += entry.counted_sunspots
sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.
sections)
numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.
sunspots, entry.counted_sunspots])
average = float(total) / float(len(self.entries))
print('---------[TOTAL]---------')
print('Total: {}'.format(total))
print('Average: {}'.format(average))
csv_file = open('out/sections.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(sections_data)
csv_file.close()
csv_file = open('out/numbers.csv', 'w')
writer = csv.writer(csv_file)
writer.writerows(numbers_data)
csv_file.close()
scraper = Scraper()
scraper.scrape()
processor = Processor()
processor.load()
processor.compute()
<|reserved_special_token_1|>
from splinter import Browser
from time import sleep
from datetime import datetime, timedelta
import os, sys
import urllib
import cv2
import numpy as np
from PIL import Image
import imutils
import csv
class Scraper():
start_date = datetime(2018, 1, 8)
url = 'http://spaceweather.com/'
def scrape(self):
self.browser = Browser('firefox')
self.browser.driver.set_page_load_timeout(60)
self.browser.visit(self.url)
for day in self.get_days():
self.scrape_day(day)
def scrape_day(self, day):
self.browser.select('month', day.strftime('%m'))
self.browser.select('day', day.strftime('%d'))
self.browser.select('year', day.strftime('%Y'))
button = self.browser.find_by_name('view')
button.click()
text = self.browser.find_by_css('.solarWindText')[4].text
number = int(text.split(' ')[2].strip())
link = self.browser.find_link_by_partial_href('images{}/'.format(day.strftime('%Y')))['href']
folder_name = "data/{}{}{}".format(day.strftime('%Y'), day.strftime('%m'), day.strftime('%d'))
image_name = "{}/image.gif".format(folder_name)
txt_name = "{}/data.txt".format(folder_name)
os.mkdir(folder_name)
urllib.urlretrieve(link, image_name)
img = Image.open(image_name)
img.save("{}/image.png".format(folder_name), 'png', optimize=True, quality=70)
txt_file = open(txt_name, 'w')
txt_file.write(str(number))
txt_file.close()
print("Downloaded data for {}, sunspots: {}".format(day.strftime('%m/%d/%Y'), number))
def get_days(self):
days = []
for i in range(0, 8):
base = self.start_date + timedelta(days=7 * i)
first = base
second = base + timedelta(days=2)
third = base + timedelta(days=4)
days.append(first)
days.append(second)
days.append(third)
return days
class Entry():
    """One observation day: counts red-marked sunspots in the sun image.

    process() isolates reddish blobs via an HSV mask, counts their
    contours, tallies them per horizontal quarter of the image, and writes
    an annotated side-by-side comparison to out/images/<folder>.png.
    """
    # Class-level defaults; __init__ overwrites the first four per instance.
    folder = None
    date = None
    sunspots = -1  # reported sunspot number (SSN) scraped from the website
    image_path = None
    counted_sunspots = 0  # blobs actually detected by process()
    # Per-quarter blob counts, top to bottom; reset at the start of process().
    sections = [0, 0, 0, 0]
    def nothing(self, *arg):
        # No-op callback -- presumably a leftover from cv2 trackbar
        # experiments; never referenced in this file.
        pass
    def __init__(self, folder, date, sunspots, image_path):
        self.folder = folder
        self.date = date
        self.sunspots = sunspots
        self.image_path = image_path
    def process(self):
        """Detect sunspots in the image and save an annotated copy.

        NOTE(review): the height/4 and width/2 arithmetic relies on
        Python 2 integer division; under Python 3 these become floats and
        the cv2 drawing calls would raise -- confirm the target interpreter.
        """
        frame = cv2.imread(self.image_path)
        height, width, channels = frame.shape
        # Light blur before HSV conversion to suppress pixel noise.
        frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)
        hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)
        # Keep only strongly red pixels (low hue, high saturation/value);
        # the site marks sunspots in red on the daily sun image.
        colorLow = np.array([0,90,80])
        colorHigh = np.array([10,255,255])
        mask = cv2.inRange(hsv, colorLow, colorHigh)
        result = cv2.bitwise_and(frame, frame, mask=mask)
        # Edge-detect the mask, then dilate+erode to close small gaps
        # before looking for contours.
        image_edged = cv2.Canny(mask, 50, 100)
        image_edged = cv2.dilate(image_edged, None, iterations=1)
        image_edged = cv2.erode(image_edged, None, iterations=1)
        cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # findContours' return tuple layout differs between OpenCV 2 and 3.
        cnts = cnts[0] if imutils.is_cv2() else cnts[1]
        image_contours = cv2.bitwise_not(result)
        self.counted_sunspots = 0
        self.sections = [0, 0, 0, 0]
        # Split the image into four horizontal bands of equal height.
        section_1_start, section_1_end = 0, height/4
        section_2_start, section_2_end = height/4, height/4 * 2
        section_3_start, section_3_end = height/4 * 2, height/4 * 3
        section_4_start, section_4_end = height/4 * 3, height/4 * 4
        # Draw the band separators, the sun's outline, and the header text.
        cv2.line(image_contours, (0, section_1_end), (width, section_1_end), (0, 0, 0), 5)
        cv2.line(image_contours, (0, section_2_end), (width, section_2_end), (0, 0, 0), 10)
        cv2.line(image_contours, (0, section_3_end), (width, section_3_end), (0, 0, 0), 5)
        cv2.circle(image_contours, (width/2, height/2), width/2, (0, 0, 0), 5)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50), font, 2, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)
        for c in cnts:
            # Ignore tiny contours -- likely noise rather than a sunspot mark.
            if cv2.contourArea(c) < 5:
                continue
            (x,y),radius = cv2.minEnclosingCircle(c)
            x = int(x)
            y = int(y)
            radius = int(radius)
            # Paint the detected blob and attribute it to its band by its
            # center's y coordinate.
            cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)
            self.counted_sunspots = self.counted_sunspots + 1
            if y >= section_1_start and y <= section_1_end:
                #cv2.putText(image_contours, '1', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
                self.sections[0] = self.sections[0] + 1
            elif y >= section_2_start and y <= section_2_end:
                #cv2.putText(image_contours, '2', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
                self.sections[1] = self.sections[1] + 1
            elif y >= section_3_start and y <= section_3_end:
                #cv2.putText(image_contours, '3', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
                self.sections[2] = self.sections[2] + 1
            elif y >= section_4_start and y <= section_4_end:
                #cv2.putText(image_contours, '4', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)
                self.sections[3] = self.sections[3] + 1
        print('Counted sunspots: {}'.format(self.counted_sunspots))
        print(self.sections)
        cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
        cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)
        # Black out near-black grayish pixels left over from the inversion.
        colorLow = np.array([0,0,90])
        colorHigh = np.array([0,0,255])
        mask = cv2.inRange(hsv, colorLow, colorHigh)
        image_contours[mask > 0] = (0, 0, 0)
        # Original frame and annotated result side by side.
        vis = np.concatenate((frame, image_contours), axis=1)
        cv2.imwrite('out/images/{}.png'.format(self.folder), vis)
class Processor():
    """Loads scraped records from data/ and writes summary CSVs to out/.

    load() turns every data/YYYYMMDD/ folder into a processed Entry;
    compute() prints per-section totals/averages and writes
    out/sections.csv and out/numbers.csv.
    """

    def __init__(self):
        # Instance-level list. The original class attribute ``entries = []``
        # was a mutable class attribute shared across all Processor
        # instances -- a classic aliasing pitfall.
        self.entries = []

    def load(self):
        """Read every data/<YYYYMMDD>/ folder into a processed Entry."""
        for folder in os.listdir("data"):
            # Folder names encode the observation date as YYYYMMDD.
            year = int(folder[:4])
            month = int(folder[4:6])
            day = int(folder[6:8])
            date = datetime(year, month, day)
            image_name = "data/{}/image.png".format(folder)
            txt_name = "data/{}/data.txt".format(folder)
            # 'with' guarantees the handle is closed (original leaked it on
            # a parse error).
            with open(txt_name, 'r') as txt_file:
                content = txt_file.readlines()
            number = int(content[0])  # reported sunspot number (SSN)
            print(folder)
            entry = Entry(folder, date, number, image_name)
            entry.process()
            self.entries.append(entry)
        # Chronological order so the CSV rows form a time series.
        self.entries.sort(key=lambda x: x.date, reverse=False)

    def compute(self):
        """Print section/total statistics and write the two summary CSVs.

        Writes out/sections.csv (per-day band counts) and out/numbers.csv
        (reported vs. detected sunspots). The out/ directory must exist.
        """
        if not self.entries:
            # Guard: the original divided by len(self.entries) and raised
            # ZeroDivisionError when no data had been loaded.
            print('No entries loaded; nothing to compute.')
            return
        for section in range(0, 4):
            total = sum(entry.sections[section] for entry in self.entries)
            average = float(total) / float(len(self.entries))
            print('-------[Section {}]-------'.format(section + 1))
            print('Total: {}'.format(total))
            print('Average: {}'.format(average))
        total = 0
        sections_data = [["date", "section_1", "section_2", "section_3", "section_4"]]
        numbers_data = [["date", "reported", "visible"]]
        for entry in self.entries:
            total += entry.counted_sunspots
            sections_data.append([entry.date.strftime("%Y/%m/%d")] + entry.sections)
            numbers_data.append([entry.date.strftime("%Y/%m/%d")] + [entry.sunspots, entry.counted_sunspots])
        average = float(total) / float(len(self.entries))
        print('---------[TOTAL]---------')
        print('Total: {}'.format(total))
        print('Average: {}'.format(average))
        with open('out/sections.csv', 'w') as csv_file:
            csv.writer(csv_file).writerows(sections_data)
        with open('out/numbers.csv', 'w') as csv_file:
            csv.writer(csv_file).writerows(numbers_data)
def main():
    """Scrape spaceweather.com, then post-process the downloaded data."""
    scraper = Scraper()
    scraper.scrape()
    processor = Processor()
    processor.load()
    processor.compute()


if __name__ == '__main__':
    # Guard so importing this module does not launch a browser and start
    # scraping as a side effect.
    main()
|
flexible
|
{
"blob_id": "c55991e738c89ee09dabd79d514e710e0fcbac85",
"index": 422,
"step-1": "<mask token>\n\n\nclass Scraper:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Entry:\n folder = None\n date = None\n sunspots = -1\n image_path = None\n counted_sunspots = 0\n sections = [0, 0, 0, 0]\n\n def nothing(self, *arg):\n pass\n\n def __init__(self, folder, date, sunspots, image_path):\n self.folder = folder\n self.date = date\n self.sunspots = sunspots\n self.image_path = image_path\n\n def process(self):\n frame = cv2.imread(self.image_path)\n height, width, channels = frame.shape\n frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)\n hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)\n colorLow = np.array([0, 90, 80])\n colorHigh = np.array([10, 255, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n result = cv2.bitwise_and(frame, frame, mask=mask)\n image_edged = cv2.Canny(mask, 50, 100)\n image_edged = cv2.dilate(image_edged, None, iterations=1)\n image_edged = cv2.erode(image_edged, None, iterations=1)\n cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n image_contours = cv2.bitwise_not(result)\n self.counted_sunspots = 0\n self.sections = [0, 0, 0, 0]\n section_1_start, section_1_end = 0, height / 4\n section_2_start, section_2_end = height / 4, height / 4 * 2\n section_3_start, section_3_end = height / 4 * 2, height / 4 * 3\n section_4_start, section_4_end = height / 4 * 3, height / 4 * 4\n cv2.line(image_contours, (0, section_1_end), (width, section_1_end),\n (0, 0, 0), 5)\n cv2.line(image_contours, (0, section_2_end), (width, section_2_end),\n (0, 0, 0), 10)\n cv2.line(image_contours, (0, section_3_end), (width, section_3_end),\n (0, 0, 0), 5)\n cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0, \n 0, 0), 5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50\n ), font, 2, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 
self.date.strftime('SSN: {}'.format(\n self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)\n for c in cnts:\n if cv2.contourArea(c) < 5:\n continue\n (x, y), radius = cv2.minEnclosingCircle(c)\n x = int(x)\n y = int(y)\n radius = int(radius)\n cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)\n self.counted_sunspots = self.counted_sunspots + 1\n if y >= section_1_start and y <= section_1_end:\n self.sections[0] = self.sections[0] + 1\n elif y >= section_2_start and y <= section_2_end:\n self.sections[1] = self.sections[1] + 1\n elif y >= section_3_start and y <= section_3_end:\n self.sections[2] = self.sections[2] + 1\n elif y >= section_4_start and y <= section_4_end:\n self.sections[3] = self.sections[3] + 1\n print('Counted sunspots: {}'.format(self.counted_sunspots))\n print(self.sections)\n cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]\n ), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]\n ), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]\n ), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]\n ), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n colorLow = np.array([0, 0, 90])\n colorHigh = np.array([0, 0, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n image_contours[mask > 0] = 0, 0, 0\n vis = np.concatenate((frame, image_contours), axis=1)\n cv2.imwrite('out/images/{}.png'.format(self.folder), vis)\n\n\nclass Processor:\n entries = []\n\n def load(self):\n folders = os.listdir('data')\n for folder in folders:\n year = int(folder[:4])\n month = int(folder[4:6])\n day = int(folder[6:8])\n date = datetime(year, month, day)\n image_name = 'data/{}/image.png'.format(folder)\n txt_name = 'data/{}/data.txt'.format(folder)\n txt_file = open(txt_name, 'r')\n content = txt_file.readlines()\n 
txt_file.close()\n number = int(content[0])\n print(folder)\n entry = Entry(folder, date, number, image_name)\n entry.process()\n self.entries.append(entry)\n self.entries.sort(key=lambda x: x.date, reverse=False)\n\n def compute(self):\n for section in range(0, 4):\n total = 0\n for entry in self.entries:\n total += entry.sections[section]\n average = float(total) / float(len(self.entries))\n print('-------[Section {}]-------'.format(section + 1))\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n total = 0\n sections_data = [['date', 'section_1', 'section_2', 'section_3',\n 'section_4']]\n numbers_data = [['date', 'reported', 'visible']]\n for entry in self.entries:\n total += entry.counted_sunspots\n sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.\n sections)\n numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.\n sunspots, entry.counted_sunspots])\n average = float(total) / float(len(self.entries))\n print('---------[TOTAL]---------')\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n csv_file = open('out/sections.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(sections_data)\n csv_file.close()\n csv_file = open('out/numbers.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(numbers_data)\n csv_file.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Scraper:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def get_days(self):\n days = []\n for i in range(0, 8):\n base = self.start_date + timedelta(days=7 * i)\n first = base\n second = base + timedelta(days=2)\n third = base + timedelta(days=4)\n days.append(first)\n days.append(second)\n days.append(third)\n return days\n\n\nclass Entry:\n folder = None\n date = None\n sunspots = -1\n image_path = None\n counted_sunspots = 0\n sections = [0, 0, 0, 0]\n\n def nothing(self, *arg):\n pass\n\n def __init__(self, folder, date, sunspots, image_path):\n self.folder = folder\n self.date = date\n self.sunspots = sunspots\n self.image_path = image_path\n\n def process(self):\n frame = cv2.imread(self.image_path)\n height, width, channels = frame.shape\n frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)\n hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)\n colorLow = np.array([0, 90, 80])\n colorHigh = np.array([10, 255, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n result = cv2.bitwise_and(frame, frame, mask=mask)\n image_edged = cv2.Canny(mask, 50, 100)\n image_edged = cv2.dilate(image_edged, None, iterations=1)\n image_edged = cv2.erode(image_edged, None, iterations=1)\n cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n image_contours = cv2.bitwise_not(result)\n self.counted_sunspots = 0\n self.sections = [0, 0, 0, 0]\n section_1_start, section_1_end = 0, height / 4\n section_2_start, section_2_end = height / 4, height / 4 * 2\n section_3_start, section_3_end = height / 4 * 2, height / 4 * 3\n section_4_start, section_4_end = height / 4 * 3, height / 4 * 4\n cv2.line(image_contours, (0, section_1_end), (width, section_1_end),\n (0, 0, 0), 5)\n cv2.line(image_contours, (0, section_2_end), (width, section_2_end),\n (0, 0, 0), 10)\n cv2.line(image_contours, (0, section_3_end), (width, section_3_end),\n (0, 0, 0), 5)\n 
cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0, \n 0, 0), 5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50\n ), font, 2, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(\n self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)\n for c in cnts:\n if cv2.contourArea(c) < 5:\n continue\n (x, y), radius = cv2.minEnclosingCircle(c)\n x = int(x)\n y = int(y)\n radius = int(radius)\n cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)\n self.counted_sunspots = self.counted_sunspots + 1\n if y >= section_1_start and y <= section_1_end:\n self.sections[0] = self.sections[0] + 1\n elif y >= section_2_start and y <= section_2_end:\n self.sections[1] = self.sections[1] + 1\n elif y >= section_3_start and y <= section_3_end:\n self.sections[2] = self.sections[2] + 1\n elif y >= section_4_start and y <= section_4_end:\n self.sections[3] = self.sections[3] + 1\n print('Counted sunspots: {}'.format(self.counted_sunspots))\n print(self.sections)\n cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]\n ), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]\n ), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]\n ), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]\n ), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n colorLow = np.array([0, 0, 90])\n colorHigh = np.array([0, 0, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n image_contours[mask > 0] = 0, 0, 0\n vis = np.concatenate((frame, image_contours), axis=1)\n cv2.imwrite('out/images/{}.png'.format(self.folder), vis)\n\n\nclass Processor:\n entries = []\n\n def load(self):\n folders = os.listdir('data')\n for folder in folders:\n year = int(folder[:4])\n month = 
int(folder[4:6])\n day = int(folder[6:8])\n date = datetime(year, month, day)\n image_name = 'data/{}/image.png'.format(folder)\n txt_name = 'data/{}/data.txt'.format(folder)\n txt_file = open(txt_name, 'r')\n content = txt_file.readlines()\n txt_file.close()\n number = int(content[0])\n print(folder)\n entry = Entry(folder, date, number, image_name)\n entry.process()\n self.entries.append(entry)\n self.entries.sort(key=lambda x: x.date, reverse=False)\n\n def compute(self):\n for section in range(0, 4):\n total = 0\n for entry in self.entries:\n total += entry.sections[section]\n average = float(total) / float(len(self.entries))\n print('-------[Section {}]-------'.format(section + 1))\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n total = 0\n sections_data = [['date', 'section_1', 'section_2', 'section_3',\n 'section_4']]\n numbers_data = [['date', 'reported', 'visible']]\n for entry in self.entries:\n total += entry.counted_sunspots\n sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.\n sections)\n numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.\n sunspots, entry.counted_sunspots])\n average = float(total) / float(len(self.entries))\n print('---------[TOTAL]---------')\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n csv_file = open('out/sections.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(sections_data)\n csv_file.close()\n csv_file = open('out/numbers.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(numbers_data)\n csv_file.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Scraper:\n <mask token>\n <mask token>\n <mask token>\n\n def scrape_day(self, day):\n self.browser.select('month', day.strftime('%m'))\n self.browser.select('day', day.strftime('%d'))\n self.browser.select('year', day.strftime('%Y'))\n button = self.browser.find_by_name('view')\n button.click()\n text = self.browser.find_by_css('.solarWindText')[4].text\n number = int(text.split(' ')[2].strip())\n link = self.browser.find_link_by_partial_href('images{}/'.format(\n day.strftime('%Y')))['href']\n folder_name = 'data/{}{}{}'.format(day.strftime('%Y'), day.strftime\n ('%m'), day.strftime('%d'))\n image_name = '{}/image.gif'.format(folder_name)\n txt_name = '{}/data.txt'.format(folder_name)\n os.mkdir(folder_name)\n urllib.urlretrieve(link, image_name)\n img = Image.open(image_name)\n img.save('{}/image.png'.format(folder_name), 'png', optimize=True,\n quality=70)\n txt_file = open(txt_name, 'w')\n txt_file.write(str(number))\n txt_file.close()\n print('Downloaded data for {}, sunspots: {}'.format(day.strftime(\n '%m/%d/%Y'), number))\n\n def get_days(self):\n days = []\n for i in range(0, 8):\n base = self.start_date + timedelta(days=7 * i)\n first = base\n second = base + timedelta(days=2)\n third = base + timedelta(days=4)\n days.append(first)\n days.append(second)\n days.append(third)\n return days\n\n\nclass Entry:\n folder = None\n date = None\n sunspots = -1\n image_path = None\n counted_sunspots = 0\n sections = [0, 0, 0, 0]\n\n def nothing(self, *arg):\n pass\n\n def __init__(self, folder, date, sunspots, image_path):\n self.folder = folder\n self.date = date\n self.sunspots = sunspots\n self.image_path = image_path\n\n def process(self):\n frame = cv2.imread(self.image_path)\n height, width, channels = frame.shape\n frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)\n hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)\n colorLow = np.array([0, 90, 80])\n colorHigh = np.array([10, 255, 255])\n mask = cv2.inRange(hsv, colorLow, 
colorHigh)\n result = cv2.bitwise_and(frame, frame, mask=mask)\n image_edged = cv2.Canny(mask, 50, 100)\n image_edged = cv2.dilate(image_edged, None, iterations=1)\n image_edged = cv2.erode(image_edged, None, iterations=1)\n cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n image_contours = cv2.bitwise_not(result)\n self.counted_sunspots = 0\n self.sections = [0, 0, 0, 0]\n section_1_start, section_1_end = 0, height / 4\n section_2_start, section_2_end = height / 4, height / 4 * 2\n section_3_start, section_3_end = height / 4 * 2, height / 4 * 3\n section_4_start, section_4_end = height / 4 * 3, height / 4 * 4\n cv2.line(image_contours, (0, section_1_end), (width, section_1_end),\n (0, 0, 0), 5)\n cv2.line(image_contours, (0, section_2_end), (width, section_2_end),\n (0, 0, 0), 10)\n cv2.line(image_contours, (0, section_3_end), (width, section_3_end),\n (0, 0, 0), 5)\n cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0, \n 0, 0), 5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50\n ), font, 2, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(\n self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)\n for c in cnts:\n if cv2.contourArea(c) < 5:\n continue\n (x, y), radius = cv2.minEnclosingCircle(c)\n x = int(x)\n y = int(y)\n radius = int(radius)\n cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)\n self.counted_sunspots = self.counted_sunspots + 1\n if y >= section_1_start and y <= section_1_end:\n self.sections[0] = self.sections[0] + 1\n elif y >= section_2_start and y <= section_2_end:\n self.sections[1] = self.sections[1] + 1\n elif y >= section_3_start and y <= section_3_end:\n self.sections[2] = self.sections[2] + 1\n elif y >= section_4_start and y <= section_4_end:\n self.sections[3] = self.sections[3] + 1\n print('Counted 
sunspots: {}'.format(self.counted_sunspots))\n print(self.sections)\n cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]\n ), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]\n ), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]\n ), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]\n ), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n colorLow = np.array([0, 0, 90])\n colorHigh = np.array([0, 0, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n image_contours[mask > 0] = 0, 0, 0\n vis = np.concatenate((frame, image_contours), axis=1)\n cv2.imwrite('out/images/{}.png'.format(self.folder), vis)\n\n\nclass Processor:\n entries = []\n\n def load(self):\n folders = os.listdir('data')\n for folder in folders:\n year = int(folder[:4])\n month = int(folder[4:6])\n day = int(folder[6:8])\n date = datetime(year, month, day)\n image_name = 'data/{}/image.png'.format(folder)\n txt_name = 'data/{}/data.txt'.format(folder)\n txt_file = open(txt_name, 'r')\n content = txt_file.readlines()\n txt_file.close()\n number = int(content[0])\n print(folder)\n entry = Entry(folder, date, number, image_name)\n entry.process()\n self.entries.append(entry)\n self.entries.sort(key=lambda x: x.date, reverse=False)\n\n def compute(self):\n for section in range(0, 4):\n total = 0\n for entry in self.entries:\n total += entry.sections[section]\n average = float(total) / float(len(self.entries))\n print('-------[Section {}]-------'.format(section + 1))\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n total = 0\n sections_data = [['date', 'section_1', 'section_2', 'section_3',\n 'section_4']]\n numbers_data = [['date', 'reported', 'visible']]\n for entry in self.entries:\n total += entry.counted_sunspots\n 
sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.\n sections)\n numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.\n sunspots, entry.counted_sunspots])\n average = float(total) / float(len(self.entries))\n print('---------[TOTAL]---------')\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n csv_file = open('out/sections.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(sections_data)\n csv_file.close()\n csv_file = open('out/numbers.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(numbers_data)\n csv_file.close()\n\n\n<mask token>\n",
"step-4": "from splinter import Browser\nfrom time import sleep\nfrom datetime import datetime, timedelta\nimport os, sys\nimport urllib\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport imutils\nimport csv\n\n\nclass Scraper:\n start_date = datetime(2018, 1, 8)\n url = 'http://spaceweather.com/'\n\n def scrape(self):\n self.browser = Browser('firefox')\n self.browser.driver.set_page_load_timeout(60)\n self.browser.visit(self.url)\n for day in self.get_days():\n self.scrape_day(day)\n\n def scrape_day(self, day):\n self.browser.select('month', day.strftime('%m'))\n self.browser.select('day', day.strftime('%d'))\n self.browser.select('year', day.strftime('%Y'))\n button = self.browser.find_by_name('view')\n button.click()\n text = self.browser.find_by_css('.solarWindText')[4].text\n number = int(text.split(' ')[2].strip())\n link = self.browser.find_link_by_partial_href('images{}/'.format(\n day.strftime('%Y')))['href']\n folder_name = 'data/{}{}{}'.format(day.strftime('%Y'), day.strftime\n ('%m'), day.strftime('%d'))\n image_name = '{}/image.gif'.format(folder_name)\n txt_name = '{}/data.txt'.format(folder_name)\n os.mkdir(folder_name)\n urllib.urlretrieve(link, image_name)\n img = Image.open(image_name)\n img.save('{}/image.png'.format(folder_name), 'png', optimize=True,\n quality=70)\n txt_file = open(txt_name, 'w')\n txt_file.write(str(number))\n txt_file.close()\n print('Downloaded data for {}, sunspots: {}'.format(day.strftime(\n '%m/%d/%Y'), number))\n\n def get_days(self):\n days = []\n for i in range(0, 8):\n base = self.start_date + timedelta(days=7 * i)\n first = base\n second = base + timedelta(days=2)\n third = base + timedelta(days=4)\n days.append(first)\n days.append(second)\n days.append(third)\n return days\n\n\nclass Entry:\n folder = None\n date = None\n sunspots = -1\n image_path = None\n counted_sunspots = 0\n sections = [0, 0, 0, 0]\n\n def nothing(self, *arg):\n pass\n\n def __init__(self, folder, date, sunspots, image_path):\n 
self.folder = folder\n self.date = date\n self.sunspots = sunspots\n self.image_path = image_path\n\n def process(self):\n frame = cv2.imread(self.image_path)\n height, width, channels = frame.shape\n frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)\n hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)\n colorLow = np.array([0, 90, 80])\n colorHigh = np.array([10, 255, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n result = cv2.bitwise_and(frame, frame, mask=mask)\n image_edged = cv2.Canny(mask, 50, 100)\n image_edged = cv2.dilate(image_edged, None, iterations=1)\n image_edged = cv2.erode(image_edged, None, iterations=1)\n cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n image_contours = cv2.bitwise_not(result)\n self.counted_sunspots = 0\n self.sections = [0, 0, 0, 0]\n section_1_start, section_1_end = 0, height / 4\n section_2_start, section_2_end = height / 4, height / 4 * 2\n section_3_start, section_3_end = height / 4 * 2, height / 4 * 3\n section_4_start, section_4_end = height / 4 * 3, height / 4 * 4\n cv2.line(image_contours, (0, section_1_end), (width, section_1_end),\n (0, 0, 0), 5)\n cv2.line(image_contours, (0, section_2_end), (width, section_2_end),\n (0, 0, 0), 10)\n cv2.line(image_contours, (0, section_3_end), (width, section_3_end),\n (0, 0, 0), 5)\n cv2.circle(image_contours, (width / 2, height / 2), width / 2, (0, \n 0, 0), 5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50\n ), font, 2, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(\n self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)\n for c in cnts:\n if cv2.contourArea(c) < 5:\n continue\n (x, y), radius = cv2.minEnclosingCircle(c)\n x = int(x)\n y = int(y)\n radius = int(radius)\n cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)\n self.counted_sunspots = 
self.counted_sunspots + 1\n if y >= section_1_start and y <= section_1_end:\n self.sections[0] = self.sections[0] + 1\n elif y >= section_2_start and y <= section_2_end:\n self.sections[1] = self.sections[1] + 1\n elif y >= section_3_start and y <= section_3_end:\n self.sections[2] = self.sections[2] + 1\n elif y >= section_4_start and y <= section_4_end:\n self.sections[3] = self.sections[3] + 1\n print('Counted sunspots: {}'.format(self.counted_sunspots))\n print(self.sections)\n cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]\n ), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]\n ), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]\n ), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]\n ), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n colorLow = np.array([0, 0, 90])\n colorHigh = np.array([0, 0, 255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n image_contours[mask > 0] = 0, 0, 0\n vis = np.concatenate((frame, image_contours), axis=1)\n cv2.imwrite('out/images/{}.png'.format(self.folder), vis)\n\n\nclass Processor:\n entries = []\n\n def load(self):\n folders = os.listdir('data')\n for folder in folders:\n year = int(folder[:4])\n month = int(folder[4:6])\n day = int(folder[6:8])\n date = datetime(year, month, day)\n image_name = 'data/{}/image.png'.format(folder)\n txt_name = 'data/{}/data.txt'.format(folder)\n txt_file = open(txt_name, 'r')\n content = txt_file.readlines()\n txt_file.close()\n number = int(content[0])\n print(folder)\n entry = Entry(folder, date, number, image_name)\n entry.process()\n self.entries.append(entry)\n self.entries.sort(key=lambda x: x.date, reverse=False)\n\n def compute(self):\n for section in range(0, 4):\n total = 0\n for entry in self.entries:\n total += entry.sections[section]\n average = 
float(total) / float(len(self.entries))\n print('-------[Section {}]-------'.format(section + 1))\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n total = 0\n sections_data = [['date', 'section_1', 'section_2', 'section_3',\n 'section_4']]\n numbers_data = [['date', 'reported', 'visible']]\n for entry in self.entries:\n total += entry.counted_sunspots\n sections_data.append([entry.date.strftime('%Y/%m/%d')] + entry.\n sections)\n numbers_data.append([entry.date.strftime('%Y/%m/%d')] + [entry.\n sunspots, entry.counted_sunspots])\n average = float(total) / float(len(self.entries))\n print('---------[TOTAL]---------')\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n csv_file = open('out/sections.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(sections_data)\n csv_file.close()\n csv_file = open('out/numbers.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(numbers_data)\n csv_file.close()\n\n\nscraper = Scraper()\nscraper.scrape()\nprocessor = Processor()\nprocessor.load()\nprocessor.compute()\n",
"step-5": "from splinter import Browser\nfrom time import sleep\nfrom datetime import datetime, timedelta\nimport os, sys\nimport urllib\nimport cv2\nimport numpy as np\nfrom PIL import Image\nimport imutils\nimport csv\n\nclass Scraper():\n start_date = datetime(2018, 1, 8)\n url = 'http://spaceweather.com/'\n\n def scrape(self):\n self.browser = Browser('firefox')\n self.browser.driver.set_page_load_timeout(60)\n self.browser.visit(self.url)\n for day in self.get_days():\n self.scrape_day(day)\n\n def scrape_day(self, day):\n self.browser.select('month', day.strftime('%m'))\n self.browser.select('day', day.strftime('%d'))\n self.browser.select('year', day.strftime('%Y'))\n button = self.browser.find_by_name('view')\n button.click()\n text = self.browser.find_by_css('.solarWindText')[4].text\n number = int(text.split(' ')[2].strip())\n link = self.browser.find_link_by_partial_href('images{}/'.format(day.strftime('%Y')))['href']\n folder_name = \"data/{}{}{}\".format(day.strftime('%Y'), day.strftime('%m'), day.strftime('%d'))\n image_name = \"{}/image.gif\".format(folder_name)\n txt_name = \"{}/data.txt\".format(folder_name)\n os.mkdir(folder_name)\n urllib.urlretrieve(link, image_name)\n img = Image.open(image_name)\n img.save(\"{}/image.png\".format(folder_name), 'png', optimize=True, quality=70)\n txt_file = open(txt_name, 'w')\n txt_file.write(str(number))\n txt_file.close()\n print(\"Downloaded data for {}, sunspots: {}\".format(day.strftime('%m/%d/%Y'), number))\n\n\n def get_days(self):\n days = []\n for i in range(0, 8):\n base = self.start_date + timedelta(days=7 * i)\n first = base\n second = base + timedelta(days=2)\n third = base + timedelta(days=4)\n days.append(first)\n days.append(second)\n days.append(third)\n return days\n\nclass Entry():\n folder = None\n date = None\n sunspots = -1\n image_path = None\n counted_sunspots = 0\n sections = [0, 0, 0, 0]\n\n def nothing(self, *arg):\n pass\n\n def __init__(self, folder, date, sunspots, image_path):\n 
self.folder = folder\n self.date = date\n self.sunspots = sunspots\n self.image_path = image_path\n\n def process(self):\n frame = cv2.imread(self.image_path)\n height, width, channels = frame.shape\n frameBGR = cv2.GaussianBlur(frame, (1, 1), 0)\n hsv = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2HSV)\n \n colorLow = np.array([0,90,80])\n colorHigh = np.array([10,255,255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n result = cv2.bitwise_and(frame, frame, mask=mask)\n image_edged = cv2.Canny(mask, 50, 100)\n image_edged = cv2.dilate(image_edged, None, iterations=1)\n image_edged = cv2.erode(image_edged, None, iterations=1)\n cnts = cv2.findContours(image_edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n image_contours = cv2.bitwise_not(result)\n\n self.counted_sunspots = 0\n self.sections = [0, 0, 0, 0]\n section_1_start, section_1_end = 0, height/4\n section_2_start, section_2_end = height/4, height/4 * 2\n section_3_start, section_3_end = height/4 * 2, height/4 * 3\n section_4_start, section_4_end = height/4 * 3, height/4 * 4\n cv2.line(image_contours, (0, section_1_end), (width, section_1_end), (0, 0, 0), 5)\n cv2.line(image_contours, (0, section_2_end), (width, section_2_end), (0, 0, 0), 10)\n cv2.line(image_contours, (0, section_3_end), (width, section_3_end), (0, 0, 0), 5)\n cv2.circle(image_contours, (width/2, height/2), width/2, (0, 0, 0), 5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(image_contours, self.date.strftime('%a %b %d'), (20, 50), font, 2, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, self.date.strftime('SSN: {}'.format(self.sunspots)), (20, 100), font, 1.5, (0, 0, 0), 2, cv2.LINE_AA)\n\n for c in cnts:\n if cv2.contourArea(c) < 5:\n continue\n (x,y),radius = cv2.minEnclosingCircle(c)\n x = int(x)\n y = int(y)\n radius = int(radius)\n cv2.circle(image_contours, (x, y), radius, (100, 100, 255), -1)\n\n self.counted_sunspots = self.counted_sunspots + 1\n if y >= 
section_1_start and y <= section_1_end:\n #cv2.putText(image_contours, '1', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)\n self.sections[0] = self.sections[0] + 1\n elif y >= section_2_start and y <= section_2_end:\n #cv2.putText(image_contours, '2', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)\n self.sections[1] = self.sections[1] + 1\n elif y >= section_3_start and y <= section_3_end:\n #cv2.putText(image_contours, '3', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)\n self.sections[2] = self.sections[2] + 1\n elif y >= section_4_start and y <= section_4_end:\n #cv2.putText(image_contours, '4', (x, y - 10), font, 0.8, (100, 100, 255), 2, cv2.LINE_AA)\n self.sections[3] = self.sections[3] + 1\n print('Counted sunspots: {}'.format(self.counted_sunspots))\n print(self.sections)\n cv2.putText(image_contours, 'Section 1: {}'.format(self.sections[0]), (20, 130), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 2: {}'.format(self.sections[1]), (20, 160), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 3: {}'.format(self.sections[2]), (20, 190), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n cv2.putText(image_contours, 'Section 4: {}'.format(self.sections[3]), (20, 220), font, 1, (0, 0, 0), 2, cv2.LINE_AA)\n\n colorLow = np.array([0,0,90])\n colorHigh = np.array([0,0,255])\n mask = cv2.inRange(hsv, colorLow, colorHigh)\n image_contours[mask > 0] = (0, 0, 0)\n vis = np.concatenate((frame, image_contours), axis=1)\n\n cv2.imwrite('out/images/{}.png'.format(self.folder), vis)\n\nclass Processor():\n entries = []\n \n def load(self):\n folders = os.listdir(\"data\")\n for folder in folders:\n year = int(folder[:4])\n month = int(folder[4:6])\n day = int(folder[6:8])\n date = datetime(year, month, day)\n image_name = \"data/{}/image.png\".format(folder)\n txt_name = \"data/{}/data.txt\".format(folder)\n txt_file = open(txt_name, 'r')\n content = txt_file.readlines()\n txt_file.close()\n number = 
int(content[0])\n print(folder)\n entry = Entry(folder, date, number, image_name)\n entry.process()\n self.entries.append(entry)\n self.entries.sort(key=lambda x: x.date, reverse=False)\n\n def compute(self):\n for section in range(0, 4):\n total = 0\n for entry in self.entries:\n total += entry.sections[section]\n average = float(total) / float(len(self.entries))\n print('-------[Section {}]-------'.format(section + 1))\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n total = 0\n sections_data = [[\"date\", \"section_1\", \"section_2\", \"section_3\", \"section_4\"]]\n numbers_data = [[\"date\", \"reported\", \"visible\"]]\n for entry in self.entries:\n total += entry.counted_sunspots\n sections_data.append([entry.date.strftime(\"%Y/%m/%d\")] + entry.sections)\n numbers_data.append([entry.date.strftime(\"%Y/%m/%d\")] + [entry.sunspots, entry.counted_sunspots])\n average = float(total) / float(len(self.entries))\n print('---------[TOTAL]---------')\n print('Total: {}'.format(total))\n print('Average: {}'.format(average))\n csv_file = open('out/sections.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(sections_data)\n csv_file.close()\n csv_file = open('out/numbers.csv', 'w')\n writer = csv.writer(csv_file)\n writer.writerows(numbers_data)\n csv_file.close()\n\nscraper = Scraper()\nscraper.scrape()\nprocessor = Processor()\nprocessor.load()\nprocessor.compute()",
"step-ids": [
10,
11,
12,
17,
18
]
}
|
[
10,
11,
12,
17,
18
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def my_square_root(a, x):
e = 0.0001
while True:
y = (x + a / x) / 2
if abs(y - x) < e:
return y
break
x = y
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def my_square_root(a, x):
e = 0.0001
while True:
y = (x + a / x) / 2
if abs(y - x) < e:
return y
break
x = y
<|reserved_special_token_0|>
print('The square root of ', a, 'is ', result)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def my_square_root(a, x):
e = 0.0001
while True:
y = (x + a / x) / 2
if abs(y - x) < e:
return y
break
x = y
a = input('Find square root of which number? ')
x = input('What is your first guess?')
result = round(my_square_root(float(a), float(x)), 3)
print('The square root of ', a, 'is ', result)
<|reserved_special_token_1|>
"""Exercise 7.2. Encapsulate this loop in a function called square_root that takes a as a parameter,
chooses a reasonable value of x, and returns an estimate of the square root of a."""
def my_square_root(a,x) :
e = 0.0001
while True :
y=(x+a/x)/2
if abs(y-x) < e :
return y
break
x = y
a = input("Find square root of which number? ",)
x = input("What is your first guess?")
result = round(my_square_root(float(a),float(x)),3)
print("The square root of ",a,"is ",result)
|
flexible
|
{
"blob_id": "c9f4ae94dc901d34a3c0fb4371c8d35a7fe94507",
"index": 5095,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_square_root(a, x):\n e = 0.0001\n while True:\n y = (x + a / x) / 2\n if abs(y - x) < e:\n return y\n break\n x = y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef my_square_root(a, x):\n e = 0.0001\n while True:\n y = (x + a / x) / 2\n if abs(y - x) < e:\n return y\n break\n x = y\n\n\n<mask token>\nprint('The square root of ', a, 'is ', result)\n",
"step-4": "<mask token>\n\n\ndef my_square_root(a, x):\n e = 0.0001\n while True:\n y = (x + a / x) / 2\n if abs(y - x) < e:\n return y\n break\n x = y\n\n\na = input('Find square root of which number? ')\nx = input('What is your first guess?')\nresult = round(my_square_root(float(a), float(x)), 3)\nprint('The square root of ', a, 'is ', result)\n",
"step-5": "\"\"\"Exercise 7.2. Encapsulate this loop in a function called square_root that takes a as a parameter,\nchooses a reasonable value of x, and returns an estimate of the square root of a.\"\"\"\n\ndef my_square_root(a,x) :\n e = 0.0001\n while True :\n y=(x+a/x)/2\n if abs(y-x) < e :\n return y\n break\n x = y\n\na = input(\"Find square root of which number? \",)\nx = input(\"What is your first guess?\") \nresult = round(my_square_root(float(a),float(x)),3)\nprint(\"The square root of \",a,\"is \",result)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
##outcome: Hello, my name is B-max
print("Hello", end="")
print(", my name ", end="")
print("is B-max", end="")
print()
##outcome: ****************************************
for i in range(40):
print('*', end="")
print()
##outcome: x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*
for i in range(20):
print("x*", end="")
print()
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
##outcome: x*x*x*x*x*
##outcome: *x*x*x*x*x
for i in range(5):
for i in range(5):
print("x*", end="")
print()
for i in range(5):
print("*x", end="")
print()
|
normal
|
{
"blob_id": "41aebc4ee9cb058c3351029773be05cdc4f84ffa",
"index": 7282,
"step-1": "<mask token>\n",
"step-2": "print('Hello', end='')\nprint(', my name ', end='')\nprint('is B-max', end='')\nprint()\nfor i in range(40):\n print('*', end='')\nprint()\nfor i in range(20):\n print('x*', end='')\nprint()\nfor i in range(5):\n for i in range(5):\n print('x*', end='')\n print()\n for i in range(5):\n print('*x', end='')\n print()\n",
"step-3": "\n##outcome: Hello, my name is B-max\nprint(\"Hello\", end=\"\") \nprint(\", my name \", end=\"\")\nprint(\"is B-max\", end=\"\")\n\nprint()\n##outcome: ****************************************\n\nfor i in range(40):\n print('*', end=\"\") \n\nprint()\n\n##outcome: x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*x*\nfor i in range(20):\n print(\"x*\", end=\"\")\n \nprint()\n\n##outcome: x*x*x*x*x*\n##outcome: *x*x*x*x*x\n##outcome: x*x*x*x*x*\n\n##outcome: *x*x*x*x*x\n##outcome: x*x*x*x*x*\n##outcome: *x*x*x*x*x\n##outcome: x*x*x*x*x*\n##outcome: *x*x*x*x*x\n##outcome: x*x*x*x*x*\n##outcome: *x*x*x*x*x\n\nfor i in range(5):\n for i in range(5):\n print(\"x*\", end=\"\")\n print()\n for i in range(5):\n print(\"*x\", end=\"\")\n print()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.