prompt listlengths 1 1 | compression_prompt listlengths 1 1 | target stringlengths 1.03k 828k |
|---|---|---|
[
{
"content": "Here is a code snippet:\n```python\n# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://... | [
{
"content": "Here is a code snippet:\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n... | ```python
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Tests for criteo.data_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import six
from six.moves import range
import tensorflow.compat.v2 as tf
from uq_benchmark_2019.criteo import data_lib
class DataLibTest(absltest.TestCase):
def test_build_dataset(self):
config = data_lib.DataConfig(split='train', fake_data=True)
dataset = data_lib.build_dataset(config, batch_size=8,
is_training=False, fake_training=False)
# Check output_shapes.
features_shapes, label_shape = dataset.output_shapes
self.assertEqual([None], label_shape.as_list())
expected_keys = [data_lib.feature_name(i)
for i in range(1, data_lib.NUM_TOTAL_FEATURES+1)]
self.assertSameElements(expected_keys, list(features_shapes.keys()))
for key, shape in six.iteritems(features_shapes):
self.assertEqual([None], shape.as_list(), 'Unexpected shape at key='+key)
# Check output_types.
features_types, label_type = tf.compat.v1.data.get_output_types(dataset)
self.assertEqual(tf.float32, label_type)
for idx in data_lib.INT_FEATURE_INDICES:
self.assertEqual(tf.float32, features_types[data_lib.feature_name(idx)])
for idx in data_lib.CAT_FEATURE_INDICES:
self.assertEqual(tf.string, features_types[data_lib.feature_name(idx)])
if __name__ == '__main__':
absltest.main()
``` |
[
{
"content": "Write the code verbatim:\n```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ... | ```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('choice_text', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='ChoiceVote',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('vote', models.BooleanField(default=True)),
('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
('choice', models.ForeignKey(related_name='choice_vote', to='polls.Choice')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name=b'date published')),
],
),
migrations.AddField(
model_name='choicevote',
name='question',
field=models.ForeignKey(related_name='question_vote', to='polls.Question'),
),
migrations.AddField(
model_name='choicevote',
name='user',
field=models.ForeignKey(related_name='user_vote', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(related_name='question_choice', to='polls.Question'),
),
]
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nfrom test.support import run_unittest, open_urlresource\nimport unittest\n\nimport sys\nimport os\nfrom unicodedata import normalize, unidata_version\n\nTESTDATAFILE = \"NormalizationTest.txt\"\nTESTDATAURL = \"http://www.unicode.org/Public/\" + uni... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nfrom test.support import run_unittest, open_urlresource\nimport unittest\n\nimport sys\nimport os\nfrom unicodedata import normalize, unidata_version\n\nTESTDATAFILE = \"NormalizationTest.txt\"\nTESTDATAURL = \"http://www.unicode.org... | ```python
from test.support import run_unittest, open_urlresource
import unittest
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
if os.path.exists(TESTDATAFILE):
f = open(TESTDATAFILE, encoding='utf-8')
l = f.readline()
f.close()
if not unidata_version in l:
os.unlink(TESTDATAFILE)
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return "".join([chr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part1_data = {}
# Hit the exception early
try:
open_urlresource(TESTDATAURL, encoding="utf-8")
except IOError:
self.skipTest("Could not retrieve " + TESTDATAURL)
for line in open_urlresource(TESTDATAURL, encoding="utf-8"):
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
# try atleast adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = chr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', '\ud55c\uae00')
def test_main():
run_unittest(NormalizationTest)
if __name__ == "__main__":
test_main()
``` |
[
{
"content": "Repeat the full code snippet:\n```python\n\"\"\"empty message\n\nRevision ID: 42caf438dcdf\nRevises: None\nCreate Date: 2015-05-15 13:14:21.980616\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '42caf438dcdf'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as ... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\n\"\"\"empty message\n\nRevision ID: 42caf438dcdf\nRevises: None\nCreate Date: 2015-05-15 13:14:21.980616\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '42caf438dcdf'\ndown_revision = None\n\nfrom alembic import op\nimpor... | ```python
"""empty message
Revision ID: 42caf438dcdf
Revises: None
Create Date: 2015-05-15 13:14:21.980616
"""
# revision identifiers, used by Alembic.
revision = '42caf438dcdf'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('default', sa.Boolean(), nullable=True),
sa.Column('permissions', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=64), nullable=True),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('confirmed', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(length=64), nullable=True),
sa.Column('location', sa.String(length=64), nullable=True),
sa.Column('about_me', sa.Text(), nullable=True),
sa.Column('member_since', sa.DateTime(), nullable=True),
sa.Column('university', sa.String(length=64), nullable=True),
sa.Column('last_seen', sa.DateTime(), nullable=True),
sa.Column('avatar_hash', sa.String(length=32), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
op.create_table('follows',
sa.Column('follower_id', sa.Integer(), nullable=False),
sa.Column('followed_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('follower_id', 'followed_id')
)
op.create_table('posts',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('body_html', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comments_timestamp'), 'comments', ['timestamp'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_comments_timestamp'), table_name='comments')
op.drop_table('comments')
op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
op.drop_table('posts')
op.drop_table('follows')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_roles_default'), table_name='roles')
op.drop_table('roles')
### end Alembic commands ###
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import cm\nfrom matplotlib.ticker import MaxNLocator\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass DLPlotter:\n '''\n This class is responsible ... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import cm\nfrom matplotlib.ticker import MaxNLocator\nfrom mpl_toolkits.mplot3d import Axes3D\n\nclass DLPlotter:\n '''\n This class... | ```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
class DLPlotter:
'''
This class is responsible for plotting decision landscapes. Matplotlib is used as a background.
'''
figsize = (10.5, 6) # in inches, at 100 dpi
# figsize = (14, 8) # in inches, at 100 dpi
legendFontSize = 24
tickLabelFontSize = 18
axisLabelFontSize = 24
lw=2.0
def __init__(self, elev=27, azim=130, ax=None):
if ax is None:
fig = plt.figure(figsize=self.figsize)
self.ax = fig.add_subplot(111, projection='3d')
# self.ax = fig.gca(projection='3d')
else:
self.ax = ax
self.set_axis_params(elev, azim)
def set_axis_params(self, elev=27, azim=130):
self.ax.xaxis.set_major_locator(MaxNLocator(5))
self.ax.yaxis.set_major_locator(MaxNLocator(5))
self.ax.zaxis.set_major_locator(MaxNLocator(1))
self.ax.set_xlabel(r'x coordinate', fontsize=self.axisLabelFontSize, labelpad=20)
self.ax.set_ylabel(r'y coordinate', fontsize=self.axisLabelFontSize, labelpad=20)
self.ax.tick_params(axis='both', which='major', labelsize=self.tickLabelFontSize)
self.ax.view_init(elev, azim)
def plot_surface(self, x_grid, y_grid, z, cmap=cm.viridis, color=None, scale_z=True,
view=None, alpha=1.0, shade=False, linewidth=0.1, aa=True, plot_marble=True):
n_cells=100
x, y = np.meshgrid((x_grid[1:]+x_grid[:-1])/2, (y_grid[1:]+y_grid[:-1])/2)
z = np.nan_to_num(z)
if scale_z:
self.ax.set_zlim([np.min(z), 0])
norm = mpl.colors.Normalize(vmin=np.min(z), vmax=0, clip=False)
if plot_marble:
self.ax.plot([0.], [0.], [0.], marker='o', markersize=15, color='black')
if color is None:
self.ax.plot_surface(x, y, z, cmap=cmap, norm=norm, alpha=alpha, shade=shade,
rcount=n_cells, ccount=n_cells, linewidth=linewidth, edgecolors='k', antialiased=aa)
else:
self.ax.plot_surface(x, y, z, color=color, alpha=alpha, shade=shade, rcount=n_cells,
ccount=n_cells, linewidth=linewidth, edgecolors='k', antialiased=aa)
if view == 'top right':
self.ax.view_init(elev=27, azim=40)
return self.ax
def add_legend(self, colors, labels):
patches = [mpl.patches.Patch(color=color, linewidth=0) for color in colors]
self.ax.legend(patches, labels, fontsize=self.legendFontSize)
``` |
[
{
"content": "```python\n# Based on: https://github.com/caglar/autoencoders.git\n# http://www-etud.iro.umontreal.ca/~gulcehrc/\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nfrom layer import AEHiddenLayer, AEOutputLayer\n# from tools import NonLineari... | [
{
"content": "<|memory_start|>```python\n# Based on: https://github.com/caglar/autoencoders.git\n# http://www-etud.iro.umontreal.ca/~gulcehrc/\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nfrom layer import AEHiddenLayer, AEOutputLayer\n# from tools i... | ```python
# Based on: https://github.com/caglar/autoencoders.git
# http://www-etud.iro.umontreal.ca/~gulcehrc/
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from layer import AEHiddenLayer, AEOutputLayer
# from tools import NonLinearity, CostType, relu
from sop_embed.extra import relu
from sop_embed.extra import NonLinearity
from sop_embed.extra import CostType
import numpy as np
import cPickle as pkl
from collections import OrderedDict
theano.config.warn.subtensor_merge_bug = False
class Autoencoder(object):
"""
Typical implementation of an autoencoder.
"""
def __init__(
self,
input,
nvis,
nhid=None,
nvis_dec=None,
nhid_dec=None,
rnd=None,
bhid=None,
cost_type=CostType.MeanSquared,
momentum=1,
num_pieces=1,
L2_reg=-1,
L1_reg=-1,
sparse_initialize=False,
nonlinearity=NonLinearity.TANH,
bvis=None,
tied_weights=True,
reverse=False):
self.input = input
self.nvis = nvis
self.nhid = nhid
self.bhid = bhid
self.bvis = bvis
self.momentum = momentum
self.nonlinearity = nonlinearity
self.tied_weights = tied_weights
self.gparams = None
self.reverse = reverse
self.activation = self.get_non_linearity_fn()
self.catched_params = []
if cost_type == CostType.MeanSquared:
self.cost_type = CostType.MeanSquared
elif cost_type == CostType.CrossEntropy:
self.cost_type = CostType.CrossEntropy
if rnd is None:
self.rnd = np.random.RandomState(1231)
else:
self.rnd = rnd
self.srng = RandomStreams(seed=1231)
if not reverse:
self.hidden = AEHiddenLayer(input=input,
n_in=nvis,
n_out=nhid,
num_pieces=num_pieces,
n_in_dec=nvis_dec,
n_out_dec=nhid_dec,
activation=self.activation,
tied_weights=tied_weights,
sparse_initialize=sparse_initialize,
rng=rnd)
else:
self.hidden = AEOutputLayer(input=input,
n_in=nvis,
n_out=nhid,
num_pieces=num_pieces,
n_in_dec=nvis_dec,
n_out_dec=nhid_dec,
activation=self.activation,
tied_weights=tied_weights,
sparse_initialize=sparse_initialize,
rng=rnd)
self.params = self.hidden.params
self.sparse_initialize = sparse_initialize
self.L1_reg = L1_reg
self.L2_reg = L2_reg
self.L1 = 0
self.L2 = 0
if L1_reg != -1:
if not reverse:
self.L1 += abs(self.hidden.W).sum()
if not tied_weights:
self.L1 += abs(self.hidden.W_prime).sum()
else:
self.L1 += abs(self.hidden.W_prime).sum()
if not tied_weights:
self.L1 += abs(self.hidden.W).sum()
if L2_reg != -1:
if not reverse:
self.L2 += (self.hidden.W_prime**2).sum()
if not tied_weights:
self.L2 += (self.hidden.W**2).sum()
else:
self.L2 += (self.hidden.W**2).sum()
if not tied_weights:
self.L2 += (self.hidden.W_prime**2).sum()
if input is not None:
self.x = input
else:
self.x = T.matrix('x_input', dtype=theano.config.floatX)
def catch_params(self):
self.catched_params = []
for param in self.params:
self.catched_params.append(param.get_value())
def nonlinearity_fn(self, d_in=None, recons=False):
if self.nonlinearity == NonLinearity.SIGMOID:
return T.nnet.sigmoid(d_in)
elif self.nonlinearity == NonLinearity.RELU and not recons:
return T.maximum(d_in, 0)
elif self.nonlinearity == NonLinearity.RELU and recons:
return T.nnet.softplus(d_in)
elif self.nonlinearity == NonLinearity.TANH:
return T.tanh(d_in)
def get_non_linearity_fn(self):
if self.nonlinearity == NonLinearity.SIGMOID:
return T.nnet.sigmoid
elif self.nonlinearity == NonLinearity.RELU:
return relu
elif self.nonlinearity == NonLinearity.TANH:
return T.tanh
def encode(self, x_in=None, center=True):
if x_in is None:
x_in = self.x
act = self.nonlinearity_fn(T.dot(x_in, self.hidden.W) + self.hidden.b)
if center:
act = act - act.mean(0)
return act
def encode_linear(self, x_in=None):
if x_in is None:
x_in = self.x_in
lin_out = T.dot(x_in, self.hidden.W) + self.hidden.b
return self.nonlinearity_fn(lin_out), lin_out
def decode(self, h):
return self.nonlinearity_fn(
T.dot(h, self.hidden.W_prime) + self.hidden.b_prime)
def get_rec_cost(self, x_rec, eyes=False):
"""
Returns the reconstruction cost.
"""
if self.cost_type == CostType.MeanSquared:
return T.mean(((self.x - x_rec)**2).sum(axis=1))
elif self.cost_type == CostType.CrossEntropy:
return T.mean(
(T.nnet.binary_crossentropy(x_rec, self.x)).mean(axis=1))
def get_rec_cost_face(self, x_rec):
"""
Returns the reconstruction cost.
"""
d_eyes = (
(self.x[:, 37] - self.x[:, 46])**2 +
(self.x[:, 37] - self.x[:, 46])**2).T
if self.cost_type == CostType.MeanSquared:
return T.mean(((self.x - x_rec)**2).sum(axis=1) / d_eyes)
elif self.cost_type == CostType.CrossEntropy:
return T.mean(
(T.nnet.binary_crossentropy(
x_rec, self.x)).mean(axis=1) / d_eyes)
def kl_divergence(self, p, p_hat):
return p * T.log(p) - T.log(p_hat) + (1-p) * T.log(1-p) -\
(1-p_hat) * T.log(1-p_hat)
def sparsity_penality(self, h, sparsity_level=0.05, sparse_reg=1e-3,
batch_size=-1):
if batch_size == -1 or batch_size == 0:
raise Exception("Invalid batch size")
sparsity_level = T.extra_ops.repeat(sparsity_level, self.nhid)
sparsity_penality = 0
avg_act = h.mean(axis=0)
kl_div = self.kl_divergence(sparsity_level, avg_act)
sparsity_penality = sparse_reg * kl_div.sum()
return sparsity_penality
def act_grads(self, inputs):
h, acts = self.encode_linear(inputs)
h_grad = T.grad(h.sum(), acts)
return (h, h_grad)
def jacobian_h_x(self, inputs):
h, act_grad = self.act_grads(inputs)
jacobian = self.hidden.W * act_grad.dimshuffle(0, 'x', 1)
return (h, T.reshape(jacobian, newshape=(self.nhid, self.nvis)))
def compute_jacobian_h_x(self, inputs):
inputs = theano.shared(inputs.flatten())
h = self.encode(inputs)
# see later
# h = h.faltten()
# inputs = inputs.flatten()
# inputs = T.reshape(inputs, newshape=(self.nvis))
J = theano.gradient.jacobian(h, inputs)
return h, J
def sample_one_step(self, x, sigma):
# h, J_t = self.jacobian_h_x(x)
h, J_t = self.compute_jacobian_h_x(x)
eps = self.srng.normal(avg=0, size=(self.nhid, 1), std=sigma)
jacob_w_eps = T.dot(J_t.T, eps)
delta_h = T.dot(J_t, jacob_w_eps)
perturbed_h = h + delta_h.T
x = self.decode(perturbed_h)
return x
def sample_scan(self, x, sigma, n_steps, samples):
# Enable on-the-fly graph computations
# theano.config.compute_test_value = "raise"
in_val = T.fmatrix("input_values")
# in_val.tag.test_value = np.asarray(
# np.random.rand(1, 784), dtype=theano.config.floatX)
s_sigma = T.fscalr("sigma_values")
# s_sigma = np.asarray(
# np.random.rand(1), dtype=theano.config.floatX)
mode = "FAST_RUN"
values, updates = theano.scan(fn=self.sample_one_step,
outputs_info=in_val,
non_sequences=s_sigma,
n_steps=n_steps,
mode=mode)
ae_sampler = theano.function(inputs=[in_val, s_sigma],
outputs=values[-1],
updates=updates)
samples = ae_sampler(x, sigma)
return samples
def sample_old(self, x, sigma, n_steps):
# Enable on-the-fly graph computations
# theano.config.compute_test_value = "raise"
# in_val = T.fmatrix('input_values")
# in_val.tag.test_value = np.asarray(
# np.random.rand(1, 784), dtype=theano.config.floatX)
# s_sigma = T.fscalar("sigma_value")
# s_sigma = np.asarray(
# np.random.rand(1), dtype=theano.config.floatX)
# mode = "FAST_RUN"
samples = []
sample = x
samples.append(x)
for i in xrange(n_steps):
print "Sample %d ..." % i
sampler = self.sample_one_step(sample, sigma)
sample = sampler.eval()
samples.append(sample)
return samples
def get_sgd_updates(self, learning_rate, lr_scaler=1.0, batch_size=1,
sparsity_level=-1, sparse_reg=-1, x_in=None):
h = self.encode(x_in)
x_rec = self.decode(h)
cost = self.get_rec_cost(x_rec)
if self.L1_reg != -1 and self.L1_reg is not None:
cost += self.L1_reg * self.L1
if self.L2_reg != -1 and self.L2_reg is not None:
cost += self.L2_reg * self.L2
if sparsity_level != -1 and sparse_reg != -1:
sparsity_penal = self.sparsity_penality(
h, sparsity_level, sparse_reg, batch_size)
cost += sparsity_penal
self.gparams = T.grad(cost, self.params)
updates = OrderedDict({})
for param, gparam in zip(self.params, self.gparams):
updates[param] = self.momentum * param - lr_scaler * \
learning_rate * gparam
return (cost, updates, h, x_rec)
def get_train_cost(self, batch_size=1, sparsity_level=-1, sparse_reg=-1,
x_in=None, face=False):
h = self.encode(x_in)
x_rec = self.decode(h)
if face:
cost = self.get_rec_cost_face(x_rec)
else:
cost = self.get_rec_cost(x_rec)
if self.L1_reg != -1 and self.L1_reg is not None:
cost += self.L1_reg * self.L1
if self.L2_reg != -1 and self.L2_reg is not None:
cost += self.L2_reg * self.L2
if sparsity_level != -1 and sparse_reg != -1:
sparsity_penal = self.sparsity_penality(
h, sparsity_level, sparse_reg, batch_size)
cost += sparsity_penal
return (cost, h, x_rec)
def save_params(self, weights_file, catched=False):
"""Save the model's parameters."""
f_dump = open(weights_file, "w")
params_vls = []
if catched:
if self.catched_params != []:
params_vls = self.catched_params
else:
raise ValueError(
"You asked to save catched params," +
"but you didn't catch any!!!!!!!")
else:
for param in self.params:
params_vls.append(param.get_value())
pkl.dump(params_vls, f_dump, protocol=pkl.HIGHEST_PROTOCOL)
f_dump.close()
def set_params_vals(self, weights_file):
"""Set the values of the parameters."""
with open(weights_file, 'r') as f:
params_vls = pkl.load(f)
for param, val in zip(self.params, params_vls):
param.set_value(val)
def fit(self,
data=None,
learning_rate=0.1,
batch_size=100,
n_epochs=20,
lr_scalar=0.998,
weights_file="out/ae_weights_mnist.npy"):
"""
Fit the data to the autoencoder (training).
"""
if data is None:
raise Exception("Data can't be empty.")
index = T.lscalar("index")
data_shared = theano.shared(
np.asarray(data, dtype=theano.config.floatX))
n_batches = data.shape[0] / batch_size
(cost, updates) = self.get_sgd_updates(
learning_rate, lr_scalar, batch_size)
train_ae = theano.function(
[index], cost, updates=updates,
givens={
self.x: data_shared[index*batch_size: (index+1)*batch_size]})
print "Start training the ae."
ae_costs = []
for epoch in xrange(n_epochs):
print "Training at epoch %d" % epoch
cost_one_epoch = []
for batch_index in xrange(n_batches):
cost_one_epoch.append(train_ae(batch_index))
print "Training at epoch %d, %f" % (epoch, np.mean(cost_one_epoch))
ae_costs.append(np.mean(cost_one_epoch))
print "Saving files ..."
self.save_params(weights_file)
return ae_costs
``` |
[
{
"content": "Write the code verbatim:\n```python\nimport unittest\nfrom functools import partial\n\nfrom numpy.testing import assert_array_equal\nfrom hypothesis import given\nfrom hypothesis.strategies import sampled_from\nfrom tvtk.api import tvtk\n\nfrom simphony.core.cuba import CUBA\nfrom simphony.testing... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nimport unittest\nfrom functools import partial\n\nfrom numpy.testing import assert_array_equal\nfrom hypothesis import given\nfrom hypothesis.strategies import sampled_from\nfrom tvtk.api import tvtk\n\nfrom simphony.core.cuba import CUBA\nfrom ... | ```python
import unittest
from functools import partial
from numpy.testing import assert_array_equal
from hypothesis import given
from hypothesis.strategies import sampled_from
from tvtk.api import tvtk
from simphony.core.cuba import CUBA
from simphony.testing.abc_check_lattice import (
CheckLatticeNodeOperations, CheckLatticeNodeCoordinates)
from simphony.testing.utils import compare_lattice_nodes
from simphony.core.data_container import DataContainer
from simphony.cuds.lattice import (
make_hexagonal_lattice, make_cubic_lattice, make_orthorhombic_lattice,
make_body_centered_cubic_lattice, make_face_centered_cubic_lattice,
make_rhombohedral_lattice, make_tetragonal_lattice,
make_body_centered_tetragonal_lattice,
make_face_centered_orthorhombic_lattice,
make_base_centered_orthorhombic_lattice,
make_body_centered_orthorhombic_lattice,
make_monoclinic_lattice,
make_base_centered_monoclinic_lattice,
make_triclinic_lattice,
Lattice, LatticeNode)
from simphony.cuds.primitive_cell import BravaisLattice, PrimitiveCell
from simphony_mayavi.cuds.api import VTKLattice
from simphony_mayavi.core.api import supported_cuba
lattice_types = sampled_from([
make_cubic_lattice('test', 0.1, (3, 6, 5)),
make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6)),
make_orthorhombic_lattice('test', (0.1, 0.2, 0.3), (3, 7, 6)),
make_body_centered_cubic_lattice('test', 0.1, (3, 6, 5)),
make_face_centered_cubic_lattice('test', 0.1, (3, 6, 5)),
make_rhombohedral_lattice('test', 0.1, 0.2, (3, 6, 5)),
make_tetragonal_lattice('test', 0.1, 0.2, (3, 6, 5)),
make_body_centered_tetragonal_lattice('test', 0.1, 0.5, (3, 6, 5)),
make_face_centered_orthorhombic_lattice('test', (0.5, 0.6, 0.7),
(3, 6, 5)),
make_base_centered_orthorhombic_lattice('test', (0.5, 0.6, 0.7),
(3, 6, 5)),
make_body_centered_orthorhombic_lattice('test', (0.5, 0.6, 0.7),
(3, 6, 5)),
make_monoclinic_lattice('test', (0.5, 0.6, 0.7), 0.4,
(3, 6, 5)),
make_base_centered_monoclinic_lattice('test', (0.5, 0.6, 0.7),
0.4, (3, 6, 5)),
make_triclinic_lattice('test', (0.5, 0.6, 0.7), (0.4, 0.3, 0.2),
(3, 6, 5))])
class TestVTKLatticeNodeOperations(
CheckLatticeNodeOperations, unittest.TestCase):
def container_factory(self, name, primitive_cell, size, origin):
return VTKLattice.empty(name, primitive_cell, size, origin)
def supported_cuba(self):
return supported_cuba()
class TestVTKLatticeNodeCoordinates(
CheckLatticeNodeCoordinates, unittest.TestCase):
def container_factory(self, name, primitive_cell, size, origin):
return VTKLattice.empty(name, primitive_cell, size, origin)
def supported_cuba(self):
return supported_cuba()
class TestVTKLattice(unittest.TestCase):
def setUp(self):
self.addTypeEqualityFunc(
LatticeNode, partial(compare_lattice_nodes, testcase=self))
def test_get_node_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
# when
node = vtk_lattice.get((1, 1, 0))
# then
self.assertEqual(
node, LatticeNode(
(1, 1, 0),
data=DataContainer(VELOCITY=(1, 1, 0))))
def test_iter_nodes_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
# when/then
for node in vtk_lattice.iter(item_type=CUBA.NODE):
self.assertEqual(
node, LatticeNode(
node.index,
data=DataContainer(VELOCITY=node.index)))
self.assertEqual(sum(1 for _ in vtk_lattice.iter(
item_type=CUBA.NODE)), 120)
def test_update_nodes_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
node = vtk_lattice.get((1, 1, 0))
# when
node.data = DataContainer(VELOCITY=(1, 54, 0.3))
vtk_lattice.update((node,))
# then
new_node = vtk_lattice.get((1, 1, 0))
self.assertEqual(
new_node, LatticeNode(
(1, 1, 0),
data=DataContainer(VELOCITY=(1, 54, 0.3))))
def test_get_coordinate_on_a_xy_plane_hexagonal_lattice(self):
# given
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
vtk_lattice = VTKLattice.from_lattice(lattice)
# when/then
for node in lattice.iter(item_type=CUBA.NODE):
assert_array_equal(
vtk_lattice.get_coordinate(node.index),
lattice.get_coordinate(node.index))
def test_initialization_with_unknown_type(self):
#
lattice = make_hexagonal_lattice('test', 0.1, 0.2, (5, 4, 6))
self.add_velocity(lattice)
data = VTKLattice.from_lattice(lattice)
primitive_cell = PrimitiveCell(lattice.primitive_cell.p1,
lattice.primitive_cell.p2,
lattice.primitive_cell.p3,
"Cubic")
# when/then
with self.assertRaises(ValueError):
VTKLattice(
name=lattice.name, primitive_cell=primitive_cell,
data_set=data.data_set)
def test_initialization_with_unfamiliar_dataset(self):
# given
data_set = tvtk.UnstructuredGrid(points=[(0, 0, 0,), (1, 1, 1)])
primitive_cell = PrimitiveCell.for_cubic_lattice(1.)
# when/then
with self.assertRaises(TypeError):
VTKLattice(
name='test', primitive_cell=primitive_cell,
data_set=data_set)
def test_create_empty_with_unknown_type(self):
primitive_cell = PrimitiveCell((1., 0., 0.), (0., 1., 0.),
(0., 0., 1.), "Cubic")
# when/then
with self.assertRaises(ValueError):
VTKLattice.empty(
name='test', primitive_cell=primitive_cell, size=(3, 4, 5),
origin=(0.0, 0.0, 0.0))
def test_create_from_unfamiliar_dataset(self):
# given
data_set = tvtk.UnstructuredGrid(points=[(0, 0, 0,), (1, 1, 1)])
# when/then
with self.assertRaises(TypeError):
VTKLattice.from_dataset(name='test', data_set=data_set)
@given(lattice_types)
def test_initialization_with_dataset(self, lattice):
    """Round-tripping a lattice through from_dataset keeps its bravais type."""
    # given
    expected = VTKLattice.from_lattice(lattice)
    # when: rebuild purely from the underlying VTK dataset
    vtk_lattice = VTKLattice.from_dataset('test', expected.data_set)
    # then
    self.assertEqual(vtk_lattice.primitive_cell.bravais_lattice,
                     lattice.primitive_cell.bravais_lattice)
@given(lattice_types)
def test_creating_a_vtk_lattice_from_cuds_lattice(self, lattice):
    """from_lattice copies every public attribute of the CUDS lattice."""
    # when
    vtk_lattice = VTKLattice.from_lattice(lattice)
    # then: type, data, size, origin and all three cell vectors match
    self.assertEqual(vtk_lattice.primitive_cell.bravais_lattice,
                     lattice.primitive_cell.bravais_lattice)
    self.assertEqual(vtk_lattice.data, lattice.data)
    self.assertEqual(vtk_lattice.size, lattice.size)
    assert_array_equal(vtk_lattice.origin, lattice.origin)
    assert_array_equal(vtk_lattice.primitive_cell.p1,
                       lattice.primitive_cell.p1)
    assert_array_equal(vtk_lattice.primitive_cell.p2,
                       lattice.primitive_cell.p2)
    assert_array_equal(vtk_lattice.primitive_cell.p3,
                       lattice.primitive_cell.p3)
def test_data_setter(self):
    """Assigning a plain dict to .data coerces it into a DataContainer."""
    # when
    primitive_cell = PrimitiveCell.for_cubic_lattice(1.)
    vtk_lattice = VTKLattice.empty('test', primitive_cell, (2, 3, 4),
                                   (0, 0, 0))
    vtk_lattice.data = {CUBA.TEMPERATURE: 40.}
    # then
    self.assertIsInstance(vtk_lattice.data, DataContainer)
def test_exception_create_dataset_with_inconsistent_lattice_type(self):
    """Skewed cell vectors must not be paired with axis-aligned types."""
    # NOTE(review): presumably these three types map to a vtk ImageData
    # dataset while the skewed vectors below require PolyData — confirm.
    bad_lattice_types = (BravaisLattice.CUBIC,
                         BravaisLattice.TETRAGONAL,
                         BravaisLattice.ORTHORHOMBIC)
    for lattice_type in bad_lattice_types:
        # when
        primitive_cell = PrimitiveCell((1., 0., 0.),  # require PolyData
                                       (0.5, 0.5, 0.),
                                       (0., 0., 1.),
                                       lattice_type)
        lattice = Lattice('test', primitive_cell, (2, 3, 4),
                          (0., 0., 0.))
        # then
        with self.assertRaises(ValueError):
            VTKLattice.from_lattice(lattice)
def add_velocity(self, lattice):
    """Test helper: tag every node of *lattice* with VELOCITY == its index."""
    new_nodes = []
    for node in lattice.iter(item_type=CUBA.NODE):
        node.data[CUBA.VELOCITY] = node.index
        new_nodes.append(node)
    # push the modified nodes back into the lattice in one batch
    lattice.update(new_nodes)
# NOTE(review): runs the tests when executed directly; relies on `unittest`
# being imported earlier in this file — confirm.
if __name__ == '__main__':
    unittest.main()
``` |
[
{
"content": "Repeat the code precisely:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author:\n# Mateusz Kruszyński <mateusz.kruszynski@gmail.com>\n#\nfrom obci.gui.ugm import ugm_config_manager as m\nMAZE = {'id':'1986', \n 'stimulus_type':'maze',\n 'width_type':'relative', \... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author:\n# Mateusz Kruszyński <mateusz.kruszynski@gmail.com>\n#\nfrom obci.gui.ugm import ugm_config_manager as m\nMAZE = {'id':'1986', \n 'stimulus_type':'maze',\n 'width_typ... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author:
# Mateusz Kruszyński <mateusz.kruszynski@gmail.com>
#
from obci.gui.ugm import ugm_config_manager as m
# UGM stimulus definition for the maze widget: it fills its parent field
# completely (relative width/height == 1.0, centred both ways) and places
# the user marker at cell (2, 4) facing UP.
MAZE = {'id':'1986',
        'stimulus_type':'maze',
        'width_type':'relative',
        'width':1.0,
        'height_type':'relative',
        'height':1.0,
        'position_horizontal_type':'aligned',
        'position_horizontal':'center',
        'position_vertical_type':'aligned',
        'position_vertical':'center',
        'color':'#ffffff',
        'maze_user_x':2,
        'maze_user_y':4,
        'maze_user_direction':'UP',
        'maze_user_color':'#222777',
        'stimuluses':[]
        }
def run(maze_parent_id=102,
        output_path='configs/brain2013_config_8_fields_tablet',
        template='brain2013_config_8_fields_tablet_no_maze'):
    """Append the MAZE stimulus to field *maze_parent_id* and save the config.

    Loads the UGM config *template*, appends MAZE to the parent field's
    'stimuluses' list and writes the result to *output_path*.
    """
    mgr = m.UgmConfigManager(template)
    parent = mgr.get_config_for(maze_parent_id)
    parent['stimuluses'].append(MAZE)
    mgr.set_config(parent)
    mgr.update_to_file(output_path)


if __name__ == '__main__':
    run()
``` |
[
{
"content": "Here is the code content:\n```python\nimport re\nimport uuid\n\nimport requests\n\nfrom cloudbot import hook\n\nHIST_API = \"http://api.fishbans.com/history/{}\"\nUUID_API = \"http://api.goender.net/api/uuids/{}/\"\n\n\ndef get_name(uuid):\n # submit the profile request\n request = requests.... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nimport re\nimport uuid\n\nimport requests\n\nfrom cloudbot import hook\n\nHIST_API = \"http://api.fishbans.com/history/{}\"\nUUID_API = \"http://api.goender.net/api/uuids/{}/\"\n\n\ndef get_name(uuid):\n # submit the profile request\n req... | ```python
import re
import uuid
import requests
from cloudbot import hook
HIST_API = "http://api.fishbans.com/history/{}"
UUID_API = "http://api.goender.net/api/uuids/{}/"
def get_name(uuid):
    """Resolve a Minecraft account name from its UUID via the goender API.

    Raises requests.HTTPError on a bad response and KeyError when the API
    reply does not contain the requested UUID.
    """
    response = requests.get(UUID_API.format(uuid))
    response.raise_for_status()
    payload = response.json()
    return payload[uuid]
@hook.command("mcuser", "mcpaid", "haspaid")
def mcuser(text, bot, reply):
    """<username> - gets information about the Minecraft user <account>"""
    headers = {'User-Agent': bot.user_agent}
    text = text.strip()

    # check if we are looking up a UUID (32 hex digits, dashes ignored)
    # NOTE(review): the trailing $ after \Z is redundant — \Z already
    # anchors at the absolute end of the string; behaviour is unchanged.
    cleaned = text.replace('-', '')
    if re.search(r'^[0-9a-f]{32}\Z$', cleaned, re.I):
        # we are looking up a UUID, get a name.
        try:
            name = get_name(cleaned)
        except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, KeyError) as e:
            reply("Could not get username from UUID: {}".format(e))
            # re-raise so the bot framework records the full traceback
            raise
    else:
        name = text

    # get user data from fishbans
    try:
        request = requests.get(HIST_API.format(requests.utils.quote(name)), headers=headers)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        reply("Could not get profile status: {}".format(e))
        raise

    # read the fishbans data
    try:
        results = request.json()
    except ValueError:
        return "Could not parse profile status"

    # check for errors from fishbans and handle them
    if not results['success']:
        if results['error'] == "User is not premium.":
            return "The account \x02{}\x02 is not premium or does not exist.".format(text)
        return results['error']

    username = results['data']['username']
    uid = uuid.UUID(results['data']['uuid'])

    # \x02 is IRC bold
    return 'The account \x02{}\x02 ({}) exists. It is a \x02paid\x02' \
           ' account.'.format(username, uid)
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that t... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín\n# All rights reserved.\n\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Cabral, Juan; Sanchez, Bruno & Berois, Martín
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
# DOCS
# =============================================================================
"""This are the tests for de tests pipeline"""
# =============================================================================
# CLASSES
# =============================================================================
# =============================================================================
# IMPORTS
# =============================================================================
from corral import qa
from . import steps, commands
# =============================================================================
# EXAMPLES
# =============================================================================
class ExampleTestStep1(qa.TestCase):
    """Example QA test for steps.Step1 with no fixtures and no assertions."""

    subject = steps.Step1

    def setup(self):
        # No fixtures required for this example.
        pass

    def validate(self):
        # No assertions — NOTE(review): presumably passing just means the
        # subject ran without raising; confirm against corral.qa semantics.
        pass
class ExampleTestCommand(qa.TestCase):
    """Example QA test for commands.TestAPICommand (empty setup/validate)."""

    subject = commands.TestAPICommand

    def setup(self):
        # No fixtures required for this example.
        pass

    def validate(self):
        # No assertions for this example command.
        pass
``` |
[
{
"content": "```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom glob import glob\nfrom subprocess import check_output, CalledProcessError\n\n\ndef get_usb_devices():\n \"\"\"\n Lista dispositivos USB conectados\n\n :return:\n \"\"\"\n sdb_devices = map(os.path.realpath, g... | [
{
"content": "<|memory_start|>```python\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom glob import glob\nfrom subprocess import check_output, CalledProcessError\n\n\ndef get_usb_devices():\n \"\"\"\n Lista dispositivos USB conectados\n\n :return:\n \"\"\"\n sdb_devices = map(os.... | ```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from glob import glob
from subprocess import check_output, CalledProcessError
def get_usb_devices():
    """List connected USB block devices.

    Returns a mapping {device basename (e.g. 'sdb'): resolved /sys path}
    for every /sys/block/sd* entry whose real path contains a 'usb'
    segment at index 5.
    """
    found = {}
    for entry in glob('/sys/block/sd*'):
        real_path = os.path.realpath(entry)
        if 'usb' in real_path.split('/')[5]:
            found[os.path.basename(real_path)] = real_path
    return found
def get_mount_points(devices=None):
    """List mount points of connected USB devices.

    :param devices: mapping of device basenames (e.g. 'sdb') to /sys paths;
        defaults to ``get_usb_devices()``.
    :return: list of ``[device, mount point]`` pairs, e.g.
        ``[['/dev/sdb1', '/media/bisa/BACKUP']]``
    """
    devices = devices or get_usb_devices()  # if devices is falsy: autodetect
    # Decode once so all parsing below deals with text, not bytes.
    output = check_output(['mount']).decode('utf-8').splitlines()
    is_usb = lambda path: any(dev in path for dev in devices)
    usb_info = (line for line in output if is_usb(line.split()[0]))
    full_info = []
    for info in usb_info:
        parts = info.split()
        mount_uri = parts[0]
        usb_uri = parts[2]
        # A mount point containing spaces is split across several tokens;
        # re-join every token up to the literal "type" keyword.
        # BUG FIX: the original iterated range(3, parts.__sizeof__()) —
        # __sizeof__() is the object's size in BYTES, not its length — and
        # compared tokens with bytes.__eq__("type"), whose NotImplemented
        # result is always truthy under Python 3. Use len() and == instead.
        for end in range(3, len(parts)):
            if parts[end] == "type":
                for idx in range(3, end):
                    usb_uri += " " + parts[idx]
                break
        full_info.append([mount_uri, usb_uri])
    return full_info
``` |
[
{
"content": "Here is the code block:\n```python\n# -*-coding:Utf-8 -*\n\n# Copyright (c) 2012 LE GOFF Vincent\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions o... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# -*-coding:Utf-8 -*\n\n# Copyright (c) 2012 LE GOFF Vincent\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * R... | ```python
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la classe Obstacle, détaillée plus bas."""
from abstraits.obase import BaseObj
class Obstacle(BaseObj):

    """Class representing an obstacle in a body of water.

    Obstacles are single points of which only the location changes.
    This saves a lot of space for bodies of water with a few hundred
    obstacles or more.

    """

    # BaseObj flag: persist instances of this class.
    enregistrer = True

    def __init__(self, nom, desc_survol):
        """Constructor: *nom* is the obstacle's name, *desc_survol* the
        description shown when hovering over it."""
        BaseObj.__init__(self)
        self.nom = nom
        self.desc_survol = desc_survol
        self.symbole = ""
        self._construire()

    def __getnewargs__(self):
        # Arguments used to recreate the object during unpickling.
        return ("", "")
``` |
[
{
"content": "```python\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Interpreter version: python 2.7\n#\n# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!\n# Imports =====================================================================\nfrom collections import namedtuple\n\n\n# Functions and cl... | [
{
"content": "<|memory_start|>```python\n#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Interpreter version: python 2.7\n#\n# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!\n# Imports =====================================================================\nfrom collections import namedtuple\n\n\n# ... | ```python
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!
# Imports =====================================================================
from collections import namedtuple
# Functions and classes =======================================================
# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!
# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!
# Field names of the Archive structure, in positional order.
_PUB_FIELDS = [
    "isbn",
    "uuid",
    "aleph_id",
    "b64_data",
    "dir_pointer",
]
class Archive(namedtuple('Archive', _PUB_FIELDS)):
    '''
    Communication structure used to send data to `storage` subsystem over AMQP.

    Attributes:
        isbn (str): ISBN for the archive.
        uuid (str): UUID string to pair the archive with edeposit.
        aleph_id (str): ID used in aleph.
        b64_data (str): Base64 encoded data ebook file.
        dir_pointer (str): Pointer to the directory on the file server.
    '''
    def __new__(self, *args, **kwargs):
        # Fold positional arguments into kwargs so every field can be
        # defaulted below (plain namedtuple would raise on missing ones).
        for field, arg in zip(_PUB_FIELDS, args):
            kwargs[field] = arg

        # Default any field not supplied to None.
        for key in _PUB_FIELDS:
            if key not in kwargs:
                kwargs[key] = None

        return super(Archive, self).__new__(self, **kwargs)

    def __init__(self, *args, **kwargs):
        # Reject unknown keyword names; mirrors the mapping done in __new__.
        # NOTE: iteritems() makes this Python 2 only — the file is generated
        # (see the DO NOT EDIT banner), so fix the generator, not this file.
        for field, arg in zip(_PUB_FIELDS, args):
            kwargs[field] = arg

        for key, val in kwargs.iteritems():
            if key not in _PUB_FIELDS:
                raise ValueError("Unknown parameter '%s'!" % key)

            self.__dict__[key] = val
# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!
# !!! DO NOT EDIT THIS FILE - THIS IS GENERATED FILE !!!
``` |
[
{
"content": "Write the code verbatim:\n```python\nfrom flask import Flask, redirect, url_for, session, request, jsonify, flash\nfrom flask_oauthlib.client import OAuth\nfrom flask_login import login_user\nimport json\n\nfrom . import util\nfrom . import app, db\nfrom . import models\noauth = OAuth(app)\n\ngoog... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nfrom flask import Flask, redirect, url_for, session, request, jsonify, flash\nfrom flask_oauthlib.client import OAuth\nfrom flask_login import login_user\nimport json\n\nfrom . import util\nfrom . import app, db\nfrom . import models\noauth = OA... | ```python
from flask import Flask, redirect, url_for, session, request, jsonify, flash
from flask_oauthlib.client import OAuth
from flask_login import login_user
import json
from . import util
from . import app, db
from . import models
# OAuth client registry bound to the Flask app.
oauth = OAuth(app)

# Google OAuth2 endpoint configuration; only the 'email' scope is requested.
google = oauth.remote_app(
    'google',
    consumer_key=app.config.get('GOOGLE_ID'),
    consumer_secret=app.config.get('GOOGLE_SECRET'),
    request_token_params={
        'scope': 'email'
    },
    base_url='https://www.googleapis.com/oauth2/v1/',
    request_token_url=None,  # None: OAuth2, not OAuth1
    access_token_method='POST',
    access_token_url='https://accounts.google.com/o/oauth2/token',
    authorize_url='https://accounts.google.com/o/oauth2/auth',
)
@app.route('/oauth_login')
def oauth_login():
    """Start the Google OAuth flow; Google redirects back to oauth_authorized."""
    return google.authorize(callback=url_for('oauth_authorized', _external=True))
@app.route('/login/authorized')
def oauth_authorized():
    """OAuth callback: log the Google user in, or create a disabled account.

    Looks up an existing OAUTH-provider user by e-mail; unknown users get a
    disabled account and must wait for an administrator to enable it.
    """
    resp = google.authorized_response()
    if resp is None:  # OAuth authorization failed
        flash("OAuth login failed: %s -> %s" % (request.args['error_reason'], request.args['error_description']))
        return redirect(url_for("home"))
    session['google_token'] = (resp['access_token'], '')  # Stick it in the session (if we potentially decide to use
                                                          # more of Google's API features later, e.g. mailing or
                                                          # whatever we'll need this for the OAuth scope in the
                                                          # API calls
    me = google.get('userinfo').data  # Snarf out the user's free data
    user = models.User.query.filter_by(username=me["email"], auth_provider="OAUTH").first()  # Is there a user with this
                                                                                             # email using OAuth already?
    if user:  # If so...
        return util.try_login_user(user)  # Proceed to try to log them in
    else:  # Otherwise
        user = models.User(  # Create a (disabled) account for them for the admin to enable later
            marss_id=-1,  # Cant find this w/o some kind of DB dump, if even applicable
            username=me["email"],  # Google's return gaurenteed to have email, this is the username for OAuth accounts
            name=me["name"],  # Google's return sometimes has name, otherwise empty string
            email=me["email"],  # Store it here too
            auth_provider="OAUTH",  # Use OAUTH provider, duh!
            enabled=False  # And leave them disabled
        )  # Default permission='view'
        db.session.add(user)
        db.session.commit()
        flash("Please wait for an Administrator to enable your account")
        return redirect(url_for("login_user_page"))
@google.tokengetter
def get_google_oauth_token():
    """Return the cached (access_token, secret) pair for flask-oauthlib."""
    return session.get('google_token')
``` |
[
{
"content": "Here is the snippet:\n```python\n#encoding:utf-8\n\nimport logging\n\nimport yaml\nimport praw\n\nimport utils\nfrom reporting_stuff import report_error\nfrom utils.tech import long_sleep, short_sleep\n\n\ndef send_to_channel_from_subreddit(how_to_post, channel_to_post, subreddit, submissions_rank... | [
{
"content": "Here is the snippet:\n<|memory_start|>```python\n#encoding:utf-8\n\nimport logging\n\nimport yaml\nimport praw\n\nimport utils\nfrom reporting_stuff import report_error\nfrom utils.tech import long_sleep, short_sleep\n\n\ndef send_to_channel_from_subreddit(how_to_post, channel_to_post, subreddit, ... | ```python
#encoding:utf-8
import logging
import yaml
import praw
import utils
from reporting_stuff import report_error
from utils.tech import long_sleep, short_sleep
def send_to_channel_from_subreddit(how_to_post, channel_to_post, subreddit, submissions_ranking, submissions_limit, config, **kwargs):
    """Walk a subreddit listing and feed submissions to *how_to_post*.

    Iterates the listing selected by *submissions_ranking*
    ('top' | 'hot' | 'new'), skipping submissions already sent or with too
    many errors, until *how_to_post* posts one to *channel_to_post* or
    explicitly stops the supply.

    Returns:
        False when nothing was posted (including an unknown ranking),
        otherwise the last utils.SupplyResult produced by *how_to_post*.
    """
    reddit = praw.Reddit(
        user_agent=config['reddit']['user_agent'],
        client_id=config['reddit']['client_id'],
        client_secret=config['reddit']['client_secret'],
        username=config['reddit']['username'],
        password=config['reddit']['password']
    )
    if submissions_ranking == 'top':
        submissions = reddit.subreddit(subreddit).top(limit=submissions_limit)
    elif submissions_ranking == 'hot':
        submissions = reddit.subreddit(subreddit).hot(limit=submissions_limit)
    elif submissions_ranking == 'new':
        submissions = reddit.subreddit(subreddit).new(limit=submissions_limit)
    else:
        logging.error('Unknown submissions_ranking. {}'.format(submissions_ranking))
        # BUG FIX: execution previously fell through with `submissions`
        # undefined and crashed with NameError in the loop below.
        return False
    r2t = utils.Reddit2TelegramSender(channel_to_post, config)
    success = False
    for submission in submissions:
        link = submission.shortlink
        if r2t.was_before(link):
            continue
        if r2t.too_much_errors(link):
            continue
        if kwargs.get('extra_args', False):
            success = how_to_post(submission, r2t, **kwargs)
        else:
            success = how_to_post(submission, r2t)
        if success == utils.SupplyResult.SUCCESSFULLY:
            # Every thing is ok, post was sent
            r2t.mark_as_was_before(link, sent=True)
            break
        elif success == utils.SupplyResult.DO_NOT_WANT_THIS_SUBMISSION:
            # Do not want to send this post
            r2t.mark_as_was_before(link, sent=False)
            continue
        elif success == utils.SupplyResult.SKIP_FOR_NOW:
            # Do not want to send now
            continue
        elif success == utils.SupplyResult.STOP_THIS_SUPPLY:
            # If None — do not want to send anything this time
            break
        else:
            logging.error('Unknown SupplyResult. {}'.format(success))
    # BUG FIX: the result was computed but never returned, so callers'
    # `success is False` checks (see supply()) could never fire and the
    # ranking fallbacks were dead code.
    return success
@report_error
def supply(submodule_name, config, is_test=False):
    """Run one posting cycle for the channel module *submodule_name*.

    Resolves the module's preferred ranking (default 'hot'), posts via
    send_to_channel_from_subreddit and — when the module did not state a
    ranking itself — falls back to 'new' and then 'top' if nothing was
    posted. Cleans up the channel's working state at the end.
    """
    if not is_test:
        long_sleep(2)
    submodule = utils.channels_stuff.import_submodule(submodule_name)
    submissions_ranking_stated = getattr(submodule, 'submissions_ranking', None)
    if submissions_ranking_stated not in ['hot', 'new', 'top']:
        submissions_ranking = 'hot'
    else:
        submissions_ranking = submissions_ranking_stated
    submissions_limit = getattr(submodule, 'submissions_limit', 100)
    channel_to_post = submodule.t_channel if not is_test else '@r_channels_test'
    success = send_to_channel_from_subreddit(how_to_post=submodule.send_post,
                                             channel_to_post=channel_to_post,
                                             subreddit=submodule.subreddit,
                                             submissions_ranking=submissions_ranking,
                                             submissions_limit=submissions_limit,
                                             config=config,
                                             extra_args=False
                                             )
    # NOTE(review): send_to_channel_from_subreddit has no return statement in
    # this file, so `success` is always None and the fallback below is dead
    # code — the callee should return its result; confirm and fix there.
    if success is False:
        logging.info('Nothing to post from {sub} to {channel}.'.format(
            sub=submodule.subreddit, channel=submodule.t_channel))
        if submissions_ranking_stated is None:
            success = send_to_channel_from_subreddit(how_to_post=submodule.send_post,
                                                     channel_to_post=channel_to_post,
                                                     subreddit=submodule.subreddit,
                                                     submissions_ranking='new',
                                                     submissions_limit=submissions_limit,
                                                     config=config,
                                                     extra_args=False
                                                     )
            if success is False:
                success = send_to_channel_from_subreddit(how_to_post=submodule.send_post,
                                                         channel_to_post=channel_to_post,
                                                         subreddit=submodule.subreddit,
                                                         submissions_ranking='top',
                                                         submissions_limit=submissions_limit,
                                                         config=config,
                                                         extra_args=False
                                                         )
    utils.clean_after_module(channel_to_post)
def main(config_filename, sub, is_test=False):
    """Load the YAML config and run the supply cycle.

    In test mode the cycle is hammered 100 times with a short pause
    between iterations; otherwise it runs exactly once.
    """
    with open(config_filename) as fh:
        cfg = yaml.safe_load(fh.read())
    if is_test:
        for i in range(100):
            print('i =', i, '>')
            supply(sub, cfg, is_test)
            short_sleep(0.1)
    else:
        supply(sub, cfg, is_test)
# Command-line entry point: `--config` selects the YAML config file,
# `--sub` the channel submodule to run, `--test` enables the stress loop.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', default='configs/prod.yml')
    parser.add_argument('--test', action='store_true')
    parser.add_argument('--sub')
    args = parser.parse_args()
    main(args.config, args.sub, args.test)
``` |
[
{
"content": "```python\nclass BaseGroup(object):\n \"\"\"\n A collection of tests and other groups.\n \n Groups are stored as a tree. They know of their children and parent.\n \"\"\"\n\n def __init__(self, subject=None, children=None, parent=None):\n # An identifier for this group\n ... | [
{
"content": "<|memory_start|>```python\nclass BaseGroup(object):\n \"\"\"\n A collection of tests and other groups.\n \n Groups are stored as a tree. They know of their children and parent.\n \"\"\"\n\n def __init__(self, subject=None, children=None, parent=None):\n # An identifier for... | ```python
class BaseGroup(object):
    """A collection of tests and other groups.

    Groups form a tree: each group knows its children and its parent.
    """

    def __init__(self, subject=None, children=None, parent=None):
        # identifier for this group
        self.subject = subject
        # this group's subgroups
        self.children = children if children else []
        # the group this group inherits from
        self.parent = parent
        self.tests = []
        self.is_collecting = False

    def __repr__(self):
        return u'<%s: %s>' % (self.__class__.__name__, self.subject)

    def add_child(self, group):
        """Attach *group* as the last child and point it back at this group."""
        group.parent = self
        self.children.append(group)

    def get_collecting_group(self):
        """Return the right-most group currently collecting in this subtree."""
        for candidate in self.children[::-1]:
            collecting = candidate.get_collecting_group()
            if collecting:
                return collecting
        return self if self.is_collecting else None

    def get_descendant_tests(self):
        """Flat list of tests from all descendants (this group's excluded)."""
        gathered = []
        for child in self.children:
            gathered += child.tests
            gathered += child.get_descendant_tests()
        return gathered

    def __iter__(self):
        """Iterate this group's tests followed by all descendants' tests."""
        return iter(self.tests + self.get_descendant_tests())

    def add_test(self, test):
        """Register *test* with this group, tagging it with a back-reference."""
        assert not hasattr(test, '_pspec_group')
        test._pspec_group = self
        self.tests.append(test)
``` |
[
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 7 16:23:52 2016\n\nCopyright 2015 Nicolo' Navarin\n\nThis file is part of count-mean-sketch based on https://github.com/rafacarrascosa/countminsketch.\n\ncount-mean-... | [
{
"content": "Write out the code verbatim, preserving indentation and whitespace:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 7 16:23:52 2016\n\nCopyright 2015 Nicolo' Navarin\n\nThis file is part of count-mean-sketch based on https://github.com/rafacarrascosa/countminsketch... | ```python
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 16:23:52 2016
Copyright 2015 Nicolo' Navarin
This file is part of count-mean-sketch based on https://github.com/rafacarrascosa/countminsketch.
count-mean-sketch is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
count-mean-sketch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with count-mean-sketch. If not, see <http://www.gnu.org/licenses/>.
"""
import hashlib
import os
#import array
import itertools
import numpy as np
import string
from numpy import median
import numpy.matlib
import copy
from itertools import izip
from numpy import random, sqrt, log, sin, cos, pi
from scipy.sparse import csr_matrix, linalg
#from joblib import Parallel, delayed
#import multiprocessing
import scipy
import tables as tb
def processInput(i, m, rs):
    """Build one column of the random projection matrix.

    Seeds numpy deterministically from the column index *i* and run seed
    *rs*, draws m N(0, 1) samples scaled by sqrt(m), and returns
    (row, col, data) triplets for a sparse column.

    NOTE: uses xrange, so this module is Python 2 only.
    """
    numpy.random.seed(i + (rs * 10000))
    v = numpy.random.normal(0, 1, m)
    v = numpy.multiply(sqrt(m), v)
    row = [idx for idx in xrange(m)]
    col = [i for idx in xrange(m)]
    data = v
    return (row, col, data)
class CountMinSketch(object):
    """
    A class for counting hashable items using the Count-min Sketch strategy.
    It fulfills a similar purpose than `itertools.Counter`.

    The Count-min Sketch is a randomized data structure that uses a constant
    amount of memory and has constant insertion and lookup times at the cost
    of an arbitrarily small overestimation of the counts.

    It has two parameters:
     - `m` the size of the hash tables, larger implies smaller overestimation
     - `d` the amount of hash tables, larger implies lower probability of
       overestimation.

    An example usage:

        from countminsketch import CountMinSketch
        sketch = CountMinSketch(1000, 10)  # m=1000, d=10
        sketch.add("oh yeah")
        sketch.add(tuple())
        sketch.add(1, value=123)
        print sketch["oh yeah"]       # prints 1
        print sketch[tuple()]         # prints 1
        print sketch[1]               # prints 123
        print sketch["non-existent"]  # prints 0

    Note that this class can be used to count *any* hashable type, so it's
    possible to "count apples" and then "ask for oranges". Validation is up to
    the user.

    NOTE: uses print statements, xrange and tables (pytables) — Python 2 only.
    """
    def __init__(self, m, samplesize, rs):
        """Create the sketch and materialise an (samplesize x m) random
        projection matrix on disk (HDF5, random file name).

        :param m: projection dimension (number of rows of the projection).
        :param samplesize: input dimensionality (columns of the projection).
        :param rs: run seed; column i is seeded with i + rs * 10000 so the
            matrix is reproducible for a given rs.
        """
        if not m:
            raise ValueError("Table size (m) and amount of hash functions (d)"
                             " must be non-zero")
        self.n = 0
        self.m = m
        self.samplesize = samplesize
        self.rs = rs
        # per-row bias terms, shape (m, 1); start at zero
        self.mus = numpy.asarray([0.0] * m).reshape(self.m, 1)
        print "mus", self.mus.shape
        #self.tables = numpy.matlib.zeros(shape=(m,samplesize))
        #self.tables=numpy.random.normal(size=(m,samplesize))
#        for _ in xrange(d):
#            table = array.array("d", (0.0 for _ in xrange(m)))
#            self.tables.append(table)
        # initialize projection matrix, stored out of core in an HDF5 file
        # under a random 16-character name to avoid collisions.
        import random as rnd
        #numpy.random.seed(self.rs * 10000)
        filename = ''.join(rnd.choice(string.ascii_uppercase + string.digits) for _ in range(16))
        #filename= "test"
        self.filename = filename + '.h5'
        h5file = tb.open_file(self.filename, mode='w', title="Random Projection Matrix")
        root = h5file.root
        self.x = h5file.create_carray(root, 'x', tb.Float64Atom(), shape=(self.samplesize, self.m))
        print "generating matrix of shape", self.samplesize, self.m
        for i in range(self.samplesize):
            # one deterministic seed per row — must match processInput()
            numpy.random.seed(i + (self.rs * 10000))
            #v = numpy.random.normal(0, 1, self.m)
            self.x[i, :self.m] = numpy.random.normal(0, 1, self.m)  # Now put in some data
        print "Random projection matrix saved on file", filename + '.h5'

    def transform(self, vector):
        """Project the sparse column *vector* through the stored matrix.

        Builds a sparse (m, samplesize) operator from the rows of the
        projection matrix that correspond to vector's non-zeros (scaled by
        sqrt(m)) plus the bias terms (mus scaled by vector's L1 norm), and
        returns the dense (m, 1) product.

        NOTE(review): assumes *vector* is a scipy.sparse column of shape
        (samplesize, 1) — confirm against callers.
        """
        # mus is a vector of the means
        #print "example size", vector.shape
        #print "transformation size", self.tables.shape
        #tables=csr_matrix ((self.m,self.samplesize))
        #num_cores = multiprocessing.cpu_count()
        indices = vector.nonzero()[0]
        #print vector.shape
        norm = scipy.sparse.linalg.norm(vector, 1)
        #print norm
#        results = Parallel(n_jobs=num_cores)(delayed(processInput)(i,self.m,self.rs) for i in indices)
#        parrow = []
#        parcol = []
#        pardata = []
#        for (row,col,v) in results:
#            parrow.extend(row)
#            parcol.extend(col)
#            pardata.extend(v)
        row = []
        col = []
        data = []
        data_nobias = []
        vbias = []
        #print indices
        #print indices
        #RPM=self.x[indices,:self.m]
        #print RPM
        # rows of the on-disk projection matrix for the active indices
        data_nobias = self.x[indices, :self.m].ravel()
        #data_nobias=list(itertools.chain.from_iterable([self.x[i,:self.m] for i in indices]))
        #print data_nobias
        # bias column (norm * mus), repeated once per active index
        data = np.tile(numpy.multiply(norm, self.mus).ravel(), len(indices))
        #data=list(itertools.chain.from_iterable([numpy.multiply(norm, self.mus).ravel()]*len(indices)))
        #print data
        row = np.tile(range(self.m), len(indices))
        #row=range(self.m)*len(indices)
        #print row
        col = np.repeat(indices, self.m)
        #col=np.tile([i]* self.m,len(indices))
        #col=list(itertools.chain.from_iterable([[i]* self.m for i in indices]))
        #print col
#        print data_nobias
#        for i in indices:
#            #numpy.random.seed(i+(self.rs*10000))
#            v=self.x[i,:self.m].reshape(self.m,1)
#            #v=numpy.multiply(sqrt(self.m),v).reshape(self.m,1)
#            #print "v", v.shape
#            #print "munorm", (self.mus*norm).shape
#            #vbias.extend(numpy.multiply(norm, self.mu))
#            #print "vbias", vbias.shape
#            row.extend(range(self.m))
#            col.extend([i]* self.m)
#            data.extend(numpy.multiply(norm, self.mus).ravel())  # include the bias term
#            data_nobias.extend(v.ravel())
        #print data
        tables_nobias = csr_matrix((data_nobias, (row, col)), shape=(self.m, self.samplesize))
        tables_nobias = scipy.sparse.csr_matrix.multiply(tables_nobias, sqrt(self.m))
        #vbias.extend(numpy.multiply(norm,self.mu))
        toadd = csr_matrix((data, (row, col)), shape=(self.m, self.samplesize))
        tables = tables_nobias + toadd  #csr_matrix ((data,(row,col)), shape=(self.m,self.samplesize))
        transformation = np.multiply(tables, vector).todense()
        #print transformation.shape
        #assert(parrow==row)
        #assert(parcol==col)
        #assert(pardata==data)
        #TODO return vector in which i-th (1-tanh(R_i\phi(g) +norm*\mu_i)^2 * norm)
        #then just multiply each entry by y w_i to get the gradient
        #self.norm=norm
        #val2= self.norm*self.mus
        #print "val2", val2.shape
        #print "tablesnobias", tables_nobias.shape
        #print "vector", vector.shape
        #self.Rphix= (np.multiply(tables_nobias,vector)).todense()
        #val3=self.Rphix+val2
        #print "val3",val3.shape
        #ones = np.ones(self.m).reshape(self.m,1)
        #print "ones", ones.shape
        #derivative= np.multiply((ones-numpy.square(val3)),norm)
        #print derivative
        return transformation  # Probably I'll need to return v (to compute the bs)

    def removetmp(self):
        """Delete the on-disk HDF5 projection matrix created in __init__."""
        os.remove(self.filename)
        print "removed temporary file"
``` |
[
{
"content": "Here is the code block:\n```python\n# Copyright 2016 United States Government as represented by the Administrator\n# of the National Aeronautics and Space Administration. All Rights Reserved.\n#\n# Portion of this code is Copyright Geoscience Australia, Licensed under the\n# Apache License, Versio... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n# Copyright 2016 United States Government as represented by the Administrator\n# of the National Aeronautics and Space Administration. All Rights Reserved.\n#\n# Portion of this code is Copyright Geoscience Australia, Licensed under the\n# Apache... | ```python
# Copyright 2016 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. All Rights Reserved.
#
# Portion of this code is Copyright Geoscience Australia, Licensed under the
# Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License
# at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The CEOS 2 platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from . import views
# Author: AHDS
# Creation date: 2016-06-23
# Modified by:
# Last modified date:
urlpatterns = [
url(r'^(\w+)/details/(\d+)', views.get_query_details, name='get_query_details'),
url(r'^(?P<app_id>[\w\-]+)', views.get_task_manager, name='get_task_manager'),
]
``` |
[
{
"content": "```python\n# -*- encoding: utf-8 -*-\n###########################################################################\n# Module Writen to OpenERP, Open Source Management Solution\n#\n# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/\n# All Rights Reserved.\n# info Vauxoo (info@vauxoo.co... | [
{
"content": "<|memory_start|>```python\n# -*- encoding: utf-8 -*-\n###########################################################################\n# Module Writen to OpenERP, Open Source Management Solution\n#\n# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/\n# All Rights Reserved.\n# info Vauxoo... | ```python
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: moylop260 (moylop260@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import wizard_open_move_line
import wizard_print_report
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'setgoaldialog.ui'\n#\n# Created: Tue Dec 23 18:15:13 2014\n# by: PyQt4 UI code generator 4.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n ... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'setgoaldialog.ui'\n#\n# Created: Tue Dec 23 18:15:13 2014\n# by: PyQt4 UI code generator 4.11.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, Q... | ```python
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'setgoaldialog.ui'
#
# Created: Tue Dec 23 18:15:13 2014
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_setgoalsdialog(object):
def setupUi(self, setgoalsdialog):
setgoalsdialog.setObjectName(_fromUtf8("setgoalsdialog"))
setgoalsdialog.resize(434, 241)
self.setgoaldialogtopLabel = QtGui.QLabel(setgoalsdialog)
self.setgoaldialogtopLabel.setGeometry(QtCore.QRect(40, 30, 381, 16))
self.setgoaldialogtopLabel.setObjectName(_fromUtf8("setgoaldialogtopLabel"))
self.setgoaldialoggoalLabel = QtGui.QLabel(setgoalsdialog)
self.setgoaldialoggoalLabel.setGeometry(QtCore.QRect(130, 70, 59, 15))
self.setgoaldialoggoalLabel.setObjectName(_fromUtf8("setgoaldialoggoalLabel"))
self.horizontalLayoutWidget = QtGui.QWidget(setgoalsdialog)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(100, 90, 177, 41))
self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.setgoalsdialoggoallayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
self.setgoalsdialoggoallayout.setMargin(0)
self.setgoalsdialoggoallayout.setObjectName(_fromUtf8("setgoalsdialoggoallayout"))
self.setgoaldialogvalue = QtGui.QSpinBox(self.horizontalLayoutWidget)
self.setgoaldialogvalue.setLayoutDirection(QtCore.Qt.RightToLeft)
self.setgoaldialogvalue.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.setgoaldialogvalue.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
self.setgoaldialogvalue.setObjectName(_fromUtf8("setgoaldialogvalue"))
self.setgoalsdialoggoallayout.addWidget(self.setgoaldialogvalue)
self.setgoaldialoghrslabel = QtGui.QLabel(self.horizontalLayoutWidget)
self.setgoaldialoghrslabel.setObjectName(_fromUtf8("setgoaldialoghrslabel"))
self.setgoalsdialoggoallayout.addWidget(self.setgoaldialoghrslabel)
self.setgoaldialogDueDate = QtGui.QDateEdit(setgoalsdialog)
self.setgoaldialogDueDate.setGeometry(QtCore.QRect(220, 100, 110, 22))
self.setgoaldialogDueDate.setLayoutDirection(QtCore.Qt.RightToLeft)
self.setgoaldialogDueDate.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.setgoaldialogDueDate.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
self.setgoaldialogDueDate.setDisplayFormat(_fromUtf8(""))
self.setgoaldialogDueDate.setObjectName(_fromUtf8("setgoaldialogDueDate"))
self.setgoalduedateLabel = QtGui.QLabel(setgoalsdialog)
self.setgoalduedateLabel.setGeometry(QtCore.QRect(240, 70, 61, 20))
self.setgoalduedateLabel.setObjectName(_fromUtf8("setgoalduedateLabel"))
self.horizontalLayoutWidget_2 = QtGui.QWidget(setgoalsdialog)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(90, 180, 334, 41))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.setdialogbuttonslayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.setdialogbuttonslayout.setMargin(0)
self.setdialogbuttonslayout.setObjectName(_fromUtf8("setdialogbuttonslayout"))
self.pushButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.setdialogbuttonslayout.addWidget(self.pushButton)
self.setgoaldialogAcceptButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.setgoaldialogAcceptButton.setObjectName(_fromUtf8("setgoaldialogAcceptButton"))
self.setdialogbuttonslayout.addWidget(self.setgoaldialogAcceptButton)
self.setgoaldialogCancelButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.setgoaldialogCancelButton.setObjectName(_fromUtf8("setgoaldialogCancelButton"))
self.setdialogbuttonslayout.addWidget(self.setgoaldialogCancelButton)
self.retranslateUi(setgoalsdialog)
QtCore.QMetaObject.connectSlotsByName(setgoalsdialog)
def retranslateUi(self, setgoalsdialog):
setgoalsdialog.setWindowTitle(_translate("setgoalsdialog", "Dialog", None))
self.setgoaldialogtopLabel.setText(_translate("setgoalsdialog", "You Are Currently At num Hours. Please Set A New Goal:", None))
self.setgoaldialoggoalLabel.setText(_translate("setgoalsdialog", "GOAL", None))
self.setgoaldialoghrslabel.setText(_translate("setgoalsdialog", "hrs", None))
self.setgoalduedateLabel.setText(_translate("setgoalsdialog", "Due Date", None))
self.pushButton.setText(_translate("setgoalsdialog", "VIEW CURRENT GOALS", None))
self.setgoaldialogAcceptButton.setText(_translate("setgoalsdialog", "ACCEPT", None))
self.setgoaldialogCancelButton.setText(_translate("setgoalsdialog", "CANCEL", None))
``` |
[
{
"content": "Write the code verbatim:\n```python\nfrom functools import wraps\nfrom flask import render_template, request, url_for\nfrom app.models import PatchState\n\ndef filterable(f):\n \"\"\"Filter a query\"\"\"\n @wraps(f)\n def wrapped(*args, **kwargs):\n d = f(*args, **kwargs)\n\n ... | [
{
"content": "Write the code verbatim:\n<|memory_start|>```python\nfrom functools import wraps\nfrom flask import render_template, request, url_for\nfrom app.models import PatchState\n\ndef filterable(f):\n \"\"\"Filter a query\"\"\"\n @wraps(f)\n def wrapped(*args, **kwargs):\n d = f(*args, **k... | ```python
from functools import wraps
from flask import render_template, request, url_for
from app.models import PatchState
def filterable(f):
"""Filter a query"""
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
q = d['query']
state = request.args.get('state', None, type=str)
if state:
q = q.filter_by(state=PatchState.from_string(state))
# add more filters later
d['query'] = q
return d
return wrapped
def paginable(pagename, max_per_page=50):
"""Paginate a query"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
q = d['query']
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', max_per_page, type=int)
p = q.paginate(page, per_page, False)
if not p.items:
d['page'] = None
d[pagename] = q.paginate(1, per_page, False)
else:
d[pagename] = p
return d
return wrapped
return decorator
def render(template):
"""render a query"""
def decorator(f):
@wraps(f)
def wrapped(*args, **kwargs):
d = f(*args, **kwargs)
def endpoint(**up):
kwargs.update(up)
return url_for(request.endpoint, **kwargs)
d['endpoint'] = endpoint
return render_template(template, **d)
return wrapped
return decorator
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\nimport psycopg2\nimport os, sys\n\n\nTOPIC_NUM_LIST = [30, 100, 200, 500]\n\nif len(sys.argv) is 1:\n print(\"トピック数を入力\")\n exit()\n\ntopic_num = int(sys.argv[1])\nif not topic_num in TOPIC_NUM_LIST:\n print(\"入力可能なトピック数は \", end=... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\nimport psycopg2\nimport os, sys\n\n\nTOPIC_NUM_LIST = [30, 100, 200, 500]\n\nif len(sys.argv) is 1:\n print(\"トピック数を入力\")\n exit()\n\ntopic_num = int(sys.argv[1])\nif not topic_num in TOPIC_NUM_LIST:\n print(\"入力可能... | ```python
import psycopg2
import os, sys
TOPIC_NUM_LIST = [30, 100, 200, 500]
if len(sys.argv) is 1:
print("トピック数を入力")
exit()
topic_num = int(sys.argv[1])
if not topic_num in TOPIC_NUM_LIST:
print("入力可能なトピック数は ", end="")
for each in TOPIC_NUM_LIST:
print("{0} ".format(each), end="")
print("です.")
exit()
DBPATH = "dbname=image_tagging host=localhost user=postgres"
con = psycopg2.connect(DBPATH)
concur = con.cursor()
concur.execute('''select distinct a.tweet_id from answer as a, answer_all as b
where a.tweet_id=b.tweet_id''')
tweet_id_list = [x for x in map(lambda y: y[0], concur.fetchall())]
lda_score = {}
except_score = {}
histgram_dic = {}
query = "select distinct tag from exp_rawlda{0} where tweet_id=%s".format(topic_num)
for each_tweet_id in tweet_id_list:
concur.execute(query, (each_tweet_id,))
tag_set = { x for x in map(lambda y: y[0], concur.fetchall()) }
concur.execute('''select distinct tag from answer where tweet_id=%s''', (each_tweet_id,))
except_tag_set = { x for x in map(lambda y: y[0], concur.fetchall()) } - tag_set
good_num = 0
bad_num = 0
for each_tag in tag_set:
concur.execute('''select score from answer
where tweet_id=%s and tag=%s''', (each_tweet_id, each_tag))
score = concur.fetchone()[0]
if score is 1:
good_num += 1
else:
bad_num += 1
if not bad_num in histgram_dic.keys():
histgram_dic[bad_num] = 1
else:
histgram_dic[bad_num] += 1
except_good_num = 0
except_bad_num = 0
for each_tag in except_tag_set:
concur.execute('''select score from answer
where tweet_id=%s and tag=%s''', (each_tweet_id, each_tag))
score = concur.fetchone()[0]
if score is 1:
except_good_num += 1
else:
except_bad_num += 1
lda_score[each_tweet_id] = {'good_num': good_num, 'bad_num': bad_num}
except_score[each_tweet_id] = {'good_num': except_good_num, 'bad_num': except_bad_num}
good_rate_sum = 0
good_only_num = 0
bad_only_num = 0
good_sum = 0
bad_sum = 0
zero_num = 0
for each_tweet_id, value in lda_score.items():
each_good_num = value['good_num']
each_bad_num = value['bad_num']
good_sum += each_good_num
bad_sum += each_bad_num
if each_good_num > 0 and each_bad_num is 0:
good_only_num += 1
if each_good_num is 0 and each_bad_num > 0:
bad_only_num += 1
if each_good_num + each_bad_num == 0:
zero_num += 1
else:
good_rate_sum += each_good_num / (each_good_num + each_bad_num)
good_rate = round(good_rate_sum / (len(lda_score) - zero_num), 3)
total_good_rate = round(good_sum / (good_sum + bad_sum), 3)
except_good_sum = 0
except_bad_sum = 0
except_bad_rate_sum = 0
zero_num = 0
for each_tweet_id, value in except_score.items():
each_good_num = value['good_num']
each_bad_num = value['bad_num']
except_good_sum += each_good_num
except_bad_sum += each_bad_num
if each_good_num + each_bad_num is 0:
zero_num += 1
else:
except_bad_rate_sum += each_bad_num / (each_good_num + each_bad_num)
except_bad_rate = round(except_bad_rate_sum / (len(except_score)-zero_num), 3)
remain_bad_rate = round(bad_sum / (bad_sum + except_bad_sum), 3)
total_tag_num = good_sum + bad_sum + except_good_sum + except_bad_sum
good_only_rate = round(good_only_num / len(lda_score), 3)
good_and_bad_rate = round((len(lda_score) - bad_only_num - good_only_num) / len(lda_score), 3)
bad_only_rate = 1.0 - good_only_rate - good_and_bad_rate
print('''正解タグのみの割合: {0}({1})
正解タグとノイズ両方を含む割合: {2}
ノイズタグのみを含む割合: {3}
正解タグ含有率の平均: {4}
付与したタグのうち正解だった数: {5} / {6} = {7}
全ノイズタグのうち除去できなかったタグの数: {8} / {9} = {10}
全タグ数: {11}
'''.format(good_only_rate, len(lda_score), good_and_bad_rate, bad_only_rate, good_rate, good_sum, good_sum+bad_sum, \
total_good_rate, bad_sum, bad_sum+except_bad_sum, remain_bad_rate, total_tag_num))
good_recall_rate_sum = 0
fmeasure_sum = 0
zero_num = 0
for each_tweet_id in tweet_id_list:
each_good_num = lda_score[each_tweet_id]['good_num']
each_bad_num = lda_score[each_tweet_id]['bad_num']
each_except_good_num = except_score[each_tweet_id]['good_num']
if each_good_num + each_except_good_num is 0:
zero_num += 1
else:
if each_good_num + each_bad_num != 0:
precision = each_good_num / (each_good_num + each_bad_num)
else:
precision = 0
if each_good_num + each_except_good_num != 0:
recall = each_good_num / (each_good_num + each_except_good_num)
else:
recall = 0
good_recall_rate_sum += recall
if precision + recall != 0:
fmeasure_sum += 2*precision*recall / (precision + recall)
ave_recall_rate = round(good_recall_rate_sum / (len(lda_score)-zero_num), 3)
total_recall = round(good_sum / (good_sum+except_good_sum), 3)
good_fmeasure = round(2*total_good_rate*total_recall / (total_good_rate + total_recall), 3)
ave_good_fmeasure = round(fmeasure_sum / (len(tweet_id_list)-zero_num), 3)
print('''正解タグ
全体の適合率: {0}
全体の再現率: {1}
F値: {2}
適合率の平均: {3}
再現率の平均: {4}
F値(平均): {5}
'''.format(total_good_rate, total_recall, good_fmeasure, good_rate, ave_recall_rate, ave_good_fmeasure))
except_bad_recall_rate_sum = 0
removed_fmeasure_sum = 0
zero_num = 0
for each_tweet_id in tweet_id_list:
each_bad_num = lda_score[each_tweet_id]['bad_num']
each_except_good_num = except_score[each_tweet_id]['good_num']
each_except_bad_num = except_score[each_tweet_id]['bad_num']
if each_bad_num + each_except_bad_num is 0:
zero_num += 1
else:
if each_except_good_num + each_except_bad_num != 0:
precision = each_except_bad_num / (each_except_good_num + each_except_bad_num)
else:
precision = 0
if each_bad_num + each_except_bad_num != 0:
recall = each_except_bad_num / (each_bad_num + each_except_bad_num)
else:
recall = 0
except_bad_recall_rate_sum += recall
if precision + recall != 0:
removed_fmeasure_sum += 2*precision*recall / (precision + recall)
ave_bad_recall_rate = round(except_bad_recall_rate_sum / (len(lda_score)-zero_num), 3)
removed_bad_precision = round(except_bad_sum / (except_good_sum + except_bad_sum), 3)
removed_bad_recall = round(except_bad_sum / (bad_sum + except_bad_sum), 3)
removed_bad_fmeasure = round(2*removed_bad_precision*removed_bad_recall / (removed_bad_precision + removed_bad_recall), 3)
ave_removed_bad_fmeasure = round(removed_fmeasure_sum / (len(tweet_id_list)-zero_num), 3)
print('''除去したノイズタグ
全体の適合率: {0}
全体の再現率: {1}
F値: {2}
適合率の平均: {3}
再現率の平均: {4}
F値(平均): {5}
'''.format(removed_bad_precision, removed_bad_recall, removed_bad_fmeasure, except_bad_rate, ave_bad_recall_rate, ave_removed_bad_fmeasure))
print("提案手法適用後のノイズ数分布(トピック数:{0})".format(topic_num))
print("ノイズ数,画像数")
for k, v in histgram_dic.items():
print("{0},{1}".format(k, v))
``` |
[
{
"content": "Here is the code content:\n```python\nimport sys\nsys.path.append('../py')\n\nfrom iroha import *\nfrom iroha.iroha import *\n\nd = IDesign()\nmod = IModule(d, \"mod\")\ntab1 = ITable(mod)\nsreg = design_tool.CreateSharedReg(tab1, \"o\", 32)\n\nwtab = ITable(mod)\nw = design_tool.CreateSharedRegWr... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\nimport sys\nsys.path.append('../py')\n\nfrom iroha import *\nfrom iroha.iroha import *\n\nd = IDesign()\nmod = IModule(d, \"mod\")\ntab1 = ITable(mod)\nsreg = design_tool.CreateSharedReg(tab1, \"o\", 32)\n\nwtab = ITable(mod)\nw = design_tool.C... | ```python
import sys
sys.path.append('../py')
from iroha import *
from iroha.iroha import *
d = IDesign()
mod = IModule(d, "mod")
tab1 = ITable(mod)
sreg = design_tool.CreateSharedReg(tab1, "o", 32)
wtab = ITable(mod)
w = design_tool.CreateSharedRegWriter(wtab, sreg)
wst1 = IState(wtab)
wst2 = IState(wtab)
wst3 = IState(wtab)
wtab.initialSt = wst1
wtab.states.append(wst1)
wtab.states.append(wst2)
wtab.states.append(wst3)
design_tool.AddNextState(wst1, wst2)
design_tool.AddNextState(wst2, wst3)
winsn = IInsn(w)
rc = design_tool.AllocConstNum(wtab, False, 32, 123)
winsn.inputs.append(rc)
winsn.operand = "notify"
wst1.insns.append(winsn)
spinsn = IInsn(w)
spinsn.inputs.append(rc)
spinsn.operand = "put_mailbox"
wst2.insns.append(spinsn)
rtab = ITable(mod)
r = design_tool.CreateSharedRegReader(rtab, sreg)
rst1 = IState(rtab)
rst2 = IState(rtab)
rst3 = IState(rtab)
rtab.initialSt = rst1
rtab.states.append(rst1)
rtab.states.append(rst2)
rtab.states.append(rst3)
design_tool.AddNextState(rst1, rst2)
design_tool.AddNextState(rst2, rst3)
rinsn = IInsn(r)
reg = IRegister(rtab, "r_local")
rinsn.outputs.append(reg)
rinsn.operand = "wait_notify"
rst1.insns.append(rinsn)
sginsn = IInsn(r)
sginsn.inputs.append(rc)
sginsn.operand = "get_mailbox"
rst2.insns.append(sginsn)
design_tool.ValidateIds(d)
DesignWriter(d).Write()
``` |
[
{
"content": "```python\ntitle = 'Pmw.ButtonBox demonstration'\n\n# Import Pmw from this directory tree.\nimport sys\nsys.path[:0] = ['../../..']\n\nimport Tkinter\nimport Pmw\n\nclass Demo:\n def __init__(self, parent):\n\t# Create and pack the ButtonBox.\n\tself.buttonBox = Pmw.ButtonBox(parent,\n ... | [
{
"content": "<|memory_start|>```python\ntitle = 'Pmw.ButtonBox demonstration'\n\n# Import Pmw from this directory tree.\nimport sys\nsys.path[:0] = ['../../..']\n\nimport Tkinter\nimport Pmw\n\nclass Demo:\n def __init__(self, parent):\n\t# Create and pack the ButtonBox.\n\tself.buttonBox = Pmw.ButtonBox(pa... | ```python
title = 'Pmw.ButtonBox demonstration'
# Import Pmw from this directory tree.
import sys
sys.path[:0] = ['../../..']
import Tkinter
import Pmw
class Demo:
def __init__(self, parent):
# Create and pack the ButtonBox.
self.buttonBox = Pmw.ButtonBox(parent,
labelpos = 'nw',
label_text = 'ButtonBox:',
frame_borderwidth = 2,
frame_relief = 'groove')
self.buttonBox.pack(fill = 'both', expand = 1, padx = 10, pady = 10)
# Add some buttons to the ButtonBox.
self.buttonBox.add('OK', command = self.ok)
self.buttonBox.add('Apply', command = self.apply)
self.buttonBox.add('Cancel', command = self.cancel)
# Set the default button (the one executed when <Return> is hit).
self.buttonBox.setdefault('OK')
parent.bind('<Return>', self._processReturnKey)
parent.focus_set()
# Make all the buttons the same width.
self.buttonBox.alignbuttons()
def _processReturnKey(self, event):
self.buttonBox.invoke()
def ok(self):
print 'You clicked on OK'
def apply(self):
print 'You clicked on Apply'
def cancel(self):
print 'You clicked on Cancel'
######################################################################
# Create demo in root window for testing.
if __name__ == '__main__':
root = Tkinter.Tk()
Pmw.initialise(root)
root.title(title)
exitButton = Tkinter.Button(root, text = 'Exit', command = root.destroy)
exitButton.pack(side = 'bottom')
widget = Demo(root)
root.mainloop()
``` |
[
{
"content": "Here is the code block:\n```python\n\"\"\"\nArrow shapes\n\"\"\"\n\nfrom pynoded.graph import GraphObject\nfrom math import atan2,pi\nfrom cubicspline import cubicspline\nfrom numpy import array\n\nclass Arrow(GraphObject):\n \"\"\"\n An arrow connecting two objects.\n \"\"\"\n def __i... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n\"\"\"\nArrow shapes\n\"\"\"\n\nfrom pynoded.graph import GraphObject\nfrom math import atan2,pi\nfrom cubicspline import cubicspline\nfrom numpy import array\n\nclass Arrow(GraphObject):\n \"\"\"\n An arrow connecting two objects.\n \"\... | ```python
"""
Arrow shapes
"""
from pynoded.graph import GraphObject
from math import atan2,pi
from cubicspline import cubicspline
from numpy import array
class Arrow(GraphObject):
"""
An arrow connecting two objects.
"""
def __init__(self,parent,x0,y0,x1,y1,color):
GraphObject.__init__(self,parent,x0,y0)
self.x1=x1
self.y1=y1
self.color=color
self.maxdist = 3
def Draw_(self, ctx):
x1,y1=self.ToLocal(self.x1, self.y1)
ctx.set_line_width(1)
linewidth,_ = ctx.device_to_user_distance(1., 1.)
ctx.set_line_width(linewidth)
ctx.set_source_rgb(*self.color)
ctx.move_to(0,0)
dist = abs(complex(x1, y1))
elast = dist/2.0
ctx.curve_to(elast, 0, x1-elast, y1, x1, y1)
ctx.stroke()
data = [[float(elast), float(0)],
[float(x1-elast), float(y1)],
[float(x1), float(y1)],
[0, 0]]
data = array(data)
time, val = cubicspline(data, 123)
if linewidth > self.maxdist:
return
ctx.move_to(x1, y1)
# following is to draw the arrow in direction of line
# but now we're drawing the in/out tangential, so not needed
# angle=atan2(0,x1)
# ctx.rotate(angle)
ctx.rel_line_to(-6*linewidth,0)
ctx.rel_line_to(0,2*linewidth)
ctx.rel_line_to(6*linewidth,-2*linewidth)
ctx.rel_line_to(-6*linewidth,-2*linewidth)
ctx.rel_line_to(0,2*linewidth)
ctx.fill_preserve()
ctx.stroke()
def Test(self,x,y):
return False
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import DataMigration\nfrom django.db import models\n\nclass Migration(DataMigration):\n\n def forwards(self, orm):\n \"Write your forwards methods her... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import DataMigration\nfrom django.db import models\n\nclass Migration(DataMigration):\n\n def forwards(self, orm):\n \"Write your forw... | ```python
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
to_migrate = orm['fixmystreet.Report'].objects.filter(mark_as_done_motivation__isnull=False)
for report in to_migrate:
contact_user = report.mark_as_done_user
print 'Report', report.id
print ' Motivation', report.mark_as_done_motivation
print ' User', contact_user
# If it's a citizen who did the action, no comment
if report.mark_as_done_motivation and contact_user:
comment = orm['fixmystreet.ReportComment']()
comment.text = report.mark_as_done_motivation
comment.user = contact_user
comment.report = report
comment.save()
report.mark_as_done_comment = comment
report.save()
print 'mark_as_done_comment migrated:', len(to_migrate)
def backwards(self, orm):
"Write your backwards methods here."
to_delete = orm['fixmystreet.Report'].objects.filter(mark_as_done_motivation__isnull=False, mark_as_done_comment__isnull=False)
for report in to_delete:
contact_user = report.mark_as_done_user
print 'Report', report.id
print ' Motivation', report.mark_as_done_comment
print ' User', contact_user
report.mark_as_done_comment.delete()
print 'mark_as_done_comment deleted:', len(to_delete)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "'!'", 'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fixmystreet.faqentry': {
'Meta': {'ordering': "['order']", 'object_name': 'FaqEntry'},
'a_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'a_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'q_fr': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'q_nl': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.fmsuser': {
'Meta': {'ordering': "['last_name']", 'object_name': 'FMSUser', '_ormbases': [u'auth.User']},
'agent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'type'", 'blank': 'True', 'to': u"orm['fixmystreet.ReportCategory']"}),
'contractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fmsuser_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'last_used_language': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '10', 'null': 'True'}),
'leader': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fmsuser_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
u'fixmystreet.historicalfmsuser': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalFMSUser'},
'agent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_used_language': ('django.db.models.fields.CharField', [], {'default': "'FR'", 'max_length': '10', 'null': 'True'}),
'leader': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'organisation_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'default': "'!'", 'max_length': '128'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
u'user_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'})
},
u'fixmystreet.historicalorganisationentity': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalOrganisationEntity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'commune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'department': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'dependency_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feature_id': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'region': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subcontractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1'})
},
u'fixmystreet.historicalpage': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalPage'},
'content_fr': ('ckeditor.fields.RichTextField', [], {}),
'content_nl': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.historicalreport': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalReport'},
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'address_fr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_nl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_number_as_int': ('django.db.models.fields.IntegerField', [], {'max_length': '255'}),
'address_regional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'category_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'citizen_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'contractor_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'created_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_planned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'false_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gravity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hash_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
u'mark_as_done_comment_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'mark_as_done_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'mark_as_done_user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'merged_with_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'modified_by_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '31370', 'null': 'True', 'blank': 'True'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'probability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'refusal_comment_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'refusal_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'responsible_department_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'responsible_entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
u'responsible_manager_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'responsible_manager_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'secondary_category_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'default': "'web'"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'terms_of_use_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_pro': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.listitem': {
'Meta': {'object_name': 'ListItem'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'label_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'model_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'model_field': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fixmystreet.mailnotificationtemplate': {
'Meta': {'object_name': 'MailNotificationTemplate'},
'content_fr': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.organisationentity': {
'Meta': {'ordering': "['name_fr']", 'object_name': 'OrganisationEntity'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'applicant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'commune': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organisationentity_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'department': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dependency': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'associates'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'dispatch_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assigned_to_department'", 'blank': 'True', 'to': u"orm['fixmystreet.ReportCategory']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feature_id': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'organisationentity_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'region': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'subcontractor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1'})
},
u'fixmystreet.page': {
'Meta': {'object_name': 'Page'},
'content_fr': ('ckeditor.fields.RichTextField', [], {}),
'content_nl': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.report': {
'Meta': {'object_name': 'Report'},
'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'address_fr': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_nl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_number_as_int': ('django.db.models.fields.IntegerField', [], {'max_length': '255'}),
'address_regional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportMainCategoryClass']", 'null': 'True', 'blank': 'True'}),
'citizen': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'citizen_reports'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'close_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'contractor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_reports'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'report_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'date_planned': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'false_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fixed_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gravity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'hash_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mark_as_done_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_mark_as_done'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['fixmystreet.ReportComment']"}),
'mark_as_done_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mark_as_done_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_solved'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'merged_with': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'merged_reports'", 'null': 'True', 'to': u"orm['fixmystreet.Report']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'report_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'photo': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'planned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '31370', 'null': 'True', 'blank': 'True'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'previous_managers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'previous_reports'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['fixmystreet.FMSUser']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'probability': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'quality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'refusal_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_refusal'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['fixmystreet.ReportComment']"}),
'refusal_motivation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'responsible_department': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reports_in_department'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'responsible_entity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_in_charge'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'responsible_manager': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'reports_in_charge'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'responsible_manager_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'secondary_category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportCategory']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'default': "'web'"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'terms_of_use_validated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thumbnail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_pro': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.reportattachment': {
'Meta': {'object_name': 'ReportAttachment'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportattachment_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logical_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportattachment_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': u"orm['fixmystreet.Report']"}),
'security_level': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
u'fixmystreet.reportcategory': {
'Meta': {'object_name': 'ReportCategory'},
'category_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['fixmystreet.ReportMainCategoryClass']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportcategory_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportcategory_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'secondary_category_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'categories'", 'to': u"orm['fixmystreet.ReportSecondaryCategoryClass']"}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportcategoryhint': {
'Meta': {'object_name': 'ReportCategoryHint'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_fr': ('django.db.models.fields.TextField', [], {}),
'label_nl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportcomment': {
'Meta': {'object_name': 'ReportComment', '_ormbases': [u'fixmystreet.ReportAttachment']},
u'reportattachment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fixmystreet.ReportAttachment']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'fixmystreet.reporteventlog': {
'Meta': {'ordering': "['event_at']", 'object_name': 'ReportEventLog'},
'event_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_with_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'related_new_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'related_old_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'to': u"orm['fixmystreet.Report']"}),
'status_new': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'status_old': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activities'", 'null': 'True', 'to': u"orm['auth.User']"}),
'value_old': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
u'fixmystreet.reportfile': {
'Meta': {'object_name': 'ReportFile', '_ormbases': [u'fixmystreet.ReportAttachment']},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'file_creation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'file_type': ('django.db.models.fields.IntegerField', [], {}),
'image': ('django_fixmystreet.fixmystreet.utils.FixStdImageField', [], {'max_length': '100', 'name': "'image'", 'blank': 'True'}),
u'reportattachment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['fixmystreet.ReportAttachment']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportmaincategoryclass': {
'Meta': {'object_name': 'ReportMainCategoryClass'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportmaincategoryclass_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'hint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.ReportCategoryHint']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportmaincategoryclass_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportnotification': {
'Meta': {'object_name': 'ReportNotification'},
'content_template': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'error_msg': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notifications'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'recipient_mail': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'related_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'related_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reply_to': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'sent_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'success': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'fixmystreet.reportsecondarycategoryclass': {
'Meta': {'object_name': 'ReportSecondaryCategoryClass'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportsecondarycategoryclass_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reportsecondarycategoryclass_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'slug_nl': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'fixmystreet.reportsubscription': {
'Meta': {'unique_together': "(('report', 'subscriber'),)", 'object_name': 'ReportSubscription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'to': u"orm['fixmystreet.Report']"}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['fixmystreet.FMSUser']"})
},
u'fixmystreet.streetsurface': {
'Meta': {'object_name': 'StreetSurface'},
'administrator': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': '31370'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pw_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ssft': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'sslv': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'urbis_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'version_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'fixmystreet.userorganisationmembership': {
'Meta': {'unique_together': "(('user', 'organisation'),)", 'object_name': 'UserOrganisationMembership'},
'contact_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userorganisationmembership_created'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'userorganisationmembership_modified'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"}),
'organisation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'memberships'", 'null': 'True', 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'memberships'", 'null': 'True', 'to': u"orm['fixmystreet.FMSUser']"})
},
u'fixmystreet.zipcode': {
'Meta': {'object_name': 'ZipCode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'commune': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'zipcode'", 'to': u"orm['fixmystreet.OrganisationEntity']"}),
'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_nl': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['fixmystreet']
symmetrical = True
``` |
[
{
"content": "Return the code unaltered:\n```python\nimport os\nimport pandas as pd\n\nfrom dataactbroker.scripts import update_historical_duns\nfrom dataactcore.config import CONFIG_BROKER\nfrom dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API\nfrom dataactcore.models.domainModels import DUNS, Hist... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nimport os\nimport pandas as pd\n\nfrom dataactbroker.scripts import update_historical_duns\nfrom dataactcore.config import CONFIG_BROKER\nfrom dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API\nfrom dataactcore.models.domainModels i... | ```python
import os
import pandas as pd
from dataactbroker.scripts import update_historical_duns
from dataactcore.config import CONFIG_BROKER
from dataactcore.utils.duns import DUNS_COLUMNS, EXCLUDE_FROM_API
from dataactcore.models.domainModels import DUNS, HistoricDUNS
def test_remove_existing_duns(database):
    """ Testing the removing existing duns function"""
    sess = database.session

    # DUNS 000000000-000000009; seed the first four into the database
    candidate_duns = ['00000000{}'.format(num) for num in range(0, 10)]
    seeded_duns = candidate_duns[:4]
    frame = pd.DataFrame.from_dict({'awardee_or_recipient_uniqu': candidate_duns})
    for seeded in seeded_duns:
        sess.add(DUNS(awardee_or_recipient_uniqu=seeded))
    sess.commit()

    # only the DUNS not already present should survive the filter
    remaining_duns = [duns for duns in candidate_duns if duns not in seeded_duns]

    filtered = update_historical_duns.remove_existing_duns(frame, sess)

    assert sorted(remaining_duns) == sorted(filtered['awardee_or_recipient_uniqu'].tolist())
def mock_get_duns_props_from_sam(duns_list):
    """ Mock function for get_duns_props as we can't connect to the SAM service

        Args:
            duns_list: list of DUNS strings to look up

        Returns:
            pd.DataFrame with one row per DUNS found in the canned fixture data
            (unknown DUNS are simply absent from the result)
    """
    request_cols = [col for col in DUNS_COLUMNS if col not in EXCLUDE_FROM_API]
    columns = request_cols
    results = pd.DataFrame(columns=columns)
    duns_mappings = {
        '000000001': {
            'awardee_or_recipient_uniqu': '000000001',
            'uei': 'A1',
            'legal_business_name': 'Legal Name 1',
            'dba_name': 'Name 1',
            'entity_structure': '1A',
            'ultimate_parent_unique_ide': '999999999',
            'ultimate_parent_uei': 'Z9',
            'ultimate_parent_legal_enti': 'Parent Legal Name 1',
            'address_line_1': 'Test address 1',
            'address_line_2': 'Test address 2',
            'city': 'Test city',
            'state': 'Test state',
            'zip': 'Test zip',
            'zip4': 'Test zip4',
            'country_code': 'Test country',
            'congressional_district': 'Test congressional district',
            'business_types_codes': [['A', 'B', 'C']],
            'business_types': [['Name A', 'Name B', 'Name C']],
            'high_comp_officer1_full_na': 'Test Exec 1',
            'high_comp_officer1_amount': '1',
            'high_comp_officer2_full_na': 'Test Exec 2',
            'high_comp_officer2_amount': '2',
            'high_comp_officer3_full_na': 'Test Exec 3',
            'high_comp_officer3_amount': '3',
            'high_comp_officer4_full_na': 'Test Exec 4',
            'high_comp_officer4_amount': '4',
            'high_comp_officer5_full_na': 'Test Exec 5',
            'high_comp_officer5_amount': '5'
        },
        '000000002': {
            'awardee_or_recipient_uniqu': '000000002',
            'uei': 'B2',
            'legal_business_name': 'Legal Name 2',
            'dba_name': 'Name 2',
            'entity_structure': '2B',
            'ultimate_parent_unique_ide': '999999998',
            'ultimate_parent_uei': 'Y8',
            'ultimate_parent_legal_enti': 'Parent Legal Name 2',
            'address_line_1': 'Other Test address 1',
            'address_line_2': 'Other Test address 2',
            'city': 'Other Test city',
            'state': 'Other Test state',
            'zip': 'Other Test zip',
            'zip4': 'Other Test zip4',
            'country_code': 'Other Test country',
            'congressional_district': 'Other Test congressional district',
            'business_types_codes': [['D', 'E', 'F']],
            'business_types': [['Name D', 'Name E', 'Name F']],
            'high_comp_officer1_full_na': 'Test Other Exec 6',
            'high_comp_officer1_amount': '6',
            'high_comp_officer2_full_na': 'Test Other Exec 7',
            'high_comp_officer2_amount': '7',
            'high_comp_officer3_full_na': 'Test Other Exec 8',
            'high_comp_officer3_amount': '8',
            'high_comp_officer4_full_na': 'Test Other Exec 9',
            'high_comp_officer4_amount': '9',
            'high_comp_officer5_full_na': 'Test Other Exec 10',
            'high_comp_officer5_amount': '10'
        }
    }
    for duns in duns_list:
        if duns in duns_mappings:
            # DataFrame.append was removed in pandas 2.0; pd.concat with the same
            # sort=True flag is the drop-in equivalent
            results = pd.concat([results, pd.DataFrame(duns_mappings[duns])], sort=True)
    return results
def test_update_duns_props(monkeypatch):
    """ Testing updating the duns props with both populated/blank data """
    monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)

    duns_df = pd.DataFrame.from_dict({
        'awardee_or_recipient_uniqu': ['000000001', '000000002', '000000003']
    })

    expected_df = pd.DataFrame.from_dict({
        'awardee_or_recipient_uniqu': ['000000001', '000000002', '000000003'],
        'uei': ['A1', 'B2', None],
        'address_line_1': ['Test address 1', 'Other Test address 1', None],
        'address_line_2': ['Test address 2', 'Other Test address 2', None],
        'city': ['Test city', 'Other Test city', None],
        'state': ['Test state', 'Other Test state', None],
        'zip': ['Test zip', 'Other Test zip', None],
        'zip4': ['Test zip4', 'Other Test zip4', None],
        'country_code': ['Test country', 'Other Test country', None],
        'congressional_district': ['Test congressional district', 'Other Test congressional district', None],
        'business_types_codes': [['A', 'B', 'C'], ['D', 'E', 'F'], []],
        'business_types': [['Name A', 'Name B', 'Name C'], ['Name D', 'Name E', 'Name F'], []],
        'entity_structure': ['1A', '2B', None],
        'dba_name': ['Name 1', 'Name 2', None],
        'ultimate_parent_unique_ide': ['999999999', '999999998', None],
        'ultimate_parent_uei': ['Z9', 'Y8', None],
        'ultimate_parent_legal_enti': ['Parent Legal Name 1', 'Parent Legal Name 2', None],
        # officer names below mirror mock_get_duns_props_from_sam exactly; the old
        # fixture repeated 'Test Exec 1' for officers 2-5, which the broken assert hid
        'high_comp_officer1_full_na': ['Test Exec 1', 'Test Other Exec 6', None],
        'high_comp_officer1_amount': ['1', '6', None],
        'high_comp_officer2_full_na': ['Test Exec 2', 'Test Other Exec 7', None],
        'high_comp_officer2_amount': ['2', '7', None],
        'high_comp_officer3_full_na': ['Test Exec 3', 'Test Other Exec 8', None],
        'high_comp_officer3_amount': ['3', '8', None],
        'high_comp_officer4_full_na': ['Test Exec 4', 'Test Other Exec 9', None],
        'high_comp_officer4_amount': ['4', '9', None],
        'high_comp_officer5_full_na': ['Test Exec 5', 'Test Other Exec 10', None],
        'high_comp_officer5_amount': ['5', '10', None]
    })

    # BUG FIX: sort_index(inplace=True) returns None, so the previous
    # `assert a.sort_index(inplace=True) == b.sort_index(inplace=True)` compared
    # None == None and could never fail. Compare the real frames instead.
    result_df = update_historical_duns.update_duns_props(duns_df)
    assert expected_df.sort_index().to_dict() == result_df.sort_index().to_dict()
def test_update_duns_props_empty(monkeypatch):
    """ Special case where no data is returned """
    monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)

    duns_df = pd.DataFrame.from_dict({
        'awardee_or_recipient_uniqu': ['000000003']
    })

    # every SAM-derived column comes back as None for an unknown DUNS ...
    none_props = [
        'uei', 'address_line_1', 'address_line_2', 'city', 'state', 'zip', 'zip4',
        'country_code', 'congressional_district', 'dba_name', 'entity_structure',
        'ultimate_parent_unique_ide', 'ultimate_parent_uei', 'ultimate_parent_legal_enti'
    ]
    for officer in range(1, 6):
        none_props.append('high_comp_officer{}_full_na'.format(officer))
        none_props.append('high_comp_officer{}_amount'.format(officer))

    expected = {'awardee_or_recipient_uniqu': ['000000003']}
    expected.update({prop: [None] for prop in none_props})
    # ... except the business type columns, which come back as empty lists
    expected['business_types_codes'] = [[]]
    expected['business_types'] = [[]]
    expected_df = pd.DataFrame.from_dict(expected)

    assert expected_df.to_dict() == update_historical_duns.update_duns_props(duns_df).to_dict()
def test_run_duns_batches(database, monkeypatch):
    """ Test run_duns_batches for the core functionality """
    monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
    sess = database.session

    # seed DUNS 000000003/000000004 so only the first two in the file are treated as new
    candidate_duns = ['00000000{}'.format(num) for num in range(1, 5)]
    for preloaded in candidate_duns[2:]:
        sess.add(DUNS(awardee_or_recipient_uniqu=preloaded))
    sess.commit()

    duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')

    update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)

    expected_results = {
        '000000001': {
            'awardee_or_recipient_uniqu': '000000001',
            'uei': 'A1',
            'registration_date': '2004-04-01',
            'expiration_date': '2013-01-11',
            'last_sam_mod_date': '2013-01-11',
            'activation_date': '2012-01-11',
            'legal_business_name': 'TEST DUNS 1',
            'address_line_1': 'Test address 1',
            'address_line_2': 'Test address 2',
            'city': 'Test city',
            'state': 'Test state',
            'zip': 'Test zip',
            'zip4': 'Test zip4',
            'country_code': 'Test country',
            'congressional_district': 'Test congressional district',
            'business_types_codes': ['A', 'B', 'C'],
            'business_types': ['Name A', 'Name B', 'Name C'],
            'dba_name': 'Name 1',
            'entity_structure': '1A',
            'ultimate_parent_unique_ide': '999999999',
            'ultimate_parent_uei': 'Z9',
            'ultimate_parent_legal_enti': 'Parent Legal Name 1',
            'high_comp_officer1_full_na': 'Test Exec 1',
            'high_comp_officer1_amount': '1',
            'high_comp_officer2_full_na': 'Test Exec 2',
            'high_comp_officer2_amount': '2',
            'high_comp_officer3_full_na': 'Test Exec 3',
            'high_comp_officer3_amount': '3',
            'high_comp_officer4_full_na': 'Test Exec 4',
            'high_comp_officer4_amount': '4',
            'high_comp_officer5_full_na': 'Test Exec 5',
            'high_comp_officer5_amount': '5'
        },
        '000000002': {
            'awardee_or_recipient_uniqu': '000000002',
            'uei': 'B2',
            'registration_date': '2004-04-02',
            'expiration_date': '2013-01-12',
            'last_sam_mod_date': '2013-01-12',
            'activation_date': '2012-01-12',
            'legal_business_name': 'TEST DUNS 2',
            'address_line_1': 'Other Test address 1',
            'address_line_2': 'Other Test address 2',
            'city': 'Other Test city',
            'state': 'Other Test state',
            'zip': 'Other Test zip',
            'zip4': 'Other Test zip4',
            'country_code': 'Other Test country',
            'congressional_district': 'Other Test congressional district',
            'business_types_codes': ['D', 'E', 'F'],
            'business_types': ['Name D', 'Name E', 'Name F'],
            'dba_name': 'Name 2',
            'entity_structure': '2B',
            'ultimate_parent_unique_ide': '999999998',
            'ultimate_parent_uei': 'Y8',
            'ultimate_parent_legal_enti': 'Parent Legal Name 2',
            'high_comp_officer1_full_na': 'Test Other Exec 6',
            'high_comp_officer1_amount': '6',
            'high_comp_officer2_full_na': 'Test Other Exec 7',
            'high_comp_officer2_amount': '7',
            'high_comp_officer3_full_na': 'Test Other Exec 8',
            'high_comp_officer3_amount': '8',
            'high_comp_officer4_full_na': 'Test Other Exec 9',
            'high_comp_officer4_amount': '9',
            'high_comp_officer5_full_na': 'Test Other Exec 10',
            'high_comp_officer5_amount': '10'
        }
    }

    # attributes to verify on each stored HistoricDUNS row
    checked_props = [
        'awardee_or_recipient_uniqu', 'uei', 'registration_date', 'expiration_date', 'last_sam_mod_date',
        'activation_date', 'legal_business_name', 'address_line_1', 'address_line_2', 'city', 'state', 'zip',
        'zip4', 'country_code', 'congressional_district', 'business_types_codes', 'business_types', 'dba_name',
        'entity_structure', 'ultimate_parent_unique_ide', 'ultimate_parent_uei', 'ultimate_parent_legal_enti'
    ]
    for officer in range(1, 6):
        checked_props.append('high_comp_officer{}_full_na'.format(officer))
        checked_props.append('high_comp_officer{}_amount'.format(officer))
    date_props = {'registration_date', 'expiration_date', 'last_sam_mod_date', 'activation_date'}

    def snapshot(duns_obj):
        # capture the attributes under test as a plain dict, stringifying date values
        row = {}
        for prop in checked_props:
            value = getattr(duns_obj, prop)
            if prop in date_props:
                value = str(value) if value else None
            row[prop] = value
        return row

    results = {duns_obj.awardee_or_recipient_uniqu: snapshot(duns_obj)
               for duns_obj in sess.query(HistoricDUNS).all()}
    assert results == expected_results
def test_workflows(database, monkeypatch):
    """ Test both scenarios of the script, starting with a full run """
    monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
    sess = database.session

    # seed DUNS 000000003/000000004 so only the first two are loaded from the file
    candidate_duns = ['00000000{}'.format(num) for num in range(1, 5)]
    for preloaded in candidate_duns[2:]:
        sess.add(DUNS(awardee_or_recipient_uniqu=preloaded))
    sess.commit()

    duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')

    update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
    update_historical_duns.import_historic_duns(sess)

    # attributes to verify on each stored DUNS row
    checked_props = [
        'awardee_or_recipient_uniqu', 'uei', 'registration_date', 'expiration_date', 'last_sam_mod_date',
        'activation_date', 'legal_business_name', 'address_line_1', 'address_line_2', 'city', 'state', 'zip',
        'zip4', 'country_code', 'congressional_district', 'business_types_codes', 'business_types', 'dba_name',
        'entity_structure', 'ultimate_parent_unique_ide', 'ultimate_parent_uei', 'ultimate_parent_legal_enti'
    ]
    for officer in range(1, 6):
        checked_props.append('high_comp_officer{}_full_na'.format(officer))
        checked_props.append('high_comp_officer{}_amount'.format(officer))
    date_props = {'registration_date', 'expiration_date', 'last_sam_mod_date', 'activation_date'}

    def snapshot(duns_obj):
        # capture the attributes under test as a plain dict, stringifying date values
        row = {}
        for prop in checked_props:
            value = getattr(duns_obj, prop)
            if prop in date_props:
                value = str(value) if value else None
            row[prop] = value
        return row

    expected_results = {
        '000000001': {
            'awardee_or_recipient_uniqu': '000000001',
            'uei': 'A1',
            'registration_date': '2004-04-01',
            'expiration_date': '2013-01-11',
            'last_sam_mod_date': '2013-01-11',
            'activation_date': '2012-01-11',
            'legal_business_name': 'TEST DUNS 1',
            'address_line_1': 'Test address 1',
            'address_line_2': 'Test address 2',
            'city': 'Test city',
            'state': 'Test state',
            'zip': 'Test zip',
            'zip4': 'Test zip4',
            'country_code': 'Test country',
            'congressional_district': 'Test congressional district',
            'business_types_codes': ['A', 'B', 'C'],
            'business_types': ['Name A', 'Name B', 'Name C'],
            'dba_name': 'Name 1',
            'entity_structure': '1A',
            'ultimate_parent_unique_ide': '999999999',
            'ultimate_parent_uei': 'Z9',
            'ultimate_parent_legal_enti': 'Parent Legal Name 1',
            'high_comp_officer1_full_na': 'Test Exec 1',
            'high_comp_officer1_amount': '1',
            'high_comp_officer2_full_na': 'Test Exec 2',
            'high_comp_officer2_amount': '2',
            'high_comp_officer3_full_na': 'Test Exec 3',
            'high_comp_officer3_amount': '3',
            'high_comp_officer4_full_na': 'Test Exec 4',
            'high_comp_officer4_amount': '4',
            'high_comp_officer5_full_na': 'Test Exec 5',
            'high_comp_officer5_amount': '5'
        },
        '000000002': {
            'awardee_or_recipient_uniqu': '000000002',
            'uei': 'B2',
            'registration_date': '2004-04-02',
            'expiration_date': '2013-01-12',
            'last_sam_mod_date': '2013-01-12',
            'activation_date': '2012-01-12',
            'legal_business_name': 'TEST DUNS 2',
            'address_line_1': 'Other Test address 1',
            'address_line_2': 'Other Test address 2',
            'city': 'Other Test city',
            'state': 'Other Test state',
            'zip': 'Other Test zip',
            'zip4': 'Other Test zip4',
            'country_code': 'Other Test country',
            'congressional_district': 'Other Test congressional district',
            'business_types_codes': ['D', 'E', 'F'],
            'business_types': ['Name D', 'Name E', 'Name F'],
            'dba_name': 'Name 2',
            'entity_structure': '2B',
            'ultimate_parent_unique_ide': '999999998',
            'ultimate_parent_uei': 'Y8',
            'ultimate_parent_legal_enti': 'Parent Legal Name 2',
            'high_comp_officer1_full_na': 'Test Other Exec 6',
            'high_comp_officer1_amount': '6',
            'high_comp_officer2_full_na': 'Test Other Exec 7',
            'high_comp_officer2_amount': '7',
            'high_comp_officer3_full_na': 'Test Other Exec 8',
            'high_comp_officer3_amount': '8',
            'high_comp_officer4_full_na': 'Test Other Exec 9',
            'high_comp_officer4_amount': '9',
            'high_comp_officer5_full_na': 'Test Other Exec 10',
            'high_comp_officer5_amount': '10'
        }
    }
    # the two pre-seeded DUNS have no SAM data: every checked prop is None except the id
    for empty_duns in ('000000003', '000000004'):
        empty_row = {prop: None for prop in checked_props}
        empty_row['awardee_or_recipient_uniqu'] = empty_duns
        expected_results[empty_duns] = empty_row

    results = {duns_obj.awardee_or_recipient_uniqu: snapshot(duns_obj)
               for duns_obj in sess.query(DUNS).all()}
    assert results == expected_results

    # Test to see if truncating the DUNS table while keeping the historic reuploads the historic values
    sess.query(DUNS).filter(DUNS.historic.is_(True)).delete(synchronize_session=False)

    # Make sure all the historic DUNS are removed from the DUNS table
    assert sess.query(DUNS).filter(DUNS.historic.is_(True)).all() == []

    # Redo script but don't go through run_duns_batches
    update_historical_duns.clean_historic_duns(sess)
    update_historical_duns.import_historic_duns(sess)

    results = {duns_obj.awardee_or_recipient_uniqu: snapshot(duns_obj)
               for duns_obj in sess.query(DUNS).all()}
    assert results == expected_results
def test_clean_historic_duns(database, monkeypatch):
    """
    Test to make sure if a new DUNS is loaded and we reload historic DUNS (skipping the major load),
    we should remove the historic equivalents.
    """
    monkeypatch.setattr('dataactcore.utils.duns.get_duns_props_from_sam', mock_get_duns_props_from_sam)
    sess = database.session

    # seed DUNS 000000003/000000004 ahead of the load
    candidate_duns = ['00000000{}'.format(num) for num in range(1, 5)]
    for preloaded in candidate_duns[2:]:
        sess.add(DUNS(awardee_or_recipient_uniqu=preloaded))
    sess.commit()

    duns_file = os.path.join(CONFIG_BROKER['path'], 'tests', 'unit', 'data', 'historic_DUNS_export_small.csv')

    # normal run
    update_historical_duns.run_duns_batches(duns_file, sess, block_size=1)
    update_historical_duns.import_historic_duns(sess)

    # simulate load_duns_exec_comp.py refreshing DUNS 000000002 from the live feed
    refreshed = sess.query(DUNS).filter(DUNS.awardee_or_recipient_uniqu == '000000002').one()
    refreshed.historic = False
    sess.commit()

    # rerun with a skip
    update_historical_duns.clean_historic_duns(sess)
    update_historical_duns.import_historic_duns(sess)

    # the historic twin of the refreshed record should be gone
    leftover = sess.query(HistoricDUNS).filter(HistoricDUNS.awardee_or_recipient_uniqu == '000000002').count()
    assert leftover == 0
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n# Generated by Django 2.2.10 on 2021-03-03 17:08\n\nfrom django.conf import settings\nfrom django.db import (\n migrations,\n models,\n)\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n depende... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n# Generated by Django 2.2.10 on 2021-03-03 17:08\n\nfrom django.conf import settings\nfrom django.db import (\n migrations,\n models,\n)\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration)... | ```python
# Generated by Django 2.2.10 on 2021-03-03 17:08
from django.conf import settings
from django.db import (
migrations,
models,
)
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the UserDeferrableModal table.

    Each row links a user to a deferrable modal and records whether (and
    optionally until when) the user deferred it.
    """

    dependencies = [
        # user FK targets whatever model AUTH_USER_MODEL is swapped to
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accelerator', '0035_add_deferrable_modal_model'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserDeferrableModal',
            fields=[
                ('id', models.AutoField(
                    auto_created=True,
                    primary_key=True,
                    serialize=False,
                    verbose_name='ID')),
                # timestamps are nullable so pre-existing rows backfill cleanly
                ('created_at', models.DateTimeField(
                    auto_now_add=True,
                    null=True)),
                ('updated_at', models.DateTimeField(
                    auto_now=True,
                    null=True)),
                ('is_deferred', models.BooleanField(default=False)),
                # deferred_to empty means deferred indefinitely -- TODO confirm against model logic
                ('deferred_to', models.DateTimeField(
                    blank=True,
                    null=True)),
                ('deferrable_modal', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to=settings.ACCELERATOR_DEFERRABLEMODAL_MODEL)),
                ('user', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'User Deferrable Modal',
                'abstract': False,
                'managed': True,
                # NOTE(review): 'swappable': None looks like generator noise -- confirm
                'swappable': None,
            },
        ),
    ]
``` |
[
{
"content": "Here is the source code:\n```python\n# Logging: easier workflow with some utility methods to log and handle errors\nfrom collections import defaultdict, namedtuple\nfrom enum import Enum\nimport bpy\nimport os\nimport sys\nimport traceback\n\n\nclass LogLevel(Enum):\n DEBUG = 'debug'\n INFO ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\n# Logging: easier workflow with some utility methods to log and handle errors\nfrom collections import defaultdict, namedtuple\nfrom enum import Enum\nimport bpy\nimport os\nimport sys\nimport traceback\n\n\nclass LogLevel(Enum):\n DEBUG = 'd... | ```python
# Logging: easier workflow with some utility methods to log and handle errors
from collections import defaultdict, namedtuple
from enum import Enum
import bpy
import os
import sys
import traceback
class LogLevel(Enum):
    """Severity levels a report entry can be filed under."""
    DEBUG = 'debug'
    INFO = 'info'
    WARNING = 'warning'
    ERROR = 'error'
    FATAL = 'fatal'

    def is_fatal(self):
        """Whether entries at this level abort the reported operation."""
        return self in (LogLevel.ERROR, LogLevel.FATAL)

    def get_bl_report_level(self):
        """Map this level onto the set Blender's Operator.report expects."""
        translation = {
            LogLevel.DEBUG: {'DEBUG'},
            LogLevel.INFO: {'INFO'},
            LogLevel.WARNING: {'WARNING'},
            LogLevel.ERROR: {'ERROR_INVALID_INPUT'},
            LogLevel.FATAL: {'ERROR'},
        }
        return translation[self]
ReportItem = namedtuple('ReportItem', 'message etype value traceback')
class Report(object):
    """Accumulates ReportItem entries, bucketed by their LogLevel."""
    _reports = None

    def __init__(self):
        self._reports = defaultdict(list)

    def extend(self, other):
        """Merge all entries of *other* into this report."""
        for lvl, entries in other._reports.items():
            self._reports[lvl].extend(entries)

    def append(self, message, level=LogLevel.INFO, cause=None):
        """Record *message* at *level*; *cause* defaults to the exception
        currently being handled (sys.exc_info())."""
        cause = sys.exc_info() if cause is None else cause
        self._reports[level].append(ReportItem(message, *cause))
        return self

    def get_items(self, level):
        """All entries recorded at exactly *level*."""
        return self._reports[level]

    def contains_fatal(self):
        """Whether any entry was filed at a fatal level."""
        return any(lvl.is_fatal() and entries
                   for lvl, entries in self._reports.items())

    def print_report(self, op):
        """Forward every entry to the Blender operator *op*; fatal entries
        additionally get their formatted traceback."""
        for lvl, entries in self._reports.items():
            bl_level = lvl.get_bl_report_level()
            for entry in entries:
                op.report(bl_level, str(entry.message))
                if lvl.is_fatal():
                    trace_text = ''.join(traceback.format_exception(
                        entry.etype, entry.value, entry.traceback))
                    op.report(bl_level, trace_text)
class ReportedError(RuntimeError):
    """Raised when a Reporter fails to run, i.e. an error or fatal
    exception occurred while it was active.
    """
    report = None
    _target = None

    def __init__(self, message, target=None):
        super(ReportedError, self).__init__(message)
        self.report = Report()
        self._target = target

    def is_aimed_at(self, candidate):
        """True when *candidate* is the reporter meant to absorb this error
        (an untargeted error is aimed at everyone)."""
        if self._target is None:
            return True
        return self._target is candidate

    @classmethod
    def throw_from_exception(cls, reporter, level=LogLevel.ERROR, exc=None):
        """Wrap the current (or given) exception context into a
        ReportedError targeted at *reporter* and raise it."""
        exc = sys.exc_info() if exc is None else exc
        original = exc[1]
        message = "An error occured: " + str(original)
        wrapped = cls(message, target=reporter)
        wrapped.report.append(message, level=level, cause=exc)
        raise wrapped from original
def static_access(func):
    """Provide static access to member functions: when the attribute is
    looked up on the class itself, the wrapped function is invoked with
    self bound to None.
    """
    import functools

    class _Descriptor(object):
        # Unlike plain functions (which return themselves when instance is
        # None), we always bind: class-level access yields instance=None.
        def __get__(self, instance, owner):
            return functools.partial(func, instance)

    return _Descriptor()
class Reporter(object):
    """Via this class one can make reports of a process. That is return warnings, errors and
    fatal exceptions
    """
    # Class-level stack of currently __enter__'ed reporters; the innermost
    # one receives the instance-less (static) warning/info/debug calls.
    _stack = []
    _report = None   # Report collecting this reporter's entries
    _caught = None   # classinfos of exceptions swallowed in __exit__
    _engaged = None  # True while inside this reporter's with-block
    _bl_op = None    # optional Blender operator the report is printed to
    def __init__(self, caught_types=(Exception,), reported_to=None):
        """@param caught_types: A repeatable-iterable containing classinfos that will be used to check if an exception of type exc_t
        should be caught or not. A caught exception will be logged as LogLevel.ERROR and not passed onwards. Note that
        each entry of caught_types can be either a class or a tuple of classes and will be checked via issubclass(exc_t, entry).
        Note that this does not change how an ReportedError is handled. They are reported if they
        belong to this reporter.
        """
        self._report = Report()
        self._caught = caught_types
        self._engaged = False
        self._bl_op = reported_to
        # The following will check that the given caught_types is indeed legal
        # by performing a dummy check
        self._should_catch(type(None))
    def _should_catch(self, exc_type):
        # True when exc_type matches any configured classinfo.
        return any(issubclass(exc_type, ct) for ct in self._caught)
    def __enter__(self):
        if self._engaged:
            raise RuntimeError("No multi-entry into a reporter allowed")
        self._engaged = True
        Reporter._stack.append(self)
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Decide whether the raised exception (if any) is swallowed
        # (return True) or propagated; the finally clause always prints
        # the report (when bound to an op) and pops this reporter.
        try:
            exc = (exc_type, exc_value, traceback)
            if exc_value is None:
                # Completed normally, yay
                return False
            if isinstance(exc_value, ReportedError):
                # Allows for nesting of multiple reporters
                if exc_value.is_aimed_at(self):
                    self._report.extend(exc_value.report)
                    return True  # Catch it, was ours
                else:
                    # Carry our entries along to the outer reporter.
                    exc_value.report.extend(self._report)
                    return False  # Pass it on, to another reporter
            if self._should_catch(exc_type):
                self._report.append(exc_value, level=LogLevel.ERROR, cause=exc)
                return True
            return False
        finally:
            self._engaged = False
            if self._bl_op is not None:
                self.print_report(self._bl_op)
            assert(Reporter._stack.pop() is self)
    def rebind_bl_op(self, op):
        """Binds a Blender op that will be reported to when this Reporter __exit__s
        """
        self._bl_op = op
    @classmethod
    def _get_reporter(cls, proposed):
        # Resolve the reporter to use: an explicit instance wins; otherwise
        # fall back to the innermost engaged reporter, or None when there
        # is no active reporter at all.
        if proposed is not None:
            return proposed
        if not cls._stack:
            return None
        return cls._stack[-1]
    @static_access
    def warning(self, message, *args, **wargs):
        """When something happened that can be recovered from but isn't
        conformant never-the-less
        """
        self = Reporter._get_reporter(self)
        if self is None:
            return
        formatted = message.format(*args, **wargs)
        self._report.append(formatted, level=LogLevel.WARNING)
    @static_access
    def info(self, message, *args, **wargs):
        """A useful information for the user
        """
        self = Reporter._get_reporter(self)
        if self is None:
            return
        formatted = message.format(*args, **wargs)
        self._report.append(formatted, level=LogLevel.INFO)
    @static_access
    def debug(self, message, *args, **wargs):
        """Debug output, only output during debug mode
        """
        self = Reporter._get_reporter(self)
        if self is None:
            return
        formatted = message.format(*args, **wargs)
        self._report.append(formatted, level=LogLevel.DEBUG)
    @static_access
    def error(self, message, *args, cause=None, **wargs):
        """When something happened that can't conform with the specification.
        Aka: the user's fault
        """
        if self is not None and not self._engaged:
            raise RuntimeError(
                "Can't file an error without __enter__'ing this Reporter")
        formatted = message.format(*args, **wargs)
        # Raise-and-catch so throw_from_exception sees a live sys.exc_info()
        # with a proper traceback attached.
        try:
            raise RuntimeError(formatted) from cause
        except RuntimeError:
            # NOTE(review): this files the entry at LogLevel.FATAL even
            # though it is error() — looks like a copy-paste from fatal();
            # confirm whether LogLevel.ERROR was intended.
            ReportedError.throw_from_exception(self, level=LogLevel.FATAL)
    @static_access
    def fatal(self, message, *args, cause=None, **wargs):
        """
        When something happened that really shouldn't happen.
        Aka: my fault
        """
        if self is not None and not self._engaged:
            raise RuntimeError(
                "Can't file an error without __enter__'ing this Reporter")
        formatted = message.format(*args, **wargs)
        message = "This should not have happened. Report to WorldSEnder:\n{mess}".format(
            mess=formatted)
        try:
            raise RuntimeError(message) from cause
        except RuntimeError:
            ReportedError.throw_from_exception(self, level=LogLevel.FATAL)
    def print_report(self, op):
        # Forward all collected entries to the Blender operator.
        self._report.print_report(op)
    def was_success(self):
        # Success iff no ERROR/FATAL entries were collected.
        return not self._report.contains_fatal()
def extract_safe(collection, key, mess_on_fail, *args, on_fail=Reporter.error, **wargs):
    """Look up *key* in *collection*, reporting an error when it is missing.

    On a KeyError the message is formatted (with 'coll' and 'item' bound to
    the collection and the key, plus any *args/**wargs) and handed to
    *on_fail* together with the triggering KeyError as the 'cause' keyword;
    whatever *on_fail* returns is returned instead of the missing item.
    @param collection: the collection to search in
    @param key: the key to search for
    @param mess_on_fail: message template passed through str.format
    @param on_fail: called as on_fail(formatted_message, cause=e) when the
        key is absent; defaults to Reporter.error
    @returns collection[key], or the result of on_fail on a KeyError
    """
    try:
        return collection[key]
    except KeyError as exc:
        formatted = mess_on_fail.format(*args, coll=collection, item=key, **wargs)
        return on_fail(formatted, cause=exc)
def to_valid_loc(assetstr):
    '''Turn *assetstr* into a valid Java package/class identifier by
    substituting '_' for disallowed characters.
    @see also http://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-IdentifierChars
    '''
    # TODO: substitute '_' for *every* non-conforming character; a complete
    # implementation needs regex set operations, roughly:
    #   '_'.join(re.split(r'[[\x00-\x40\x5b-\x60]--[{pathsep}]]'.format(pathsep=os.path.sep), assetstr))
    # plus a leading '_' when the name starts with a digit.
    # Until then, only spaces are rewritten.
    return assetstr.replace(' ', '_')
def asset_to_dir(assetstr):
    """Translate a Minecraft asset string ('domain:path') into a filesystem
    path under assets/. A missing domain defaults to 'minecraft'; any
    non-conformant input is reported as an error.
    """
    if not assetstr:
        Reporter.error("Asset-String can't be empty")
    pieces = assetstr.split(':')
    resource_path = pieces[-1]
    domains = pieces[:-1]
    if not domains:
        domains = ["minecraft"]
    if len(domains) > 1:
        Reporter.error(
            "Asset-String {loc} can't contain more than one ':'".format(loc=assetstr))
    mod_domain = domains[0].lower()
    file_path = resource_path.lower()
    if not mod_domain or not file_path:
        Reporter.error(
            "Asset-String {loc}: Splitted string mustn't be empty".format(loc=assetstr))
    return "assets/{mod}/{file}".format(mod=mod_domain, file=file_path)
def openw_save(filepath, flags, *args, **wargs):
    """Open *filepath* via open(filepath, flags, *args, **wargs), first
    creating any missing parent directory. The Blender-relative path
    (e.g. '//...') is resolved to an absolute path beforehand.
    """
    resolved = bpy.path.abspath(filepath)
    parent = os.path.dirname(resolved)
    if not os.path.exists(parent):
        os.makedirs(parent)
    return open(resolved, flags, *args, **wargs)
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n#!/usr/bin/env python\n#! -*- coding: utf-8 -*-\n\n###\n# Copyright (c) Rice University 2012-13\n# This software is subject to\n# the provisions of the GNU Affero General\n# Public License version 3 (AGPLv3).\n# ... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n#!/usr/bin/env python\n#! -*- coding: utf-8 -*-\n\n###\n# Copyright (c) Rice University 2012-13\n# This software is subject to\n# the provisions of the GNU Affero General\n# Public License version... | ```python
#!/usr/bin/env python
#! -*- coding: utf-8 -*-
###
# Copyright (c) Rice University 2012-13
# This software is subject to
# the provisions of the GNU Affero General
# Public License version 3 (AGPLv3).
# See LICENCE.txt for details.
###
"""Rhaptos user profile web application
The application is initialized using the application factory (`make_app`).
To acquire the application from anywhere in this package or extra packages,
use the `get_app` function.
Author: Paul Brian, Michael Mulich
Copyright (c) 2012 Rice University
This software is subject to the provisions of the GNU Lesser General
Public License Version 2.1 (LGPL). See LICENSE.txt for details.
"""
import os
import sys
import datetime
import md5
import random
import statsd
import json
import logging
import uuid
import flask # XXX Why is this imported twice (see 2 lines down)?
from functools import wraps
from flask import (
Flask, render_template,
request, g, session, flash,
redirect, url_for, abort,
)
import conf, log, err
from rhaptos2.user import backend
import pkg_resources # part of setuptools
# Resolve the installed package version from setuptools metadata.
__version__ = pkg_resources.require("rhaptos2.user")[0].version
APPTYPE = 'rhaptos2user'
VERSION = __version__
# Module-global Flask application; managed via get_app()/set_app().
_app = None
def get_app():
    """Return the module-global application object (may be None)."""
    # Read-only access to the module global; no 'global' statement needed.
    return _app
def set_app(app):
    """Install *app* as the module-global application object and return it."""
    global _app
    _app = app
    return app
def make_app(config):
    """Application factory: build and configure the Flask app, register it
    globally, initialise the DB backend and hook up the views."""
    app = Flask(__name__)
    app.config.update(config)
    set_up_logging(app)
    # Set the application
    app = set_app(app)
    # NOTE(review): Python 2 debug print of the whole config dict — consider
    # removing or routing through the logger.
    print config
    backend.initdb(config)
    # Initialize the views
    # (importing the module registers its routes as a side effect)
    from rhaptos2.user import views
    return app
def dolog(lvl, msg, caller=None, statsd=None):
    """wrapper function purely for adding context to log stmts
    I am trying to keep this simple, no parsing of the stack etc.
    caller is the function passed when the dolog func is called. We just grab its name
    statsd is likely to hold a list of strings that are the buckets
    >>> dolog("ERROR", "whoops", os.path.isdir, ['a.b.c',])
    """
    # Map textual level names onto stdlib logging numeric levels.
    lvls = {
        "CRITICAL" : 50,
        "ERROR" : 40,
        "WARNING" : 30,
        "INFO" : 20,
        "DEBUG" : 10,
        "NOTSET" : 0
    }
    try:
        goodlvl = lvls[lvl]
    except:
        # NOTE(review): bare except — unknown level names silently fall
        # back to INFO (20).
        goodlvl = 20 ###!!!
    #create an extras dict, that holds current user, request and action notes
    if caller:
        calledby = "rhaptos2.loggedin." + str(caller.__name__)
    else:
        calledby = "rhaptos2.loggedin.unknown"
    if statsd:
        # NOTE(review): mutates the caller's list in place.
        statsd.append(calledby)
    else:
        statsd = [calledby,]
    # Pull request/user context off flask.g when available; outside a
    # request context these lookups raise, hence the fallbacks.
    try:
        request_id = g.request_id
    except:
        request_id = "no_request_id"
    try:
        user_id = g.user_id
    except:
        user_id = "no_user_id"
    extra = {'statsd': statsd,
             'user_id': user_id,
             'request_id': request_id}
    try:
        _app.logger.log(goodlvl, msg, extra=extra)
    except Exception, e:
        # Last-ditch fallback so a logging failure never kills the request.
        print extra, msg, e
def set_up_logging(app):
    """Attach the statsd and stream logging handlers to *app*.

    useage::
        logger.warn("Help",
                    extra={'statsd': ['rhaptos2.repo.module',
                           'bamboo.foo.bar']})
    """
    bamboo_cfg = app.config['globals']['bamboo_global']
    handlers = (
        log.StatsdHandler(bamboo_cfg['statsd_host'],
                          bamboo_cfg['statsd_port']),
        logging.StreamHandler(),
    )
    # Reduced log format: bug #39 prevents the request_id/user_id extras
    # from being referenced in the format string.
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s "
                                  "- %(message)s")
    # Format every handler identically, then register them on the app.
    for handler in handlers:
        handler.setFormatter(formatter)
        app.logger.addHandler(handler)
``` |
[
{
"content": "Reproduce the code exactly as provided (keep formatting):\n```python\nimport abc\nfrom hashlib import sha1\n\n\nclass Key(metaclass=abc.ABCMeta):\n \"\"\"\n Interface for a public or private key.\n \"\"\"\n\n @abc.abstractmethod\n def pub(self):\n pass\n\n @abc.abstractmet... | [
{
"content": "Reproduce the code exactly as provided (keep formatting):\n<|memory_start|>```python\nimport abc\nfrom hashlib import sha1\n\n\nclass Key(metaclass=abc.ABCMeta):\n \"\"\"\n Interface for a public or private key.\n \"\"\"\n\n @abc.abstractmethod\n def pub(self):\n pass\n\n ... | ```python
import abc
from hashlib import sha1
class Key(metaclass=abc.ABCMeta):
    """
    Interface for a public or private key.
    """

    @abc.abstractmethod
    def pub(self):
        """Return the public counterpart of this key."""

    @abc.abstractmethod
    def has_secret_key(self):
        """Whether this key carries secret material."""

    @abc.abstractmethod
    def key_to_bin(self):
        """Serialize this key to bytes."""

    def key_to_hash(self):
        """SHA-1 digest of the serialized *public* part of this key."""
        material = (self.pub().key_to_bin() if self.has_secret_key()
                    else self.key_to_bin())
        return sha1(material).digest()
class PrivateKey(Key, metaclass=abc.ABCMeta):
    """
    Interface for a key that includes the secret half.
    """

    def has_secret_key(self):
        # By definition a private key holds the secret material.
        return True

    @abc.abstractmethod
    def signature(self, msg):
        """Produce a signature over *msg*."""
class PublicKey(Key, metaclass=abc.ABCMeta):
    """
    Interface for the public half of a key.
    """

    def pub(self):
        # A public key is its own public counterpart.
        return self

    def has_secret_key(self):
        return False

    @abc.abstractmethod
    def verify(self, signature, msg):
        """Check *signature* against *msg*."""

    @abc.abstractmethod
    def get_signature_length(self):
        """Length in bytes of signatures this key verifies."""
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\n#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with e... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\n#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# ... | ```python
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject's "gi.repository.GdkPixbuf" package.
"""
import glob
import os
import subprocess
from PyInstaller.config import CONF
from PyInstaller.compat import (
exec_command_stdout, is_darwin, is_win, is_linux, open_file, which)
from PyInstaller.utils.hooks import (
collect_glib_translations, get_gi_typelibs, get_gi_libdir, logger)
# Source location of the loaders inside the GI lib dir, and the paths the
# loaders / loader cache are bundled under inside the frozen application.
loaders_path = os.path.join('gdk-pixbuf-2.0', '2.10.0', 'loaders')
destpath = "lib/gdk-pixbuf-2.0/2.10.0/loaders"
cachedest = "lib/gdk-pixbuf-2.0/2.10.0"
# If the "gdk-pixbuf-query-loaders" command is not in the current ${PATH}, or
# is not in the GI lib path, GDK and thus GdkPixbuf is unavailable. Return with
# a non-fatal warning.
gdk_pixbuf_query_loaders = None
try:
    libdir = get_gi_libdir('GdkPixbuf', '2.0')
except ValueError:
    logger.warning(
        '"hook-gi.repository.GdkPixbuf" ignored, '
        'since GdkPixbuf library not found'
    )
    libdir = None
if libdir:
    # Distributions either package gdk-pixbuf-query-loaders in the GI libs
    # directory (not on the path), or on the path with or without a -x64 suffix
    # depending on the architecture
    cmds = [
        os.path.join(libdir, 'gdk-pixbuf-2.0/gdk-pixbuf-query-loaders'),
        'gdk-pixbuf-query-loaders-64',
        'gdk-pixbuf-query-loaders',
    ]
    for cmd in cmds:
        gdk_pixbuf_query_loaders = which(cmd)
        if gdk_pixbuf_query_loaders is not None:
            break
if gdk_pixbuf_query_loaders is None:
    logger.warning(
        '"hook-gi.repository.GdkPixbuf" ignored, since '
        '"gdk-pixbuf-query-loaders" is not in $PATH or gi lib dir.'
    )
# Else, GDK is available. Let's do this.
else:
    binaries, datas, hiddenimports = get_gi_typelibs('GdkPixbuf', '2.0')
    datas += collect_glib_translations('gdk-pixbuf')
    # To add support for a new platform, add a new "elif" branch below with
    # the proper is_<platform>() test and glob for finding loaders on that
    # platform.
    # BUGFIX: initialize 'ext' so the "if ext:" test below cannot raise
    # NameError on platforms that are neither Windows, macOS nor Linux;
    # such platforms now fall through to the "unsupported" warning instead.
    ext = None
    if is_win:
        ext = "*.dll"
    elif is_darwin or is_linux:
        ext = "*.so"
    # If loader detection is supported on this platform, bundle all
    # detected loaders and an updated loader cache.
    if ext:
        loader_libs = []
        # Bundle all found loaders with this user application.
        pattern = os.path.join(libdir, loaders_path, ext)
        for f in glob.glob(pattern):
            binaries.append((f, destpath))
            loader_libs.append(f)
        # Sometimes the loaders are stored in a different directory from
        # the library (msys2)
        if not loader_libs:
            pattern = os.path.join(libdir, '..', 'lib', loaders_path, ext)
            for f in glob.glob(pattern):
                binaries.append((f, destpath))
                loader_libs.append(f)
        # Filename of the loader cache to be written below.
        cachefile = os.path.join(CONF['workpath'], 'loaders.cache')
        # Run the "gdk-pixbuf-query-loaders" command and capture its
        # standard output providing an updated loader cache; then write
        # this output to the loader cache bundled with this frozen
        # application.
        #
        # On OSX we use @executable_path to specify a path relative to the
        # generated bundle. However, on non-Windows we need to rewrite the
        # loader cache because it isn't relocatable by default. See
        # https://bugzilla.gnome.org/show_bug.cgi?id=737523
        #
        # To make it easier to rewrite, we just always write
        # @executable_path, since its significantly easier to find/replace
        # at runtime. :)
        #
        # If we need to rewrite it...
        if not is_win:
            # To permit string munging, decode the encoded bytes output by
            # this command (i.e., enable the "universal_newlines" option).
            # Note that:
            #
            # * Under Python 2.7, "cachedata" will be a decoded "unicode"
            #   object. * Under Python 3.x, "cachedata" will be a decoded
            #   "str" object.
            #
            # On Fedora, the default loaders cache is /usr/lib64, but the
            # libdir is actually /lib64. To get around this, we pass the
            # path to the loader command, and it will create a cache with
            # the right path.
            cachedata = exec_command_stdout(gdk_pixbuf_query_loaders,
                                            *loader_libs)
            cd = []
            prefix = '"' + os.path.join(libdir, 'gdk-pixbuf-2.0', '2.10.0')
            plen = len(prefix)
            # For each line in the updated loader cache...
            for line in cachedata.splitlines():
                if line.startswith('#'):
                    continue
                if line.startswith(prefix):
                    line = '"@executable_path/' + cachedest + line[plen:]
                cd.append(line)
            # Rejoin these lines in a manner preserving this object's
            # "unicode" type under Python 2.
            cachedata = u'\n'.join(cd)
            # Write the updated loader cache to this file.
            with open_file(cachefile, 'w') as fp:
                fp.write(cachedata)
        # Else, GdkPixbuf will do the right thing on Windows, so no changes
        # to the loader cache are required. For efficiency and reliability,
        # this command's encoded byte output is written as is without being
        # decoded.
        else:
            with open_file(cachefile, 'wb') as fp:
                fp.write(subprocess.check_output(gdk_pixbuf_query_loaders))
        # Bundle this loader cache with this frozen application.
        datas.append((cachefile, cachedest))
    # Else, loader detection is unsupported on this platform.
    else:
        logger.warning(
            'GdkPixbuf loader bundling unsupported on your platform.'
        )
``` |
[
{
"content": "Provide a verbatim copy of the code:\n```python\nimport mxnet as mx\nimport logging\n\n# data & preprocessing\ndata = mx.symbol.Variable('data')\n\n# 1st conv\nconv1 = mx.symbol.Convolution(data=data, kernel=(5, 5), num_filter=20)\npool1 = mx.symbol.Pooling(data=conv1, pool_type=\"max\",\n ... | [
{
"content": "Provide a verbatim copy of the code:\n<|memory_start|>```python\nimport mxnet as mx\nimport logging\n\n# data & preprocessing\ndata = mx.symbol.Variable('data')\n\n# 1st conv\nconv1 = mx.symbol.Convolution(data=data, kernel=(5, 5), num_filter=20)\npool1 = mx.symbol.Pooling(data=conv1, pool_type=\"... | ```python
import mxnet as mx
import logging
# LeNet-style convolutional network trained on 28x28 single-channel MNIST
# images stored as RecordIO files.
# data & preprocessing
data = mx.symbol.Variable('data')
# 1st conv
conv1 = mx.symbol.Convolution(data=data, kernel=(5, 5), num_filter=20)
pool1 = mx.symbol.Pooling(data=conv1, pool_type="max",
                          kernel=(2, 2), stride=(2, 2))
# 2nd conv
conv2 = mx.symbol.Convolution(data=pool1, kernel=(5, 5), num_filter=50)
pool2 = mx.symbol.Pooling(data=conv2, pool_type="max",
                          kernel=(2, 2), stride=(2, 2))
# 1st fc & relu
flatten = mx.symbol.Flatten(data=pool2)
fc1 = mx.symbol.FullyConnected(data=flatten, num_hidden=500)
relu1 = mx.symbol.Activation(data=fc1, act_type="relu")
# 2nd fc
fc2 = mx.symbol.FullyConnected(data=relu1, num_hidden=10)
# loss
lenet5 = mx.symbol.SoftmaxOutput(data=fc2, name='softmax')
# Training iterator with light augmentation (random crop 26-28 px and
# up to +/-15 degree rotation); scale=0.00390625 is 1/256 so pixel values
# land in [0, 1) after subtracting mean_r=128.
train_dataiter = mx.io.ImageRecordIter(
    path_imgrec="../data/train.rec",
    data_shape=(1, 28, 28),
    batch_size=50,
    mean_r=128,
    scale=0.00390625,
    rand_crop=True,
    min_crop_size=26,
    max_crop_size=28,
    max_rotate_angle=15,
    fill_value=0
)
# Validation iterator: same normalization, no augmentation.
val_dataiter = mx.io.ImageRecordIter(
    path_imgrec="../data/val.rec",
    data_shape=(1, 28, 28),
    batch_size=100,
    mean_r=128,
    scale=0.00390625,
)
# Log everything to stderr and mirror it into a file.
logging.getLogger().setLevel(logging.DEBUG)
fh = logging.FileHandler('train_mnist_lenet.log')
logging.getLogger().addHandler(fh)
# Decay the learning rate by 5% every 1000 batches.
lr_scheduler = mx.lr_scheduler.FactorScheduler(1000, factor=0.95)
optimizer_params = {
    'learning_rate': 0.01,
    'momentum': 0.9,
    'wd': 0.0005,
    'lr_scheduler': lr_scheduler
}
# Save model parameters every 5 epochs under the 'mnist_lenet' prefix.
checkpoint = mx.callback.do_checkpoint('mnist_lenet', period=5)
# NOTE(review): hard-coded GPU index 2 — confirm this matches the target
# machine, or make it configurable.
mod = mx.mod.Module(lenet5, context=mx.gpu(2))
mod.fit(train_dataiter,
        eval_data=val_dataiter,
        optimizer_params=optimizer_params,
        num_epoch=36,
        epoch_end_callback=checkpoint)
``` |
[
{
"content": "```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010-2011 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of th... | [
{
"content": "<|memory_start|>```python\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2010-2011 OpenStack LLC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n#... | ```python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.openstack.common.log as logging
LOG = logging.getLogger(__name__)
class Controller(object):
    """WSGI controller for image-membership operations (list, replace,
    add/update, delete, and listing images shared with a member)."""
    def _check_can_access_image_members(self, context):
        # Anonymous (no owner) non-admin callers may not touch memberships.
        if context.owner is None and not context.is_admin:
            raise webob.exc.HTTPUnauthorized(_("No authenticated user"))
    def __init__(self):
        # The concrete DB API implementation is resolved from configuration.
        self.db_api = glance.db.get_api()
        self.db_api.configure_db()
    def index(self, req, image_id):
        """
        Get the members of an image.

        Raises HTTPNotFound when the image is missing — and also when
        access is forbidden, deliberately masked as 'not found' so the
        image's existence is not leaked.
        """
        try:
            self.db_api.image_get(req.context, image_id)
        except exception.NotFound:
            msg = _("Image %(id)s not found")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _("Access denied to image %(id)s but returning 'not found'")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        members = self.db_api.image_member_find(req.context, image_id=image_id)
        msg = _("Returning member list for image %(id)s")
        LOG.info(msg % {'id': image_id})
        return dict(members=make_member_list(members,
                                             member_id='member',
                                             can_share='can_share'))
    @utils.mutating
    def update_all(self, req, image_id, body):
        """
        Replaces the members of the image with those specified in the
        body. The body is a dict with the following format::
            {"memberships": [
                {"member_id": <MEMBER_ID>,
                ["can_share": [True|False]]}, ...
            ]}

        Memberships absent from the body are deleted; new ones are
        created; existing ones have can_share updated when supplied.
        """
        self._check_can_access_image_members(req.context)
        # Make sure the image exists
        session = self.db_api.get_session()
        try:
            image = self.db_api.image_get(req.context, image_id,
                                          session=session)
        except exception.NotFound:
            msg = _("Image %(id)s not found")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _("Access denied to image %(id)s but returning 'not found'")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        # Can they manipulate the membership?
        if not self.db_api.is_image_sharable(req.context, image):
            msg = _("User lacks permission to share image %(id)s")
            LOG.info(msg % {'id': image_id})
            # Client-facing message differs from the logged one above.
            msg = _("No permission to share that image")
            raise webob.exc.HTTPForbidden(msg)
        # Get the membership list
        try:
            memb_list = body['memberships']
        except Exception, e:
            # Malformed entity...
            msg = _("Invalid membership association specified for "
                    "image %(id)s")
            LOG.info(msg % {'id': image_id})
            msg = _("Invalid membership association: %s") % e
            raise webob.exc.HTTPBadRequest(explanation=msg)
        add = []
        existing = {}
        # Walk through the incoming memberships
        for memb in memb_list:
            try:
                datum = dict(image_id=image['id'],
                             member=memb['member_id'],
                             can_share=None)
            except Exception, e:
                # Malformed entity...
                msg = _("Invalid membership association specified for "
                        "image %(id)s")
                LOG.info(msg % {'id': image_id})
                msg = _("Invalid membership association: %s") % e
                raise webob.exc.HTTPBadRequest(explanation=msg)
            # Figure out what can_share should be
            if 'can_share' in memb:
                datum['can_share'] = bool(memb['can_share'])
            # Try to find the corresponding membership
            members = self.db_api.image_member_find(req.context,
                                                    image_id=datum['image_id'],
                                                    member=datum['member'],
                                                    session=session)
            try:
                member = members[0]
            except IndexError:
                # Default can_share
                # (bool(None) is False, so omitted can_share defaults off)
                datum['can_share'] = bool(datum['can_share'])
                add.append(datum)
            else:
                # Are we overriding can_share?
                if datum['can_share'] is None:
                    datum['can_share'] = members[0]['can_share']
                existing[member['id']] = {
                    'values': datum,
                    'membership': member,
                }
        # We now have a filtered list of memberships to add and
        # memberships to modify. Let's start by walking through all
        # the existing image memberships...
        existing_members = self.db_api.image_member_find(req.context,
                                                         image_id=image['id'])
        for memb in existing_members:
            if memb['id'] in existing:
                # Just update the membership in place
                update = existing[memb['id']]['values']
                self.db_api.image_member_update(req.context, memb, update,
                                                session=session)
            else:
                # Outdated one; needs to be deleted
                self.db_api.image_member_delete(req.context, memb,
                                                session=session)
        # Now add the non-existent ones
        for memb in add:
            self.db_api.image_member_create(req.context, memb, session=session)
        # Make an appropriate result
        msg = _("Successfully updated memberships for image %(id)s")
        LOG.info(msg % {'id': image_id})
        return webob.exc.HTTPNoContent()
    @utils.mutating
    def update(self, req, image_id, id, body=None):
        """
        Adds a membership to the image, or updates an existing one.
        If a body is present, it is a dict with the following format::
            {"member": {
                "can_share": [True|False]
            }}
        If "can_share" is provided, the member's ability to share is
        set accordingly. If it is not provided, existing memberships
        remain unchanged and new memberships default to False.
        """
        self._check_can_access_image_members(req.context)
        # Make sure the image exists
        try:
            image = self.db_api.image_get(req.context, image_id)
        except exception.NotFound:
            msg = _("Image %(id)s not found")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _("Access denied to image %(id)s but returning 'not found'")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        # Can they manipulate the membership?
        if not self.db_api.is_image_sharable(req.context, image):
            msg = _("User lacks permission to share image %(id)s")
            LOG.info(msg % {'id': image_id})
            # Client-facing message differs from the logged one above.
            msg = _("No permission to share that image")
            raise webob.exc.HTTPForbidden(msg)
        # Determine the applicable can_share value
        # (None means "leave an existing membership's flag unchanged")
        can_share = None
        if body:
            try:
                can_share = bool(body['member']['can_share'])
            except Exception, e:
                # Malformed entity...
                msg = _("Invalid membership association specified for "
                        "image %(id)s")
                LOG.info(msg % {'id': image_id})
                msg = _("Invalid membership association: %s") % e
                raise webob.exc.HTTPBadRequest(explanation=msg)
        # Look up an existing membership...
        session = self.db_api.get_session()
        members = self.db_api.image_member_find(req.context,
                                                image_id=image_id,
                                                member=id,
                                                session=session)
        if members:
            if can_share is not None:
                values = dict(can_share=can_share)
                self.db_api.image_member_update(req.context, members[0],
                                                values, session=session)
        else:
            # New membership: bool(None) is False when can_share was omitted.
            values = dict(image_id=image['id'], member=id,
                          can_share=bool(can_share))
            self.db_api.image_member_create(req.context, values,
                                            session=session)
        msg = _("Successfully updated a membership for image %(id)s")
        LOG.info(msg % {'id': image_id})
        return webob.exc.HTTPNoContent()
    @utils.mutating
    def delete(self, req, image_id, id):
        """
        Removes a membership from the image.

        Deleting a membership that does not exist is a silent no-op.
        """
        self._check_can_access_image_members(req.context)
        # Make sure the image exists
        try:
            image = self.db_api.image_get(req.context, image_id)
        except exception.NotFound:
            msg = _("Image %(id)s not found")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        except exception.Forbidden:
            # If it's private and doesn't belong to them, don't let on
            # that it exists
            msg = _("Access denied to image %(id)s but returning 'not found'")
            LOG.info(msg % {'id': image_id})
            raise webob.exc.HTTPNotFound()
        # Can they manipulate the membership?
        if not self.db_api.is_image_sharable(req.context, image):
            msg = _("User lacks permission to share image %(id)s")
            LOG.info(msg % {'id': image_id})
            # Client-facing message differs from the logged one above.
            msg = _("No permission to share that image")
            raise webob.exc.HTTPForbidden(msg)
        # Look up an existing membership
        # NOTE(review): if the find() returns an empty list, members[0]
        # raises IndexError, which is NOT caught below — confirm whether
        # image_member_find can return [] without raising NotFound.
        try:
            session = self.db_api.get_session()
            members = self.db_api.image_member_find(req.context,
                                                    image_id=image_id,
                                                    member=id,
                                                    session=session)
            self.db_api.image_member_delete(req.context,
                                            members[0],
                                            session=session)
        except exception.NotFound:
            pass
        # Make an appropriate result
        msg = _("Successfully deleted a membership from image %(id)s")
        LOG.info(msg % {'id': image_id})
        return webob.exc.HTTPNoContent()
def index_shared_images(self, req, id):
"""
Retrieves images shared with the given member.
"""
try:
members = self.db_api.image_member_find(req.context, member=id)
except exception.NotFound, e:
msg = _("Member %(id)s not found")
LOG.info(msg % {'id': id})
msg = _("Membership could not be found.")
raise webob.exc.HTTPBadRequest(explanation=msg)
msg = _("Returning list of images shared with member %(id)s")
LOG.info(msg % {'id': id})
return dict(shared_images=make_member_list(members,
image_id='image_id',
can_share='can_share'))
def make_member_list(members, **attr_map):
"""
Create a dict representation of a list of members which we can use
to serialize the members list. Keyword arguments map the names of
optional attributes to include to the database attribute.
"""
def _fetch_memb(memb, attr_map):
return dict([(k, memb[v]) for k, v in attr_map.items()
if v in memb.keys()])
# Return the list of members with the given attribute mapping
return [_fetch_memb(memb, attr_map) for memb in members
if not memb.deleted]
def create_resource():
"""Image members resource factory method."""
deserializer = wsgi.JSONRequestDeserializer()
serializer = wsgi.JSONResponseSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n# Strawberry Blog Engine\n#\n# Copyright (c) 2014 Regis FLORET\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restri... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Strawberry Blog Engine\n#\n# Copyright (c) 2014 Regis FLORET\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Softwar... | ```python
# -*- coding: utf-8 -*-
# Strawberry Blog Engine
#
# Copyright (c) 2014 Regis FLORET
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
__author__ = 'Regis FLORET'
``` |
[
{
"content": "```python\nimport os\nfrom flask import Flask, json\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nUPLOAD_DIRECTORY = 'uploads'\n\napp = Flask(__name__, static_url_path='')\n\nprint 'Newman flask application starting...'\n# Configure root logging which effects console - don... | [
{
"content": "<|memory_start|>```python\nimport os\nfrom flask import Flask, json\n\nimport logging\nfrom logging.handlers import RotatingFileHandler\n\nUPLOAD_DIRECTORY = 'uploads'\n\napp = Flask(__name__, static_url_path='')\n\nprint 'Newman flask application starting...'\n# Configure root logging which effec... | ```python
import os
from flask import Flask, json
import logging
from logging.handlers import RotatingFileHandler
UPLOAD_DIRECTORY = 'uploads'
app = Flask(__name__, static_url_path='')
print 'Newman flask application starting...'
# Configure root logging which effects console - dont want newman debug to go to console
# TODO not exactly correct - need to control console and files seperately
for handler in app.logger.handlers:
handler.setLevel(logging.INFO)
# Configure Newman application logging
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s -[%(filename)s:%(lineno)s - %(funcName)20s() ]- %(message)s")
newmanlog_handler = RotatingFileHandler('log/flask-newman.log', maxBytes=10000000, backupCount=10)
newmanlog_handler.setFormatter(formatter)
newmanlog_handler.setLevel(logging.DEBUG)
app.logger.addHandler(newmanlog_handler)
app.logger.info('Newman flask application starting.')
# Configure Flask WSGI server access logging
accesslogger = logging.getLogger('werkzeug')
accesslogger.setLevel(logging.INFO)
accesslog_handler = logging.FileHandler('log/flask-access.log')
accesslogger.addHandler(accesslog_handler)
# app.logger.addHandler(accesslog_handler)
# Configure the application
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
json_url = os.path.join(SITE_ROOT, "conf", "app_config.json")
data = json.load(open(json_url))
app.config["root_context"] = data
app.config["site_root"] = SITE_ROOT
app.config["upload_dir"] = UPLOAD_DIRECTORY
# 2 GB max
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024 * 1024
app.logger.debug('Newman config: {}'.format(data))
app.logger.info('Newman config loaded.')
# from app.newman_config import getTileCacheConfig
# app.logger.info('Newman config loaded. {}'.format(getTileCacheConfig()))
from app import root_context
from app import datasource
from app import app_config
from app import search
from app import ingester
from app import email
from app import export_services
from app import tag_services
from app import aggregations
from app import geo
from app import file_uploader
``` |
[
{
"content": "```python\nimport sublime, sublime_plugin\nimport os, shutil, traceback, json\nfrom ...libs import util\nfrom ...libs import window_view_manager\nfrom ...libs import FlowCLI\nfrom .refactor_preview import RefactorPreview\n\nclass JavascriptEnhancementsRefactorSafeMoveCommand(sublime_plugin.TextCom... | [
{
"content": "<|memory_start|>```python\nimport sublime, sublime_plugin\nimport os, shutil, traceback, json\nfrom ...libs import util\nfrom ...libs import window_view_manager\nfrom ...libs import FlowCLI\nfrom .refactor_preview import RefactorPreview\n\nclass JavascriptEnhancementsRefactorSafeMoveCommand(sublim... | ```python
import sublime, sublime_plugin
import os, shutil, traceback, json
from ...libs import util
from ...libs import window_view_manager
from ...libs import FlowCLI
from .refactor_preview import RefactorPreview
class JavascriptEnhancementsRefactorSafeMoveCommand(sublime_plugin.TextCommand):
def run(self, edit, **args):
view = self.view
window = view.window()
file_name = view.file_name()
inputs = args.get("inputs")
view_id_caller = args.get("view_id_caller") if "view_id_caller" in args else None
new_path = os.path.normpath(inputs["new_path"].strip())
settings = util.get_project_settings()
javascript_files = [file_name]
preview_view = None
if view.is_dirty():
sublime.error_message("Cannot move this file. There are unsaved modifications to the buffer. Save the file before use this.")
return
if not file_name:
sublime.error_message("Cannot move this file. File name is empty.")
return
if not new_path or new_path.endswith(os.path.sep) or os.path.isdir(new_path):
sublime.error_message("The File path is empty or incorrect.")
return
if new_path == file_name:
sublime.error_message("The file path is the same as before.")
return
if settings:
for root, dirs, files in os.walk(settings["project_dir_name"]):
if os.path.sep + "node_modules" in root:
continue
for file in files:
if file.endswith(".js"):
javascript_files.append(os.path.join(root, file))
if not args.get("preview"):
if os.path.isfile(new_path):
if not sublime.ok_cancel_dialog(new_path + " already exists.", "Move anyway"):
return
if not os.path.isdir(os.path.dirname(new_path)):
try:
os.makedirs(os.path.dirname(new_path))
except FileNotFoundError as e:
print(traceback.format_exc())
sublime.error_message("Cannot create the path. On Windows could be caused by the '[WinError 206] The filename or extension is too long' error.")
return
except Exception as e:
print(traceback.format_exc())
sublime.error_message("Cannot create the path. The filename, directory name, or volume label syntax could be incorrect.")
return
else:
preview_view = RefactorPreview("Refactor - Safe Move Preview")
preview_view.append_text("Refactor - Safe Move Preview\n\nList of files that will be updated\n\n")
if javascript_files:
imports = {}
flow_cli = FlowCLI(view)
result = flow_cli.get_imports(files=javascript_files)
if result [0]:
imports = result[1]
for k, v in imports.items():
is_same_file = k == file_name
if v["requirements"]:
if is_same_file:
with open(k, "r+", encoding="utf-8") as file:
content = file.read()
preview_content = ""
delta = 0
lines_updated = []
requirements_sorted = sorted(v["requirements"], key=lambda req: int(req["loc"]["start"]["offset"]))
for req in requirements_sorted:
start_offset = int(req["loc"]["start"]["offset"]) + 1 + delta if sublime.platform() != "windows" else view.text_point(int(req["line"]) - 1, int(req["start"])) + delta
end_offset = int(req["loc"]["end"]["offset"]) - 1 + delta if sublime.platform() != "windows" else view.text_point(int(req["endline"]) - 1, int(req["end"]) - 1) + delta
req_new_path = req["import"] if os.path.isabs(req["import"]) else os.path.abspath(os.path.dirname(k) + os.path.sep + req["import"])
if os.path.dirname(new_path) == os.path.dirname(req_new_path):
rel_new_path = "./" + os.path.basename(req_new_path)
else:
rel_new_path = os.path.relpath(req_new_path, start=os.path.dirname(new_path))
if sublime.platform() == "windows":
rel_new_path = util.convert_path_to_unix(rel_new_path)
if not rel_new_path.startswith(".."):
rel_new_path = "./" + rel_new_path
delta += len(rel_new_path) - len(content[start_offset:end_offset])
content = content[:start_offset] + rel_new_path + content[end_offset:]
if args.get("preview"):
line = int(req["line"]) - 1
lines_updated.append(line)
if args.get("preview"):
splitted_content = content.splitlines()
preview_content = "- Move From:\n" + file_name + "\n\n"
preview_content += "- To:\n" + new_path + "\n\n"
range_start_before = -1
is_first_range_start = True
for range_start in lines_updated:
line_number = str(range_start + 1)
space_before_line_number = " " * ( 5 - len(line_number) )
if range_start - 1 != range_start_before and not is_first_range_start:
space_before_line_number = space_before_line_number + ("." * len(line_number) ) + "\n" + space_before_line_number
is_first_range_start = False
preview_content += space_before_line_number + line_number + (": " if range_start in lines_updated else " ") + splitted_content[range_start] + "\n"
range_start_before = range_start
range_start += 1
preview_content += "\n\n"
preview_view.append_text(preview_content)
else:
file.seek(0)
file.write(content)
file.truncate()
else:
for req in v["requirements"]:
if file_name == ( req["import"] if os.path.isabs(req["import"]) else os.path.abspath(os.path.dirname(k) + os.path.sep + req["import"]) ):
with open(k, "r+", encoding="utf-8") as file:
content = file.read()
start_offset = int(req["loc"]["start"]["offset"]) + 1 if sublime.platform() != "windows" else view.text_point(int(req["line"]) - 1, int(req["start"]))
end_offset = int(req["loc"]["end"]["offset"]) - 1 if sublime.platform() != "windows" else view.text_point(int(req["endline"]) - 1, int(req["end"]) - 1)
if os.path.dirname(k) == os.path.dirname(new_path):
rel_new_path = "./" + os.path.basename(new_path)
else:
rel_new_path = os.path.relpath(new_path, start=os.path.dirname(k))
if sublime.platform() == "windows":
rel_new_path = util.convert_path_to_unix(rel_new_path)
if not rel_new_path.startswith(".."):
rel_new_path = "./" + rel_new_path
content = content[:start_offset] + rel_new_path + content[end_offset:]
if args.get("preview"):
splitted_content = content.splitlines()
preview_content = k + ":\n\n"
line = int(req["line"]) - 1
range_start = max(0, line - 2)
range_end = min(line + 2, len(splitted_content) - 1)
while range_start <= range_end:
line_number = str(range_start + 1)
space_before_line_number = " " * ( 5 - len(line_number) )
preview_content += space_before_line_number + line_number + (": " if line == range_start else " ") + splitted_content[range_start] + "\n"
range_start += 1
preview_content += "\n"
preview_view.append_text(preview_content)
else:
file.seek(0)
file.write(content)
file.truncate()
if not args.get("preview"):
shutil.move(file_name, new_path)
window.focus_group(0)
new_view = window.open_file(new_path)
window.focus_group(1)
if not args.get("preview"):
RefactorPreview.close("Refactor - Safe Move Preview")
window_view_manager.close(view.id())
# added view.set_scratch(True) and sublime.set_timeout_async in order to not crash Sublime Text 3
view.set_scratch(True)
sublime.set_timeout_async(lambda: view.close())
else:
sublime.error_message("Error: can't get project settings")
def is_enabled(self, **args) :
view = self.view
return util.selection_in_js_scope(view)
def is_visible(self, **args) :
view = self.view
if not view.file_name():
return False
settings = util.get_project_settings()
if not settings or not util.selection_in_js_scope(view):
return False
return True
``` |
[
{
"content": "Here is the source code:\n```python\nfrom __future__ import annotations\nfrom collections import OrderedDict as OD\nfrom uuid import uuid4\nfrom itertools import chain\nfrom operator import itemgetter\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass NodeMeta(type):\n '''used to ma... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nfrom __future__ import annotations\nfrom collections import OrderedDict as OD\nfrom uuid import uuid4\nfrom itertools import chain\nfrom operator import itemgetter\nimport logging\nlogger = logging.getLogger(__name__)\n\nclass NodeMeta(type):\n ... | ```python
from __future__ import annotations
from collections import OrderedDict as OD
from uuid import uuid4
from itertools import chain
from operator import itemgetter
import logging
logger = logging.getLogger(__name__)
class NodeMeta(type):
'''used to magically update the nb_attrs'''
def __new__(mcs, name, bases, attrs):
_nb_attrs = attrs.get('_nb_attrs', frozenset())
for b in bases:
if hasattr(b, '_nb_attrs'):
_nb_attrs |= b._nb_attrs
attrs['_nb_attrs'] = _nb_attrs
new_class = super().__new__(mcs, name, bases, attrs)
return new_class
class GetIoFunc:
"""Non data descriptor to get a user supplied IO function from a parent node
if necessary
"""
def __set_name__(self, owner, name):
self.name = name
def __get__(self, instance, owner):
"""As this is a non data descriptor, the instance won't ever have a
reference to the supplied function. Need to query the parent
"""
if not instance:
raise AttributeError("Descriptor only to be used on instances")
if instance.parent:
func = getattr(instance.parent, self.name)
setattr(instance, self.name, func)
return func
else:
raise AttributeError(f"No {self.name} function provided!")
class Node(metaclass=NodeMeta):
'''A node in the tree data structure representing the register map'''
#these names are not to be looked for in children
#when pickling, only be concerned with these
_nb_attrs = frozenset(['name', 'descr', 'doc', 'uuid', '_ref', '_alias'])
_reg_read_func = GetIoFunc()
_reg_write_func = GetIoFunc()
_block_read_func = GetIoFunc()
_block_write_func = GetIoFunc()
def __init__(self, **kwargs):
'''
Args:
name(str) : A the name of the Node
descr(str) : A description for the node (usually shorter than doc)
doc(str) : A documentation string for the node
uuid(str) : A Universal Identifier
_ref(Node): If not None, the reference to the Node that this
node is an instance of
_alias(bool): If True, this node is an alias for the reference node
'''
for key in self._nb_attrs:
setattr(self, key, kwargs.get(key, None))
self._parent = None
if self.name is None:
raise ValueError("Passed None for name parameter. name is a required parameter")
#if not self.name.isidentifier():
# raise ValueError("supplied name is not a valid identifier: {}".format(self.name))
self._children = {}
self.__doc__ = next((i for i in (self.descr, self.doc) if i), 'No description')
self.uuid = kwargs.get('uuid', uuid4().hex)
unexpecteds = kwargs.keys() - self._nb_attrs
if unexpecteds:
raise ValueError("Got unexpected keyword arguments: {}".format('\n'.join(unexpecteds)))
@property
def parent(self):
return self._parent
def _add(self, item):
"""Add node to self._children. Called from `parent` property
"""
if isinstance(item, Node):
if item in self:
return #already added
elif item.name in self:
if item.parent:
#maintain consistency as we're replacing an existing item
item.parent._remove(item)
self._children[item.name] = item
item._parent = self
else:
raise ValueError("Expected argument to be of type Node or one of "
"its descendents")
def _remove(self, item):
if isinstance(item, Node) and item in self:
self._children.pop(item.name)
item._parent = None
else:
raise ValueError("Expected argument to be of type Node or one of "
"its descendents")
def __contains__(self, item):
if isinstance(item, Node):
return item.name in self._children
elif isinstance(item, str):
return item in self._children
else:
return NotImplemented
def __dir__(self):
local_items = {f for f in vars(self) if f[0] != '_'}
children = {c for c in self._children}
class_objs = {s for s in dir(type(self)) if s[0] != '_'}
return list(local_items | children | class_objs)
def __getattr__(self, name):
if name in self._nb_attrs or name[:2] == '__':
raise AttributeError(f"{name} not found")
try:
return self._children[name]
except (KeyError, AttributeError) as e:
raise AttributeError(f"{name} not found")
def __getitem__(self, item):
return self._children[item]
def __iter__(self):
return (child for child in self._children.values())
def _walk(self, levels=2, top_down=True):
'return up to <levels> worth of nodes'
if levels == 0: #i am a leaf node
yield self
return
if top_down:
yield self
for node in self:
#if a negative number is supplied, all elements below will be traversed
if levels >= 0:
new_levels = levels -1
else:
new_levels = levels
yield from node._walk(levels=new_levels, top_down=top_down)
if not top_down:
yield self
def __bool__(self):
return True #don't call __len__
def __len__(self):
return len(self._children)
def __str__(self):
return f'{type(self).__name__}: {self.name}'
def __repr__(self):
items = ((k,getattr(self, k)) for k in self._nb_attrs)
#don't want these to be in the repr
me = {k:v for (k,v) in items if v is not None}
if '_ref' in me:
me['_ref'] = me['_ref'].uuid
arg_strings = (f'{k}={v!r}' for (k,v) in sorted(me.items(), key=itemgetter(0)))
return f"{type(self).__name__}({','.join(arg_strings)})"
def _copy(self, *, new_instance:bool=False, new_alias:bool=False,
_context:dict=None, _deep_copy:bool=True, **kwargs):
"""Create a deep copy of this object
:param new_instance: Indicate if the copy should be considered an
instance of this node. When a node is an instance of another, it
will have a `_ref` attribute pointing to it.
:param new_alias: Indicate if the copy should be an alias of this node.
If True, the copy will have an `_alias` boolean set to True.
:param _context: A dictionary holding mapping of original objects to
newly created copies. This is essential for ensuring that when the
same child bitfield is being copied from multiple parent bitfield
references, only a single newly created copy will be used
:param _deep_copy: If True, copy children too. Mainly used in
BitFieldRef override.
:returns: The newly created copy
"""
if _context is None:
_context = {}
elif self in _context:
return _context[self] # I've already been copied
existing_items = {k:getattr(self, k) for k in self._nb_attrs}
#It's a copy so shouldn't have the same uuid
existing_items.pop('uuid', None)
existing_items.update(kwargs)
if new_instance:
existing_items['_ref'] = self
elif not ('_ref' in kwargs and kwargs['_ref']) and self._ref:
existing_items['_ref'] = self._ref._copy(_context=_context)
if new_alias:
existing_items['_alias'] = True
new_obj = type(self)(**existing_items)
_context[self] = new_obj
if _deep_copy:
for obj in self:
new_obj._add(obj._copy(new_alias=new_alias, _context=_context))
return new_obj
def validate(self):
"""Do some validation checks on the parameters set on this instance and
that of the child bitfield
:returns: Iterable of errors found
"""
for child in self:
yield from child.validate()
``` |
[
{
"content": "Repeat the full code snippet:\n```python\nfrom django.core.management import call_command\n\nfrom ... import models\nfrom ...fields import get_one_to_many_fields, get_self_reference_fields\nfrom ._base import DumperBaseCommand\n\n\nclass Command(DumperBaseCommand):\n help = 'Load data'\n\n d... | [
{
"content": "Repeat the full code snippet:\n<|memory_start|>```python\nfrom django.core.management import call_command\n\nfrom ... import models\nfrom ...fields import get_one_to_many_fields, get_self_reference_fields\nfrom ._base import DumperBaseCommand\n\n\nclass Command(DumperBaseCommand):\n help = 'Loa... | ```python
from django.core.management import call_command
from ... import models
from ...fields import get_one_to_many_fields, get_self_reference_fields
from ._base import DumperBaseCommand
class Command(DumperBaseCommand):
help = 'Load data'
def handle(self, **options):
self.verbosity = options['verbosity']
self.load_all()
for field in get_self_reference_fields(models.Country):
self.load_country_self_reference(field.name)
def loaddata(self, fixture_path):
if not self.is_excluded(fixture_path):
call_command(
'loaddata',
fixture_path.as_posix(),
verbosity=self.verbosity)
def get_fixtures(self, **kwargs):
return sorted((
fixture for fixture in self._rootdir.glob('**/*.*')
if 'self' != fixture.parent.stem
), **kwargs)
def load_all(self):
one_to_many_fields = [
field.name for field in get_one_to_many_fields(models.Country)
]
# Sorted key: /path/to/fixture.ext < fixture.ext
fixtures = self.get_fixtures(
key=lambda path: path.stem if any(
name in path.stem or path.parent.match(name)
for name in one_to_many_fields
) else path.as_posix())
for fixture_path in fixtures:
self.loaddata(fixture_path)
def load_country_self_reference(self, name):
with self.open_fixture('self/{}'.format(name), 'r') as fixture:
for data in fixture.read():
country = models.Country.objects.get(cca2=data.object.pk)
getattr(country, name).add(*data.m2m_data[name])
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n ... | ```python
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add the ``icon`` CharField to ``Boostrap3AlertPlugin``.

    NOTE: South migration files are frozen snapshots of the schema at the
    time they were generated; do not edit the ``models`` dict or the
    forwards/backwards logic after the migration has been applied.
    """

    def forwards(self, orm):
        # Adding field 'Boostrap3AlertPlugin.icon'
        db.add_column(u'aldryn_bootstrap3_boostrap3alertplugin', 'icon',
                      self.gf(u'django.db.models.fields.CharField')(default=u'', max_length=255, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Boostrap3AlertPlugin.icon'
        db.delete_column(u'aldryn_bootstrap3_boostrap3alertplugin', 'icon')

    # Auto-generated ORM freeze: the full model state South replays when
    # running this migration. Generated by schemamigration — do not edit.
    models = {
        u'aldryn_bootstrap3.boostrap3alertplugin': {
            'Meta': {'object_name': 'Boostrap3AlertPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
            'icon': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.boostrap3blockquoteplugin': {
            'Meta': {'object_name': 'Boostrap3BlockquotePlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'aldryn_bootstrap3.boostrap3buttonplugin': {
            'Meta': {'object_name': 'Boostrap3ButtonPlugin', '_ormbases': ['cms.CMSPlugin']},
            'anchor': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'btn_block': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'btn_context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255', 'blank': 'True'}),
            'btn_size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'}),
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['filer.File']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'icon_left': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'icon_right': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'}),
            'mailto': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'page_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'txt_context': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "u'lnk'", 'max_length': '10'}),
            'url': ('django.db.models.fields.URLField', [], {'default': "u''", 'max_length': '200', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.boostrap3iconplugin': {
            'Meta': {'object_name': 'Boostrap3IconPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'icon': (u'django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'})
        },
        u'aldryn_bootstrap3.boostrap3imageplugin': {
            'Meta': {'object_name': 'Boostrap3ImagePlugin', '_ormbases': ['cms.CMSPlugin']},
            'alt': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'aspect_ratio': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'file': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['filer.Image']"}),
            'shape': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '64', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
        },
        u'aldryn_bootstrap3.boostrap3labelplugin': {
            'Meta': {'object_name': 'Boostrap3LabelPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'}),
            'label': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '256', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.boostrap3panelbodyplugin': {
            'Meta': {'object_name': 'Boostrap3PanelBodyPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
        },
        u'aldryn_bootstrap3.boostrap3panelfooterplugin': {
            'Meta': {'object_name': 'Boostrap3PanelFooterPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"})
        },
        u'aldryn_bootstrap3.boostrap3panelheadingplugin': {
            'Meta': {'object_name': 'Boostrap3PanelHeadingPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'title': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'})
        },
        u'aldryn_bootstrap3.boostrap3panelplugin': {
            'Meta': {'object_name': 'Boostrap3PanelPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'context': (u'django.db.models.fields.CharField', [], {'default': "u'default'", 'max_length': '255'})
        },
        u'aldryn_bootstrap3.boostrap3wellplugin': {
            'Meta': {'object_name': 'Boostrap3WellPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'+'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'size': (u'django.db.models.fields.CharField', [], {'default': "u'md'", 'max_length': '255', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.bootstrap3columnplugin': {
            'Meta': {'object_name': 'Bootstrap3ColumnPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            u'lg_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'lg_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'lg_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'lg_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'md_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'md_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'md_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'md_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'sm_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'sm_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'sm_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'sm_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'tag': ('django.db.models.fields.SlugField', [], {'default': "u'div'", 'max_length': '50'}),
            u'xs_col': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'xs_offset': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'xs_pull': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            u'xs_push': (u'django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        u'aldryn_bootstrap3.bootstrap3rowplugin': {
            'Meta': {'object_name': 'Bootstrap3RowPlugin', '_ormbases': ['cms.CMSPlugin']},
            'classes': (u'django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
            'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'filer.file': {
            'Meta': {'object_name': 'File'},
            '_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'all_files'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
            'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
            'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'owned_files'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_filer.file_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sha1': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '40', 'blank': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        u'filer.folder': {
            'Meta': {'ordering': "(u'name',)", 'unique_together': "((u'parent', u'name'),)", 'object_name': 'Folder'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'filer_owned_folders'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['filer.Folder']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        'filer.image': {
            'Meta': {'object_name': 'Image'},
            '_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            '_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
            'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        u'sites.site': {
            'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['aldryn_bootstrap3']
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n#!/usr/bin/python -tt\n# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Google's Python Class\n# http://code.google.com/edu/languages/google-python-cl... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n#!/usr/bin/python -tt\n# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Google's Python Class\n# http://code.google.com/edu/languages/... | ```python
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', using 'many' once count reaches 10."""
  quantity = 'many' if count >= 10 else '%d' % count
  return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first two plus the last two chars of s, or '' if len(s) < 2."""
  if len(s) < 2:
    return ''
  return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Return s with every later occurrence of its first char replaced by '*'.

  e.g. 'babble' -> 'ba**le'. Assumes s has length 1 or more.
  """
  first = s[:1]
  # Mask every occurrence (including position 0), then restore the first char.
  masked = s.replace(first, '*')
  return first + masked[1:]
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', 'pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Return '<a> <b>' with the first two chars of each string swapped.

  e.g. mix_up('mix', 'pod') -> 'pox mid'. Assumes both are length 2 or more.
  """
  swapped_a = b[:2] + a[2:]
  swapped_b = a[:2] + b[2:]
  return swapped_a + ' ' + swapped_b
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  # Print a one-line comparison of an actual vs. expected value, prefixed
  # with ' OK ' on a match and ' X ' on a mismatch.
  # NOTE: uses the Python 2 print statement, like the rest of this file.
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  # Exercise each function above with the inputs from the assignment,
  # using test() to report pass/fail for every case.
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')

# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
``` |
[
{
"content": "Here is the source code:\n```python\nimport os\nimport getpass\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom q2_initialization import xavier_weight_init\nimport data_utils.utils as du\nimport data_utils.ner as ner\nfrom utils import data_iterator\nfrom model import ... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport os\nimport getpass\nimport sys\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nfrom q2_initialization import xavier_weight_init\nimport data_utils.utils as du\nimport data_utils.ner as ner\nfrom utils import data_iterator\nfr... | ```python
import os
import getpass
import sys
import time
import numpy as np
import tensorflow as tf
from q2_initialization import xavier_weight_init
import data_utils.utils as du
import data_utils.ner as ner
from utils import data_iterator
from model import LanguageModel
class Config(object):
  """Holds model hyperparams and data information.

  The config class is used to store various hyperparameters and dataset
  information parameters. Model objects are passed a Config() object at
  instantiation.
  """
  embed_size = 50       # Dimensionality of each word embedding.
  batch_size = 64       # Examples per SGD minibatch.
  label_size = 5        # Number of NER tag classes (O, LOC, MISC, ORG, PER).
  hidden_size = 100     # Width of the hidden layer.
  max_epochs = 24       # Upper bound on training epochs.
  early_stopping = 2    # Patience (epochs) — presumably checked by the training loop (not shown); confirm.
  dropout = 0.9         # Dropout value fed at train time — presumably a keep probability; confirm in add_model.
  lr = 0.001            # Learning rate.
  l2 = 0.001            # L2 regularization strength.
  window_size = 3       # Words per context window (passed as wsize to docs_to_windows).
class NERModel(LanguageModel):
"""Implements a NER (Named Entity Recognition) model.
This class implements a deep network for named entity recognition. It
inherits from LanguageModel, which has an add_embedding method in addition to
the standard Model method.
"""
  def load_data(self, debug=False):
    """Loads starter word-vectors and train/dev/test data.

    Sets self.wv (pretrained vectors), self.num_to_tag, and the six
    X_/y_ split arrays. With debug=True, train and dev are truncated to
    their first 1024 windows for fast iteration.
    """
    # Load the starter word vectors
    self.wv, word_to_num, num_to_word = ner.load_wv(
        'data/ner/vocab.txt', 'data/ner/wordVectors.txt')
    tagnames = ['O', 'LOC', 'MISC', 'ORG', 'PER']
    self.num_to_tag = dict(enumerate(tagnames))
    # NOTE: dict.iteritems() is Python 2 only.
    tag_to_num = {v: k for k, v in self.num_to_tag.iteritems()}

    # Load the training set
    docs = du.load_dataset('data/ner/train')
    self.X_train, self.y_train = du.docs_to_windows(
        docs, word_to_num, tag_to_num, wsize=self.config.window_size)
    if debug:
      self.X_train = self.X_train[:1024]
      self.y_train = self.y_train[:1024]

    # Load the dev set (for tuning hyperparameters)
    docs = du.load_dataset('data/ner/dev')
    self.X_dev, self.y_dev = du.docs_to_windows(
        docs, word_to_num, tag_to_num, wsize=self.config.window_size)
    if debug:
      self.X_dev = self.X_dev[:1024]
      self.y_dev = self.y_dev[:1024]

    # Load the test set (dummy labels only)
    docs = du.load_dataset('data/ner/test.masked')
    self.X_test, self.y_test = du.docs_to_windows(
        docs, word_to_num, tag_to_num, wsize=self.config.window_size)
  def add_placeholders(self):
    """Generate placeholder variables to represent the input tensors

    These placeholders are used as inputs by the rest of the model building
    code and will be fed data during training.  Note that when "None" is in a
    placeholder's shape, it's flexible

    Adds following nodes to the computational graph

    input_placeholder: Input placeholder tensor of shape
                       (None, window_size), type tf.int32
    labels_placeholder: Labels placeholder tensor of shape
                        (None, label_size), type tf.float32
    dropout_placeholder: Dropout value placeholder (scalar),
                         type tf.float32

    Add these placeholders to self as the instance variables

      self.input_placeholder
      self.labels_placeholder
      self.dropout_placeholder

    (Don't change the variable names)
    """
    ### YOUR CODE HERE
    # Word-index windows; leading None leaves the batch dimension flexible.
    self.input_placeholder = tf.placeholder(
        tf.int32, shape=[None, self.config.window_size], name='Input')
    # One-hot-style targets, one row of label_size per example.
    self.labels_placeholder = tf.placeholder(
        tf.float32, shape=[None, self.config.label_size], name='Target')
    # Scalar dropout value (no shape argument).
    self.dropout_placeholder = tf.placeholder(tf.float32, name='Dropout')
    ### END YOUR CODE
  def create_feed_dict(self, input_batch, dropout, label_batch=None):
    """Creates the feed_dict for softmax classifier.

    A feed_dict takes the form of:

    feed_dict = {
        <placeholder>: <tensor of values to be passed for placeholder>,
        ....
    }

    Hint: The keys for the feed_dict should be a subset of the placeholder
          tensors created in add_placeholders.
    Hint: When label_batch is None, don't add a labels entry to the feed_dict.

    Args:
      input_batch: A batch of input data.
      label_batch: A batch of label data.
    Returns:
      feed_dict: The feed dictionary mapping from placeholders to values.
    """
    ### YOUR CODE HERE
    feed_dict = {
        self.input_placeholder: input_batch,
    }
    # Labels are omitted at prediction time (label_batch=None), and dropout
    # is omitted when the caller passes None, so only fed placeholders
    # appear in the dict.
    if label_batch is not None:
      feed_dict[self.labels_placeholder] = label_batch
    if dropout is not None:
      feed_dict[self.dropout_placeholder] = dropout
    ### END YOUR CODE
    return feed_dict
def add_embedding(self):
    """Add the embedding layer mapping word ids to dense vectors.

    Creates an embedding matrix L of shape (len(self.wv), embed_size),
    looks up the embeddings for the word ids in input_placeholder, and
    flattens each window of embeddings into a single row.

    Returns:
        window: tf.Tensor of shape (-1, window_size * embed_size).
    """
    # The embedding lookup is currently only implemented for the CPU,
    # so pin these ops explicitly.
    with tf.device('/cpu:0'):
        # Default initializer is used; pre-trained vectors could be
        # substituted here without changing the interface.
        embedding = tf.get_variable('Embedding', [len(self.wv), self.config.embed_size])
        # (batch, window_size, embed_size) after the lookup...
        window = tf.nn.embedding_lookup(embedding, self.input_placeholder)
        # ...then concatenate the window's vectors into one flat row
        # per example (-1 lets TF infer the batch dimension).
        window = tf.reshape(
            window, [-1, self.config.window_size * self.config.embed_size])
    return window
def add_model(self, window):
    """Add the 1-hidden-layer feed-forward network.

    Architecture: tanh(window @ W + b1) @ U + b2, followed by dropout
    on the logits. L2 penalties on W and U are accumulated into the
    'total_loss' graph collection, which add_loss_op sums later.

    Variable shapes:
        W:  (window_size * embed_size, hidden_size)
        b1: (hidden_size,)
        U:  (hidden_size, label_size)
        b2: (label_size,)

    Args:
        window: tf.Tensor of shape (-1, window_size * embed_size).

    Returns:
        output: tf.Tensor of logits, shape (batch_size, label_size).
    """
    # xavier_weight_init is defined elsewhere in this file; both layers
    # share it as the variable-scope initializer.
    with tf.variable_scope('Layer1', initializer=xavier_weight_init()) as scope:
        W = tf.get_variable(
            'W', [self.config.window_size * self.config.embed_size,
                  self.config.hidden_size])
        b1 = tf.get_variable('b1', [self.config.hidden_size])
        h = tf.nn.tanh(tf.matmul(window, W) + b1)
        # Only weight matrices are regularized, not biases.
        if self.config.l2:
            tf.add_to_collection('total_loss', 0.5 * self.config.l2 * tf.nn.l2_loss(W))
    with tf.variable_scope('Layer2', initializer=xavier_weight_init()) as scope:
        U = tf.get_variable('U', [self.config.hidden_size, self.config.label_size])
        b2 = tf.get_variable('b2', [self.config.label_size])
        y = tf.matmul(h, U) + b2
        if self.config.l2:
            tf.add_to_collection('total_loss', 0.5 * self.config.l2 * tf.nn.l2_loss(U))
    # NOTE(review): dropout is applied to the logits, not the hidden
    # layer; the docstring above the original suggested "after both
    # variable scopes", which this matches.
    output = tf.nn.dropout(y, self.dropout_placeholder)
    return output
def add_loss_op(self, y):
    """Add the cross-entropy + regularization loss to the graph.

    The mean softmax cross-entropy over the batch is appended to the
    'total_loss' collection (which already holds the L2 terms added by
    add_model) and the collection is summed into a single scalar.

    Args:
        y: logits tensor of shape (batch_size, n_classes).

    Returns:
        loss: a 0-d (scalar) tensor.
    """
    # TF 0.x signature: softmax_cross_entropy_with_logits(logits, labels).
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(y, self.labels_placeholder))
    tf.add_to_collection('total_loss', cross_entropy)
    # Sum data loss and any L2 penalties registered in add_model.
    loss = tf.add_n(tf.get_collection('total_loss'))
    return loss
def add_training_op(self, loss):
    """Create the training op.

    Uses Adam with the configured learning rate; the returned op is
    what must be passed to sess.run() to perform one update step.

    Args:
        loss: scalar loss tensor from add_loss_op.

    Returns:
        train_op: op that applies gradients to all trainable variables.
    """
    # Non-trainable step counter, incremented once per minimize() call.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    return tf.train.AdamOptimizer(self.config.lr).minimize(
        loss, global_step=global_step)
def __init__(self, config):
    """Constructs the network using the helper functions defined above.

    Wires data loading, placeholders, embedding, model, loss, accuracy
    counters and the training op into one graph.
    """
    self.config = config
    self.load_data(debug=False)
    self.add_placeholders()
    window = self.add_embedding()
    y = self.add_model(window)
    # Loss is computed on raw logits; predictions use the softmax.
    self.loss = self.add_loss_op(y)
    self.predictions = tf.nn.softmax(y)
    one_hot_prediction = tf.argmax(self.predictions, 1)
    correct_prediction = tf.equal(
        tf.argmax(self.labels_placeholder, 1), one_hot_prediction)
    # Number of correct examples in the batch (int32 scalar).
    self.correct_predictions = tf.reduce_sum(tf.cast(correct_prediction, 'int32'))
    self.train_op = self.add_training_op(self.loss)
def run_epoch(self, session, input_data, input_labels,
              shuffle=True, verbose=True):
    """Run one training epoch and report mean loss and accuracy.

    Args:
        session: active tf.Session.
        input_data: full epoch of input windows.
        input_labels: matching one-hot labels.
        shuffle: whether data_iterator shuffles batches.
        verbose: falsy to silence progress; otherwise used as an
            integer step interval for progress output (True == 1).

    Returns:
        (mean loss over all batches, fraction of correct examples).
    """
    orig_X, orig_y = input_data, input_labels
    dp = self.config.dropout
    # We're interested in keeping track of the loss and accuracy during training
    total_loss = []
    total_correct_examples = 0
    total_processed_examples = 0
    # NOTE(review): Python 2 integer division; under Python 3 this
    # would be a float.
    total_steps = len(orig_X) / self.config.batch_size
    for step, (x, y) in enumerate(
        data_iterator(orig_X, orig_y, batch_size=self.config.batch_size,
                      label_size=self.config.label_size, shuffle=shuffle)):
        feed = self.create_feed_dict(input_batch=x, dropout=dp, label_batch=y)
        loss, total_correct, _ = session.run(
            [self.loss, self.correct_predictions, self.train_op],
            feed_dict=feed)
        total_processed_examples += len(x)
        total_correct_examples += total_correct
        total_loss.append(loss)
        ##
        # verbose doubles as the reporting interval (True % 1 == 0).
        if verbose and step % verbose == 0:
            sys.stdout.write('\r{} / {} : loss = {}'.format(
                step, total_steps, np.mean(total_loss)))
            sys.stdout.flush()
    if verbose:
        sys.stdout.write('\r')
        sys.stdout.flush()
    return np.mean(total_loss), total_correct_examples / float(total_processed_examples)
def predict(self, session, X, y=None):
    """Make predictions from the provided model.

    Args:
        session: active tf.Session.
        X: input windows.
        y: optional labels; when given, the loss is also computed.

    Returns:
        (mean loss -- NaN when no labels were given, list of predicted
        label indices).
    """
    # If y is given, the loss is also calculated
    # We deactivate dropout by setting it to 1
    dp = 1
    losses = []
    results = []
    # NOTE(review): np.any(y) is falsy both for y=None and for an
    # all-zero label array -- confirm labels are one-hot (never all
    # zero) before relying on this branch.
    if np.any(y):
        data = data_iterator(X, y, batch_size=self.config.batch_size,
                             label_size=self.config.label_size, shuffle=False)
    else:
        data = data_iterator(X, batch_size=self.config.batch_size,
                             label_size=self.config.label_size, shuffle=False)
    # The loop variable y rebinds the parameter y to the per-batch
    # labels (or None) yielded by data_iterator.
    for step, (x, y) in enumerate(data):
        feed = self.create_feed_dict(input_batch=x, dropout=dp)
        if np.any(y):
            feed[self.labels_placeholder] = y
            loss, preds = session.run(
                [self.loss, self.predictions], feed_dict=feed)
            losses.append(loss)
        else:
            preds = session.run(self.predictions, feed_dict=feed)
        predicted_indices = preds.argmax(axis=1)
        results.extend(predicted_indices)
    return np.mean(losses), results
def print_confusion(confusion, num_to_tag):
    """Helper method that prints confusion matrix.

    Rows of *confusion* are true tags, columns are guessed tags;
    per-tag precision and recall are derived from the column/row sums.

    NOTE(review): Python 2 print statements; also raises
    ZeroDivisionError for any tag that was never guessed or never
    occurs in the true labels -- confirm inputs cover all tags.
    """
    # Summing top to bottom gets the total number of tags guessed as T
    total_guessed_tags = confusion.sum(axis=0)
    # Summing left to right gets the total number of true tags
    total_true_tags = confusion.sum(axis=1)
    print
    print confusion
    for i, tag in sorted(num_to_tag.items()):
        prec = confusion[i, i] / float(total_guessed_tags[i])
        recall = confusion[i, i] / float(total_true_tags[i])
        print 'Tag: {} - P {:2.4f} / R {:2.4f}'.format(tag, prec, recall)
def calculate_confusion(config, predicted_indices, y_indices):
    """Helper method that calculates confusion matrix.

    confusion[i, j] counts examples whose true label is i and whose
    predicted label is j.

    Args:
        config: object with an integer ``label_size`` attribute.
        predicted_indices: sequence of predicted label indices.
        y_indices: sequence of true label indices, same length.

    Returns:
        np.ndarray of shape (label_size, label_size), dtype int32.
    """
    confusion = np.zeros((config.label_size, config.label_size), dtype=np.int32)
    # zip replaces the Python-2-only xrange/index loop and works
    # unchanged on Python 2 and 3.
    for correct_label, guessed_label in zip(y_indices, predicted_indices):
        confusion[correct_label, guessed_label] += 1
    return confusion
def save_predictions(predictions, filename):
    """Saves predictions to provided file, one prediction per line.

    Opens the file in text mode: the original used "wb", which breaks
    on Python 3 where str cannot be written to a binary file; "w"
    behaves identically for this ASCII output on Python 2 as well.
    """
    with open(filename, "w") as f:
        for prediction in predictions:
            f.write(str(prediction) + "\n")
def test_NER():
    """Test NER model implementation.

    Trains with early stopping on validation loss, checkpoints the best
    weights, then restores them and writes test-set predictions.

    You can use this function to test your implementation of the Named
    Entity Recognition network. When debugging, set max_epochs in the
    Config object to 1 so you can rapidly iterate.

    NOTE(review): Python 2 code (print statements, xrange) and
    TF 0.x APIs (initialize_all_variables).
    """
    config = Config()
    with tf.Graph().as_default():
        model = NERModel(config)
        init = tf.initialize_all_variables()
        saver = tf.train.Saver()
        with tf.Session() as session:
            best_val_loss = float('inf')
            best_val_epoch = 0
            session.run(init)
            for epoch in xrange(config.max_epochs):
                print 'Epoch {}'.format(epoch)
                start = time.time()
                ###
                train_loss, train_acc = model.run_epoch(session, model.X_train,
                                                        model.y_train)
                val_loss, predictions = model.predict(session, model.X_dev, model.y_dev)
                print 'Training loss: {}'.format(train_loss)
                print 'Training acc: {}'.format(train_acc)
                print 'Validation loss: {}'.format(val_loss)
                # Checkpoint whenever validation improves.
                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    best_val_epoch = epoch
                    if not os.path.exists("./weights"):
                        os.makedirs("./weights")
                    saver.save(session, './weights/ner.weights')
                # Early stopping: no improvement for early_stopping epochs.
                if epoch - best_val_epoch > config.early_stopping:
                    break
                ###
                confusion = calculate_confusion(config, predictions, model.y_dev)
                print_confusion(confusion, model.num_to_tag)
                print 'Total time: {}'.format(time.time() - start)
            # Evaluate with the best checkpoint, not the last epoch.
            saver.restore(session, './weights/ner.weights')
            print 'Test'
            print '=-=-='
            print 'Writing predictions to q2_test.predicted'
            _, predictions = model.predict(session, model.X_test, model.y_test)
            save_predictions(predictions, "q2_test.predicted")
# Script entry point: train and evaluate the NER model.
if __name__ == "__main__":
    test_NER()
``` |
[
{
"content": "Here is the source code:\n```python\nimport sys\nimport serial\nimport struct\nimport numpy\nimport time\nimport math\nimport random\n\nclass DMXDevice(object):\n DEBUG = False\n def __init__(self, start, length):\n self.start, self.length = start, length\n if start < 1:\n print \"DMX... | [
{
"content": "Here is the source code:\n<|memory_start|>```python\nimport sys\nimport serial\nimport struct\nimport numpy\nimport time\nimport math\nimport random\n\nclass DMXDevice(object):\n DEBUG = False\n def __init__(self, start, length):\n self.start, self.length = start, length\n if start < 1:\n ... | ```python
import sys
import serial
import struct
import numpy
import time
import math
import random
class DMXDevice(object):
    """A fixture occupying *length* consecutive DMX channels from *start*.

    DMX channel addressing is 1-based; a start below 1 is clamped to 1.
    """
    # Set True to log writes to out-of-range channels (see set()).
    DEBUG = False
    def __init__(self, start, length):
        self.start, self.length = start, length
        if start < 1:
            print "DMX Channels must start at least at 1!"
            self.start = 1
        # Initialize all of this device's channels to 0.
        self.blackout()
    def set(self, chan, value):
        """set the value of this channel to value
        (Remember, that DMX channels in start at 1)

        Out-of-range channels are silently ignored unless DEBUG is True.
        """
        if chan >= 1 and chan <= self.length:
            self.values[chan-1] = value
        else:
            if self.DEBUG is not None and self.DEBUG is True:
                print "DMX Device debug: Channel "+str(chan)+" not in range!"
    def blackout(self):
        # Zero every channel owned by this device.
        self.values = [0] * self.length
    def pack(self, buf):
        """modify the passed buffer in place

        Copies this device's channel values into buf at its 1-based
        start offset (buf index 0 is the universe/start byte).
        """
        for index in range(self.length):
            buf[self.start+index] = self.values[index]
    def __str__(self):
        return "<DMXDevice start=%d, length=%d>" % (self.start, self.length)
class DMXManager(object):
    """Owns the serial link and the full DMX frame for all devices."""
    def __init__(self, port, max_channels = 512):
        self.MAX_CHANNELS = max_channels
        # Extra leading byte(s) in the frame before channel data.
        self.UNIVERSE = 1
        # Message label for "send DMX packet" in the serial protocol.
        self.SEND_LABEL = 6
        self.s = serial.Serial(port,57600)
        # One unsigned byte per channel, plus the universe byte(s).
        self.buf = numpy.zeros((self.MAX_CHANNELS + self.UNIVERSE,), dtype='B')
        self.devices = []
    def append(self, device):
        # Register a device; its channels get packed into buf on send().
        self.devices.append(device)
    def blackout(self):
        # Zero every registered device and push the dark frame now.
        for device in self.devices:
            device.blackout()
        self.send()
    def send(self):
        # Pack every device into the frame, then write one framed
        # message: 0x7e, label, little-endian payload length, payload,
        # 0xe7 terminator.
        for device in self.devices:
            device.pack(self.buf)
        l = len(self.buf)
        # NOTE(review): ndarray.tostring() is deprecated in modern
        # numpy in favor of tobytes().
        msg = struct.pack("<BBH "+str(l)+"s B",
            0x7e, self.SEND_LABEL, l,
            self.buf.tostring(),
            0xe7
        )
        self.s.write(msg)
# Demo: pulse two fixtures' first channels with a sine wave.
# Usage: python <script> <serial-port>
if __name__=='__main__':
    port = sys.argv[1]
    manager = DMXManager(port)
    light_0 = DMXDevice(start=25, length=6)
    light_1 = DMXDevice(start=1, length=6)
    manager.append(light_0)
    manager.append(light_1)
    while True:
        # Sine of wall-clock time scaled into the 0..255 byte range.
        intensity = 128*math.sin(time.time())+128
        # NOTE(review): set() uses 1-based channels, so the channel-0
        # write below is silently dropped by DMXDevice.set -- confirm
        # whether channel 1 was intended.
        light_0.set(0, int(intensity))
        light_1.set(1, int(intensity))
        #for light in light_0, light_1:
        #    for color in range(3):
        #        light.set(color, random.randint(0, 255))
        manager.send()
``` |
[
{
"content": "Here is a code file:\n```python\n''' The `Extractor` hierarchy contains Transformer classes that take a `Stim`\nof any type as input and return extracted feature information (rather than\nanother `Stim` instance).\n'''\n\nfrom .base import Extractor, ExtractorResult, merge_results\nfrom .api impor... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n''' The `Extractor` hierarchy contains Transformer classes that take a `Stim`\nof any type as input and return extracted feature information (rather than\nanother `Stim` instance).\n'''\n\nfrom .base import Extractor, ExtractorResult, merge_results\... | ```python
''' The `Extractor` hierarchy contains Transformer classes that take a `Stim`
of any type as input and return extracted feature information (rather than
another `Stim` instance).
'''
from .base import Extractor, ExtractorResult, merge_results
from .api import (ClarifaiAPIImageExtractor,
ClarifaiAPIVideoExtractor,
GoogleVisionAPIFaceExtractor,
GoogleVisionAPILabelExtractor,
GoogleVisionAPIPropertyExtractor,
GoogleVisionAPISafeSearchExtractor,
GoogleVisionAPIWebEntitiesExtractor,
GoogleVideoIntelligenceAPIExtractor,
GoogleVideoAPILabelDetectionExtractor,
GoogleVideoAPIShotDetectionExtractor,
GoogleVideoAPIExplicitDetectionExtractor,
GoogleLanguageAPIExtractor,
GoogleLanguageAPIEntityExtractor,
GoogleLanguageAPISentimentExtractor,
GoogleLanguageAPISyntaxExtractor,
GoogleLanguageAPITextCategoryExtractor,
GoogleLanguageAPIEntitySentimentExtractor,
MicrosoftAPIFaceExtractor,
MicrosoftAPIFaceEmotionExtractor,
MicrosoftVisionAPIExtractor,
MicrosoftVisionAPITagExtractor,
MicrosoftVisionAPICategoryExtractor,
MicrosoftVisionAPIImageTypeExtractor,
MicrosoftVisionAPIColorExtractor,
MicrosoftVisionAPIAdultExtractor)
from .audio import (LibrosaFeatureExtractor,
STFTAudioExtractor,
MeanAmplitudeExtractor,
SpectralCentroidExtractor,
SpectralBandwidthExtractor,
SpectralContrastExtractor,
SpectralRolloffExtractor,
PolyFeaturesExtractor,
ZeroCrossingRateExtractor,
ChromaSTFTExtractor,
ChromaCQTExtractor,
ChromaCENSExtractor,
MelspectrogramExtractor,
MFCCExtractor,
TonnetzExtractor,
TempogramExtractor,
RMSExtractor,
SpectralFlatnessExtractor,
OnsetDetectExtractor,
OnsetStrengthMultiExtractor,
TempoExtractor,
BeatTrackExtractor,
HarmonicExtractor,
PercussiveExtractor,
AudiosetLabelExtractor)
from .image import (BrightnessExtractor, SaliencyExtractor, SharpnessExtractor,
VibranceExtractor, FaceRecognitionFaceEncodingsExtractor,
FaceRecognitionFaceLandmarksExtractor,
FaceRecognitionFaceLocationsExtractor)
from .misc import MetricExtractor
from .models import TensorFlowKerasApplicationExtractor
from .text import (ComplexTextExtractor, DictionaryExtractor,
PredefinedDictionaryExtractor, LengthExtractor,
NumUniqueWordsExtractor, PartOfSpeechExtractor,
WordEmbeddingExtractor, TextVectorizerExtractor,
VADERSentimentExtractor, SpaCyExtractor,
WordCounterExtractor, BertExtractor,
BertSequenceEncodingExtractor, BertLMExtractor,
BertSentimentExtractor)
from .video import (FarnebackOpticalFlowExtractor)
__all__ = [
'Extractor',
'ExtractorResult',
'ClarifaiAPIImageExtractor',
'ClarifaiAPIVideoExtractor',
'STFTAudioExtractor',
'MeanAmplitudeExtractor',
'LibrosaFeatureExtractor',
'SpectralCentroidExtractor',
'SpectralBandwidthExtractor',
'SpectralContrastExtractor',
'SpectralRolloffExtractor',
'PolyFeaturesExtractor',
'ZeroCrossingRateExtractor',
'ChromaSTFTExtractor',
'ChromaCQTExtractor',
'ChromaCENSExtractor',
'MelspectrogramExtractor',
'MFCCExtractor',
'TonnetzExtractor',
'TempogramExtractor',
'GoogleVisionAPIFaceExtractor',
'GoogleVisionAPILabelExtractor',
'GoogleVisionAPIPropertyExtractor',
'GoogleVisionAPISafeSearchExtractor',
'GoogleVisionAPIWebEntitiesExtractor',
'GoogleVideoIntelligenceAPIExtractor',
'GoogleVideoAPILabelDetectionExtractor',
'GoogleVideoAPIShotDetectionExtractor',
'GoogleVideoAPIExplicitDetectionExtractor',
'GoogleLanguageAPIExtractor',
'GoogleLanguageAPIEntityExtractor',
'GoogleLanguageAPISentimentExtractor',
'GoogleLanguageAPISyntaxExtractor',
'GoogleLanguageAPITextCategoryExtractor',
'GoogleLanguageAPIEntitySentimentExtractor',
'BrightnessExtractor',
'SaliencyExtractor',
'SharpnessExtractor',
'VibranceExtractor',
'FaceRecognitionFaceEncodingsExtractor',
'FaceRecognitionFaceLandmarksExtractor',
'FaceRecognitionFaceLocationsExtractor',
'MicrosoftAPIFaceExtractor',
'MicrosoftAPIFaceEmotionExtractor',
'MicrosoftVisionAPIExtractor',
'MicrosoftVisionAPITagExtractor',
'MicrosoftVisionAPICategoryExtractor',
'MicrosoftVisionAPIImageTypeExtractor',
'MicrosoftVisionAPIColorExtractor',
'MicrosoftVisionAPIAdultExtractor',
'TensorFlowKerasApplicationExtractor',
'ComplexTextExtractor',
'DictionaryExtractor',
'PredefinedDictionaryExtractor',
'LengthExtractor',
'NumUniqueWordsExtractor',
'PartOfSpeechExtractor',
'FarnebackOpticalFlowExtractor',
'WordEmbeddingExtractor',
'TextVectorizerExtractor',
'VADERSentimentExtractor',
'merge_results',
'SpaCyExtractor',
'RMSExtractor',
'SpectralFlatnessExtractor'
'OnsetDetectExtractor',
'OnsetStrengthMultiExtractor',
'TempoExtractor',
'BeatTrackExtractor',
'HarmonicExtractor',
'PercussiveExtractor',
'BertExtractor',
'BertSequenceEncodingExtractor',
'BertLMExtractor',
'BertSentimentExtractor',
'AudiosetLabelExtractor',
'WordCounterExtractor',
'MetricExtractor'
]
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python\n# ----------------------------------------------------------------------- #\n# Copyright 2008-2010, Gregor von Laszewski #\n# Copyright 2010-2013, Indiana University #\n# #\n# Lice... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python\n# ----------------------------------------------------------------------- #\n# Copyright 2008-2010, Gregor von Laszewski #\n# Copyright 2010-2013, Indiana University ... | ```python
#!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2008-2010, Gregor von Laszewski #
# Copyright 2010-2013, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
from __future__ import print_function
import setuptools
from setuptools import setup, find_packages
import os
import sys
from cloudmesh_vagrant import __version__
import platform
import re
import io
# Warn on interpreters older than 2.7.10 (setup still proceeds).
if sys.version_info < (2, 7, 10):
    print(70 * "#")
    # NOTE(review): the two adjacent string literals concatenate with
    # no separator, so this prints "...or aboveYour version..." --
    # a space or newline is likely missing.
    print("WARNING: upgrade to python 2.7.10 or above"
          "Your version is {} not supported.".format(sys.version_info))
    print(70 * "#")

# Install a platform-appropriate readline implementation, since the
# command shell depends on it.
command = None
this_platform = platform.system().lower()
if this_platform in ['darwin']:
    command = "easy_install readline"
elif this_platform in ['windows']:
    command = "pip install pyreadline"
if command is not None:
    print("Install readline")
    os.system(command)
# Runtime dependencies, passed to setup(install_requires=...) below.
requirements = [
    'cloudmesh_client'
]
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Uses a context manager so the file handle is closed promptly; the
    original leaked the handle returned by open() until garbage
    collection.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# NOTE(review): `home` is computed but not used anywhere in this script.
home = os.path.expanduser("~")

# Package metadata and entry points for cloudmesh_vagrant.
setup(
    version=__version__,
    name="cloudmesh_vagrant",
    description="cloudmesh_vagrant - A real simple interface to virtualbox via vagrant",
    long_description=read('README.rst'),
    license="Apache License, Version 2.0",
    author="Gregor von Laszewski",
    author_email="laszewski@gmail.com",
    url="https://github.com/cloudmesh/cloudmesh_vagrant",
    classifiers=[
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 2.7",
        "Topic :: Scientific/Engineering",
        "Topic :: System :: Clustering",
        "Topic :: System :: Distributed Computing",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Environment :: Console"
    ],
    keywords="cloud cmd commandshell plugins cloudmesh vagrant virtualbox",
    packages=find_packages(),
    install_requires=requirements,
    include_package_data=True,
    # Console commands installed with the package.
    entry_points={
        'console_scripts': [
            'cm-vbox = cloudmesh_vagrant.cm_vbox:main',
            'cm-authors = cloudmesh_client.common.GitInfo:print_authors',
        ],
    },
)
``` |
[
{
"content": "Here is a code file:\n```python\n# -*- coding: utf-8 -*-\n# @Author: chandan\n# @Date: 2017-07-08 00:32:09\n# @Last Modified by: chandan\n# @Last Modified time: 2017-07-08 11:13:46\n\nfrom data_utils import read_file\nfrom config import DATA_DIR, SCORE_COLUMNS \nimport os\nfrom model import tr... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# @Author: chandan\n# @Date: 2017-07-08 00:32:09\n# @Last Modified by: chandan\n# @Last Modified time: 2017-07-08 11:13:46\n\nfrom data_utils import read_file\nfrom config import DATA_DIR, SCORE_COLUMNS \nimport os\nfrom... | ```python
# -*- coding: utf-8 -*-
# @Author: chandan
# @Date: 2017-07-08 00:32:09
# @Last Modified by: chandan
# @Last Modified time: 2017-07-08 11:13:46
from data_utils import read_file
from config import DATA_DIR, SCORE_COLUMNS
import os
from model import train_model, test_model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import os.path as osp
# Per-drive sensor/annotation file names inside each drive directory.
ACC_FILE = 'RAW_ACCELEROMETERS.txt'
GPS_FILE = 'RAW_GPS.txt'
VEHDET_FILE = 'PROC_VEHICLE_DETECTION.txt'
SCORE_FILE = 'SEMANTIC_ONLINE.txt'
def main():
    """Load driver D1's drives, align sensor streams, and evaluate.

    NOTE(review): Python 2 code (print statements, xrange, integer
    division) using deprecated pandas .ix indexing.
    """
    # read acc, gps, veh det for multiple drivers, scenes
    X_dfs, Y_dfs = [], []
    driver_dir = 'D1'
    for drive_dir in os.listdir(osp.join(DATA_DIR, driver_dir)):
        drive_path = osp.join(DATA_DIR, driver_dir, drive_dir)
        print drive_path
        acc = read_file(osp.join(drive_path, ACC_FILE))
        gps = read_file(osp.join(drive_path, GPS_FILE))
        veh = read_file(osp.join(drive_path, VEHDET_FILE))
        score = read_file(osp.join(drive_path, SCORE_FILE))
        datasets = [acc, gps, veh, score]
        n_rows = min(map(len, datasets))
        # sample high frequency data to lowest frequency
        for i in range(len(datasets)):
            # drop time column
            datasets[i].drop(0, 1, inplace=True)
            if len(datasets[i]) > n_rows:
                # Python 2 integer division gives an integer stride.
                step = len(datasets[i]) / n_rows
                ndx = xrange(0, n_rows * step, step)
                datasets[i] = datasets[i].ix[ndx]
                datasets[i] = datasets[i].reset_index(drop=True)
        score_df = datasets[-1]
        datasets = datasets[:-1]
        # NOTE(review): labels are taken from the original `score`
        # frame, not the resampled `score_df` -- confirm which was
        # intended, since their row counts can differ.
        Y_df = score.ix[:, SCORE_COLUMNS]
        # create dataset
        X_df = pd.concat(datasets, axis=1, ignore_index=True)
        X_df.fillna(0, inplace=True)
        print "X:", X_df.shape
        print "Y:", score_df.shape
        X_dfs.append(X_df)
        Y_dfs.append(Y_df)
    # preprocess
    X_df = pd.concat(X_dfs, ignore_index=True)
    X = X_df.values.astype('float32')
    Y = pd.concat(Y_dfs, ignore_index=True).values
    print "X shape:", X.shape
    print "Y shape:", Y.shape
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(X)
    X_tr, X_ts, Y_tr, Y_ts = train_test_split(X, Y, test_size=0.2)
    # train
    print "X Train shape:", X_tr.shape
    print "Y Train shape:", Y_tr.shape
    print "X test shape:", X_ts.shape
    print "Y test shape:", Y_ts.shape
    seq_len = 16
    # NOTE(review): sequences are built from the full X rather than
    # X_tr -- looks like a bug; confirm X_tr was intended.
    X_tr_seq = X_to_seq(X, seq_len, 1)
    Y_tr = Y_tr[seq_len:]
    X_ts_seq = X_to_seq(X_ts, seq_len, 1)
    Y_ts = Y_ts[seq_len:]
    #train_model(X_tr, Y_tr)
    loss = test_model(X_ts_seq, Y_ts)
    print loss
def X_to_seq(X, seq_len=16, stride=1):
    """Slice X into overlapping windows of length *seq_len*.

    Windows start at 0, stride, 2*stride, ... strictly below
    len(X) - seq_len (the final full window is excluded, matching the
    label trimming done by the caller).

    Returns:
        np.ndarray stacking the windows along axis 0.
    """
    starts = range(0, len(X) - seq_len, stride)
    return np.array([X[s:s + seq_len] for s in starts])
# Script entry point.
if __name__ == '__main__':
    main()
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\n\"\"\"Parse a Sycorax script into an annotated multi-author timeline.\"\"\"\n\nfrom datetime import datetime, timedelta\nimport json\nimport random\nimport re\nimport hashlib\nimport os\nimport pytz\n\n# 10M: ~10... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\n\"\"\"Parse a Sycorax script into an annotated multi-author timeline.\"\"\"\n\nfrom datetime import datetime, timedelta\nimport json\nimport random\nimport re\nimport hashlib\nimport os\nimport py... | ```python
"""Parse a Sycorax script into an annotated multi-author timeline."""
from datetime import datetime, timedelta
import json
import random
import re
import hashlib
import os
import pytz
# 10M: ~10 minutes later
# 4H: ~4 hours later
# 1D: the next day
# R: Reply to previous
# 10A: 10AM on the current day
# 9P: 9PM on the current day
# Gap inserted between tweets when the script gives no timing code.
DEFAULT_DELAY = timedelta(hours=4)
# Script code marking a tweet as a reply to the previous tweet.
REPLY_TO_CODE = "R"
# e.g. "10M" / "4H" / "1D": relative delay in minutes/hours/days.
DELAY_CODE = re.compile("([0-9]+)([MHD])")
DELAY_UNITS = dict(M="minutes", H="hours", D="days")
# e.g. "10A" / "9P": absolute time of day on a 12-hour clock.
TIME_OF_DAY_CODE = re.compile("([0-9]{1,2})([AP])")
# Timestamp format used for (de)serializing progress.json entries.
JSON_TIME_FORMAT = "%d %b %Y %H:%M:%S %Z"
def load_config(directory):
    """Load and post-process <directory>/config.json.

    'start_date' is parsed into a datetime and 'chapter_duration_days'
    into a timedelta before the dict is returned.

    Raises:
        Exception: if config.json does not exist in *directory*.
    """
    filename = os.path.join(directory, "config.json")
    if not os.path.exists(filename):
        raise Exception("Could not find config.json file in directory %s" % (
            directory
        ))
    # Context manager closes the handle promptly; the original leaked
    # the file object returned by open().
    with open(filename) as f:
        data = json.loads(f.read().strip())
    data['start_date'] = datetime.strptime(data['start_date'], "%Y/%m/%d")
    data['chapter_duration_days'] = timedelta(
        days=data['chapter_duration_days'])
    return data
def load_stream(directory):
    """Build a Stream from <directory>/input.txt plus config/progress.

    Progress is optional: when progress.json is missing (nothing has
    been posted yet) the stream starts with progress=None.

    Fixes two defects in the original: an unguarded load_progress()
    call before the try block (which raised before the handler could
    run), and the Python-2-only "except Exception, e" syntax.

    Raises:
        Exception: if input.txt does not exist in *directory*.
    """
    config = load_config(directory)
    filename = os.path.join(directory, "input.txt")
    if not os.path.exists(filename):
        raise Exception("Could not find input.txt file in directory %s" % (
            directory
        ))
    try:
        progress = load_progress(directory)
    except Exception:
        # Nothing has been posted yet.
        progress = None
    return Stream(open(filename), config=config, progress=progress)
def load_progress(directory):
    """Load posting progress from <directory>/progress.json.

    load_config() is kept for its side effect: it raises when
    config.json is missing, validating the directory before progress
    is read.

    Raises:
        Exception: if config.json or progress.json is missing.
    """
    config = load_config(directory)  # validates config.json exists
    filename = os.path.join(directory, "progress.json")
    if not os.path.exists(filename):
        raise Exception("Could not find progress.json file in directory %s" % (
            directory
        ))
    # Progress consumes the stream in its constructor, so the handle
    # can be closed immediately afterwards (the original leaked it).
    with open(filename) as f:
        return Progress(f)
class TimezoneAware(object):
    """Mixin for classes that expose a ``timezone`` attribute."""
    def start_of_day(self, datetime):
        # NOTE(review): the parameter shadows the imported datetime
        # module within this method; also note microseconds are NOT
        # zeroed -- confirm whether that matters to callers.
        return datetime.replace(
            hour=0, minute=0, second=0, tzinfo=self.timezone)
class Progress(object):
    """The progress made in posting a stream.

    Parses one JSON object per line of *input_stream* and indexes the
    resulting posts by their 'internal_id'.
    """
    def __init__(self, input_stream):
        self.timeline = []
        self.posts = {}
        for line in input_stream:
            post = json.loads(line.strip())
            self.timeline.append(post)
            self.posts[post['internal_id']] = post
class TweetParser(TimezoneAware):
    """Parses a line of script into a tweet.

    A line may begin with a command word combining an author code, the
    reply code "R", a relative delay ("10M"/"4H"/"1D"), and/or a time
    of day ("10A"/"9P"); any leftover text means the first word was
    plain content after all.

    NOTE(review): Python 2 code -- parse() uses a print statement, and
    .encode("utf8") would turn the account/color values into bytes on
    Python 3, breaking the .replace() calls below.
    """
    def __init__(self, config, progress=None, fuzz_quotient=0.2,
                 fuzz_minimum_seconds=120):
        self.authors = config['authors']
        self.timezone = pytz.timezone(config['timezone'])
        # Normalize per-author fields and derive a CSS class from the
        # account name (hyphens/underscores stripped).
        for author in self.authors:
            author['account'] = author['account'].encode("utf8")
            author['css_class'] = author['account'].replace(
                '-', '').replace('_', '')
            author['color'] = author.get('color', 'white').encode("utf8")
            author['code'] = author.get('code', '')
        fuzz = float(config.get('fuzz', fuzz_quotient))
        self.fuzz_quotient = fuzz
        self.fuzz_minimum_seconds = int(config.get('fuzz_minimum_seconds', fuzz_minimum_seconds))
        self.start_date=config['start_date']
        self.config = config
        self.progress = progress
        # The author whose code is the empty string is the default.
        self.default_author = None
        self.authors_by_code = {}
        for author in self.authors:
            code = author.get('code', '')
            if code == '':
                self.default_author = author
            self.authors_by_code[code] = author
    def parse(self, line, stream_so_far):
        """Parse one script line into a Tweet, using stream context.

        Args:
            line: raw script line.
            stream_so_far: object exposing latest_tweet, current_day
                and current_chapter, used to pick the base timecode.

        Returns:
            A Tweet; its timestamp is resolved in a later pass.
        """
        # NOTE(review): is_command is assigned but never used.
        is_command = False
        author = self.default_author
        reply_to = None
        delay = None
        hour_of_day = None
        if stream_so_far.latest_tweet is None:
            # This is the first tweet ever. The base timecode is the
            # start date.
            base_timecode = self.start_of_day(self.start_date)
        elif stream_so_far.current_chapter.total_tweets == 0:
            # This is the first tweet of the chapter. The base timecode
            # is the chapter start date.
            base_timecode = stream_so_far.current_chapter.start_date
        else:
            # There is no base timecode. The timestamp will be calculated
            # based on the previous tweet's timestamp.
            base_timecode = None
        line = line.strip()
        command_and_tweet = line.split(" ", 1)
        if len(command_and_tweet) > 1:
            command, tweet = command_and_tweet
        else:
            # Single-word tweet.
            return Tweet(line, author, base_timecode, self.timezone,
                         progress=self.progress)
        # The "command" may actually be the first word of the tweet.
        # Extract commands from it until there's nothing left.
        # If there is something left, it's not a command.
        for author_code, possible_author in self.authors_by_code.items():
            if author_code != "" and author_code in command:
                author = possible_author
                command = command.replace(author_code, "", 1)
                break
        is_reply = False
        if REPLY_TO_CODE in command:
            reply_to = stream_so_far.latest_tweet
            command = command.replace(REPLY_TO_CODE, "", 1)
            is_reply = True
        # Relative delay, e.g. "10M" -> timedelta(minutes=10).
        match = DELAY_CODE.match(command)
        if match is not None:
            number, unit = match.groups()
            subcommand = "".join(match.groups())
            command = command.replace(subcommand, "")
            kwargs = { DELAY_UNITS[unit]: int(number) }
            delay = timedelta(**kwargs)
        # Absolute time of day, e.g. "9P" -> hour 21 (24h clock).
        match = TIME_OF_DAY_CODE.match(command)
        if match is not None:
            hour, am = match.groups()
            subcommand = "".join(match.groups())
            command = command.replace(subcommand, "")
            hour = int(hour)
            if am == "A" and hour == 12:
                hour = 0
            if am == "P" and hour != 12:
                hour += 12
            if hour > 23:
                raise ValueError("Bad time of day %s in %s" % (
                    subcommand, line))
            hour_of_day = hour
        if command == "":
            # The first word has been entirely processed as
            # commands. The rest of the line is the actual content.
            line = tweet
        else:
            # The first word was not a command: undo every partial
            # command interpretation made above.
            author = self.default_author
            reply_to = None
            delay = None
            is_reply = False
        if is_reply and stream_so_far.latest_tweet is None:
            raise ValueError(
                "The first tweet in the script cannot be a reply.")
        if delay is None and hour_of_day is None:
            if len(stream_so_far.current_day.tweets) == 0 and stream_so_far.current_chapter.total_tweets > 0:
                # This is the first tweet of an in-story day, and no
                # special date instructions were given, so publish it at
                # the start of the next real-world day.
                base_timecode = self.start_of_day(base_timecode) + timedelta(
                    days=1)
            elif stream_so_far.current_chapter.total_tweets == 0:
                delay = timedelta(minutes=0)
        # Warn (Python 2 print) when the tweet exceeds 140 characters.
        if len(line) > 140:
            print '[WARNING] %d characters in "%s"' % (len(line), line)
        return Tweet(line, author, base_timecode, self.timezone, delay,
                     hour_of_day, reply_to, self.progress)
class Chapter:
    """A named chapter: a list of in-story Days starting at start_date."""
    def __init__(self, name, start_date):
        self.name = name
        self.days = []
        self.start_date = start_date
    @property
    def in_story_timeline_html(self):
        # Chapter heading followed by each in-story day's HTML.
        return "\n".join(
            ["<h2>%s</h2>\n" % self.name] +
            #["<p>%s days, %s tweets</p>\n" % (
            #    len(self.days), self.total_tweets)] +
            [day.in_story_timeline_html for day in self.days])
    @property
    def real_world_timeline_html(self):
        # NOTE(review): Python 2 print below; also self.real_days is a
        # property and is recomputed on each of its uses here.
        chapter_start_date = self.start_date.strftime(Tweet.REAL_WORLD_TIMELINE_DATE_FORMAT)
        if chapter_start_date != self.real_days[0].date:
            print '[WARNING] Chapter "%s" starts on %s, but its first tweet happens on %s' % (
                self.name, chapter_start_date, self.real_days[0].date)
        return "\n".join(
            ["<h2>%s</h2>\n" % self.name] +
            [day.real_world_timeline_html for day in self.real_days])
    @property
    def total_tweets(self):
        # Total tweets across all in-story days.
        return sum(len(x.tweets) for x in self.days)
    @property
    def all_tweets(self):
        # Generator over every tweet in chapter order.
        for d in self.days:
            for t in d.tweets:
                yield t
    @property
    def real_days(self):
        """A list of Day objects corresponding to real-world days for this chapter."""
        # Re-bucket tweets by their real-world timestamp date; a new
        # Day starts whenever consecutive tweets change date.
        days = []
        current_date = None
        current_day = None
        for story_day in self.days:
            for tweet in story_day.tweets:
                if tweet.timestamp_date_str != current_date:
                    current_date = tweet.timestamp_date_str
                    current_day = Day(current_date)
                    days.append(current_day)
                current_day.tweets.append(tweet)
        return days
class Day:
  """A single day's worth of tweets -- either an in-story day or a
  real-world calendar day -- identified by its date label."""
  def __init__(self, date):
    # Display label for the day (an in-story label or a real date).
    self.date = date
    # Tweet objects in posting order.
    self.tweets = []
  def _listing(self, items):
    # Shared scaffolding for both timeline views: a date heading
    # followed by an unordered list of per-tweet entries.  Days with
    # no tweets render as an empty string.
    if not self.tweets:
      return ""
    parts = ["<h3>%s</h3>" % self.date, "<ul>"]
    parts.extend(items)
    parts.append("</ul>")
    return "\n".join(parts)
  @property
  def in_story_timeline_html(self):
    """HTML list of this day's tweets for the in-story timeline."""
    return self._listing([t.in_story_timeline_html for t in self.tweets])
  @property
  def real_world_timeline_html(self):
    """HTML list of this day's tweets for the real-world timeline."""
    return self._listing([t.real_world_timeline_html for t in self.tweets])
class Tweet(TimezoneAware):
  """A single scheduled tweet: its text, author, and timing rules.

  Timing is expressed either as a delay relative to the previous tweet,
  as a fixed hour of day, or both (the combination is only legal when
  the delay is at least one day).  The concrete posting timestamp is
  normally filled in during a second pass via calculate_timestamp();
  tweets recorded in `progress` as already posted reuse the timestamp
  that was planned for them.
  """
  # strftime formats used when rendering the real-world timeline HTML.
  REAL_WORLD_TIMELINE_TIME_FORMAT = "%H:%M"
  REAL_WORLD_TIMELINE_DATE_FORMAT = "%a %d %b"
  def __init__(self, text, author, base_timecode, timezone, delay=None,
               hour_of_day=None, in_reply_to=None, progress=None):
    """Store the tweet's content and scheduling constraints.

    Args:
      text: the tweet body; its MD5 digest is the tweet's stable ID.
      author: mapping with (at least) 'account' and 'css_class' keys.
      base_timecode: datetime anchoring this tweet, or falsy to anchor
        on the previous tweet's timestamp instead.
      timezone: stored for the TimezoneAware base class.
      delay: optional timedelta to wait after the previous tweet.
      hour_of_day: optional integer hour at which to post.
      in_reply_to: the Tweet this one replies to, or None.
      progress: optional record of already-posted tweets keyed by
        digest; a match fixes this tweet's timestamp immediately.

    Raises:
      ValueError: if both delay and hour_of_day are given but the
        delay is shorter than one day.
    """
    self.text = text
    self.author = author
    self.timezone = timezone
    self.in_reply_to = in_reply_to
    # Content-derived stable ID, used to match against the progress file.
    # NOTE(review): hashlib.md5 requires bytes on Python 3; this code
    # assumes Python 2 str input -- confirm before porting.
    self.digest = hashlib.md5(self.text).hexdigest()
    self.delay = delay
    self.hour_of_day = hour_of_day
    # With no explicit scheduling hint, fall back to the default gap.
    if self.delay is None and self.hour_of_day is None:
      self.delay = DEFAULT_DELAY
    self.base_timecode = base_timecode
    # In general, timestamps are calculated in a second pass.
    self.timestamp = None
    # However, if this tweet has already been posted, we know its
    # timestamp already.
    if progress is not None:
      as_posted = progress.posts.get(self.digest)
      if as_posted is not None:
        self.timestamp = datetime.strptime(
          as_posted['planned_timestamp'], JSON_TIME_FORMAT).replace(
          tzinfo=pytz.timezone("UTC"))
    if (self.hour_of_day is not None and self.delay is not None
        and self.delay < timedelta(days=1)):
      raise ValueError(
        '"%s" defines both a delay and an hour of day, but the delay '
        'is less than one day.' % text)
  def calculate_timestamp(self, fuzz_quotient, fuzz_minimum_seconds,
                          previous_tweet):
    """Return a randomized ("fuzzed") posting time for this tweet.

    Starts from base_timecode (or the previous tweet's timestamp),
    applies the delay and/or hour-of-day constraints in a fixed order,
    then perturbs the result so that consecutive tweets don't look
    machine-scheduled.  Nondeterministic: Stream.add_fuzz may call this
    repeatedly until the result lands after the previous tweet.

    Args:
      fuzz_quotient: fraction of the delay used as the variation range
        when fuzzing a delay-based timestamp.
      fuzz_minimum_seconds: lower bound on that variation range.
      previous_tweet: the tweet immediately before this one; its
        timestamp is the anchor when base_timecode is not set.

    Returns:
      A datetime; an already-stored timestamp (e.g. for an
      already-posted tweet) is returned unchanged.

    Raises:
      ValueError: if the tweet has neither a delay nor an hour of day.
    """
    timestamp = self.base_timecode or previous_tweet.timestamp
    if self.timestamp is not None:
      # This tweet already has a timestamp, possibly because
      # it's already been posted. Leave it alone.
      return self.timestamp
    # If the delay after the last tweet is one day or more, apply
    # it before setting the time of day.
    one_day = timedelta(days=1)
    if self.delay is not None and self.delay >= one_day:
      timestamp += self.delay
      timestamp = self.start_of_day(timestamp)
    # If a time of day is given, set it now.
    if self.hour_of_day is not None:
      if timestamp.hour > self.hour_of_day:
        # Bump to the next real-world day.
        timestamp = timestamp + timedelta(days=1)
      timestamp = timestamp.replace(
        hour=self.hour_of_day, minute=0, second=0)
    # If the delay is less than one day, apply it now.
    if self.delay is not None and self.delay < one_day:
      timestamp += self.delay
    # Now we have a precise timestamp. But posting one tweet
    # exactly 30 minutes after another one will look fake. We need
    # to fudge the timestamp a little.
    if self.hour_of_day is not None:
      # We know which hour the tweet should go out. Pick
      # sometime in the first 45 minutes of that hour, to
      # minimize the chances of collisions with future tweets.
      actual_delta = timedelta(seconds=random.randint(0, 45*60))
      timestamp = timestamp + actual_delta
    elif self.delay is not None:
      # We know approximately how long after the previous tweet
      # this tweet should go out. Pick sometime
      # NOTE(review): .seconds ignores the days component of the
      # delay, and randint is handed a possibly-float bound
      # (delay_seconds * fuzz_quotient) -- verify both are intended.
      delay_seconds = self.delay.seconds
      maximum_variation = max(
        delay_seconds * fuzz_quotient, fuzz_minimum_seconds)
      actual_variation = random.randint(-maximum_variation, maximum_variation)
      actual_delta = timedelta(seconds=actual_variation)
      # Apply the variation in a random direction.
      if random.randint(0,1) == 1:
        timestamp = timestamp + actual_delta
      else:
        timestamp = timestamp - actual_delta
    else:
      raise ValueError(
        'Tweet "%s" has neither hour-of-day nor delay since previous '
        'tweet. Cannot calculate timestamp.' % self.text)
    return timestamp
  @property
  def json(self):
    """JSON record for the posting tool (IDs, text, UTC timestamp)."""
    if self.in_reply_to is None:
      in_reply_to = None
    else:
      in_reply_to = self.in_reply_to.digest
    d = dict(internal_id=self.digest, text=self.text,
             author=self.author['account'],
             in_reply_to=in_reply_to, timestamp=self.timestamp_for_json)
    return json.dumps(d)
  def li(self, text):
    """Wrap `text` in an <li>, nested in its own <ul> when a reply."""
    a = []
    if self.in_reply_to is not None:
      a.append("<ul>")
    a.append('<li class="%s">%s</li>' % (self.author['css_class'], text))
    if self.in_reply_to is not None:
      a.append("</ul>")
    return "\n".join(a)
  @property
  def in_story_timeline_html(self):
    """List item for the in-story timeline (text only, no time)."""
    return self.li(self.text)
  @property
  def timestamp_str(self):
    """Posting time of day, e.g. "14:30"."""
    return self.timestamp.strftime(self.REAL_WORLD_TIMELINE_TIME_FORMAT)
  @property
  def timestamp_for_json(self):
    """Posting timestamp normalized to UTC in the JSON wire format."""
    return self.timestamp.astimezone(pytz.timezone("UTC")).strftime(
      JSON_TIME_FORMAT)
  @property
  def timestamp_date_str(self):
    """Posting date label in REAL_WORLD_TIMELINE_DATE_FORMAT."""
    return self.timestamp.strftime(self.REAL_WORLD_TIMELINE_DATE_FORMAT)
  @property
  def real_world_timeline_html(self):
    """List item for the real-world timeline (time prefix + text)."""
    text = self.timestamp_str + " " + self.text
    return self.li(text)
class Stream:
  """The full script: chapters of days of tweets, with timestamps.

  Parses the input lines ("== " starts a chapter, "-- " starts an
  in-story day, blank lines are ignored, anything else is a tweet),
  then assigns every unposted tweet a fuzzed timestamp and sanity
  checks chapter boundaries.
  """
  def __init__(self, lines, tweet_parser=None, config=None, progress=None):
    """Parse `lines` into chapters/days/tweets and schedule them.

    Args:
      lines: iterable of script lines.
      tweet_parser: parser for tweet lines; built from `config` when
        not supplied.
      config: configuration used to build a TweetParser when
        tweet_parser is None.
      progress: record of already-posted tweets, forwarded to the
        TweetParser built here.

    Raises:
      ValueError: if neither tweet_parser nor config is given.
    """
    if tweet_parser is None:
      if config is None:
        raise ValueError(
          "You tried to create a stream without providing a "
          "tweet parser or a configuration for one.")
      tweet_parser = TweetParser(config=config, progress=progress)
    self.current_chapter = None
    self.current_day = None
    self.chapters = []
    self.tweet_parser = tweet_parser
    self.latest_tweet = None
    for line in lines:
      line = line.strip()
      if len(line) == 0:
        continue
      if line[:3] == "== ":
        self.end_chapter()
        self.begin_chapter(line[3:])
      elif line[:3] == "-- ":
        self.end_day()
        self.begin_day(line[3:])
      else:
        self.add_tweet(line)
    self.end_chapter()
    self.add_fuzz()
    self.chapter_start_sanity_check()
  def html_page(self, real_time=False):
    """Render the stream as a standalone HTML page.

    Args:
      real_time: group tweets by real-world posting day when True,
        by in-story day otherwise.
    """
    START = '''<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
'''
    l = [START]
    l.append('<style type="text/css">')
    for author in self.tweet_parser.authors:
      l.append(".%s { background-color: %s }" % (
        author['css_class'], author['color']))
    l.append('</style></head><body>')
    l.append("<p>Author guide:</p>")
    l.append("<ul>")
    for author in self.tweet_parser.authors:
      # Fixed: this list item used to be closed with a stray </a>.
      l.append('<li class="%s">%s</li>' % (author['css_class'], author['account']))
    l.append("</ul>")
    if real_time:
      l.append(self.real_world_timeline_html)
    else:
      l.append(self.in_story_timeline_html)
    # Fixed: the closing tag used to be the malformed "</body></html".
    l.append("</body></html>")
    return "\n".join(l)
  @property
  def in_story_timeline_html(self):
    """HTML for all chapters, grouped by in-story day."""
    return "\n\n".join(chapter.in_story_timeline_html for chapter in self.chapters)
  @property
  def real_world_timeline_html(self):
    """HTML for all chapters, grouped by real-world posting day."""
    return "\n\n".join(chapter.real_world_timeline_html for chapter in self.chapters)
  @property
  def tweets(self):
    """Yield every tweet in the stream, in script order."""
    for chapter in self.chapters:
      for day in chapter.days:
        for tweet in day.tweets:
          yield tweet
  def add_tweet(self, line):
    """Parse one script line into a Tweet and append it to the current day."""
    # Implicitly open an unnamed chapter/day if the script starts
    # with a tweet line.
    if self.current_chapter is None:
      self.begin_chapter("")
    if self.current_day is None:
      self.begin_day("")
    line = line.strip()
    tweet = self.tweet_parser.parse(line, self)
    self.current_day.tweets.append(tweet)
    self.latest_tweet = tweet
    return tweet
  def end_chapter(self):
    """Close the current chapter (and its day), if any."""
    if self.current_chapter is None:
      # No current chapter.
      return
    self.end_day()
    self.current_chapter = None
  def begin_chapter(self, chapter_name):
    """Open a new chapter, scheduled after the previous one."""
    if len(self.chapters) == 0:
      start_date = self.tweet_parser.start_of_day(
        self.tweet_parser.config['start_date'])
    else:
      previous_chapter = self.chapters[-1]
      duration = self.tweet_parser.config['chapter_duration_days']
      start_date = previous_chapter.start_date + duration
    self.current_chapter = Chapter(chapter_name, start_date)
    self.chapters.append(self.current_chapter)
  def end_day(self):
    """Close the current in-story day, if any."""
    if self.current_day is None:
      return
    self.current_day = None
  def begin_day(self, date):
    """Open a new in-story day inside the current chapter."""
    self.current_day = Day(date)
    self.current_chapter.days.append(self.current_day)
  def add_fuzz(self):
    """Assign each unposted tweet a fuzzed timestamp, in order.

    Retries up to ten times per tweet to find a timestamp that comes
    after the previous tweet's; raises ValueError if it cannot.
    """
    previous_tweet = None
    for tweet in self.tweets:
      progress = self.tweet_parser.progress
      if (progress is not None
          and progress.posts.get(tweet.digest) is not None):
        # This tweet has already been posted. Don't mess with it.
        previous_tweet = tweet
        continue
      success = False
      for i in range(0, 10):
        tweet.timestamp = tweet.calculate_timestamp(
          self.tweet_parser.fuzz_quotient,
          self.tweet_parser.fuzz_minimum_seconds,
          previous_tweet)
        if (previous_tweet is None
            or previous_tweet.timestamp < tweet.timestamp):
          # This timestamp is fine. Stop trying to calculate it.
          success = True
          break
        # If we didn't break, the timestamp we calculated came
        # before previous tweet's timestamp, which is a
        # problem. Restart the loop and calculate a different
        # timestamp.
      if not success:
        # We tried to calculate the timestamp ten times with
        # no success. Raise an error.
        raise ValueError('Calculated timestamp for "%s" is %s, which comes before calculated timestamp for the previous tweet "%s" (%s). Trying again may help.' % (
          tweet.text, tweet.timestamp_str, previous_tweet.text, previous_tweet.timestamp_str))
      previous_tweet = tweet
  def chapter_start_sanity_check(self):
    """Warn when a chapter's tweets overlap the next chapter's start."""
    if not self.chapters:
      # Fixed: an empty script used to raise IndexError on the
      # unguarded self.chapters[0] below.
      return
    previous_chapter = self.chapters[0]
    for chapter in self.chapters[1:]:
      tweets = list(previous_chapter.all_tweets)
      if len(tweets) > 0:
        previous_chapter_last_tweet = tweets[-1]
        if previous_chapter_last_tweet.timestamp > chapter.start_date:
          # print() call form so this also runs under Python 3.
          print('[WARNING] Last tweet in chapter "%s" overlaps the start of chapter "%s"' % (
            previous_chapter.name, chapter.name))
      previous_chapter = chapter
  @property
  def json(self):
    """All tweets' JSON records, one per line."""
    return "\n".join(tweet.json for tweet in self.tweets)
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.http import HttpResponseBadRequest, HttpRequest, HttpResponse\nfrom django.views.generic import TemplateVi... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.http import HttpResponseBadRequest, HttpRequest, HttpResponse\nfrom django.views.generic i... | ```python
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.i18n import i18n_patterns
from django.http import HttpResponseBadRequest, HttpRequest, HttpResponse
from django.views.generic import TemplateView, RedirectView
from django.utils.module_loading import import_string
import os
import zerver.forms
from zproject import dev_urls
from zproject.legacy_urls import legacy_urls
from zerver.views.documentation import IntegrationView, MarkdownDirectoryView
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from django.contrib.auth.views import (login, password_reset_done,
password_reset_confirm, password_reset_complete)
import zerver.tornado.views
import zerver.views
import zerver.views.auth
import zerver.views.archive
import zerver.views.camo
import zerver.views.compatibility
import zerver.views.home
import zerver.views.email_mirror
import zerver.views.registration
import zerver.views.zephyr
import zerver.views.users
import zerver.views.unsubscribe
import zerver.views.documentation
import zerver.views.user_groups
import zerver.views.user_settings
import zerver.views.muting
import zerver.views.streams
import zerver.views.realm
import zerver.views.digest
import zerver.views.messages
from zerver.context_processors import latest_info_context
import zerver.views.public_export
from zerver.lib.rest import rest_dispatch
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
from two_factor.urls import urlpatterns as tf_urls
from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls
# NB: There are several other pieces of code which route requests by URL:
#
# - legacy_urls.py contains API endpoint written before the redesign
# and should not be added to.
#
# - runtornado.py has its own URL list for Tornado views. See the
# invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.
# These endpoints constitute the currently designed API (V1), which uses:
# * REST verbs
# * Basic auth (username:password is email:apiKey)
# * Take and return json-formatted data
#
# If you're adding a new endpoint to the code that requires authentication,
# please add it here.
# See rest_dispatch in zerver.lib.rest for an explanation of auth methods used
#
# All of these paths are accessed by either a /json or /api/v1 prefix;
# e.g. `PATCH /json/realm` or `PATCH /api/v1/realm`.
v1_api_and_json_patterns = [
# realm-level calls
url(r'^realm$', rest_dispatch,
{'PATCH': 'zerver.views.realm.update_realm'}),
# Returns a 204, used by desktop app to verify connectivity status
url(r'generate_204$', zerver.views.registration.generate_204,
name='zerver.views.registration.generate_204'),
url(r'realm/subdomain/(?P<subdomain>\S+)$', zerver.views.realm.check_subdomain_available,
name='zerver.views.realm.check_subdomain_available'),
# realm/domains -> zerver.views.realm_domains
url(r'^realm/domains$', rest_dispatch,
{'GET': 'zerver.views.realm_domains.list_realm_domains',
'POST': 'zerver.views.realm_domains.create_realm_domain'}),
url(r'^realm/domains/(?P<domain>\S+)$', rest_dispatch,
{'PATCH': 'zerver.views.realm_domains.patch_realm_domain',
'DELETE': 'zerver.views.realm_domains.delete_realm_domain'}),
# realm/emoji -> zerver.views.realm_emoji
url(r'^realm/emoji$', rest_dispatch,
{'GET': 'zerver.views.realm_emoji.list_emoji'}),
url(r'^realm/emoji/(?P<emoji_name>.*)$', rest_dispatch,
{'POST': 'zerver.views.realm_emoji.upload_emoji',
'DELETE': ('zerver.views.realm_emoji.delete_emoji', {"intentionally_undocumented"})}),
# this endpoint throws a status code 400 JsonableError when it should be a 404.
# realm/icon -> zerver.views.realm_icon
url(r'^realm/icon$', rest_dispatch,
{'POST': 'zerver.views.realm_icon.upload_icon',
'DELETE': 'zerver.views.realm_icon.delete_icon_backend',
'GET': 'zerver.views.realm_icon.get_icon_backend'}),
# realm/logo -> zerver.views.realm_logo
url(r'^realm/logo$', rest_dispatch,
{'POST': 'zerver.views.realm_logo.upload_logo',
'DELETE': 'zerver.views.realm_logo.delete_logo_backend',
'GET': 'zerver.views.realm_logo.get_logo_backend'}),
# realm/filters -> zerver.views.realm_filters
url(r'^realm/filters$', rest_dispatch,
{'GET': 'zerver.views.realm_filters.list_filters',
'POST': 'zerver.views.realm_filters.create_filter'}),
url(r'^realm/filters/(?P<filter_id>\d+)$', rest_dispatch,
{'DELETE': 'zerver.views.realm_filters.delete_filter'}),
# realm/profile_fields -> zerver.views.custom_profile_fields
url(r'^realm/profile_fields$', rest_dispatch,
{'GET': 'zerver.views.custom_profile_fields.list_realm_custom_profile_fields',
'PATCH': 'zerver.views.custom_profile_fields.reorder_realm_custom_profile_fields',
'POST': 'zerver.views.custom_profile_fields.create_realm_custom_profile_field'}),
url(r'^realm/profile_fields/(?P<field_id>\d+)$', rest_dispatch,
{'PATCH': 'zerver.views.custom_profile_fields.update_realm_custom_profile_field',
'DELETE': 'zerver.views.custom_profile_fields.delete_realm_custom_profile_field'}),
# realm/deactivate -> zerver.views.deactivate_realm
url(r'^realm/deactivate$', rest_dispatch,
{'POST': 'zerver.views.realm.deactivate_realm'}),
url(r'^realm/presence$', rest_dispatch,
{'GET': 'zerver.views.presence.get_statuses_for_realm'}),
# users -> zerver.views.users
#
# Since some of these endpoints do something different if used on
# yourself with `/me` as the email, we need to make sure that we
# don't accidentally trigger these. The cleanest way to do that
# is to add a regular expression assertion that it isn't `/me/`
# (or ends with `/me`, in the case of hitting the root URL).
url(r'^users$', rest_dispatch,
{'GET': 'zerver.views.users.get_members_backend',
'POST': 'zerver.views.users.create_user_backend'}),
url(r'^users/(?P<user_id>[0-9]+)/reactivate$', rest_dispatch,
{'POST': 'zerver.views.users.reactivate_user_backend'}),
url(r'^users/(?!me/)(?P<email>[^/]*)/presence$', rest_dispatch,
{'GET': 'zerver.views.presence.get_presence_backend'}),
url(r'^users/(?P<user_id>[0-9]+)$', rest_dispatch,
{'PATCH': 'zerver.views.users.update_user_backend',
'DELETE': 'zerver.views.users.deactivate_user_backend'}),
url(r'^bots$', rest_dispatch,
{'GET': 'zerver.views.users.get_bots_backend',
'POST': 'zerver.views.users.add_bot_backend'}),
url(r'^bots/(?P<bot_id>[0-9]+)/api_key/regenerate$', rest_dispatch,
{'POST': 'zerver.views.users.regenerate_bot_api_key'}),
url(r'^bots/(?P<bot_id>[0-9]+)$', rest_dispatch,
{'PATCH': 'zerver.views.users.patch_bot_backend',
'DELETE': 'zerver.views.users.deactivate_bot_backend'}),
# invites -> zerver.views.invite
url(r'^invites$', rest_dispatch,
{'GET': 'zerver.views.invite.get_user_invites',
'POST': 'zerver.views.invite.invite_users_backend'}),
url(r'^invites/(?P<prereg_id>[0-9]+)$', rest_dispatch,
{'DELETE': 'zerver.views.invite.revoke_user_invite'}),
url(r'^invites/(?P<prereg_id>[0-9]+)/resend$', rest_dispatch,
{'POST': 'zerver.views.invite.resend_user_invite_email'}),
# invites/multiuse -> zerver.views.invite
url(r'^invites/multiuse$', rest_dispatch,
{'POST': 'zerver.views.invite.generate_multiuse_invite_backend'}),
# invites/multiuse -> zerver.views.invite
url(r'^invites/multiuse/(?P<invite_id>[0-9]+)$', rest_dispatch,
{'DELETE': 'zerver.views.invite.revoke_multiuse_invite'}),
# mark messages as read (in bulk)
url(r'^mark_all_as_read$', rest_dispatch,
{'POST': 'zerver.views.messages.mark_all_as_read'}),
url(r'^mark_stream_as_read$', rest_dispatch,
{'POST': 'zerver.views.messages.mark_stream_as_read'}),
url(r'^mark_topic_as_read$', rest_dispatch,
{'POST': 'zerver.views.messages.mark_topic_as_read'}),
url(r'^zcommand$', rest_dispatch,
{'POST': 'zerver.views.messages.zcommand_backend'}),
# messages -> zerver.views.messages
# GET returns messages, possibly filtered, POST sends a message
url(r'^messages$', rest_dispatch,
{'GET': 'zerver.views.messages.get_messages_backend',
'POST': ('zerver.views.messages.send_message_backend',
{'allow_incoming_webhooks'})}),
url(r'^messages/(?P<message_id>[0-9]+)$', rest_dispatch,
{'GET': 'zerver.views.messages.json_fetch_raw_message',
'PATCH': 'zerver.views.messages.update_message_backend',
'DELETE': 'zerver.views.messages.delete_message_backend'}),
url(r'^messages/render$', rest_dispatch,
{'POST': 'zerver.views.messages.render_message_backend'}),
url(r'^messages/flags$', rest_dispatch,
{'POST': 'zerver.views.messages.update_message_flags'}),
url(r'^messages/(?P<message_id>\d+)/history$', rest_dispatch,
{'GET': 'zerver.views.messages.get_message_edit_history'}),
url(r'^messages/matches_narrow$', rest_dispatch,
{'GET': 'zerver.views.messages.messages_in_narrow_backend'}),
url(r'^users/me/subscriptions/properties$', rest_dispatch,
{'POST': 'zerver.views.streams.update_subscription_properties_backend'}),
url(r'^users/me/subscriptions/(?P<stream_id>\d+)$', rest_dispatch,
{'PATCH': 'zerver.views.streams.update_subscriptions_property'}),
url(r'^submessage$',
rest_dispatch,
{'POST': 'zerver.views.submessage.process_submessage'}),
# New endpoint for handling reactions.
url(r'^messages/(?P<message_id>[0-9]+)/reactions$',
rest_dispatch,
{'POST': 'zerver.views.reactions.add_reaction',
'DELETE': 'zerver.views.reactions.remove_reaction'}),
# reactions -> zerver.view.reactions
# PUT adds a reaction to a message
# DELETE removes a reaction from a message
url(r'^messages/(?P<message_id>[0-9]+)/emoji_reactions/(?P<emoji_name>.*)$',
rest_dispatch,
{'PUT': 'zerver.views.reactions.add_reaction_legacy',
'DELETE': 'zerver.views.reactions.remove_reaction_legacy'}),
# attachments -> zerver.views.attachments
url(r'^attachments$', rest_dispatch,
{'GET': 'zerver.views.attachments.list_by_user'}),
url(r'^attachments/(?P<attachment_id>[0-9]+)$', rest_dispatch,
{'DELETE': 'zerver.views.attachments.remove'}),
# typing -> zerver.views.typing
# POST sends a typing notification event to recipients
url(r'^typing$', rest_dispatch,
{'POST': 'zerver.views.typing.send_notification_backend'}),
# user_uploads -> zerver.views.upload
url(r'^user_uploads$', rest_dispatch,
{'POST': 'zerver.views.upload.upload_file_backend'}),
# bot_storage -> zerver.views.storage
url(r'^bot_storage$', rest_dispatch,
{'PUT': 'zerver.views.storage.update_storage',
'GET': 'zerver.views.storage.get_storage',
'DELETE': 'zerver.views.storage.remove_storage'}),
# users/me -> zerver.views
url(r'^users/me$', rest_dispatch,
{'GET': 'zerver.views.users.get_profile_backend',
'DELETE': 'zerver.views.users.deactivate_user_own_backend'}),
# PUT is currently used by mobile apps, we intend to remove the PUT version
# as soon as possible. POST exists to correct the erroneous use of PUT.
url(r'^users/me/pointer$', rest_dispatch,
{'GET': 'zerver.views.pointer.get_pointer_backend',
'PUT': 'zerver.views.pointer.update_pointer_backend',
'POST': 'zerver.views.pointer.update_pointer_backend'}),
url(r'^users/me/presence$', rest_dispatch,
{'POST': 'zerver.views.presence.update_active_status_backend'}),
url(r'^users/me/status$', rest_dispatch,
{'POST': 'zerver.views.presence.update_user_status_backend'}),
# Endpoint used by mobile devices to register their push
# notification credentials
url(r'^users/me/apns_device_token$', rest_dispatch,
{'POST': 'zerver.views.push_notifications.add_apns_device_token',
'DELETE': 'zerver.views.push_notifications.remove_apns_device_token'}),
url(r'^users/me/android_gcm_reg_id$', rest_dispatch,
{'POST': 'zerver.views.push_notifications.add_android_reg_id',
'DELETE': 'zerver.views.push_notifications.remove_android_reg_id'}),
# user_groups -> zerver.views.user_groups
url(r'^user_groups$', rest_dispatch,
{'GET': 'zerver.views.user_groups.get_user_group'}),
url(r'^user_groups/create$', rest_dispatch,
{'POST': 'zerver.views.user_groups.add_user_group'}),
url(r'^user_groups/(?P<user_group_id>\d+)$', rest_dispatch,
{'PATCH': 'zerver.views.user_groups.edit_user_group',
'DELETE': 'zerver.views.user_groups.delete_user_group'}),
url(r'^user_groups/(?P<user_group_id>\d+)/members$', rest_dispatch,
{'POST': 'zerver.views.user_groups.update_user_group_backend'}),
# users/me -> zerver.views.user_settings
url(r'^users/me/api_key/regenerate$', rest_dispatch,
{'POST': 'zerver.views.user_settings.regenerate_api_key'}),
url(r'^users/me/enter-sends$', rest_dispatch,
{'POST': ('zerver.views.user_settings.change_enter_sends',
# This endpoint should be folded into user settings
{'intentionally_undocumented'})}),
url(r'^users/me/avatar$', rest_dispatch,
{'POST': 'zerver.views.user_settings.set_avatar_backend',
'DELETE': 'zerver.views.user_settings.delete_avatar_backend'}),
# users/me/hotspots -> zerver.views.hotspots
url(r'^users/me/hotspots$', rest_dispatch,
{'POST': ('zerver.views.hotspots.mark_hotspot_as_read',
# This endpoint is low priority for documentation as
# it is part of the webapp-specific tutorial.
{'intentionally_undocumented'})}),
# users/me/tutorial_status -> zerver.views.tutorial
url(r'^users/me/tutorial_status$', rest_dispatch,
{'POST': ('zerver.views.tutorial.set_tutorial_status',
# This is a relic of an old Zulip tutorial model and
# should be deleted.
{'intentionally_undocumented'})}),
# settings -> zerver.views.user_settings
url(r'^settings$', rest_dispatch,
{'PATCH': 'zerver.views.user_settings.json_change_settings'}),
url(r'^settings/display$', rest_dispatch,
{'PATCH': 'zerver.views.user_settings.update_display_settings_backend'}),
url(r'^settings/notifications$', rest_dispatch,
{'PATCH': 'zerver.views.user_settings.json_change_notify_settings'}),
# users/me/alert_words -> zerver.views.alert_words
url(r'^users/me/alert_words$', rest_dispatch,
{'GET': 'zerver.views.alert_words.list_alert_words',
'POST': 'zerver.views.alert_words.add_alert_words',
'DELETE': 'zerver.views.alert_words.remove_alert_words'}),
# users/me/custom_profile_data -> zerver.views.custom_profile_data
url(r'^users/me/profile_data$', rest_dispatch,
{'PATCH': 'zerver.views.custom_profile_fields.update_user_custom_profile_data',
'DELETE': 'zerver.views.custom_profile_fields.remove_user_custom_profile_data'}),
url(r'^users/me/(?P<stream_id>\d+)/topics$', rest_dispatch,
{'GET': 'zerver.views.streams.get_topics_backend'}),
# streams -> zerver.views.streams
# (this API is only used externally)
url(r'^streams$', rest_dispatch,
{'GET': 'zerver.views.streams.get_streams_backend'}),
# GET returns `stream_id`, stream name should be encoded in the url query (in `stream` param)
url(r'^get_stream_id$', rest_dispatch,
{'GET': 'zerver.views.streams.json_get_stream_id'}),
# GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
url(r'^streams/(?P<stream_id>\d+)/members$', rest_dispatch,
{'GET': 'zerver.views.streams.get_subscribers_backend'}),
url(r'^streams/(?P<stream_id>\d+)$', rest_dispatch,
{'PATCH': 'zerver.views.streams.update_stream_backend',
'DELETE': 'zerver.views.streams.deactivate_stream_backend'}),
# Delete topic in stream
url(r'^streams/(?P<stream_id>\d+)/delete_topic$', rest_dispatch,
{'POST': 'zerver.views.streams.delete_in_topic'}),
url(r'^default_streams$', rest_dispatch,
{'POST': 'zerver.views.streams.add_default_stream',
'DELETE': 'zerver.views.streams.remove_default_stream'}),
url(r'^default_stream_groups/create$', rest_dispatch,
{'POST': 'zerver.views.streams.create_default_stream_group'}),
url(r'^default_stream_groups/(?P<group_id>\d+)$', rest_dispatch,
{'PATCH': 'zerver.views.streams.update_default_stream_group_info',
'DELETE': 'zerver.views.streams.remove_default_stream_group'}),
url(r'^default_stream_groups/(?P<group_id>\d+)/streams$', rest_dispatch,
{'PATCH': 'zerver.views.streams.update_default_stream_group_streams'}),
# GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
url(r'^users/me/subscriptions$', rest_dispatch,
{'GET': 'zerver.views.streams.list_subscriptions_backend',
'POST': 'zerver.views.streams.add_subscriptions_backend',
'PATCH': 'zerver.views.streams.update_subscriptions_backend',
'DELETE': 'zerver.views.streams.remove_subscriptions_backend'}),
# muting -> zerver.views.muting
url(r'^users/me/subscriptions/muted_topics$', rest_dispatch,
{'PATCH': 'zerver.views.muting.update_muted_topic'}),
# used to register for an event queue in tornado
url(r'^register$', rest_dispatch,
{'POST': 'zerver.views.events_register.events_register_backend'}),
# events -> zerver.tornado.views
url(r'^events$', rest_dispatch,
{'GET': 'zerver.tornado.views.get_events',
'DELETE': 'zerver.tornado.views.cleanup_event_queue'}),
# report -> zerver.views.report
#
# These endpoints are for internal error/performance reporting
# from the browser to the webapp, and we don't expect to ever
# include in our API documentation.
url(r'^report/error$', rest_dispatch,
# Logged-out browsers can hit this endpoint, for portico page JS exceptions.
{'POST': ('zerver.views.report.report_error', {'allow_anonymous_user_web',
'intentionally_undocumented'})}),
url(r'^report/send_times$', rest_dispatch,
{'POST': ('zerver.views.report.report_send_times', {'intentionally_undocumented'})}),
url(r'^report/narrow_times$', rest_dispatch,
{'POST': ('zerver.views.report.report_narrow_times', {'intentionally_undocumented'})}),
url(r'^report/unnarrow_times$', rest_dispatch,
{'POST': ('zerver.views.report.report_unnarrow_times', {'intentionally_undocumented'})}),
# Used to generate a Zoom video call URL
url(r'^calls/create$', rest_dispatch,
{'GET': 'zerver.views.video_calls.get_zoom_url'}),
# Used for public-only realm exporting
url(r'^export/realm$', rest_dispatch,
{'POST': 'zerver.views.public_export.public_only_realm_export'}),
]
# These views serve pages (HTML). As such, their internationalization
# must depend on the url.
#
# If you're adding a new page to the website (as opposed to a new
# endpoint for use by code), you should add it here.
i18n_urls = [
url(r'^$', zerver.views.home.home, name='zerver.views.home.home'),
# We have a desktop-specific landing page in case we change our /
# to not log in in the future. We don't want to require a new
# desktop app build for everyone in that case
url(r'^desktop_home/$', zerver.views.home.desktop_home,
name='zerver.views.home.desktop_home'),
url(r'^accounts/login/sso/$', zerver.views.auth.remote_user_sso, name='login-sso'),
url(r'^accounts/login/jwt/$', zerver.views.auth.remote_user_jwt, name='login-jwt'),
url(r'^accounts/login/social/([\w,-]+)$', zerver.views.auth.start_social_login,
name='login-social'),
url(r'^accounts/register/social/([\w,-]+)$',
zerver.views.auth.start_social_signup,
name='signup-social'),
url(r'^accounts/login/google/$', zerver.views.auth.start_google_oauth2,
name='zerver.views.auth.start_google_oauth2'),
url(r'^accounts/login/google/send/$',
zerver.views.auth.send_oauth_request_to_google,
name='zerver.views.auth.send_oauth_request_to_google'),
url(r'^accounts/login/google/done/$', zerver.views.auth.finish_google_oauth2,
name='zerver.views.auth.finish_google_oauth2'),
url(r'^accounts/login/subdomain/([^/]+)$', zerver.views.auth.log_into_subdomain,
name='zerver.views.auth.log_into_subdomain'),
url(r'^accounts/login/local/$', zerver.views.auth.dev_direct_login,
name='zerver.views.auth.dev_direct_login'),
# We have two entries for accounts/login; only the first one is
# used for URL resolution. The second here is to allow
# reverse("django.contrib.auth.views.login") in templates to
# return `/accounts/login/`.
url(r'^accounts/login/', zerver.views.auth.login_page,
{'template_name': 'zerver/login.html'}, name='zerver.views.auth.login_page'),
url(r'^accounts/login/', login, {'template_name': 'zerver/login.html'},
name='django.contrib.auth.views.login'),
url(r'^accounts/logout/', zerver.views.auth.logout_then_login,
name='zerver.views.auth.logout_then_login'),
url(r'^accounts/webathena_kerberos_login/',
zerver.views.zephyr.webathena_kerberos_login,
name='zerver.views.zephyr.webathena_kerberos_login'),
url(r'^accounts/password/reset/$', zerver.views.auth.password_reset,
name='zerver.views.auth.password_reset'),
url(r'^accounts/password/reset/done/$', password_reset_done,
{'template_name': 'zerver/reset_emailed.html'}),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z]+)/(?P<token>.+)/$',
password_reset_confirm,
{'post_reset_redirect': '/accounts/password/done/',
'template_name': 'zerver/reset_confirm.html',
'set_password_form': zerver.forms.LoggingSetPasswordForm},
name='django.contrib.auth.views.password_reset_confirm'),
url(r'^accounts/password/done/$', password_reset_complete,
{'template_name': 'zerver/reset_done.html'}),
url(r'^accounts/deactivated/',
zerver.views.auth.show_deactivation_notice,
name='zerver.views.auth.show_deactivation_notice'),
# Displays digest email content in browser.
url(r'^digest/$', zerver.views.digest.digest_page),
# Registration views, require a confirmation ID.
url(r'^accounts/home/', zerver.views.registration.accounts_home,
name='zerver.views.registration.accounts_home'),
url(r'^accounts/send_confirm/(?P<email>[\S]+)?',
TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'),
name='signup_send_confirm'),
url(r'^accounts/new/send_confirm/(?P<email>[\S]+)?',
TemplateView.as_view(template_name='zerver/accounts_send_confirm.html'),
{'realm_creation': True}, name='new_realm_send_confirm'),
url(r'^accounts/register/', zerver.views.registration.accounts_register,
name='zerver.views.registration.accounts_register'),
url(r'^accounts/do_confirm/(?P<confirmation_key>[\w]+)',
zerver.views.registration.check_prereg_key_and_redirect,
name='check_prereg_key_and_redirect'),
url(r'^accounts/confirm_new_email/(?P<confirmation_key>[\w]+)',
zerver.views.user_settings.confirm_email_change,
name='zerver.views.user_settings.confirm_email_change'),
# Email unsubscription endpoint. Allows for unsubscribing from various types of emails,
# including the welcome emails (day 1 & 2), missed PMs, etc.
url(r'^accounts/unsubscribe/(?P<email_type>[\w]+)/(?P<confirmation_key>[\w]+)',
zerver.views.unsubscribe.email_unsubscribe,
name='zerver.views.unsubscribe.email_unsubscribe'),
# Portico-styled page used to provide email confirmation of terms acceptance.
url(r'^accounts/accept_terms/$', zerver.views.home.accounts_accept_terms,
name='zerver.views.home.accounts_accept_terms'),
# Find your account
url(r'^accounts/find/$', zerver.views.registration.find_account,
name='zerver.views.registration.find_account'),
# Go to organization subdomain
url(r'^accounts/go/$', zerver.views.registration.realm_redirect,
name='zerver.views.registration.realm_redirect'),
# Realm Creation
url(r'^new/$', zerver.views.registration.create_realm,
name='zerver.views.create_realm'),
url(r'^new/(?P<creation_key>[\w]+)$',
zerver.views.registration.create_realm, name='zerver.views.create_realm'),
# Realm Reactivation
url(r'^reactivate/(?P<confirmation_key>[\w]+)', zerver.views.realm.realm_reactivation,
name='zerver.views.realm.realm_reactivation'),
# Global public streams (Zulip's way of doing archives)
url(r'^archive/streams/(?P<stream_id>\d+)/topics/(?P<topic_name>[^/]+)$',
zerver.views.archive.archive,
name='zerver.views.archive.archive'),
url(r'^archive/streams/(?P<stream_id>\d+)/topics$',
zerver.views.archive.get_web_public_topics_backend,
name='zerver.views.archive.get_web_public_topics_backend'),
# Login/registration
url(r'^register/$', zerver.views.registration.accounts_home, name='register'),
url(r'^login/$', zerver.views.auth.login_page, {'template_name': 'zerver/login.html'},
name='zerver.views.auth.login_page'),
url(r'^join/(?P<confirmation_key>\S+)/$',
zerver.views.registration.accounts_home_from_multiuse_invite,
name='zerver.views.registration.accounts_home_from_multiuse_invite'),
# API and integrations documentation
url(r'^integrations/doc-html/(?P<integration_name>[^/]*)$',
zerver.views.documentation.integration_doc,
name="zerver.views.documentation.integration_doc"),
url(r'^integrations/(.*)', IntegrationView.as_view()),
url(r'^team/$', zerver.views.users.team_view),
url(r'^history/$', TemplateView.as_view(template_name='zerver/history.html')),
url(r'^apps/(.*)', zerver.views.home.apps_view, name='zerver.views.home.apps_view'),
url(r'^plans/$', zerver.views.home.plans_view, name='plans'),
# Landing page, features pages, signup form, etc.
url(r'^hello/$', TemplateView.as_view(template_name='zerver/hello.html',
get_context_data=latest_info_context),
name='landing-page'),
url(r'^new-user/$', RedirectView.as_view(url='/hello', permanent=True)),
url(r'^features/$', TemplateView.as_view(template_name='zerver/features.html')),
url(r'^why-zulip/$', TemplateView.as_view(template_name='zerver/why-zulip.html')),
url(r'^for/open-source/$', TemplateView.as_view(template_name='zerver/for-open-source.html')),
url(r'^for/companies/$', TemplateView.as_view(template_name='zerver/for-companies.html')),
url(r'^for/working-groups-and-communities/$',
TemplateView.as_view(template_name='zerver/for-working-groups-and-communities.html')),
url(r'^for/mystery-hunt/$', TemplateView.as_view(template_name='zerver/for-mystery-hunt.html')),
url(r'^security/$', TemplateView.as_view(template_name='zerver/security.html')),
url(r'^atlassian/$', TemplateView.as_view(template_name='zerver/atlassian.html')),
# Terms of Service and privacy pages.
url(r'^terms/$', TemplateView.as_view(template_name='zerver/terms.html'), name='terms'),
url(r'^privacy/$', TemplateView.as_view(template_name='zerver/privacy.html'), name='privacy'),
url(r'^config-error/google$', TemplateView.as_view(
template_name='zerver/config_error.html',),
{'google_error': True},),
url(r'^config-error/github$', TemplateView.as_view(
template_name='zerver/config_error.html',),
{'github_error': True},),
url(r'^config-error/smtp$', TemplateView.as_view(
template_name='zerver/config_error.html',),
{'smtp_error': True},),
url(r'^config-error/ldap$', TemplateView.as_view(
template_name='zerver/config_error.html',),
{'ldap_error_realm_is_none': True},
name='ldap_error_realm_is_none'),
url(r'^config-error/dev$', TemplateView.as_view(
template_name='zerver/config_error.html',),
{'dev_not_supported_error': True},
name='dev_not_supported'),
]
# Make a copy of i18n_urls so that they appear without prefix for english
urls = list(i18n_urls)
# Include the dual-use patterns twice
urls += [
url(r'^api/v1/', include(v1_api_and_json_patterns)),
url(r'^json/', include(v1_api_and_json_patterns)),
]
# user_uploads -> zerver.views.upload.serve_file_backend
#
# This url is an exception to the url naming schemes for endpoints. It
# supports both API and session cookie authentication, using a single
# URL for both (not 'api/v1/' or 'json/' prefix). This is required to
# easily support the mobile apps fetching uploaded files without
# having to rewrite URLs, and is implemented using the
# 'override_api_url_scheme' flag passed to rest_dispatch
urls += [
url(r'^user_uploads/(?P<realm_id_str>(\d*|unk))/(?P<filename>.*)',
rest_dispatch,
{'GET': ('zerver.views.upload.serve_file_backend',
{'override_api_url_scheme'})}),
# This endpoint serves thumbnailed versions of images using thumbor;
# it requires an exception for the same reason.
url(r'^thumbnail', rest_dispatch,
{'GET': ('zerver.views.thumbnail.backend_serve_thumbnail',
{'override_api_url_scheme'})}),
# Avatars have the same constraint due to `!avatar` syntax.
url(r'^avatar/(?P<email_or_id>[\S]+)/(?P<medium>[\S]+)?',
rest_dispatch,
{'GET': ('zerver.views.users.avatar',
{'override_api_url_scheme'})}),
url(r'^avatar/(?P<email_or_id>[\S]+)',
rest_dispatch,
{'GET': ('zerver.views.users.avatar',
{'override_api_url_scheme'})}),
]
# This URL serves as a way to receive CSP violation reports from the users.
# We use this endpoint to just log these reports.
urls += url(r'^report/csp_violations$', zerver.views.report.report_csp_violations,
name='zerver.views.report.report_csp_violations'),
# This url serves as a way to provide backward compatibility to messages
# rendered at the time Zulip used camo for doing http -> https conversion for
# such links with images previews. Now thumbor can be used for serving such
# images.
urls += url(r'^external_content/(?P<digest>[\S]+)/(?P<received_url>[\S]+)',
zerver.views.camo.handle_camo_url,
name='zerver.views.camo.handle_camo_url'),
# Incoming webhook URLs
# We don't create urls for particular git integrations here
# because of generic one below
for incoming_webhook in WEBHOOK_INTEGRATIONS:
if incoming_webhook.url_object:
urls.append(incoming_webhook.url_object)
# Desktop-specific authentication URLs
urls += [
url(r'^json/fetch_api_key$', rest_dispatch,
{'POST': 'zerver.views.auth.json_fetch_api_key'}),
]
# Mobile-specific authentication URLs
urls += [
# This JSON-format view used by the mobile apps lists which
# authentication backends the server allows, as well as details
# like the requested subdomain's realm icon (if known) and
# server-specific compatibility.
url(r'^api/v1/server_settings', zerver.views.auth.api_get_server_settings),
# This is a deprecated old version of api/v1/server_settings that only returns auth backends.
url(r'^api/v1/get_auth_backends', zerver.views.auth.api_get_auth_backends,
name='zerver.views.auth.api_get_auth_backends'),
# Used as a global check by all mobile clients, which currently send
# requests to https://zulipchat.com/compatibility almost immediately after
# starting up.
url(r'^compatibility$', zerver.views.compatibility.check_global_compatibility),
# This json format view used by the mobile apps accepts a username
# password/pair and returns an API key.
url(r'^api/v1/fetch_api_key$', zerver.views.auth.api_fetch_api_key,
name='zerver.views.auth.api_fetch_api_key'),
# This is for the signing in through the devAuthBackEnd on mobile apps.
url(r'^api/v1/dev_fetch_api_key$', zerver.views.auth.api_dev_fetch_api_key,
name='zerver.views.auth.api_dev_fetch_api_key'),
# This is for fetching the emails of the admins and the users.
url(r'^api/v1/dev_list_users$', zerver.views.auth.api_dev_list_users,
name='zerver.views.auth.api_dev_list_users'),
# Used to present the GOOGLE_CLIENT_ID to mobile apps
url(r'^api/v1/fetch_google_client_id$',
zerver.views.auth.api_fetch_google_client_id,
name='zerver.views.auth.api_fetch_google_client_id'),
]
# View for uploading messages from email mirror
urls += [
url(r'^email_mirror_message$', zerver.views.email_mirror.email_mirror_message,
name='zerver.views.email_mirror.email_mirror_message'),
]
# Include URL configuration files for site-specified extra installed
# Django apps
for app_name in settings.EXTRA_INSTALLED_APPS:
app_dir = os.path.join(settings.DEPLOY_ROOT, app_name)
if os.path.exists(os.path.join(app_dir, 'urls.py')):
urls += [url(r'^', include('%s.urls' % (app_name,)))]
i18n_urls += import_string("{}.urls.i18n_urlpatterns".format(app_name))
# Tornado views
urls += [
# Used internally for communication between Django and Tornado processes
url(r'^notify_tornado$', zerver.tornado.views.notify, name='zerver.tornado.views.notify'),
url(r'^api/v1/events/internal$', zerver.tornado.views.get_events_internal),
]
# Python Social Auth
urls += [url(r'^', include('social_django.urls', namespace='social'))]
# User documentation site
urls += [url(r'^help/(?P<article>.*)$',
MarkdownDirectoryView.as_view(template_name='zerver/documentation_main.html',
path_template='/zerver/help/%s.md'))]
urls += [url(r'^api/(?P<article>[-\w]*\/?)$',
MarkdownDirectoryView.as_view(template_name='zerver/documentation_main.html',
path_template='/zerver/api/%s.md'))]
# Two Factor urls
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
urls += [url(r'', include(tf_urls)),
url(r'', include(tf_twilio_urls))]
if settings.DEVELOPMENT:
urls += dev_urls.urls
i18n_urls += dev_urls.i18n_urls
# The sequence is important; if i18n urls don't come first then
# reverse url mapping points to i18n urls which causes the frontend
# tests to fail
urlpatterns = i18n_patterns(*i18n_urls) + urls + legacy_urls
def handler400(request: HttpRequest, exception: Exception) -> HttpResponse:
    """Custom 400 handler that skips the @requires_csrf_token machinery.

    Behaves exactly like Django's default bad-request view in the absence
    of a "400.html" template (which this project does not have), except
    that it never tries to compute a `csrf_token` template variable.  That
    code path can itself raise on a malformed request, which is precisely
    the situation being handled here.
    Upstream bug: https://code.djangoproject.com/ticket/28693 (fixed in
    Django 2.1 by commit 7ec0fdf62, making this workaround obsolete there).

    Django picks this view up purely by its special name in the root
    urls.py; see
    https://docs.djangoproject.com/en/1.11/topics/http/views/#customizing-error-views
    """
    body = '<h1>Bad Request (400)</h1>'
    return HttpResponseBadRequest(body, content_type='text/html')
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# Copyright 2004-2008 Roman Yakovenko.\r\n# Distributed under the Boost Software License, Version 1.0. (See\r\n# accompanying file LICENSE_1_0.txt or copy at\r\n# http://www.boost.org/LICENSE_1_0.txt)\r\n\r\n\"\"\"This module contains the implementation of... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# Copyright 2004-2008 Roman Yakovenko.\r\n# Distributed under the Boost Software License, Version 1.0. (See\r\n# accompanying file LICENSE_1_0.txt or copy at\r\n# http://www.boost.org/LICENSE_1_0.txt)\r\n\r\n\"\"\"This module contains the i... | ```python
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""This module contains the implementation of the L{config_t} class.
"""
import os
import sys
import copy
class parser_configuration_t(object):
    """Configuration object to collect parameters for invoking C++ parser

    This class serves as a base class for the parameters that can be used
    to customize the call to C++ parser. This class also allows users to work with
    relative files paths. In this case files are searched in the following order:

    1. current directory
    2. working directory
    3. additional include paths specified by the user
    """

    def __init__(self,
                 working_directory='.',
                 include_paths=None,
                 define_symbols=None,
                 undefine_symbols=None,
                 cflags="",
                 compiler=None):
        """Constructor.

        :param working_directory: directory against which relative paths are resolved
        :param include_paths: list of include directories for header lookup
        :param define_symbols: list of "define" directives
        :param undefine_symbols: list of "undefine" directives
        :param cflags: additional flags to pass to the compiler
        :param compiler: compiler name to simulate
        """
        object.__init__(self)
        self.__working_directory = working_directory
        # Create fresh lists per instance instead of sharing mutable defaults.
        if not include_paths:
            include_paths = []
        self.__include_paths = include_paths
        if not define_symbols:
            define_symbols = []
        self.__define_symbols = define_symbols
        if not undefine_symbols:
            undefine_symbols = []
        self.__undefine_symbols = undefine_symbols
        self.__cflags = cflags
        self.__compiler = compiler

    def clone(self):
        """Return a copy of this configuration; subclasses must implement."""
        raise NotImplementedError(self.__class__.__name__)

    def __get_working_directory(self):
        return self.__working_directory

    def __set_working_directory(self, working_dir):
        self.__working_directory = working_dir

    working_directory = property(__get_working_directory, __set_working_directory)

    @property
    def include_paths(self):
        """list of include paths to look for header files"""
        return self.__include_paths

    @property
    def define_symbols(self):
        """list of "define" directives """
        return self.__define_symbols

    @property
    def undefine_symbols(self):
        """list of "undefine" directives """
        return self.__undefine_symbols

    @property
    def compiler(self):
        """compiler name to simulate"""
        return self.__compiler

    def __get_cflags(self):
        return self.__cflags

    def __set_cflags(self, val):
        self.__cflags = val

    cflags = property(__get_cflags, __set_cflags,
                      doc="additional flags to pass to compiler")

    def __ensure_dir_exists(self, dir_path, meaning):
        """Raise RuntimeError unless `dir_path` is an existing directory.

        Bug fixes vs. the original: inspect `dir_path` itself (the original
        tested `self.working_directory` regardless of the argument), and
        report the correct message for each failure mode (the original had
        the two messages swapped).
        """
        if os.path.isdir(dir_path):
            return
        if os.path.exists(dir_path):
            # The path exists but is not a directory (e.g. a regular file).
            raise RuntimeError('%s("%s") should be "directory", not a file.' % (meaning, dir_path))
        raise RuntimeError('%s("%s") does not exist!' % (meaning, dir_path))

    def raise_on_wrong_settings(self):
        """validates the configuration settings and raises RuntimeError on error"""
        self.__ensure_dir_exists(self.working_directory, 'working directory')
        # Use a plain loop, not `map`: on Python 3 `map` is lazy, so the
        # original expression never actually ran the include-path checks.
        for idir in self.include_paths:
            self.__ensure_dir_exists(idir, 'include directory')
class gccxml_configuration_t(parser_configuration_t):
    """Configuration object to collect parameters for invoking gccxml.

    This class serves as a container for the parameters that can be used
    to customize the call to gccxml.
    """

    def __init__(self,
                 gccxml_path='',
                 working_directory='.',
                 include_paths=None,
                 define_symbols=None,
                 undefine_symbols=None,
                 start_with_declarations=None,
                 ignore_gccxml_output=False,
                 cflags="",
                 compiler=None):
        """Constructor.

        :param gccxml_path: path to the gccxml binary, or to the directory
            containing it
        :param start_with_declarations: declarations gccxml should start
            with when it dumps the declaration tree
        :param ignore_gccxml_output: if True, errors/warnings printed by
            gccxml are ignored
        (remaining parameters are forwarded to parser_configuration_t)
        """
        parser_configuration_t.__init__(self,
                                        working_directory=working_directory,
                                        include_paths=include_paths,
                                        define_symbols=define_symbols,
                                        undefine_symbols=undefine_symbols,
                                        cflags=cflags,
                                        compiler=compiler)
        self.__gccxml_path = gccxml_path
        if not start_with_declarations:
            start_with_declarations = []
        self.__start_with_declarations = start_with_declarations
        self.__ignore_gccxml_output = ignore_gccxml_output

    def clone(self):
        """Return a deep copy of this configuration."""
        return copy.deepcopy(self)

    def __get_gccxml_path(self):
        return self.__gccxml_path

    def __set_gccxml_path(self, new_path):
        self.__gccxml_path = new_path

    gccxml_path = property(__get_gccxml_path, __set_gccxml_path,
                           doc="gccxml binary location")

    @property
    def start_with_declarations(self):
        """list of declarations gccxml should start with, when it dumps declaration tree"""
        return self.__start_with_declarations

    def __get_ignore_gccxml_output(self):
        return self.__ignore_gccxml_output

    def __set_ignore_gccxml_output(self, val=True):
        self.__ignore_gccxml_output = val

    ignore_gccxml_output = property(__get_ignore_gccxml_output, __set_ignore_gccxml_output,
                                    doc="set this property to True, if you want pygccxml to ignore any error\\warning that comes from gccxml")

    def raise_on_wrong_settings(self):
        """Validate settings, resolving `gccxml_path` to an executable file.

        If `gccxml_path` is a directory, look for the gccxml binary inside
        it, then fall back to searching the directories on PATH.
        Raises RuntimeError if no gccxml binary can be located.
        """
        super(gccxml_configuration_t, self).raise_on_wrong_settings()
        if os.path.isfile(self.gccxml_path):
            return
        if sys.platform == 'win32':
            gccxml_name = 'gccxml' + '.exe'
            environment_var_delimiter = ';'
        elif sys.platform.startswith('linux') or sys.platform == 'darwin':
            # Python 3 reports 'linux' (the Python-2-era value was 'linux2'),
            # so match by prefix to support both.
            gccxml_name = 'gccxml'
            environment_var_delimiter = ':'
        else:
            raise RuntimeError('unable to find out location of gccxml')
        may_be_gccxml = os.path.join(self.gccxml_path, gccxml_name)
        if os.path.isfile(may_be_gccxml):
            self.gccxml_path = may_be_gccxml
        else:
            for path in os.environ['PATH'].split(environment_var_delimiter):
                gccxml_path = os.path.join(path, gccxml_name)
                if os.path.isfile(gccxml_path):
                    self.gccxml_path = gccxml_path
                    break
            else:
                msg = 'gccxml_path("%s") should exists or to be a valid file name.' \
                      % self.gccxml_path
                raise RuntimeError(msg)


config_t = gccxml_configuration_t  # backward compatibility
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>\n#\n\n\"\"\"\nModule implementing the Printer configuration page.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom .ConfigurationPageBase imp... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>\n#\n\n\"\"\"\nModule implementing the Printer configuration page.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom PyQt5.QtCore import pyqtSlot\n\nfrom .Configura... | ```python
# -*- coding: utf-8 -*-
# Copyright (c) 2006 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the Printer configuration page.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSlot
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_GraphicsPage import Ui_GraphicsPage
import Preferences
class GraphicsPage(ConfigurationPageBase, Ui_GraphicsPage):
    """
    Class implementing the Graphics configuration page.

    Lets the user choose the font used for graphics items; the current
    choice is previewed via the ``graphicsFontSample`` widget from the
    generated UI.  (The original docstring said "Printer", apparently a
    copy-paste leftover.)
    """
    def __init__(self):
        """
        Constructor
        """
        super(GraphicsPage, self).__init__()
        self.setupUi(self)
        self.setObjectName("GraphicsPage")

        # set initial values from the stored preferences
        self.graphicsFont = Preferences.getGraphics("Font")
        self.graphicsFontSample.setFont(self.graphicsFont)

    def save(self):
        """
        Public slot to save the Graphics configuration.
        """
        Preferences.setGraphics("Font", self.graphicsFont)

    @pyqtSlot()
    def on_graphicsFontButton_clicked(self):
        """
        Private method used to select the font for the graphics items.

        Updates both the stored font and the sample widget (selectFont is
        inherited from ConfigurationPageBase).
        """
        self.graphicsFont = self.selectFont(self.graphicsFontSample,
                                            self.graphicsFont)

    def polishPage(self):
        """
        Public slot to perform some polishing actions.

        Re-applies the chosen font to the sample widget.
        """
        self.graphicsFontSample.setFont(self.graphicsFont)
def create(dlg):
    """
    Module function to create the configuration page.

    @param dlg reference to the configuration dialog
    @return reference to the instantiated page (ConfigurationPageBase)
    """
    return GraphicsPage()
``` |
[
{
"content": "Here is the code block:\n```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tornado.web\nimport tornado.autoreload\nimport tornado\n\nimport os\nimport shutil\n\nfrom sky.crawler import crawl\nfrom sky.crawler.crawling import get_image_set\nfrom sky.configs import DEFAULT_CRAWL_C... | [
{
"content": "Here is the code block:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport tornado.web\nimport tornado.autoreload\nimport tornado\n\nimport os\nimport shutil\n\nfrom sky.crawler import crawl\nfrom sky.crawler.crawling import get_image_set\nfrom sky.configs import... | ```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tornado.web
import tornado.autoreload
import tornado
import os
import shutil
from sky.crawler import crawl
from sky.crawler.crawling import get_image_set
from sky.configs import DEFAULT_CRAWL_CONFIG
from sky.helper import extractDomain
from sky.scraper import Scraper
# from textblob import TextBlob
def is_numeric(x):
    """Return True if ``x`` can be converted to an int, else False.

    Also tolerates values whose conversion raises TypeError (e.g. ``None``
    or a list); the original only caught ValueError and would crash on
    those.
    """
    try:
        int(x)
    except (ValueError, TypeError):
        return False
    return True
class MainHandler(tornado.web.RequestHandler):
    """Serves the crawler UI: GET renders the empty form, POST runs a crawl."""

    def get(self):
        # Initial page load: render the template with no result items.
        self.render('page_template.html', items=[], cached=False)

    def post(self):
        """Run a crawl + scrape for the submitted URL and render the results.

        Form fields other than 'url' and 'checkboxcache' override entries of
        the crawl configuration; numeric strings are coerced to int, and
        list-valued settings are wrapped back into a one-element list.
        """
        # Copy the defaults so per-request overrides do not mutate the shared
        # module-level DEFAULT_CRAWL_CONFIG (the original aliased it, leaking
        # one request's settings into every later request).
        CRAWL_CONFIG = dict(DEFAULT_CRAWL_CONFIG)
        CRAWL_CONFIG.update({
            'collections_path': os.path.join(os.path.expanduser('~'), 'sky_collections/'),
            # 'max_workers': 10,
        })
        args = self.request.arguments
        print(args)
        for arg in args:
            value = args[arg][0].decode('utf8')
            if value and arg != 'url' and arg != 'checkboxcache':
                print('pre', arg, CRAWL_CONFIG[arg])
                if isinstance(CRAWL_CONFIG[arg], list):
                    CRAWL_CONFIG[arg] = [int(value)] if is_numeric(value) else [value]
                else:
                    CRAWL_CONFIG[arg] = int(value) if is_numeric(value) else value
                print('post', arg, CRAWL_CONFIG[arg])
        url = self.get_argument('url', '')
        use_cache = self.get_argument('checkboxcache', '')
        domain = extractDomain(url)
        CRAWL_CONFIG['seed_urls'] = [url]
        # Strip the leading scheme to get a bare collection name.
        # NOTE(review): domain[7:] assumes a 7-character 'http://' prefix;
        # an 'https://' result from extractDomain would be mangled - confirm.
        CRAWL_CONFIG['collection_name'] = domain[7:]
        if use_cache != 'on':
            # Cache disabled: wipe any previously crawled collection, then
            # crawl from scratch.
            col_path = os.path.join(CRAWL_CONFIG['collections_path'],
                                    CRAWL_CONFIG['collection_name'])
            print(col_path)
            if os.path.exists(col_path):
                shutil.rmtree(col_path)
            crawl.start(CRAWL_CONFIG)
        SCRAPE_CONFIG = CRAWL_CONFIG.copy()
        SCRAPE_CONFIG.update({
            'template_proportion': 0.4,
            'max_templates': 100
        })
        skindex = Scraper(SCRAPE_CONFIG)
        skindex.load_local_pages()
        skindex.add_template_elements()
        res = skindex.process_all(remove_visuals=True,
                                  maxn=CRAWL_CONFIG['max_saved_responses'])
        items = []
        for num, url in enumerate(res):
            if num == CRAWL_CONFIG['max_saved_responses']:
                break
            dc = res[url]
            dc['url'] = url
            dc['source_name'] = domain
            # Keep at most 5 images, in reverse order.
            dc['images'] = [x for x in reversed(dc['images'][:5])]
            # dc['blobs'] = [TextBlob(x) for x in dc['body'] if dc['body']]
            items.append(dc)
        # this is quite out of place like this
        print('num unique images', len(get_image_set({x['url']: x for x in items})))
        if items and 'money' in items[0]:
            items = sorted(items, key=lambda x: len(x['money']), reverse=True)
        self.render('page_template.html', items=items, cached=False)
settings = {
    'template_path': os.path.join(os.path.dirname(__file__), 'templates'),
    'static_path': os.path.join(os.path.dirname(__file__), 'static')
}


def main(host='localhost', port=7900):
    """Start the skyViewer Tornado application.

    :param host: interface to bind to
    :param port: TCP port to listen on (int or numeric string)
    """
    # to run the server, type-in $ python view.py
    application = tornado.web.Application([
        (r"/", MainHandler),
    ], **settings)
    application.listen(int(port), host)
    # Use the singleton accessor directly; the original wrote
    # `IOLoop().instance()`, constructing a throwaway IOLoop (which can
    # become the "current" loop as a side effect) before asking for the
    # singleton.
    ioloop = tornado.ioloop.IOLoop.instance()
    print('serving skyViewer at "{}:{}" from file: {}'.format(host, port, __file__))
    ioloop.start()


if __name__ == '__main__':
    main()
``` |
[
{
"content": "Replicate the source code:\n```python\n#pylint: disable=no-init,invalid-name\nfrom __future__ import (absolute_import, division, print_function)\n\nimport mantid.simpleapi as api\nfrom mantid.api import *\nfrom mantid.kernel import *\nimport os\nfrom reduction_workflow.find_data import find_data\n... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#pylint: disable=no-init,invalid-name\nfrom __future__ import (absolute_import, division, print_function)\n\nimport mantid.simpleapi as api\nfrom mantid.api import *\nfrom mantid.kernel import *\nimport os\nfrom reduction_workflow.find_data im... | ```python
#pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
import mantid.simpleapi as api
from mantid.api import *
from mantid.kernel import *
import os
from reduction_workflow.find_data import find_data
class SANSBeamSpreaderTransmission(PythonAlgorithm):
    """Workflow algorithm that computes a SANS transmission using the
    beam-spreader method (four measurements: sample/direct, each with and
    without the spreader) and applies the correction to InputWorkspace.
    Settings shared with the wider reduction are read from the property
    manager named by ReductionProperties.
    """
    def category(self):
        return "Workflow\\SANS\\UsesPropertyManager"

    def name(self):
        return "SANSBeamSpreaderTransmission"

    def summary(self):
        return "Compute transmission using the beam spreader method"

    def PyInit(self):
        """Declare input/output properties: the four measurement files,
        spreader transmission value/error, theta-dependence flag,
        dark-current options, and the reduction property manager name."""
        self.declareProperty(MatrixWorkspaceProperty("InputWorkspace", "",
                                                     direction=Direction.Input))
        self.declareProperty(FileProperty("SampleSpreaderFilename", "",
                                          action=FileAction.Load,
                                          extensions=['xml', 'nxs', 'nxs.h5']))
        self.declareProperty(FileProperty("DirectSpreaderFilename", "",
                                          action=FileAction.Load,
                                          extensions=['xml', 'nxs', 'nxs.h5']))
        self.declareProperty(FileProperty("SampleScatteringFilename", "",
                                          action=FileAction.Load,
                                          extensions=['xml', 'nxs', 'nxs.h5']))
        self.declareProperty(FileProperty("DirectScatteringFilename", "",
                                          action=FileAction.Load,
                                          extensions=['xml', 'nxs', 'nxs.h5']))
        self.declareProperty("SpreaderTransmissionValue", 1.0,
                             "Transmission of the beam spreader")
        self.declareProperty("SpreaderTransmissionError", 0.0,
                             "Error on the transmission of the beam spreader")
        self.declareProperty("ThetaDependent", True,
                             "If true, a theta-dependent correction will be applied")
        self.declareProperty(FileProperty("DarkCurrentFilename", "",
                                          action=FileAction.OptionalLoad,
                                          extensions=['xml', 'nxs', 'nxs.h5']))
        self.declareProperty("UseSampleDarkCurrent", False,
                             "If true, the sample dark current will be used")
        self.declareProperty("ReductionProperties", "__sans_reduction_properties",
                             validator=StringMandatoryValidator(),
                             doc="Property manager name for the reduction")
        self.declareProperty(MatrixWorkspaceProperty("OutputWorkspace", "",
                                                     direction=Direction.Output))
        self.declareProperty("MeasuredTransmission", 0.0,
                             direction=Direction.Output)
        self.declareProperty("MeasuredError", 0.0,
                             direction=Direction.Output)
        self.declareProperty("OutputMessage", "",
                             direction=Direction.Output, doc="Output message")

    def PyExec(self): # noqa: C901
        """Compute (or reuse a cached) transmission workspace, then apply
        the correction to a clone of InputWorkspace."""
        # Get the reduction property manager
        property_manager_name = self.getProperty("ReductionProperties").value
        property_manager = PropertyManagerDataService.retrieve(property_manager_name)

        # Build the name we are going to give the transmission workspace
        sample_scatt = self.getPropertyValue("SampleScatteringFilename")
        sample_basename = os.path.basename(sample_scatt)
        # NOTE(review): the cache key uses the full path while the workspace
        # name below uses only the basename -- confirm this asymmetry is intended.
        entry_name = "TransmissionSpreader%s" % sample_scatt
        trans_ws_name = "__transmission_fit_%s" % sample_basename
        trans_ws = None

        # If we have already computed the transmission, used the
        # previously computed workspace
        if property_manager.existsProperty(entry_name):
            # NOTE(review): this assigns the Property object itself rather
            # than its .value (as done elsewhere in this method) -- verify.
            trans_ws_name = property_manager.getProperty(entry_name)
            if AnalysisDataService.doesExist(trans_ws_name):
                trans_ws = AnalysisDataService.retrieve(trans_ws_name)

        # Get instrument to use with FileFinder
        instrument = ''
        if property_manager.existsProperty("InstrumentName"):
            instrument = property_manager.getProperty("InstrumentName").value

        # Get the data loader
        def _load_data(filename, output_ws):
            # Load `filename` into `output_ws` using the reduction's
            # configured load algorithm; returns its output message.
            if not property_manager.existsProperty("LoadAlgorithm"):
                Logger("SANSBeamSpreaderTransmission").error("SANS reduction not set up properly: missing load algorithm")
                raise RuntimeError("SANS reduction not set up properly: missing load algorithm")
            p=property_manager.getProperty("LoadAlgorithm")
            alg=Algorithm.fromString(p.valueAsStr)
            alg.setProperty("Filename", filename)
            alg.setProperty("OutputWorkspace", output_ws)
            if alg.existsProperty("ReductionProperties"):
                alg.setProperty("ReductionProperties", property_manager_name)
            alg.execute()
            msg = ''
            if alg.existsProperty("OutputMessage"):
                msg = alg.getProperty("OutputMessage").value
            return msg

        # Compute the transmission if we don't already have it
        if trans_ws is None:
            # Load data files
            sample_spreader_ws = "__trans_sample_spreader"
            direct_spreader_ws = "__trans_direct_spreader"
            sample_scatt_ws = "__trans_sample_scatt"
            direct_scatt_ws = "__trans_direct_scatt"

            sample_spread = self.getPropertyValue("SampleSpreaderFilename")
            direct_spread = self.getPropertyValue("DirectSpreaderFilename")
            direct_scatt = self.getPropertyValue("DirectScatteringFilename")

            ws_names = [[sample_spread, sample_spreader_ws],
                        [direct_spread, direct_spreader_ws],
                        [sample_scatt, sample_scatt_ws],
                        [direct_scatt, direct_scatt_ws]]

            for f in ws_names:
                filepath = find_data(f[0], instrument=instrument)
                _load_data(filepath, f[1])
                self._subtract_dark_current(f[1], property_manager)

            # Get normalization for transmission calculation
            monitor_det_ID = None
            if property_manager.existsProperty("TransmissionNormalisation"):
                sample_ws = AnalysisDataService.retrieve(sample_scatt_ws)
                if property_manager.getProperty("TransmissionNormalisation").value=="Monitor":
                    monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-monitor-spectrum")[0])
                else:
                    monitor_det_ID = int(sample_ws.getInstrument().getNumberParameter("default-incident-timer-spectrum")[0])
            elif property_manager.existsProperty("NormaliseAlgorithm"):
                def _normalise(workspace):
                    # Normalise `workspace` in place using the reduction's
                    # configured normalisation algorithm.
                    p=property_manager.getProperty("NormaliseAlgorithm")
                    alg=Algorithm.fromString(p.valueAsStr)
                    alg.setProperty("InputWorkspace", workspace)
                    alg.setProperty("OutputWorkspace", workspace)
                    if alg.existsProperty("ReductionProperties"):
                        alg.setProperty("ReductionProperties", property_manager_name)
                    alg.execute()
                    msg = ''
                    if alg.existsProperty("OutputMessage"):
                        msg += alg.getProperty("OutputMessage").value+'\n'
                    return msg
                for f in ws_names:
                    _normalise(f[1])

            # Calculate transmission. Use the reduction method's normalization channel (time or beam monitor)
            # as the monitor channel.
            spreader_t_value = self.getPropertyValue("SpreaderTransmissionValue")
            spreader_t_error = self.getPropertyValue("SpreaderTransmissionError")

            alg = AlgorithmManager.createUnmanaged('CalculateTransmissionBeamSpreader')
            alg.initialize()
            alg.setProperty("SampleSpreaderRunWorkspace", sample_spreader_ws)
            alg.setProperty("DirectSpreaderRunWorkspace", direct_spreader_ws)
            alg.setProperty("SampleScatterRunWorkspace", sample_scatt_ws)
            alg.setProperty("DirectScatterRunWorkspace", direct_scatt_ws)
            alg.setProperty("IncidentBeamMonitor", monitor_det_ID)
            alg.setProperty("OutputWorkspace",trans_ws_name)
            alg.setProperty("SpreaderTransmissionValue",spreader_t_value)
            alg.setProperty("SpreaderTransmissionError",spreader_t_error)
            alg.execute()

            trans_ws = AnalysisDataService.retrieve(trans_ws_name)

            # Clean up the intermediate measurement workspaces.
            for f in ws_names:
                if AnalysisDataService.doesExist(f[1]):
                    AnalysisDataService.remove(f[1])

        # 2- Apply correction (Note: Apply2DTransCorr)
        input_ws_name = self.getPropertyValue("InputWorkspace")
        if not AnalysisDataService.doesExist(input_ws_name):
            # NOTE(review): execution continues after this error is logged;
            # the retrieve() below will then raise -- confirm that is intended.
            Logger("SANSBeamSpreaderTransmission").error("Could not find input workspace")
        workspace = AnalysisDataService.retrieve(input_ws_name).name()

        # Clone workspace to make boost-python happy
        api.CloneWorkspace(InputWorkspace=workspace,
                           OutputWorkspace='__'+workspace)
        workspace = '__'+workspace

        self._apply_transmission(workspace, trans_ws_name)

        trans = trans_ws.dataY(0)[0]
        error = trans_ws.dataE(0)[0]

        output_str = ''
        if len(trans_ws.dataY(0))==1:
            self.setProperty("MeasuredTransmission", trans)
            self.setProperty("MeasuredError", error)
            # NOTE(review): "+=" in this message likely means "+-" -- verify
            # before changing, as it is a runtime string.
            output_str = "\n%s T = %6.2g += %6.2g\n" % (output_str, trans, error)
        output_msg = "Transmission correction applied [%s]%s\n" % (trans_ws_name, output_str)

        output_ws = AnalysisDataService.retrieve(workspace)
        self.setProperty("OutputWorkspace", output_ws)
        self.setPropertyValue("OutputMessage", output_msg)

    def _apply_transmission(self, workspace, trans_workspace):
        """
        Apply transmission correction
        @param workspace: workspace to apply correction to
        @param trans_workspace: workspace name of the transmission
        """
        # Make sure the binning is compatible
        api.RebinToWorkspace(WorkspaceToRebin=trans_workspace,
                             WorkspaceToMatch=workspace,
                             OutputWorkspace=trans_workspace+'_rebin',
                             PreserveEvents=False)
        # Apply angle-dependent transmission correction using the zero-angle transmission
        theta_dependent = self.getProperty("ThetaDependent").value

        api.ApplyTransmissionCorrection(InputWorkspace=workspace,
                                        TransmissionWorkspace=trans_workspace+'_rebin',
                                        OutputWorkspace=workspace,
                                        ThetaDependent=theta_dependent)

        # Remove the temporary rebinned transmission workspace.
        if AnalysisDataService.doesExist(trans_workspace+'_rebin'):
            AnalysisDataService.remove(trans_workspace+'_rebin')

    def _subtract_dark_current(self, workspace_name, property_manager):
        """
        Subtract the dark current
        @param workspace_name: name of the workspace to subtract from
        @param property_manager: property manager object
        """
        # Subtract dark current
        use_sample_dc = self.getProperty("UseSampleDarkCurrent").value
        dark_current_data = self.getPropertyValue("DarkCurrentFilename")
        property_manager_name = self.getProperty("ReductionProperties").value

        def _dark(workspace, dark_current_property):
            # Run the named dark-current algorithm (from the property
            # manager) on `workspace` in place; returns a status message.
            if property_manager.existsProperty(dark_current_property):
                p=property_manager.getProperty(dark_current_property)
                # Dark current subtraction for sample data
                alg=Algorithm.fromString(p.valueAsStr)
                alg.setProperty("InputWorkspace", workspace)
                alg.setProperty("OutputWorkspace", workspace)
                alg.setProperty("Filename", dark_current_data)
                if alg.existsProperty("PersistentCorrection"):
                    alg.setProperty("PersistentCorrection", False)
                if alg.existsProperty("ReductionProperties"):
                    alg.setProperty("ReductionProperties", property_manager_name)
                alg.execute()
                msg = "Dark current subtracted"
                if alg.existsProperty("OutputMessage"):
                    msg += alg.getProperty("OutputMessage").value
                return msg

        if use_sample_dc is True:
            _dark(workspace_name, "DarkCurrentAlgorithm")
        elif len(dark_current_data.strip())>0:
            _dark(workspace_name, "DefaultDarkCurrentAlgorithm")

#############################################################################################

AlgorithmFactory.subscribe(SANSBeamSpreaderTransmission)
``` |
[
{
"content": "```python\nimport logging\nimport time\nfrom typing import Tuple\nfrom urllib.parse import urlsplit, urljoin\n\nfrom bs4 import BeautifulSoup\n\nfrom feedrsub.feeds.feedfinder.feedinfo import FeedInfo\nfrom feedrsub.utils.requests_session import RequestsSession, requests_session\n\nlogger = loggin... | [
{
"content": "<|memory_start|>```python\nimport logging\nimport time\nfrom typing import Tuple\nfrom urllib.parse import urlsplit, urljoin\n\nfrom bs4 import BeautifulSoup\n\nfrom feedrsub.feeds.feedfinder.feedinfo import FeedInfo\nfrom feedrsub.utils.requests_session import RequestsSession, requests_session\n\... | ```python
import logging
import time
from typing import Tuple
from urllib.parse import urlsplit, urljoin
from bs4 import BeautifulSoup
from feedrsub.feeds.feedfinder.feedinfo import FeedInfo
from feedrsub.utils.requests_session import RequestsSession, requests_session
logger = logging.getLogger("feedfinder4")
def coerce_url(url: str) -> str:
    """Normalize *url* to an http(s) URL.

    Strips surrounding whitespace, rewrites the ``feed://`` scheme to
    ``http://``, leaves existing http/https URLs untouched, and prefixes
    bare hosts with ``https://``.
    """
    cleaned = url.strip()
    if cleaned.startswith("feed://"):
        return "http://" + cleaned[len("feed://"):]
    if cleaned.startswith(("http://", "https://")):
        return cleaned
    return "https://" + cleaned
def get_site_root(url: str) -> str:
    """
    Find the root domain of a url

    The URL is normalized with coerce_url() first so that urlsplit() sees a
    scheme and fills in netloc.  (A stray debug ``print(parsed)`` in the
    original was removed.)
    """
    url = coerce_url(url)
    parsed = urlsplit(url)
    return parsed.netloc
class FeedFinder:
    """Discovers RSS/Atom feeds on a page using a shared requests-style session."""

    def __init__(self, session, get_feed_info=False, timeout=(3.05, 10)):
        # session: requests-compatible session used for all HTTP fetches.
        self.session = session
        # When True, check_urls() also populates full FeedInfo metadata.
        self.get_feed_info = get_feed_info
        # (connect, read) timeout tuple passed to session.get().
        self.timeout = timeout

    def get_url(self, url: str):
        """Fetch *url*; return the response, or None on any request error."""
        try:
            r = self.session.get(url, timeout=self.timeout)
        except Exception as e:
            logger.warning(u"Error while getting URL: {0}, {1}".format(url, str(e)))
            return None
        return r

    @staticmethod
    def is_feed_data(text: str) -> bool:
        """Heuristic: *text* looks like feed XML (rss/rdf/atom), not HTML."""
        data = text.lower()
        if data.count("<html"):
            return False
        return bool(data.count("<rss") + data.count("<rdf") + data.count("<feed"))

    def is_feed(self, url: str) -> str:
        """Return the body of *url* if it is a feed, else an empty string."""
        response = self.get_url(url)
        if not response or not response.text or not self.is_feed_data(response.text):
            return ""
        return response.text

    @staticmethod
    def is_feed_url(url: str) -> bool:
        # URL ends with a typical feed file extension.
        return any(map(url.lower().endswith, [".rss", ".rdf", ".xml", ".atom"]))

    @staticmethod
    def is_feedlike_url(url: str) -> bool:
        # Looser check: URL merely *contains* a feed-ish token anywhere.
        return any(map(url.lower().count, ["rss", "rdf", "xml", "atom", "feed"]))

    def check_urls(self, urls: list) -> list:
        """Fetch each candidate URL and keep those that are actual feeds."""
        feeds = []
        for url in urls:
            url_text = self.is_feed(url)
            if url_text:
                feed = self.create_feed_info(url, url_text)
                feeds.append(feed)
        return feeds

    def create_feed_info(self, url: str, text: str) -> FeedInfo:
        """Wrap *url* in a FeedInfo, optionally enriching it with metadata."""
        info = FeedInfo(url)

        if self.get_feed_info:
            logger.info(u"Getting FeedInfo for {0}".format(url))
            info.get_info(text=text, soup=self.soup, finder=self)

        return info

    @property
    def soup(self) -> BeautifulSoup:
        # NOTE(review): raises AttributeError if accessed before
        # create_soup() has run -- confirm callers always parse first.
        return self.parsed_soup

    def create_soup(self, text: str) -> None:
        """Parse *text* and cache the soup for later access via ``soup``."""
        self.parsed_soup = BeautifulSoup(text, "html.parser")

    def search_links(self, url: str) -> list:
        """Return verified feeds advertised via <link> tags on the parsed page."""
        links = []
        for link in self.soup.find_all("link"):
            if link.get("type") in [
                "application/rss+xml",
                "text/xml",
                "application/atom+xml",
                "application/x.atom+xml",
                "application/x-atom+xml",
            ]:
                # hrefs may be relative; resolve against the page URL.
                links.append(urljoin(url, link.get("href", "")))

        return self.check_urls(links)

    def search_a_tags(self, url: str) -> Tuple[list, list]:
        """Split <a href> candidates into (local, remote) feed-like URLs.

        NOTE(review): the *url* parameter is currently unused here.
        """
        logger.info("Looking for <a> tags.")
        local, remote = [], []
        for a in self.soup.find_all("a"):
            href = a.get("href", None)
            if href is None:
                continue
            if "://" not in href and self.is_feed_url(href):
                local.append(href)
            if self.is_feedlike_url(href):
                remote.append(href)

        return local, remote
@requests_session()
def find_feeds(
    url: str,
    check_all: bool = False,
    get_feed_info: bool = False,
    timeout: tuple = (3.05, 10),
    **kwargs
) -> list:
    """Search *url* for RSS/RDF/Atom feeds and return them best-first.

    The search escalates through strategies: the page itself, <link> tags,
    <a> tags (same-site then remote), and finally guessed well-known feed
    paths.  Unless *check_all* is true, the first strategy that yields any
    feeds short-circuits the rest.

    Args:
        url: page or domain to probe; a scheme is added if missing.
        check_all: when True, keep searching even after feeds are found.
        get_feed_info: when True, fully populate each FeedInfo result.
        timeout: (connect, read) timeout for every HTTP request.
        **kwargs: carries the ``session`` injected by @requests_session.

    Returns:
        FeedInfo objects ordered by sort_urls() (highest score first).
    """
    finder = FeedFinder(
        kwargs.get("session"), get_feed_info=get_feed_info, timeout=timeout
    )

    # Format the URL properly.
    url = coerce_url(url)

    feeds = []

    start_time = time.perf_counter()

    # Download the requested URL
    logger.info("Finding feeds at URL: {0}".format(url))
    response = finder.get_url(url)

    search_time = int((time.perf_counter() - start_time) * 1000)
    logger.debug("Searched url in {0}ms".format(search_time))

    if not response or not response.text:
        return []

    text = response.text

    # Parse text with BeautifulSoup
    finder.create_soup(text)

    # Check if it is already a feed.
    if finder.is_feed_data(text):
        found = finder.create_feed_info(url, text)
        feeds.append(found)
        return feeds

    # Search for <link> tags
    logger.info("Looking for <link> tags.")
    found_links = finder.search_links(url)
    feeds.extend(found_links)
    logger.info("Found {0} feed <link> tags.".format(len(found_links)))

    search_time = int((time.perf_counter() - start_time) * 1000)
    logger.debug("Searched <link> tags in {0}ms".format(search_time))

    if len(feeds) and not check_all:
        return sort_urls(feeds, url)

    # Look for <a> tags.
    logger.info("Looking for <a> tags.")
    local, remote = finder.search_a_tags(url)

    # Check the local URLs.
    local = [urljoin(url, l) for l in local]
    found_local = finder.check_urls(local)
    feeds.extend(found_local)
    logger.info("Found {0} local <a> links to feeds.".format(len(found_local)))

    # Check the remote URLs.
    remote = [urljoin(url, l) for l in remote]
    found_remote = finder.check_urls(remote)
    feeds.extend(found_remote)
    logger.info("Found {0} remote <a> links to feeds.".format(len(found_remote)))

    search_time = int((time.perf_counter() - start_time) * 1000)
    logger.debug("Searched <a> links in {0}ms".format(search_time))

    if len(feeds) and not check_all:
        return sort_urls(feeds, url)

    # Guessing potential URLs.
    fns = ["atom.xml", "index.atom", "index.rdf", "rss.xml", "index.xml", "index.rss"]
    urls = list(urljoin(url, f) for f in fns)
    found_guessed = finder.check_urls(urls)
    feeds.extend(found_guessed)
    logger.info("Found {0} guessed links to feeds.".format(len(found_guessed)))

    search_time = int((time.perf_counter() - start_time) * 1000)
    logger.debug("Searched guessed urls in {0}ms".format(search_time))

    return sort_urls(feeds, url)
def url_feed_prob(url: str, original_url: str = None) -> int:
    """Score how likely *url* is to be the site's canonical feed.

    Higher is better.  Penalises off-domain feeds, comment/georss/"alt"
    feeds; rewards feed-ish keywords (earlier keywords weigh more) and
    https.  Removed a leftover per-URL debug print.

    Args:
        url: candidate feed URL.
        original_url: page the search started from; when given, feeds on
            a different domain are penalised.
    """
    score = 0
    if original_url:
        url_domain = get_site_root(url)
        original_domain = get_site_root(original_url)
        if url_domain not in original_domain:
            score -= 17
    if "comments" in url:
        score -= 15
    if "georss" in url:
        score -= 9
    if "alt" in url:
        score -= 7
    kw = ["rss", "atom", ".xml", "feed", "rdf"]
    # Position-weighted keywords: "rss" scores 10, "atom" 8, ... "rdf" 2.
    for points, keyword in zip(range(len(kw) * 2, 0, -2), kw):
        if keyword in url:
            score += points
    if url.startswith("https"):
        score += 9
    return score
def sort_urls(feeds, original_url=None):
    """Deduplicate *feeds* and order them best-first by url_feed_prob().

    Removed a leftover debug print of the unsorted feed list.
    """
    sorted_urls = sorted(
        set(feeds), key=lambda feed: url_feed_prob(feed.url, original_url),
        reverse=True
    )
    logger.info(u"Returning sorted URLs: {0}".format(sorted_urls))
    return sorted_urls
``` |
[
{
"content": "Here is a code file:\n```python\n\"\"\"\nhomeassistant.util.template\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTemplate utility methods for rendering strings with HA data.\n\"\"\"\n# pylint: disable=too-few-public-methods\nimport json\nimport logging\nimport jinja2\nfrom jinja2.sandbox import ImmutableSandb... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n\"\"\"\nhomeassistant.util.template\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nTemplate utility methods for rendering strings with HA data.\n\"\"\"\n# pylint: disable=too-few-public-methods\nimport json\nimport logging\nimport jinja2\nfrom jinja2.sandbox impor... | ```python
"""
homeassistant.util.template
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Template utility methods for rendering strings with HA data.
"""
# pylint: disable=too-few-public-methods
import json
import logging
import jinja2
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import STATE_UNKNOWN
from homeassistant.exceptions import TemplateError
_LOGGER = logging.getLogger(__name__)
# Unique default marker so callers can pass any value (even None) as a
# genuine error_value override.
_SENTINEL = object()
def render_with_possible_json_value(hass, template, value,
                                    error_value=_SENTINEL):
    """Render *template* with ``value`` exposed as a template variable.

    When *value* parses as JSON, the parsed form is also exposed as
    ``value_json``.  On a template error, returns *error_value* if one
    was supplied, otherwise the raw value.
    """
    variables = {'value': value}
    try:
        variables['value_json'] = json.loads(value)
    except ValueError:
        # Not JSON - only the plain value is exposed.
        pass

    try:
        return render(hass, template, variables)
    except TemplateError:
        _LOGGER.exception('Error parsing value')

    return error_value if error_value is not _SENTINEL else value
def render(hass, template, variables=None, **kwargs):
    """Render *template*, exposing HA state helpers plus any variables."""
    if variables is not None:
        kwargs.update(variables)

    template_globals = {
        'states': AllStates(hass),
        'is_state': hass.states.is_state,
    }
    try:
        compiled = ENV.from_string(template, template_globals)
        return compiled.render(kwargs).strip()
    except jinja2.TemplateError as err:
        raise TemplateError(err)
class AllStates(object):
    """Expose every HA state via attribute access, call and iteration."""

    def __init__(self, hass):
        self._hass = hass

    def __getattr__(self, name):
        # Unknown attributes resolve to per-domain wrappers.
        return DomainStates(self._hass, name)

    def __iter__(self):
        all_states = self._hass.states.all()
        return iter(sorted(all_states, key=lambda state: state.entity_id))

    def __call__(self, entity_id):
        state = self._hass.states.get(entity_id)
        if state is None:
            return STATE_UNKNOWN
        return state.state
class DomainStates(object):
    """Expose a single HA domain's entities as attributes and an iterator."""

    def __init__(self, hass, domain):
        self._hass = hass
        self._domain = domain

    def __getattr__(self, name):
        entity_id = '{}.{}'.format(self._domain, name)
        return self._hass.states.get(entity_id)

    def __iter__(self):
        domain = self._domain
        matching = [state for state in self._hass.states.all()
                    if state.domain == domain]
        return iter(sorted(matching, key=lambda state: state.entity_id))
def forgiving_round(value, precision=0):
    """Round *value* (string or number); return it unchanged if not numeric."""
    try:
        rounded = round(float(value), precision)
    except ValueError:
        # Not convertible to float - hand the original back untouched.
        return value
    return int(rounded) if precision == 0 else rounded
def multiply(value, amount):
    """Multiply *value* by *amount*; return *value* unchanged if not numeric."""
    try:
        factor = float(value)
    except ValueError:
        # Not convertible to float - pass the original value through.
        return value
    return factor * amount
class TemplateEnvironment(ImmutableSandboxedEnvironment):
    """Sandboxed Jinja environment used for Home Assistant templates."""

    def is_safe_callable(self, obj):
        # AllStates instances are deliberately callable from templates.
        if isinstance(obj, AllStates):
            return True
        return super().is_safe_callable(obj)
# Shared, pre-configured environment used by render() above.
ENV = TemplateEnvironment()
# Template filters usable as e.g. {{ value | round(2) }}.
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
``` |
[
{
"content": "Return the code exactly, with no changes:\n```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Joël Grand-guillaume (Camptocamp)\n# Copyright 2010 Camptocamp SA\n#\n# This program is free software: you can redist... | [
{
"content": "Return the code exactly, with no changes:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Author: Joël Grand-guillaume (Camptocamp)\n# Copyright 2010 Camptocamp SA\n#\n# This program is free software... | ```python
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joël Grand-guillaume (Camptocamp)
# Copyright 2010 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
``` |
[
{
"content": "Repeat the code exactly:\n```python\n# Default datasets for dungeon tiles.\nwalls =\t{\n\t'north' \t: True,\n\t'east'\t\t: True,\n\t'south'\t\t: True,\n\t'west'\t\t: True\n}\n\nentities = {\n\t'items'\t\t: [],\n\t'objects'\t: [],\n\t'enemies'\t: [],\n\t'npcs'\t\t: []\n}\n\n# Defines a series of ti... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n# Default datasets for dungeon tiles.\nwalls =\t{\n\t'north' \t: True,\n\t'east'\t\t: True,\n\t'south'\t\t: True,\n\t'west'\t\t: True\n}\n\nentities = {\n\t'items'\t\t: [],\n\t'objects'\t: [],\n\t'enemies'\t: [],\n\t'npcs'\t\t: []\n}\n\n# Define... | ```python
# Default datasets for dungeon tiles.
walls = {
    'north': True,
    'east': True,
    'south': True,
    'west': True
}

entities = {
    'items': [],
    'objects': [],
    'enemies': [],
    'npcs': []
}


# Defines a series of tiles with walls.
class Tile (object):
    """A dungeon tile: four walls, entity lists and text shown on entry."""

    def __init__(self, walls=walls, entities=entities, text=''):
        # Identifies an already-processed tile during maze generation.
        self.visited = False
        # Tile walls definitions, defined by a dictionary of booleans.
        self.wall_north = walls['north']
        self.wall_east = walls['east']
        self.wall_south = walls['south']
        self.wall_west = walls['west']
        # Defines if the tile is an entrance or exit.
        self.entrance = False
        self.exit = False
        # Lists of various entities on the tile.  Fix: copy the lists.
        # The defaults are shared module-level lists, so aliasing them
        # made every default-constructed tile (and the defaults
        # themselves) share one mutable list.
        self.items = list(entities['items'])
        self.objects = list(entities['objects'])
        self.enemies = list(entities['enemies'])
        self.npcs = list(entities['npcs'])
        # Text that displays when the player enters the tile.
        self.text = text

    # Removes walls during generation.
    def remove_wall(self, wall):
        if wall == 'north':
            self.wall_north = False
        elif wall == 'east':
            self.wall_east = False
        elif wall == 'south':
            self.wall_south = False
        elif wall == 'west':
            self.wall_west = False

    # Marks a tile as processed during generation.
    def visit(self):
        self.visited = True

    # Sets the tile as the entrance.
    def set_entrance(self):
        self.entrance = True

    # Sets the tile as the exit.
    def set_exit(self):
        self.exit = True

    # Sets a list of items on the tile.
    def set_items(self, items):
        self.items = items

    # Sets a list of interactable objects on the tile.
    def set_objects(self, objects):
        self.objects = objects

    # Sets a list of enemies on the tile.
    def set_enemies(self, enemies):
        self.enemies = enemies

    # Sets a list of npcs on the tile.
    def set_npcs(self, npcs):
        self.npcs = npcs

    # Text that displays as the player(s) enter the tile.
    def enter_text(self):
        out = ['You enter a dim corridor.']
        if self.exit:
            out.append('\nYou find yourself at the exit.')
        out = ''.join(out)
        return out

    def set_text(self, text):
        self.text = text
``` |
[
{
"content": "Return the code unaltered:\n```python\nimport threading\nimport json\nimport time\nimport queue\nimport gevent # pylint: disable=import-error\nfrom flask import Flask, render_template # pylint: disable=import-error\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, send # pylint: d... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\nimport threading\nimport json\nimport time\nimport queue\nimport gevent # pylint: disable=import-error\nfrom flask import Flask, render_template # pylint: disable=import-error\nfrom flask_socketio import SocketIO, emit, join_room, leave_room, ... | ```python
import threading
import json
import time
import queue
import gevent # pylint: disable=import-error
from flask import Flask, render_template # pylint: disable=import-error
from flask_socketio import SocketIO, emit, join_room, leave_room, send # pylint: disable=import-error
from Utils import *
from LCM import *
# Interface and port the SocketIO server binds to.
HOST_URL = "192.168.128.64" # "0.0.0.0"
PORT = 7000

#TODO work on this, new headers and deprecated headers.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'omegalul!'
socketio = SocketIO(app)

# Last reported master-robot team number per alliance (0 = none yet);
# updated by receiver() below.
master_robots = {ALLIANCE_COLOR.BLUE: 0, ALLIANCE_COLOR.GOLD:0}
@socketio.on('dawn-to-server-alliance-codes')
def ui_to_server_setup_match(alliance_codes):
    # Forward the alliance-code JSON received from Dawn to Shepherd via LCM.
    lcm_send(LCM_TARGETS.SHEPHERD, SHEPHERD_HEADER.CODE_APPLICATION, json.loads(alliance_codes))
def receiver():
    """Relay LCM events on the DAWN channel to connected browsers.

    Runs forever as a SocketIO background task: drains the LCM queue,
    re-emits robot-state/code events, maintains the per-alliance
    master_robots table, and broadcasts that table plus a heartbeat.
    """
    events = gevent.queue.Queue()
    # Subscribe to the DAWN LCM target; events arrive as JSON strings.
    lcm_start_read(str.encode(LCM_TARGETS.DAWN), events, put_json=True)
    while True:
        if not events.empty():
            event = events.get_nowait()
            eventDict = json.loads(event)
            print("RECEIVED:", event)
            if eventDict["header"] == DAWN_HEADER.ROBOT_STATE:
                socketio.emit(DAWN_HEADER.ROBOT_STATE, event)
            elif eventDict["header"] == DAWN_HEADER.CODES:
                socketio.emit(DAWN_HEADER.CODES, event)
            elif eventDict["header"] == DAWN_HEADER.RESET:
                # New match: forget both alliances' master robots.
                master_robots[ALLIANCE_COLOR.BLUE] = 0
                master_robots[ALLIANCE_COLOR.GOLD] = 0
            elif eventDict["header"] == DAWN_HEADER.MASTER:
                master_robots[eventDict["alliance"]] = int(eventDict["team_number"])
                # socketio.emit(DAWN_HEADER.MASTER, event)
                print(master_robots)
        # print({"alliance": ALLIANCE_COLOR.BLUE,
        #        "team_number": master_robots[ALLIANCE_COLOR.BLUE]})
        # print({"alliance": ALLIANCE_COLOR.GOLD,
        #        "team_number": master_robots[ALLIANCE_COLOR.GOLD]})
        # NOTE(review): indentation was lost in extraction; the broadcast,
        # heartbeat and sleep are assumed to sit at while-loop level so
        # they fire once a second -- confirm against the original file.
        socketio.emit(DAWN_HEADER.MASTER, json.dumps(master_robots))
        # socketio.emit(DAWN_HEADER.MASTER, json.dumps({"alliance": ALLIANCE_COLOR.BLUE,
        #               "team_number": master_robots[ALLIANCE_COLOR.BLUE]}))
        # socketio.emit(DAWN_HEADER.MASTER, json.dumps({"alliance": ALLIANCE_COLOR.GOLD,
        #               "team_number": master_robots[ALLIANCE_COLOR.GOLD]}))
        socketio.emit(DAWN_HEADER.HEARTBEAT, json.dumps({"heartbeat" : 1}))
        socketio.sleep(1)
# Pump LCM events to browsers in the background, then serve forever.
socketio.start_background_task(receiver)
socketio.run(app, host=HOST_URL, port=PORT)
``` |
[
{
"content": "```python\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport unittest\n\nfrom chrome_remote_control import browser_options\n\nclass BrowserOptionsTest(unittest.TestCase):\n... | [
{
"content": "<|memory_start|>```python\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\nimport unittest\n\nfrom chrome_remote_control import browser_options\n\nclass BrowserOptionsTest(unitt... | ```python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from chrome_remote_control import browser_options
class BrowserOptionsTest(unittest.TestCase):
    """Tests for BrowserOptions' integration with its optparse parser.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual`` throughout.
    """

    def testDefaults(self):
        options = browser_options.BrowserOptions()
        parser = options.CreateParser()
        parser.add_option('-x', action='store', default=3)
        parser.parse_args(['--browser', 'any'])
        self.assertEqual(options.x, 3)  # pylint: disable=E1101

    def testDefaultsPlusOverride(self):
        options = browser_options.BrowserOptions()
        parser = options.CreateParser()
        parser.add_option('-x', action='store', default=3)
        parser.parse_args(['--browser', 'any', '-x', 10])
        self.assertEqual(options.x, 10)  # pylint: disable=E1101

    def testDefaultsDontClobberPresetValue(self):
        options = browser_options.BrowserOptions()
        setattr(options, 'x', 7)
        parser = options.CreateParser()
        parser.add_option('-x', action='store', default=3)
        parser.parse_args(['--browser', 'any'])
        self.assertEqual(options.x, 7)  # pylint: disable=E1101

    def testCount0(self):
        options = browser_options.BrowserOptions()
        parser = options.CreateParser()
        parser.add_option('-x', action='count', dest='v')
        parser.parse_args(['--browser', 'any'])
        self.assertEqual(options.v, None)  # pylint: disable=E1101

    def testCount2(self):
        options = browser_options.BrowserOptions()
        parser = options.CreateParser()
        parser.add_option('-x', action='count', dest='v')
        parser.parse_args(['--browser', 'any', '-xx'])
        self.assertEqual(options.v, 2)  # pylint: disable=E1101

    def testOptparseMutabilityWhenSpecified(self):
        # parse_args must both return the options object and mutate it.
        options = browser_options.BrowserOptions()
        parser = options.CreateParser()
        parser.add_option('-x', dest='verbosity', action='store_true')
        options_ret, _ = parser.parse_args(['--browser', 'any', '-x'])
        self.assertEqual(options_ret, options)
        self.assertTrue(options.verbosity)

    def testOptparseMutabilityWhenNotSpecified(self):
        options = browser_options.BrowserOptions()
        parser = options.CreateParser()
        parser.add_option('-x', dest='verbosity', action='store_true')
        options_ret, _ = parser.parse_args(['--browser', 'any'])
        self.assertEqual(options_ret, options)
        self.assertFalse(options.verbosity)
``` |
[
{
"content": "Here is some code:\n```python\n############## errror code\n\n# killed\nEC_Kill = 100\n\n# transfer timeout\nEC_Transfer = 101\n\n# expire\nEC_Expire = 102\n\n# aborted\nEC_Aborted = 103\n\n# wait timeout\nEC_WaitTimeout = 104\n\n# reassigned by rebrokeage \nEC_Reassigned = 105\n\n# reassigned by s... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n############## errror code\n\n# killed\nEC_Kill = 100\n\n# transfer timeout\nEC_Transfer = 101\n\n# expire\nEC_Expire = 102\n\n# aborted\nEC_Aborted = 103\n\n# wait timeout\nEC_WaitTimeout = 104\n\n# reassigned by rebrokeage \nEC_Reassigned = 105\n\n#... | ```python
############## error code

# Numeric codes describing why a job/worker ended; grouped by range
# (1xx job outcomes incl. event service, 2xx job cloning, 3xx workers).

# killed
EC_Kill = 100

# transfer timeout
EC_Transfer = 101

# expire
EC_Expire = 102

# aborted
EC_Aborted = 103

# wait timeout
EC_WaitTimeout = 104

# reassigned by rebrokerage
EC_Reassigned = 105

# reassigned by server-side retry
EC_Retried = 106

# retried by pilot
EC_PilotRetried = 107

# lost file (=dataservice.ErrorCode.EC_LostFile)
EC_LostFile = 110

# retried for event service
EC_EventServiceRetried = 111

# merge for event service
EC_EventServiceMerge = 112

# merge job failed
EC_MergeFailed = 113

# max attempt reached for Event Service
EC_EventServiceMaxAttempt = 114

# do nothing since other consumers are still running
EC_EventServiceWaitOthers = 115

# killed since unused and unnecessary any more
EC_EventServiceUnused = 116

# didn't process any events on WN
EC_EventServiceUnprocessed = 117

# didn't process any events on WN and last consumer
EC_EventServiceLastUnprocessed = 118

# all event ranges failed
EC_EventServiceAllFailed = 119

# associated consumer generated ES merge
EC_EventServiceKillOK = 120

# associated consumer failed
EC_EventServiceKillNG = 121

# killed for preemption
EC_EventServicePreemption = 122

# retried but didn't process any events on WN
EC_EventServiceNoEvent = 123

# input files inconsistent with JEDI
EC_EventServiceInconsistentIn = 124

# No event service queues available for new consumers
EC_EventServiceNoEsQueues = 125

# Closed in bad job status
EC_EventServiceBadStatus = 126

# failed to lock semaphore for job cloning
EC_JobCloningUnlock = 200

# worker is done before job is done
EC_WorkerDone = 300
# file not found
class EC_NotFound:
    """Marker error class: the requested file does not exist."""
    pass
# file relocated
class EC_Redirect:
    """Marker error class: the file moved; fetch it from *url* instead."""
    def __init__(self,url):
        # Target location the caller should redirect to.
        self.url = url
``` |
[
{
"content": "```python\n# Copyright 2020 Tensorforce Team. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE... | [
{
"content": "<|memory_start|>```python\n# Copyright 2020 Tensorforce Team. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/... | ```python
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from tensorforce import TensorforceError
from tensorforce.core import tf_util
from tensorforce.core.parameters import Decaying
class Exponential(Decaying):
    """
    Exponentially decaying hyperparameter (specification key: `exponential`).

    Thin wrapper around `Decaying` that fixes the schedule to exponential.

    Args:
        unit ("timesteps" | "episodes" | "updates"): Unit of decay schedule
            (<span style="color:#C00000"><b>required</b></span>).
        num_steps (int): Number of decay steps
            (<span style="color:#C00000"><b>required</b></span>).
        initial_value (float): Initial value
            (<span style="color:#C00000"><b>required</b></span>).
        decay_rate (float): Decay rate
            (<span style="color:#C00000"><b>required</b></span>).
        staircase (bool): Whether to apply decay in a discrete staircase, as
            opposed to continuous, fashion
            (<span style="color:#00C000"><b>default</b></span>: false).
        name (string): <span style="color:#0000C0"><b>internal use</b></span>.
        dtype (type): <span style="color:#0000C0"><b>internal use</b></span>.
        min_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
        max_value (dtype-compatible value): <span style="color:#0000C0"><b>internal use</b></span>.
    """

    def __init__(
        self, *, unit, num_steps, initial_value, decay_rate, staircase=False, name=None, dtype=None,
        min_value=None, max_value=None, **kwargs
    ):
        # Everything is forwarded verbatim; only the decay kind is pinned.
        super().__init__(
            decay='exponential', unit=unit, num_steps=num_steps,
            initial_value=initial_value, decay_rate=decay_rate,
            staircase=staircase, name=name, dtype=dtype,
            min_value=min_value, max_value=max_value, **kwargs
        )
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\n'''Label\n=====\n\nThe :class:`Label` widget is for rendering text. It supports ascii and unicode\nstrings::\n\n # hello world text\n l = Label(text='Hello world')\n\n # unicode text; can only display glyphs that are avail... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\n'''Label\n=====\n\nThe :class:`Label` widget is for rendering text. It supports ascii and unicode\nstrings::\n\n # hello world text\n l = Label(text='Hello world')\n\n # unicode text; can only display glyph... | ```python
'''Label
=====
The :class:`Label` widget is for rendering text. It supports ascii and unicode
strings::
# hello world text
l = Label(text='Hello world')
# unicode text; can only display glyphs that are available in the font
l = Label(text=u'Hello world ' + unichr(2764))
# multiline text
l = Label(text='Multi\\nLine')
# size
l = Label(text='Hello world', font_size='20sp')
Text alignment and wrapping
---------------------------
The :class:`Label` has :attr:`halign` and :attr:`valign` properties to
control the alignment of its text, but by default these have no effect
and the text is always centered within the Label. This is for
efficiency; the text is aligned only within the pixel drawing of the
characters, which should normally be as small as possible to minimise
the number of pixels pushed to the GPU. By default, this text image is
only just large enough to contain the characters and is positioned in the
center of the Label.
In order for the alignment properties to take effect, the simplest
solution is to set the :attr:`text_size`, which specifies the size of
the bounding box within which text is aligned. For instance, the
following code binds this size to the size of the Label, so text will
be aligned within the widget bounds. This will also automatically wrap
the text of the Label to remain within this area.
.. code-block:: python
# in Python
from kivy.uix.label import Label
class MyLabel(Label):
pass
# in kv
<MyLabel>:
text_size: self.size
halign: 'right'
valign: 'middle'
Markup text
-----------
.. versionadded:: 1.1.0
You can change the style of the text using :doc:`api-kivy.core.text.markup`.
The syntax is similar to the bbcode syntax but only the inline styling is
allowed::
# hello world with world in bold
l = Label(text='Hello [b]World[/b]', markup=True)
# hello in red, world in blue
l = Label(text='[color=ff3333]Hello[/color][color=3333ff]World[/color]',
markup = True)
If you need to escape the markup from the current text, use
:func:`kivy.utils.escape_markup`::
text = 'This is an important message [1]'
l = Label(text='[b]' + escape_markup(text) + '[/b]', markup=True)
The following tags are available:
``[b][/b]``
Activate bold text
``[i][/i]``
Activate italic text
``[font=<str>][/font]``
Change the font
``[size=<integer>][/size]``
Change the font size
``[color=#<color>][/color]``
Change the text color
``[ref=<str>][/ref]``
Add an interactive zone. The reference + bounding box inside the
reference will be available in :attr:`Label.refs`
``[anchor=<str>]``
Put an anchor in the text. You can get the position of your anchor within
the text with :attr:`Label.anchors`
``[sub][/sub]``
Display the text at a subscript position relative to the text before it.
``[sup][/sup]``
Display the text at a superscript position relative to the text before it.
If you want to render the markup text with a [ or ] or & character, you need to
escape them. We created a simple syntax::
[ -> &bl;
] -> &br;
& -> &
Then you can write::
"[size=24]Hello &bl;World&br;[/size]"
Interactive Zone in Text
------------------------
.. versionadded:: 1.1.0
You can now have definable "links" using text markup. The idea is to be able
to detect when the user clicks on part of the text and to react.
The tag ``[ref=xxx]`` is used for that.
In this example, we are creating a reference on the word "World". When
this word is clicked, the function ``print_it`` will be called with the
name of the reference::
def print_it(instance, value):
print('User clicked on', value)
widget = Label(text='Hello [ref=world]World[/ref]', markup=True)
widget.bind(on_ref_press=print_it)
For prettier rendering, you could add a color for the reference. Replace the
``text=`` in the previous example with::
'Hello [ref=world][color=0000ff]World[/color][/ref]'
Usage example
-------------
The following example marks the anchors and references contained in a label::
from kivy.app import App
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.graphics import Color, Rectangle
class TestApp(App):
@staticmethod
def get_x(label, ref_x):
""" Return the x value of the ref/anchor relative to the canvas """
return label.center_x - label.texture_size[0] * 0.5 + ref_x
@staticmethod
def get_y(label, ref_y):
""" Return the y value of the ref/anchor relative to the canvas """
# Note the inversion of direction, as y values start at the top of
# the texture and increase downwards
return label.center_y + label.texture_size[1] * 0.5 - ref_y
def show_marks(self, label):
# Indicate the position of the anchors with a red top marker
for name, anc in label.anchors.items():
with label.canvas:
Color(1, 0, 0)
Rectangle(pos=(self.get_x(label, anc[0]),
self.get_y(label, anc[1])),
size=(3, 3))
# Draw a green surround around the refs. Note the sizes y inversion
for name, boxes in label.refs.items():
for box in boxes:
with label.canvas:
Color(0, 1, 0, 0.25)
Rectangle(pos=(self.get_x(label, box[0]),
self.get_y(label, box[1])),
size=(box[2] - box[0],
box[1] - box[3]))
def build(self):
label = Label(
text='[anchor=a]a\\nChars [anchor=b]b\\n[ref=myref]ref[/ref]',
markup=True)
Clock.schedule_once(lambda dt: self.show_marks(label), 1)
return label
TestApp().run()
'''
__all__ = ('Label', )
from functools import partial
from kivy.clock import Clock
from kivy.uix.widget import Widget
from kivy.core.text import Label as CoreLabel
from kivy.core.text.markup import MarkupLabel as CoreMarkupLabel
from kivy.properties import StringProperty, OptionProperty, \
NumericProperty, BooleanProperty, ReferenceListProperty, \
ListProperty, ObjectProperty, DictProperty
from kivy.utils import get_hex_from_color
class Label(Widget):
'''Label class, see module documentation for more information.
:Events:
`on_ref_press`
Fired when the user clicks on a word referenced with a
``[ref]`` tag in a text markup.
'''
__events__ = ['on_ref_press']
_font_properties = ('text', 'font_size', 'font_name', 'bold', 'italic',
'halign', 'valign', 'padding_x', 'padding_y',
'text_size', 'shorten', 'mipmap', 'markup',
'line_height', 'max_lines', 'strip', 'shorten_from',
'split_str', 'unicode_errors')
def __init__(self, **kwargs):
self._trigger_texture = Clock.create_trigger(self.texture_update, -1)
self._trigger_markup_color = partial(self._trigger_texture_update, 'color')
super(Label, self).__init__(**kwargs)
# bind all the property for recreating the texture
d = Label._font_properties
fbind = self.fast_bind
update = self._trigger_texture_update
for x in d:
fbind(x, update, x)
self._label = None
self._create_label()
# force the texture creation
self._trigger_texture()
def on_markup(self, inst, markup):
if markup:
self.fast_bind('color', self._trigger_markup_color)
else:
self.fast_unbind('color', self._trigger_markup_color)
def _create_label(self):
# create the core label class according to markup value
if self._label is not None:
cls = self._label.__class__
else:
cls = None
markup = self.markup
if (markup and cls is not CoreMarkupLabel) or \
(not markup and cls is not CoreLabel):
# markup have change, we need to change our rendering method.
d = Label._font_properties
dkw = dict(list(zip(d, [getattr(self, x) for x in d])))
if markup:
self._label = CoreMarkupLabel(**dkw)
else:
self._label = CoreLabel(**dkw)
def _trigger_texture_update(self, name=None, source=None, value=None):
# check if the label core class need to be switch to a new one
if name == 'markup':
self._create_label()
if source:
if name == 'text':
self._label.text = value
elif name == 'text_size':
self._label.usersize = value
elif name == 'font_size':
self._label.options[name] = value
else:
self._label.options[name] = value
self._trigger_texture()
def texture_update(self, *largs):
'''Force texture recreation with the current Label properties.
After this function call, the :attr:`texture` and :attr:`texture_size`
will be updated in this order.
'''
mrkup = self._label.__class__ is CoreMarkupLabel
self.texture = None
if (not self._label.text or (self.halign[-1] == 'y' or self.strip) and
not self._label.text.strip()):
self.texture_size = (0, 0)
if mrkup:
self.refs, self._label._refs = {}, {}
self.anchors, self._label._anchors = {}, {}
else:
if mrkup:
text = self.text
# we must strip here, otherwise, if the last line is empty,
# markup will retain the last empty line since it only strips
# line by line within markup
if self.halign[-1] == 'y' or self.strip:
text = text.strip()
self._label.text = ''.join(('[color=',
get_hex_from_color(self.color),
']', text, '[/color]'))
self._label.refresh()
# force the rendering to get the references
if self._label.texture:
self._label.texture.bind()
self.refs = self._label.refs
self.anchors = self._label.anchors
else:
self._label.refresh()
texture = self._label.texture
if texture is not None:
self.texture = self._label.texture
self.texture_size = list(self.texture.size)
def on_touch_down(self, touch):
if super(Label, self).on_touch_down(touch):
return True
if not len(self.refs):
return False
tx, ty = touch.pos
tx -= self.center_x - self.texture_size[0] / 2.
ty -= self.center_y - self.texture_size[1] / 2.
ty = self.texture_size[1] - ty
for uid, zones in self.refs.items():
for zone in zones:
x, y, w, h = zone
if x <= tx <= w and y <= ty <= h:
self.dispatch('on_ref_press', uid)
return True
return False
def on_ref_press(self, ref):
pass
#
# Properties
#
disabled_color = ListProperty([1, 1, 1, .3])
'''Text color, in the format (r, g, b, a)
.. versionadded:: 1.8.0
:attr:`disabled_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, .5].
'''
text = StringProperty('')
'''Text of the label.
Creation of a simple hello world::
widget = Label(text='Hello world')
If you want to create the widget with an unicode string, use::
widget = Label(text=u'My unicode string')
:attr:`text` is a :class:`~kivy.properties.StringProperty` and defaults to
''.
'''
text_size = ListProperty([None, None])
'''By default, the label is not constrained to any bounding box.
You can set the size constraint of the label with this property.
The text will autoflow into the constrains. So although the font size
will not be reduced, the text will be arranged to fit into the box as best
as possible, with any text still outside the box clipped.
This sets and clips :attr:`texture_size` to text_size if not None.
.. versionadded:: 1.0.4
For example, whatever your current widget size is, if you want the label to
be created in a box with width=200 and unlimited height::
Label(text='Very big big line', text_size=(200, None))
.. note::
This text_size property is the same as the
:attr:`~kivy.core.text.Label.usersize` property in the
:class:`~kivy.core.text.Label` class. (It is named size= in the
constructor.)
:attr:`text_size` is a :class:`~kivy.properties.ListProperty` and
defaults to (None, None), meaning no size restriction by default.
'''
font_name = StringProperty('DroidSans')
'''Filename of the font to use. The path can be absolute or relative.
Relative paths are resolved by the :func:`~kivy.resources.resource_find`
function.
.. warning::
Depending of your text provider, the font file can be ignored. However,
you can mostly use this without problems.
If the font used lacks the glyphs for the particular language/symbols
you are using, you will see '[]' blank box characters instead of the
actual glyphs. The solution is to use a font that has the glyphs you
need to display. For example, to display |unicodechar|, use a font such
as freesans.ttf that has the glyph.
.. |unicodechar| image:: images/unicode-char.png
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'DroidSans'.
'''
font_size = NumericProperty('15sp')
'''Font size of the text, in pixels.
:attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and
defaults to 15sp.
'''
line_height = NumericProperty(1.0)
'''Line Height for the text. e.g. line_height = 2 will cause the spacing
between lines to be twice the size.
:attr:`line_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 1.0.
.. versionadded:: 1.5.0
'''
bold = BooleanProperty(False)
'''Indicates use of the bold version of your font.
.. note::
Depending of your font, the bold attribute may have no impact on your
text rendering.
:attr:`bold` is a :class:`~kivy.properties.BooleanProperty` and defaults to
False.
'''
italic = BooleanProperty(False)
'''Indicates use of the italic version of your font.
.. note::
Depending of your font, the italic attribute may have no impact on your
text rendering.
:attr:`italic` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
padding_x = NumericProperty(0)
'''Horizontal padding of the text inside the widget box.
:attr:`padding_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
.. versionchanged:: 1.9.0
`padding_x` has been fixed to work as expected.
In the past, the text was padded by the negative of its values.
'''
padding_y = NumericProperty(0)
'''Vertical padding of the text inside the widget box.
:attr:`padding_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
.. versionchanged:: 1.9.0
`padding_y` has been fixed to work as expected.
In the past, the text was padded by the negative of its values.
'''
padding = ReferenceListProperty(padding_x, padding_y)
'''Padding of the text in the format (padding_x, padding_y)
:attr:`padding` is a :class:`~kivy.properties.ReferenceListProperty` of
(:attr:`padding_x`, :attr:`padding_y`) properties.
'''
halign = OptionProperty('left', options=['left', 'center', 'right',
'justify'])
'''Horizontal alignment of the text.
:attr:`halign` is an :class:`~kivy.properties.OptionProperty` and
defaults to 'left'. Available options are : left, center, right and
justify.
.. warning::
This doesn't change the position of the text texture of the Label
(centered), only the position of the text in this texture. You probably
want to bind the size of the Label to the :attr:`texture_size` or set a
:attr:`text_size`.
.. versionchanged:: 1.6.0
A new option was added to :attr:`halign`, namely `justify`.
'''
valign = OptionProperty('bottom', options=['bottom', 'middle', 'top'])
'''Vertical alignment of the text.
:attr:`valign` is an :class:`~kivy.properties.OptionProperty` and defaults
to 'bottom'. Available options are : bottom, middle and top.
.. warning::
This doesn't change the position of the text texture of the Label
(centered), only the position of the text within this texture. You
probably want to bind the size of the Label to the :attr:`texture_size`
or set a :attr:`text_size` to change this behavior.
'''
color = ListProperty([1, 1, 1, 1])
'''Text color, in the format (r, g, b, a)
:attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
[1, 1, 1, 1].
'''
texture = ObjectProperty(None, allownone=True)
'''Texture object of the text.
The text is rendered automatically when a property changes. The OpenGL
texture created in this operation is stored in this property. You can use
this :attr:`texture` for any graphics elements.
Depending on the texture creation, the value will be a
:class:`~kivy.graphics.texture.Texture` or
:class:`~kivy.graphics.texture.TextureRegion` object.
.. warning::
The :attr:`texture` update is scheduled for the next frame. If you need
the texture immediately after changing a property, you have to call
the :meth:`texture_update` method before accessing :attr:`texture`::
l = Label(text='Hello world')
# l.texture is good
l.font_size = '50sp'
# l.texture is not updated yet
l.texture_update()
# l.texture is good now.
:attr:`texture` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
texture_size = ListProperty([0, 0])
'''Texture size of the text. The size is determined by the font size and
text. If :attr:`text_size` is [None, None], the texture will be the size
required to fit the text, otherwise it's clipped to fit :attr:`text_size`.
When :attr:`text_size` is [None, None], one can bind to texture_size
and rescale it proportionally to fit the size of the label in order to
make the text fit maximally in the label.
.. warning::
The :attr:`texture_size` is set after the :attr:`texture`
property. If you listen for changes to :attr:`texture`,
:attr:`texture_size` will not be up-to-date in your callback.
Bind to :attr:`texture_size` instead.
'''
mipmap = BooleanProperty(False)
'''Indicates whether OpenGL mipmapping is applied to the texture or not.
Read :ref:`mipmap` for more information.
.. versionadded:: 1.0.7
:attr:`mipmap` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
shorten = BooleanProperty(False)
'''
Indicates whether the label should attempt to shorten its textual contents
as much as possible if a :attr:`text_size` is given. Setting this to True
without an appropriately set :attr:`text_size` will lead to unexpected
results.
:attr:`shorten_from` and :attr:`split_str` control the direction from
which the :attr:`text` is split, as well as where in the :attr:`text` we
are allowed to split.
:attr:`shorten` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
shorten_from = OptionProperty('center', options=['left', 'center',
'right'])
'''The side from which we should shorten the text from, can be left,
right, or center.
For example, if left, the ellipsis will appear towards the left side and we
will display as much text starting from the right as possible. Similar to
:attr:`shorten`, this option only applies when :attr:`text_size` [0] is
not None, In this case, the string is shortened to fit within the specified
width.
.. versionadded:: 1.9.0
:attr:`shorten_from` is a :class:`~kivy.properties.OptionProperty` and
defaults to `center`.
'''
split_str = StringProperty('')
'''The string used to split the :attr:`text` while shortening the string
when :attr:`shorten` is True.
For example, if it's a space, the string will be broken into words and as
many whole words that can fit into a single line will be displayed. If
:attr:`shorten_from` is the empty string, `''`, we split on every character
fitting as much text as possible into the line.
.. versionadded:: 1.9.0
:attr:`split_str` is a :class:`~kivy.properties.StringProperty` and
defaults to `''` (the empty string).
'''
unicode_errors = OptionProperty(
'replace', options=('strict', 'replace', 'ignore'))
'''How to handle unicode decode errors. Can be `'strict'`, `'replace'` or
`'ignore'`.
.. versionadded:: 1.9.0
:attr:`unicode_errors` is an :class:`~kivy.properties.OptionProperty` and
defaults to `'replace'`.
'''
markup = BooleanProperty(False)
'''
.. versionadded:: 1.1.0
If True, the text will be rendered using the
:class:`~kivy.core.text.markup.MarkupLabel`: you can change the
style of the text using tags. Check the
:doc:`api-kivy.core.text.markup` documentation for more information.
:attr:`markup` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
refs = DictProperty({})
'''
.. versionadded:: 1.1.0
List of ``[ref=xxx]`` markup items in the text with the bounding box of
all the words contained in a ref, available only after rendering.
For example, if you wrote::
Check out my [ref=hello]link[/ref]
The refs will be set with::
{'hello': ((64, 0, 78, 16), )}
The references marked "hello" have a bounding box at (x1, y1, x2, y2).
These co-ordinates are relative to the top left corner of the text, with
the y value increasing downwards. You can define multiple refs with the same
name: each occurence will be added as another (x1, y1, x2, y2) tuple to
this list.
The current Label implementation uses these references if they exist in
your markup text, automatically doing the collision with the touch and
dispatching an `on_ref_press` event.
You can bind a ref event like this::
def print_it(instance, value):
print('User click on', value)
widget = Label(text='Hello [ref=world]World[/ref]', markup=True)
widget.on_ref_press(print_it)
.. note::
This works only with markup text. You need :attr:`markup` set to
True.
'''
anchors = DictProperty({})
'''
.. versionadded:: 1.1.0
Position of all the ``[anchor=xxx]`` markup in the text.
These co-ordinates are relative to the top left corner of the text, with
the y value increasing downwards. Anchors names should be unique and only
the first occurence of any duplicate anchors will be recorded.
You can place anchors in your markup text as follows::
text = """
[anchor=title1][size=24]This is my Big title.[/size]
[anchor=content]Hello world
"""
Then, all the ``[anchor=]`` references will be removed and you'll get all
the anchor positions in this property (only after rendering)::
>>> widget = Label(text=text, markup=True)
>>> widget.texture_update()
>>> widget.anchors
{"content": (20, 32), "title1": (20, 16)}
.. note::
This works only with markup text. You need :attr:`markup` set to
True.
'''
max_lines = NumericProperty(0)
'''Maximum number of lines to use, defaults to 0, which means unlimited.
Please note that :attr:`shorten` take over this property. (with
shorten, the text is always one line.)
.. versionadded:: 1.8.0
:attr:`max_lines` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
strip = BooleanProperty(False)
'''Whether leading and trailing spaces and newlines should be stripped from
each displayed line. If True, every line will start at the right or left
edge, depending on :attr:`halign`. If :attr:`halign` is `justify` it is
implicitly True.
.. versionadded:: 1.9.0
:attr:`strip` is a :class:`~kivy.properties.BooleanProperty` and
defaults to False.
'''
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\nimport sys\nfrom osgbench import *\n\ndef usage():\n print \"usage.\"\n return -1\n\ndef main(argv=sys.argv):\n if len(argv) != 2:\n return usage()\n\n filename = argv[1]\n\n print \"Loading %s...\" % filename\n scene =... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\nimport sys\nfrom osgbench import *\n\ndef usage():\n print \"usage.\"\n return -1\n\ndef main(argv=sys.argv):\n if len(argv) != 2:\n return usage()\n\n filename = argv[1]\n\n print \"Loading %s...\" % filen... | ```python
import sys
from osgbench import *
def usage():
print "usage."
return -1
def main(argv=sys.argv):
if len(argv) != 2:
return usage()
filename = argv[1]
print "Loading %s..." % filename
scene = loadScene(filename)
print "Traversing..."
travscene = scene.clone()
g = createGraphOp("GeoType")
g.traverse(travscene)
print "Running test..."
win = TestWindow()
win.open()
test = Test()
test.setWindow(win)
test.setScene(travscene)
test.setNFrames(200)
test.clear()
test.addFov(42)
test.makeOrbit(0, 1, 0)
test.run()
print "trav FPS:", test.getFPS()
print "Snapshot"
timage = test.snapshot(100)
print "done"
test.setScene(scene)
test.run()
print "Non-Trav FPS:", test.getFPS()
print "Snapshot"
image = test.snapshot(100)
print "done"
dimage = image.clone()
dimage.diff(timage)
dimage.write("diffimage.png")
image.write("image1.png")
timage.write("image2.png")
win.close()
if __name__ == '__main__':
sys.exit(main() or 0)
``` |
[
{
"content": "Replicate the source code:\n```python\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# ... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the L... | ```python
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the paragraph reconstruction model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from bert import modeling
import tensorflow.compat.v1 as tf
from tensorflow.contrib import seq2seq as contrib_seq2seq
class FixedSizeInferenceHelper(contrib_seq2seq.InferenceHelper):
"""Feeds in the output of the decoder at each step for fixed size."""
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for TrainingHelper."""
return (finished, sample_ids, state)
def create_model(model,
labels,
decoder_inputs,
batch_size,
model_type="decode",
sep_positions=None):
"""Creates a classification model.
Args:
model: the BERT model from modeling.py
labels: ground truth paragraph order
decoder_inputs: the input to the decoder if used
batch_size: the batch size
model_type: one of decode, pooled, attn
sep_positions: (optional) for "pooled" indecies of SEP tokens
Returns:
tuple of (loss, per_example_loss, logits, probabilities) for model
"""
output_layer = model.get_pooled_output()
hidden_size = output_layer.shape[-1].value
tpu_batch_size = tf.shape(output_layer)[0]
num_labels = 5 # GOOGLE-INTERNAL TODO(daniter) this shouldn't be hardcoded
with tf.variable_scope("paragraph_reconstruct"):
if model_type == "decode":
lstm_cell = tf.nn.rnn_cell.LSTMCell(
num_units=hidden_size, use_peepholes=True, state_is_tuple=True)
def sample_fn(x):
return tf.to_float(tf.reshape(tf.argmax(x, axis=-1), (-1, 1)))
helper = FixedSizeInferenceHelper(
sample_fn=sample_fn,
sample_shape=[1],
sample_dtype=tf.float32,
start_inputs=decoder_inputs[:, 0],
end_fn=None)
# Decoder
project_layer = tf.layers.Dense(
num_labels, use_bias=False, name="output_projection")
my_decoder = contrib_seq2seq.BasicDecoder(
lstm_cell,
helper,
tf.nn.rnn_cell.LSTMStateTuple(output_layer, output_layer),
output_layer=project_layer)
# Dynamic decoding
outputs, _, _ = contrib_seq2seq.dynamic_decode(
my_decoder,
swap_memory=True,
scope="paragraph_reconstruct",
maximum_iterations=5)
logits = outputs.rnn_output
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
per_example_loss = cross_ent
loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
probabilities = tf.nn.softmax(logits, axis=-1)
# GOOGLE-INTERAL: TODO(daniter) currently neither of these actually train
elif model_type == "pooled":
token_embeddings = model.get_sequence_output()
# sep positions come out batch by batch so we need to add the batch index
# we do that explicitly here since we don't know the batch size in the
# record decoder
batch_idx = tf.range(tpu_batch_size)
batch_idx = tf.reshape(batch_idx, [tpu_batch_size, 1])
batch_idx = tf.tile(batch_idx, [1, 5]) # double check
batch_idx = tf.reshape(batch_idx, [tpu_batch_size, 5, 1])
# batch_idx = tf.Print(batch_idx, [batch_idx],
# message="batch_idx", summarize=999999)
sep_positions = tf.concat([batch_idx, sep_positions], axis=2)
# sep_positions = tf.Print(sep_positions, [sep_positions],
# message="sep_positions", summarize=999999)
sep_vecs = tf.gather_nd(token_embeddings, sep_positions)
sep_vecs = tf.reshape(sep_vecs, [tpu_batch_size, 5, hidden_size])
# sep_vecs = tf.Print(sep_vecs, [sep_vecs], message="sep_vecs",
# summarize=999999)
logits = tf.layers.dense(
inputs=sep_vecs, units=num_labels, name="output_projection")
# logits = tf.Print(logits, [logits], message="logits", summarize=999999)
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
per_example_loss = cross_ent
loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
probabilities = tf.nn.softmax(logits, axis=-1)
elif model_type == "attn":
# change size to match sequence embedding size
input_consts = tf.constant([0, 1, 2, 3, 4])
position_encoding = tf.broadcast_to(input_consts, [tpu_batch_size, 5])
# position_encoding = tf.to_float(
# tf.reshape(position_encoding, (-1, 5, 1)))
token_type_table = tf.get_variable(
name="attention_embedding",
shape=[5, 512], # don't hardcode
initializer=tf.truncated_normal_initializer(stddev=0.02))
# This vocab will be small so we always do one-hot here, since it is
# always faster for a small vocabulary.
flat_token_type_ids = tf.reshape(position_encoding, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=5)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[tpu_batch_size, 5, 512])
token_embeddings = model.get_sequence_output()
attn = modeling.attention_layer(token_type_embeddings, token_embeddings)
attn = tf.reshape(attn, (-1, 5, 512)) # head size
logits = tf.layers.dense(
inputs=attn, units=num_labels, name="output_projection")
cross_ent = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
per_example_loss = cross_ent
loss = tf.reduce_sum(cross_ent) / tf.to_float(batch_size)
probabilities = tf.nn.softmax(logits, axis=-1)
return (loss, per_example_loss, logits, probabilities)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
``` |
[
{
"content": "Repeat the code exactly:\n```python\n#! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python\n'''\nGeneric plotting script for PYTHONRT\n\n'''\n\n\nimport read_output as rd\nimport os, sys\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cobra_su... | [
{
"content": "Repeat the code exactly:\n<|memory_start|>```python\n#! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python\n'''\nGeneric plotting script for PYTHONRT\n\n'''\n\n\nimport read_output as rd\nimport os, sys\nimport matplotlib.pyplot as plt\nimport numpy as np\... | ```python
#! /Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python
'''
Generic plotting script for PYTHONRT
'''
import read_output as rd
import os, sys
import matplotlib.pyplot as plt
import numpy as np
import cobra_sub as sub
rd.setpars()
def strip(character, string):
''' strip a character from a string'''
new_string = ""
for s in string:
if s != character:
new_string += s
return new_string
def plot_spec (filename, lmin, lmax, smooth = 1, nobs = 0, use = [], \
savename = "fig", yscale = "linear", xscale = "linear" , \
sources = False, Fnu = False):
'''
Function for plotting a spec file outputted from the radiative transfer code PYTHONRT
:INPUT:
filename string
name of file
lmin, lmax float
wavelength range in ANGSTROMS
nobs int
number of observes
smooth int
smoothing factor
use array
which observations to use
savename string
yscale, xscale string
lin or log scale
sources Bool
Plot sources or not
Fnu Bool
Is it an Fnu plot?
:OUTPUT:
Creates plot and opens in preview
'''
# default savename is filename
if savename == "fig":
savename = filename + ".png"
# create spec class from spec file
spec = rd.read_spec_file(filename)
if nobs == 0:
nobs = len(spec.spec)
# strip filenames of funny characters that TeX complains about
savename = strip("_", savename)
filename = strip("_", filename)
# default argument is to plot all observations
if len(use) == 0:
use = np.arange(nobs)
nuse = int(len(use))
# work out the dimensions of the plot
if nuse < 3:
ny = nuse
nx = 1
else:
nx = 2
ny = (len(use) + 1) / 2
# do we want to smooth? if so, do it!
if smooth > 1:
for i in use:
sub.smooth_spectrum( spec, smooth )
# now create figure
fig=plt.figure(figsize=(8.3,11.7),dpi=80)
fig.suptitle(filename,fontsize=24,fontweight='bold')
fig.subplots_adjust(hspace=0.3,wspace=0.2)
for i in range(nuse):
ax = fig.add_subplot( ny, nx, i)
if Fnu:
ax.plot(spec.freq, spec.spec[use[i]])
else:
ax.plot(spec.wavelength, spec.spec[use[i]])
ax.set_yscale(yscale)
ax.set_xscale(xscale)
plt.xlim(lmin, lmax)
plt.savefig(savename)
command = "open -a preview %s" % savename
os.system(command)
if sources:
fig=plt.figure(figsize=(8.3,11.7),dpi=80)
fig.suptitle(filename,fontsize=24,fontweight='bold')
fig.subplots_adjust(hspace=0.3,wspace=0.2)
return 0
filename = sys.argv[1]
nobs = int(sys.argv[2])
plot_spec(filename, 3000, 7000, smooth = 20, yscale = "log")
``` |
[
{
"content": "Repeat the code precisely:\n```python\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file ... | ```python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
import time
import sys
from TestUtils import TestUtilsMixin, ACCUMULO_HOME, SITE, ROOT, ROOT_PASSWORD, INSTANCE_NAME, ZOOKEEPERS
table='testTable'
count=str(10000)
min=str(0)
max=str(99999)
valueSize=str(100)
memory=str(1<<20)
latency=str(1000)
numThreads=str(4)
visibility='A|B'
auths='A,B'
log = logging.getLogger('test.auto')
class Examples(TestUtilsMixin, unittest.TestCase):
    "Start a clean accumulo, run the examples"
    # Ordering hint used by the auto-test runner to sequence test cases.
    order = 21

    def runExample(self, cmd):
        # Run an example class via the accumulo launcher on the master host;
        # fail the test if the process exits non-zero.
        self.assert_(self.wait(self.runOn(self.masterHost(), [self.accumulo_sh(),] + cmd)), "example exited with error status.")

    def ashell(self, input, expected = 0):
        # Feed *input* to the accumulo shell on the master host, assert the
        # exit code equals *expected*, and return the shell's stdout.
        out, err, code = self.shell(self.masterHost(), input + '\n')
        self.assert_(code == expected)
        return out

    def comment(self, description):
        # Log a visually separated banner so long test output is scannable.
        LINE = '-'*40
        log.info(LINE)
        log.info(description)
        log.info(LINE)

    def execute(self, *cmd):
        # Run *cmd* locally and require a zero exit status.
        self.assert_(self.wait(self.runOn('localhost', cmd)), "command exited with error status.")

    def executeExpectFail(self, *cmd):
        # Run *cmd* locally and require a NON-zero exit status.
        self.assert_(not self.wait(self.runOn('localhost', cmd)), "command did not exit with error status and we expected it to.")

    def executeIgnoreFail(self, *cmd):
        # Run *cmd* locally, ignoring its exit status (used for cleanup).
        self.wait(self.runOn('localhost', cmd))

    def runTest(self):
        # End-to-end tour of the bundled Accumulo examples: constraints,
        # dirlist, filters, bloom filters, sharding, bulk ingest, map/reduce
        # jobs and the simple client examples. Steps are order-dependent.
        examplesJar = os.path.join(ACCUMULO_HOME, 'lib', 'accumulo-examples-simple.jar')
        self.comment("Testing MaxMutation constraint")
        self.ashell('createtable test_ingest\n'
                    'constraint -a org.apache.accumulo.examples.simple.constraints.MaxMutationSize\n')
        # An oversized mutation must be rejected by the constraint.
        handle = self.runOn('localhost', [self.accumulo_sh(), 'org.apache.accumulo.test.TestIngest', '-u', ROOT, '--rows', '1', '--start', '0', '--cols', '10000', '-p', ROOT_PASSWORD])
        out, err = handle.communicate()
        self.failIf(handle.returncode==0)
        self.failUnless(err.find("MutationsRejectedException: # constraint violations : 1") >= 0, "Was able to insert a mutation larger than max size")
        self.ashell('createtable %s\nsetauths -u %s -s A,B\nquit\n' %(table, ROOT))
        self.comment("Testing dirlist example (a little)")
        self.comment(" ingesting accumulo source")
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.dirlist.Ingest',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD,
                     '--dirTable', 'dirTable',
                     '--indexTable', 'indexTable',
                     '--dataTable', 'dataTable',
                     '--vis', visibility,
                     '--chunkSize', 100000,
                     ACCUMULO_HOME+"/test")
        self.comment(" searching for a file")
        handle = self.runOn('localhost', [self.accumulo_sh(), 'org.apache.accumulo.examples.simple.dirlist.QueryUtil',
                                          '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD,
                                          '-t', 'indexTable', '--auths', auths, '--search', '--path', 'examples.py'])
        out, err = handle.communicate()
        self.assert_(handle.returncode == 0)
        self.assert_(out.find('test/system/auto/simple/examples.py') >= 0)
        self.comment(" found file at " + out)
        self.comment("Testing ageoff filtering")
        # Insert a cell, scan, wait past the 5000 ms age-off, scan again; the
        # 'foo' row should appear exactly twice in the combined output.
        out = self.ashell("createtable filtertest\n"
                          "setiter -t filtertest -scan -p 10 -n myfilter -ageoff\n"
                          "\n"
                          "5000\n"
                          "\n"
                          "insert foo a b c\n"
                          "scan\n"
                          "sleep 5\n"
                          "scan\n")
        self.assert_(2 == len([line for line in out.split('\n') if line.find('foo') >= 0]))
        self.comment("Testing bloom filters are fast for missing data")
        self.ashell('createtable bloom_test\nconfig -t bloom_test -s table.bloom.enabled=true\n')
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.client.RandomBatchWriter', '--seed', '7',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bloom_test',
                     '--num', '1000000', '--min', '0', '--max', '1000000000', '--size', '50', '--batchMemory', '2M', '--batchLatency', '60s',
                     '--batchThreads', '3')
        self.ashell('flush -t bloom_test -w\n')
        # Time a scan for data that exists (seed 7) vs data that does not
        # (seed 8); the bloom filter should make the missing-data scan faster.
        now = time.time()
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.client.RandomBatchScanner', '--seed', '7',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bloom_test',
                     '--num', '500', '--min', '0', '--max', '1000000000', '--size', '50', '--scanThreads', 4)
        diff = time.time() - now
        now = time.time()
        self.executeExpectFail(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.client.RandomBatchScanner', '--seed', '8',
                               '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bloom_test',
                               '--num', '500', '--min', '0', '--max', '1000000000', '--size', '50', '--scanThreads', 4)
        diff2 = time.time() - now
        self.assert_(diff2 < diff)
        self.comment("Creating a sharded index of the accumulo java files")
        self.ashell('createtable shard\ncreatetable doc2term\nquit\n')
        self.execute('/bin/sh', '-c',
                     'find %s/examples -name "*.java" | xargs %s/bin/accumulo org.apache.accumulo.examples.simple.shard.Index -i %s -z %s -t shard -u %s -p %s --partitions 30' %
                     (ACCUMULO_HOME, ACCUMULO_HOME, INSTANCE_NAME, ZOOKEEPERS, ROOT, ROOT_PASSWORD))
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.shard.Query',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-t', 'shard', '-u', ROOT, '-p', ROOT_PASSWORD,
                     'foo', 'bar')
        self.comment("Creating a word index of the sharded files")
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.shard.Reverse',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '--shardTable', 'shard', '--doc2Term', 'doc2term', '-u', ROOT, '-p', ROOT_PASSWORD)
        self.comment("Making 1000 conjunctive queries of 5 random words")
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.shard.ContinuousQuery',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '--shardTable', 'shard', '--doc2Term', 'doc2term', '-u', ROOT, '-p', ROOT_PASSWORD, '--terms', 5, '--count', 1000)
        # Clean HDFS work directories from any previous run (best effort).
        self.executeIgnoreFail('hadoop', 'fs', '-rmr', "tmp/input", "tmp/files", "tmp/splits.txt", "tmp/failures")
        self.execute('hadoop', 'fs', '-mkdir', "tmp/input")
        self.comment("Starting bulk ingest example")
        self.comment(" Creating some test data")
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.mapreduce.bulk.GenerateTestData', '--start-row', 0, '--count', 1000000, '--output', 'tmp/input/data')
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.mapreduce.bulk.SetupTable',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bulkTable')
        self.execute(ACCUMULO_HOME+'/bin/tool.sh', examplesJar, 'org.apache.accumulo.examples.simple.mapreduce.bulk.BulkIngestExample',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bulkTable', '--inputDir', 'tmp/input', '--workDir', 'tmp')
        self.execute(self.accumulo_sh(), 'org.apache.accumulo.examples.simple.mapreduce.bulk.VerifyIngest',
                     '-i', INSTANCE_NAME, '-z', ZOOKEEPERS, '-u', ROOT, '-p', ROOT_PASSWORD, '-t', 'bulkTable', '--start-row', 0, '--count', 1000000)
        self.wait(self.runOn(self.masterHost(), [
            'hadoop', 'fs', '-rmr', "tmp/tableFile", "tmp/nines"
            ]))
        self.comment("Running TeraSortIngest for a million rows")
        self.ashell('createtable sorted\nquit\n')
        # 10,000 times smaller than the real terasort
        ROWS = 1000*1000
        self.wait(self.runOn(self.masterHost(), [
            ACCUMULO_HOME+'/bin/tool.sh',
            examplesJar,
            'org.apache.accumulo.examples.simple.mapreduce.TeraSortIngest',
            '--count', ROWS,
            '-nk', 10, '-xk', 10,
            '-nv', 78, '-xv', 78,
            '-t', 'sorted',
            '-i', INSTANCE_NAME,
            '-z', ZOOKEEPERS,
            '-u', ROOT,
            '-p', ROOT_PASSWORD,
            '--splits', 4]))
        self.comment("Looking for '999' in all rows")
        self.wait(self.runOn(self.masterHost(), [
            ACCUMULO_HOME+'/bin/tool.sh',
            examplesJar,
            'org.apache.accumulo.examples.simple.mapreduce.RegexExample',
            '-i', INSTANCE_NAME,
            '-z', ZOOKEEPERS,
            '-u', ROOT,
            '-p', ROOT_PASSWORD,
            '-t', 'sorted',
            '--rowRegex', '.*999.*',
            '--output', 'tmp/nines']))
        self.comment("Generating hashes of each row")
        self.wait(self.runOn(self.masterHost(), [
            ACCUMULO_HOME+'/bin/tool.sh',
            examplesJar,
            'org.apache.accumulo.examples.simple.mapreduce.RowHash',
            '-i', INSTANCE_NAME,
            '-z', ZOOKEEPERS,
            '-u', ROOT,
            '-p', ROOT_PASSWORD,
            '-t', 'sorted',
            '--column', ':',
            ]))
        self.comment("Exporting the table to HDFS")
        self.wait(self.runOn(self.masterHost(), [
            ACCUMULO_HOME+'/bin/tool.sh',
            examplesJar,
            'org.apache.accumulo.examples.simple.mapreduce.TableToFile',
            '-i', INSTANCE_NAME,
            '-z', ZOOKEEPERS,
            '-u', ROOT,
            '-p', ROOT_PASSWORD,
            '-t', 'sorted',
            '--output', 'tmp/tableFile'
            ]))
        self.comment("Running WordCount using Accumulo aggregators")
        self.wait(self.runOn(self.masterHost(), [
            'hadoop', 'fs', '-rmr', "tmp/wc"
            ]))
        self.wait(self.runOn(self.masterHost(), [
            'hadoop', 'fs', '-mkdir', "tmp/wc"
            ]))
        self.wait(self.runOn(self.masterHost(), [
            'hadoop', 'fs', '-copyFromLocal', ACCUMULO_HOME + "/README", "tmp/wc/Accumulo.README"
            ]))
        self.ashell('createtable wordCount\nsetiter -scan -majc -minc -p 10 -n sum -class org.apache.accumulo.core.iterators.user.SummingCombiner\n\ncount\n\nSTRING\nquit\n')
        self.wait(self.runOn(self.masterHost(), [
            ACCUMULO_HOME+'/bin/tool.sh',
            examplesJar,
            'org.apache.accumulo.examples.simple.mapreduce.WordCount',
            '-i', INSTANCE_NAME,
            '-z', ZOOKEEPERS,
            '-u', ROOT,
            '-p', ROOT_PASSWORD,
            '--input', 'tmp/wc',
            # NOTE(review): the shell above created 'wordCount' but this job
            # writes to 'wctable' — verify which table name is intended.
            '-t', 'wctable'
            ]))
        self.comment("Inserting data with a batch writer")
        self.runExample(['org.apache.accumulo.examples.simple.helloworld.InsertWithBatchWriter',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-t', 'helloBatch',
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD])
        self.comment("Reading data")
        self.runExample(['org.apache.accumulo.examples.simple.helloworld.ReadData',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-t', 'helloBatch',
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD])
        self.comment("Running isolated scans")
        self.runExample(['org.apache.accumulo.examples.simple.isolation.InterferenceTest',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '-t', 'itest1',
                         '--iterations', 100000,
                         '--isolated'])
        self.comment("Running scans without isolation")
        self.runExample(['org.apache.accumulo.examples.simple.isolation.InterferenceTest',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '-t', 'itest2',
                         '--iterations', 100000])
        self.comment("Using some example constraints")
        # The second insert violates NumericValueConstraint, so the shell is
        # expected to exit with status 1.
        self.ashell('\n'.join([
            'createtable testConstraints',
            'constraint -t testConstraints -a org.apache.accumulo.examples.simple.constraints.NumericValueConstraint',
            'constraint -t testConstraints -a org.apache.accumulo.examples.simple.constraints.AlphaNumKeyConstraint',
            'insert r1 cf1 cq1 1111',
            'insert r1 cf1 cq1 ABC',
            'scan',
            'quit'
            ]), 1)
        self.comment("Performing some row operations")
        self.runExample(['org.apache.accumulo.examples.simple.client.RowOperations',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD ])
        self.comment("Using the batch writer")
        self.runExample(['org.apache.accumulo.examples.simple.client.SequentialBatchWriter',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '-t', table,
                         '--start', min,
                         '--num', count,
                         '--size', valueSize,
                         '--batchMemory', memory,
                         '--batchLatency', latency,
                         '--batchThreads', numThreads,
                         '--vis', visibility])
        self.comment("Reading and writing some data")
        self.runExample(['org.apache.accumulo.examples.simple.client.ReadWriteExample',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '--auths', auths,
                         '--table', table,
                         '-c',
                         '--debug'])
        self.comment("Deleting some data")
        self.runExample(['org.apache.accumulo.examples.simple.client.ReadWriteExample',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         # NOTE(review): '-auths' differs from the '--auths'
                         # spelling used in the read/write call above —
                         # confirm ReadWriteExample accepts the one-dash form.
                         '-auths', auths,
                         '--table', table,
                         '-d',
                         '--debug'])
        self.comment("Writing some random data with the batch writer")
        self.runExample(['org.apache.accumulo.examples.simple.client.RandomBatchWriter',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '-t', table,
                         '--seed','5',
                         '--num', count,
                         '--min', min,
                         '--max', max,
                         '--size', valueSize,
                         '--batchMemory', memory,
                         '--batchLatency', latency,
                         '--batchThreads', numThreads,
                         '--vis', visibility])
        # NOTE(review): banner below duplicates the previous one; this step
        # actually scans (RandomBatchScanner) the data written with seed 5.
        self.comment("Writing some random data with the batch writer")
        self.runExample(['org.apache.accumulo.examples.simple.client.RandomBatchScanner',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '-t', table,
                         '--seed','5',
                         '--num', count,
                         '--min', min,
                         '--max', max,
                         '--size', valueSize,
                         '--scanThreads', numThreads,
                         '--auths', auths]);
        self.comment("Running an example table operation (Flush)")
        self.runExample(['org.apache.accumulo.examples.simple.client.Flush',
                         '-i', INSTANCE_NAME,
                         '-z', ZOOKEEPERS,
                         '-u', ROOT,
                         '-p', ROOT_PASSWORD,
                         '-t', table])
        self.shutdown_accumulo();
def suite():
    """Build and return the suite containing the Examples test case."""
    tests = unittest.TestSuite()
    tests.addTest(Examples())
    return tests
``` |
[
{
"content": "Produce an exact reconstruction of the code:\n```python\nimport httpretty\n\nfrom paystackapi.tests.base_test_case import BaseTestCase\nfrom paystackapi.transfer import Transfer\n\n\nclass TestTransfer(BaseTestCase):\n\n @httpretty.activate\n def test_initiate(self):\n \"\"\"Method de... | [
{
"content": "Produce an exact reconstruction of the code:\n<|memory_start|>```python\nimport httpretty\n\nfrom paystackapi.tests.base_test_case import BaseTestCase\nfrom paystackapi.transfer import Transfer\n\n\nclass TestTransfer(BaseTestCase):\n\n @httpretty.activate\n def test_initiate(self):\n ... | ```python
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.transfer import Transfer
class TestTransfer(BaseTestCase):
    """Exercises the Transfer API wrapper against canned HTTP responses."""

    def _register(self, http_method, path, body):
        # Stub the Paystack endpoint at *path* with a fixed JSON *body*.
        httpretty.register_uri(
            http_method,
            self.endpoint_url(path),
            content_type='text/json',
            body=body,
            status=201,
        )

    @httpretty.activate
    def test_initiate(self):
        """Method defined to test transfer initiation."""
        self._register(
            httpretty.POST,
            "/transfer",
            '{"status": true, "message": "Transfer requires OTP to continue"}',
        )
        response = Transfer.initiate(
            source="balance",
            reason="Calm down",
            amount="3794800",
            recipient="RCP_gx2wn530m0i3w3m",
        )
        self.assertTrue(response['status'])

    @httpretty.activate
    def test_list(self):
        """Method defined to test transfer list."""
        self._register(
            httpretty.GET,
            "/transfer",
            '{"status": true, "message": "Transfers retrieved"}',
        )
        response = Transfer.list(
            perPage=3,
            page=1,
        )
        self.assertTrue(response['status'])

    @httpretty.activate
    def test_fetch(self):
        """Method defined to test transfer fetch."""
        self._register(
            httpretty.GET,
            "/transfer/TRF_2x5j67tnnw1t98k",
            '{"status": true, "message": "Transfers retrieved"}',
        )
        response = Transfer.fetch(
            id_or_code="TRF_2x5j67tnnw1t98k",
        )
        self.assertTrue(response['status'])

    @httpretty.activate
    def test_finalize(self):
        """Method defined to test transfer finalize."""
        self._register(
            httpretty.POST,
            "/transfer/finalize_transfer",
            '{"status": true, "message": "Transfer has been queued"}',
        )
        response = Transfer.finalize(
            transfer_code="TRF_2x5j67tnnw1t98k",
            otp="928783",
        )
        self.assertTrue(response['status'])

    @httpretty.activate
    def test_initiate_bulk_transfer(self):
        """Method defined to test transfer initiate bulk transfer."""
        self._register(
            httpretty.POST,
            "/transfer/bulk",
            '{"status": true, "message": "2 transfers queued."}',
        )
        response = Transfer.initiate_bulk_transfer(
            currency="TRF_2x5j67tnnw1t98k",
            source="928783",
            transfers=[
                {
                    "amount": 50000,
                    "recipient": "RCP_db342dvqvz9qcrn"
                },
                {
                    "amount": 50000,
                    "recipient": "RCP_db342dvqvz9qcrn"
                }
            ],
        )
        self.assertTrue(response['status'])

    @httpretty.activate
    def test_verify(self):
        """Method defined to test transfer verify."""
        self._register(
            httpretty.GET,
            "/verify/ref_demo",
            '{"status": true, "message": "Transfer has been queued"}',
        )
        response = Transfer.verify(
            reference="ref_demo",
        )
        self.assertTrue(response['status'])
``` |
[
{
"content": "Repeat the following code:\n```python\n# -*- coding: utf-8 -*-\n# vim: set ts=4\n\n# Copyright 2016 Rémi Duraffort\n# This file is part of ReactOBus.\n#\n# ReactOBus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published ... | [
{
"content": "Repeat the following code:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# vim: set ts=4\n\n# Copyright 2016 Rémi Duraffort\n# This file is part of ReactOBus.\n#\n# ReactOBus is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public Licen... | ```python
# -*- coding: utf-8 -*-
# vim: set ts=4
# Copyright 2016 Rémi Duraffort
# This file is part of ReactOBus.
#
# ReactOBus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ReactOBus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with ReactOBus. If not, see <http://www.gnu.org/licenses/>
import logging
import multiprocessing
from setproctitle import setproctitle
import zmq
LOG = logging.getLogger("ROB.core")
class Core(multiprocessing.Process):
    """Forwarding hub: pulls messages from *inbound* and republishes each
    one on *outbound*."""

    def __init__(self, inbound, outbound):
        super().__init__()
        # Only the endpoint URLs are stored here; the sockets themselves are
        # created in run(), i.e. inside the child process.
        self.inbound = inbound
        self.outbound = outbound

    def _bind_sockets(self):
        # Create and bind the PULL (inbound) and PUB (outbound) sockets.
        self.context = zmq.Context.instance()
        self.pull = self.context.socket(zmq.PULL)
        LOG.debug("Binding inbound (%s)", self.inbound)
        self.pull.bind(self.inbound)
        self.pub = self.context.socket(zmq.PUB)
        # Set 0 limit on input and output HWM
        self.pub.setsockopt(zmq.SNDHWM, 0)
        LOG.debug("Binding outbound (%s)", self.outbound)
        self.pub.bind(self.outbound)

    def run(self):
        setproctitle("ReactOBus [core]")
        self._bind_sockets()
        # TODO: use a proxy
        while True:
            message = self.pull.recv_multipart()
            LOG.debug(message)
            # Publish to all outputs
            self.pub.send_multipart(message)
``` |
[
{
"content": "Here is the script:\n```python\n# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n \n # Adding model 'Household'\n db.create_tabl... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n# encoding: utf-8\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n \n # Adding model 'Household'\n ... | ```python
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial schema for the ``people`` app: Household, Person, Address.

    Auto-generated by South; the frozen ORM snapshot in ``models`` below
    must stay in sync with the table definitions in ``forwards``.
    """

    def forwards(self, orm):
        """Apply the migration: create the three ``people`` tables."""
        # Adding model 'Household'
        db.create_table('people_household', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=150)),
            ('status', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)),
            ('anniversary', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('barcode', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('notes', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('first_visit', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
        ))
        db.send_create_signal('people', ['Household'])

        # Adding model 'Person'
        db.create_table('people_person', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('household', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['people.Household'])),
            ('fname', self.gf('django.db.models.fields.CharField')(max_length=150)),
            ('mname', self.gf('django.db.models.fields.CharField')(max_length=150, null=True, blank=True)),
            ('lname', self.gf('django.db.models.fields.CharField')(max_length=150)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
            ('gender', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)),
            ('role', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)),
            ('bdate', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('ddate', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
            ('allergies', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal('people', ['Person'])

        # Adding model 'Address'
        db.create_table('people_address', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('household', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['people.Household'])),
            ('address1', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('address2', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('state', self.gf('django.contrib.localflavor.us.models.USStateField')(max_length=2)),
            ('zipcode', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('atype', self.gf('django.db.models.fields.CharField')(default='ns', max_length=10)),
            ('notes', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal('people', ['Address'])

    def backwards(self, orm):
        """Reverse the migration: drop the three ``people`` tables."""
        # Deleting model 'Household'
        db.delete_table('people_household')

        # Deleting model 'Person'
        db.delete_table('people_person')

        # Deleting model 'Address'
        db.delete_table('people_address')

    # Frozen ORM snapshot South uses to construct the ``orm`` argument above.
    models = {
        'people.address': {
            'Meta': {'ordering': "('address1',)", 'object_name': 'Address'},
            'address1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'atype': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Household']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notes': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'state': ('django.contrib.localflavor.us.models.USStateField', [], {'max_length': '2'}),
            'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        'people.household': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Household'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'anniversary': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'barcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'first_visit': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'})
        },
        'people.person': {
            'Meta': {'ordering': "('lname', 'fname')", 'object_name': 'Person'},
            'allergies': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'bdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'ddate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'fname': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'gender': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'}),
            'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['people.Household']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lname': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'mname': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'default': "'ns'", 'max_length': '10'})
        }
    }

    complete_apps = ['people']
``` |
[
{
"content": "```python\nimport numpy as np\nfrom keras.layers.core import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import RMSprop\nfrom ..value_function import ValueFunction\nfrom ...game.utils import normalize_board\n\n\nclass MLP(ValueFunction):\n\n def __init__(self):\n se... | [
{
"content": "<|memory_start|>```python\nimport numpy as np\nfrom keras.layers.core import Dense\nfrom keras.models import Sequential\nfrom keras.optimizers import RMSprop\nfrom ..value_function import ValueFunction\nfrom ...game.utils import normalize_board\n\n\nclass MLP(ValueFunction):\n\n def __init__(se... | ```python
import numpy as np
from keras.layers.core import Dense
from keras.models import Sequential
from keras.optimizers import RMSprop
from ..value_function import ValueFunction
from ...game.utils import normalize_board
class MLP(ValueFunction):
    """Small feed-forward network approximating the state-action value.

    Input is a 42-element normalized board vector; output is a single
    tanh-bounded value.
    """

    def __init__(self):
        network = Sequential()
        network.add(Dense(150, input_dim=42, init='lecun_uniform', activation='tanh'))
        network.add(Dense(1, init='lecun_uniform', activation='tanh'))
        network.compile(loss='mse', optimizer=RMSprop())
        self.model = network

    def update(self, state, value):
        """Fit the network one step toward *value* for the given *state*."""
        features = normalize_board(state.board)
        target = np.array([[value]])
        self.model.fit(features, target, batch_size=1, nb_epoch=1, verbose=0)

    ##################
    # Value Function #
    ##################

    def __getitem__(self, state_action):
        """Return the predicted value of taking *action* in *state*.

        Evaluates the network on the successor position reached by applying
        the move to a copy of the state.
        """
        state, action = state_action
        successor = state.copy().make_move(action)
        features = normalize_board(successor.board)
        prediction = self.model.predict(np.array([features]), batch_size=1)
        return prediction[0][0]
``` |
[
{
"content": "Here is a code file:\n```python\n#!/usr/bin/env python3\n# Copyright (c) 2014-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test the RPC HTTP basics.\"\"\"\n\nfrom t... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\n#!/usr/bin/env python3\n# Copyright (c) 2014-2016 The Bitcoin Core developers\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test the RPC HTTP basics.... | ```python
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
from test_framework.test_framework import StatusquoTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class HTTPBasicsTest (StatusquoTestFramework):
    """Checks HTTP/1.1 behaviour of the RPC server: persistent connections,
    keep-alive handling and rejection of oversized request lines.
    """

    def __init__(self):
        super().__init__()
        # Three nodes: node0 with defaults, node1 started with keep-alive
        # disabled, node2 with standard keep-alive parameters (see run_test).
        self.num_nodes = 3
        self.setup_clean_chain = False

    def setup_network(self):
        # No custom topology required; just start the nodes.
        self.setup_nodes()

    def run_test(self):
        #################################################
        # lowlevel check for http persistent connection #
        #################################################
        url = urllib.parse.urlparse(self.nodes[0].url)
        authpair = url.username + ':' + url.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}

        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1) #must also response with a correct json-rpc message
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        conn.close()

        #same should be if we add keep-alive because this should be the std. behaviour
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        #send 2nd request without closing connection
        conn.request('POST', '/', '{"method": "getchaintips"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1) #must also response with a correct json-rpc message
        assert(conn.sock!=None) #according to http/1.1 connection must still be open!
        conn.close()

        #now do the same with "Connection: close"
        headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
        conn = http.client.HTTPConnection(url.hostname, url.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock==None) #now the connection must be closed after the response

        #node1 (2nd node) is running with disabled keep-alive option
        # NOTE(review): no socket-state assertion here — only that the
        # request itself succeeds; confirm whether closure should be checked.
        urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
        authpair = urlNode1.username + ':' + urlNode1.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)

        #node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
        urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
        authpair = urlNode2.username + ':' + urlNode2.password
        headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
        out1 = conn.getresponse().read()
        assert(b'"error":null' in out1)
        assert(conn.sock!=None) #connection must still be open because statusquod uses keep-alive by default

        # Check excessive request size
        # A 1000-char path fits in the line limit and yields 404 (not found);
        # a 10000-char path exceeds it and yields 400 (bad request).
        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*1000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.NOT_FOUND)

        conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
        conn.connect()
        conn.request('GET', '/' + ('x'*10000), '', headers)
        out1 = conn.getresponse()
        assert_equal(out1.status, http.client.BAD_REQUEST)
# Allow running this test module directly.
if __name__ == '__main__':
    HTTPBasicsTest ().main ()
``` |
[
{
"content": "```python\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nimport numpy as np\nfrom os import listdir, getcwd\nfrom os import chdir\nfrom PIL import Image\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspe... | [
{
"content": "<|memory_start|>```python\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nimport numpy as np\nfrom os import listdir, getcwd\nfrom os import chdir\nfrom PIL import Image\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.gri... | ```python
"""Render the max-pooling-layer visualisation images in a single 1x128 grid."""
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# NOTE(review): the following imports are unused in this script; kept in case
# other tooling relies on them — consider removing.
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
from os import listdir, getcwd
from os import chdir
import matplotlib.image as mimage
from matplotlib.backends.backend_pdf import PdfPages
from PIL import Image

# Directory holding the visualisations of the third max-pooling layer.
files = listdir('CNN_run2/Visualisations_w_folders/max_pooling_3')
chdir('CNN_run2/Visualisations_w_folders/max_pooling_3')
# Convert each image to greyscale-with-alpha ('LA') for display.
images = [Image.open(f).convert('LA') for f in files]

num_rows = 1
num_cols = 128

fig = plt.figure()
gs = gridspec.GridSpec(num_rows, num_cols, wspace=0.0)
# zip() stops at the shorter sequence, so a folder with fewer than
# num_rows*num_cols images no longer raises IndexError.
for cell, image in zip(gs, images):
    ax = plt.subplot(cell)
    ax.imshow(image)
    # Hide tick marks; each cell shows only the image.
    ax.set_xticks([])
    ax.set_yticks([])

plt.axis('off')
plt.show()
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nfrom helper import unittest, PillowTestCase\n\nfrom PIL import GimpGradientFile\n\n\nclass TestImage(PillowTestCase):\n\n def test_linear_pos_le_middle(self):\n # Arrange\n middle = 0.5\n pos = 0.25\n\n #... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nfrom helper import unittest, PillowTestCase\n\nfrom PIL import GimpGradientFile\n\n\nclass TestImage(PillowTestCase):\n\n def test_linear_pos_le_middle(self):\n # Arrange\n middle = 0.5\n pos = 0... | ```python
from helper import unittest, PillowTestCase
from PIL import GimpGradientFile
class TestImage(PillowTestCase):
    """Unit tests for the GimpGradientFile interpolation helpers and the
    ImagePalette loader for .ggr files."""

    def test_linear_pos_le_middle(self):
        # Left of the midpoint, linear() maps pos proportionally.
        self.assertEqual(GimpGradientFile.linear(0.5, 0.25), 0.25)

    def test_linear_pos_le_small_middle(self):
        # A vanishingly small midpoint clamps the left segment to 0.0.
        self.assertEqual(GimpGradientFile.linear(1e-11, 1e-12), 0.0)

    def test_linear_pos_gt_middle(self):
        # Right of the midpoint, linear() also maps pos proportionally.
        self.assertEqual(GimpGradientFile.linear(0.5, 0.75), 0.75)

    def test_linear_pos_gt_small_middle(self):
        # A midpoint almost at 1.0 clamps the right segment to 1.0.
        self.assertEqual(GimpGradientFile.linear(1 - 1e-11, 1 - 1e-12), 1.0)

    def test_curved(self):
        self.assertEqual(GimpGradientFile.curved(0.5, 0.75), 0.75)

    def test_sine(self):
        self.assertEqual(GimpGradientFile.sine(0.5, 0.75), 0.8535533905932737)

    def test_sphere_increasing(self):
        self.assertAlmostEqual(
            GimpGradientFile.sphere_increasing(0.5, 0.75), 0.9682458365518543)

    def test_sphere_decreasing(self):
        self.assertEqual(
            GimpGradientFile.sphere_decreasing(0.5, 0.75), 0.3385621722338523)

    def test_load_via_imagepalette(self):
        from PIL import ImagePalette

        test_file = "Tests/images/gimp_gradient.ggr"
        palette = ImagePalette.load(test_file)

        # load returns raw palette information
        self.assertEqual(len(palette[0]), 1024)
        self.assertEqual(palette[1], "RGBA")

    def test_load_1_3_via_imagepalette(self):
        from PIL import ImagePalette

        # GIMP 1.3 gradient files contain a name field
        test_file = "Tests/images/gimp_gradient_with_name.ggr"
        palette = ImagePalette.load(test_file)

        # load returns raw palette information
        self.assertEqual(len(palette[0]), 1024)
        self.assertEqual(palette[1], "RGBA")
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
``` |
[
{
"content": "Here is a code file:\n```python\nimport os\nimport unittest\n\nfrom vsg.rules import package_body\nfrom vsg import vhdlFile\nfrom vsg.tests import utils\n\nsTestDir = os.path.dirname(__file__)\n\nlFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_201_test_input.vhd'))\n\nlExpe... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport os\nimport unittest\n\nfrom vsg.rules import package_body\nfrom vsg import vhdlFile\nfrom vsg.tests import utils\n\nsTestDir = os.path.dirname(__file__)\n\nlFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_201_test_input... | ```python
import os
import unittest
from vsg.rules import package_body
from vsg import vhdlFile
from vsg.tests import utils
# Directory holding this test module and its VHDL fixture files.
sTestDir = os.path.dirname(__file__)
# Parse the input fixture once at import time; eError is None on success.
lFile, eError =vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir,'rule_201_test_input.vhd'))
# Expected file contents after fixing; the leading '' aligns the list with
# the 1-based line numbering used by the vsg helpers.
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_201_test_input.fixed.vhd'), lExpected)
class test_package_body_rule(unittest.TestCase):
    """Verify detection and automatic fixing of package_body rule 201."""

    def setUp(self):
        # Fresh file object per test so fixes do not leak between tests.
        self.oFile = vhdlFile.vhdlFile(lFile)
        self.assertIsNone(eError)

    def test_rule_201(self):
        oRule = package_body.rule_201()
        self.assertTrue(oRule)
        self.assertEqual(oRule.name, 'package_body')
        self.assertEqual(oRule.identifier, '201')
        oRule.analyze(self.oFile)
        lViolations = utils.extract_violation_lines_from_violation_object(oRule.violations)
        self.assertEqual([9, 16], lViolations)

    def test_fix_rule_201(self):
        oRule = package_body.rule_201()
        oRule.fix(self.oFile)
        self.assertEqual(lExpected, self.oFile.get_lines())
        # Re-analysis after fixing must report a clean file.
        oRule.analyze(self.oFile)
        self.assertEqual(oRule.violations, [])
``` |
[
{
"content": "```python\n# Copyright 2011-2017 Facundo Batista\n# All Rigths Reserved\n\n\"\"\"The server for kilink.\"\"\"\n\nimport logging\nimport time\n\nfrom functools import update_wrapper\n\nfrom flask import (\n Flask,\n jsonify,\n render_template,\n request,\n make_response\n)\n\n# from ... | [
{
"content": "<|memory_start|>```python\n# Copyright 2011-2017 Facundo Batista\n# All Rigths Reserved\n\n\"\"\"The server for kilink.\"\"\"\n\nimport logging\nimport time\n\nfrom functools import update_wrapper\n\nfrom flask import (\n Flask,\n jsonify,\n render_template,\n request,\n make_respon... | ```python
# Copyright 2011-2017 Facundo Batista
# All Rigths Reserved
"""The server for kilink."""
import logging
import time
from functools import update_wrapper
from flask import (
    Flask,
    jsonify,
    render_template,
    request,
    make_response
)
# from flask.ext.assets import Environment
# from flask_assets import Environment
from flask_babel import Babel
from flask_babel import gettext as _
from sqlalchemy import create_engine
import backend
import loghelper
from config import config, LANGUAGES
from metrics import StatsdClient
from decorators import crossdomain
# set up flask
app = Flask(__name__)
app.config.from_object(__name__)
app.config["STATIC_URL"] = 'static'
app.config["STATIC_ROOT"] = 'static'
# Let the error handlers below turn exceptions into responses instead of
# re-raising them to the WSGI server.
app.config["PROPAGATE_EXCEPTIONS"] = False
babel = Babel(app)
# flask-assets
# assets = Environment(app)
# assets.cache = "/tmp/"
# assets.init_app(app)
# logger
logger = logging.getLogger('kilink.kilink')
# metrics
# Statsd-style sink; "linkode" prefixes every metric name emitted below.
metrics = StatsdClient("linkode")
def nocache(f):
    """Decorator that marks a view's response as non-cacheable."""
    def wrapper(*args, **kwargs):
        """Call the view and stamp the response headers."""
        response = make_response(f(*args, **kwargs))
        response.headers['Cache-Control'] = 'public, max-age=0'
        return response
    return update_wrapper(wrapper, f)
def measure(metric_name):
    """Decorator generator to send metrics counting and with timing.

    On success counts ``<metric_name>.ok`` and records a timing sample;
    when the wrapped view raises, counts
    ``<metric_name>.error.<ExceptionName>`` and re-raises.
    """
    def _decorator(oldf):
        """The decorator itself."""
        def newf(*args, **kwargs):
            """The function to replace."""
            tini = time.time()
            try:
                result = oldf(*args, **kwargs)
            except Exception as exc:
                name = "%s.error.%s" % (metric_name, exc.__class__.__name__)
                metrics.count(name, 1)
                raise
            else:
                tdelta = time.time() - tini
                metrics.count(metric_name + '.ok', 1)
                metrics.timing(metric_name, tdelta)
                return result
        # need to fix the name because it's used by flask; assign __name__
        # (an alias of func_name on Python 2) so this also works on
        # Python 3, where func_name no longer exists.
        newf.__name__ = oldf.__name__
        return newf
    return _decorator
@app.errorhandler(backend.KilinkNotFoundError)
def handle_not_found_error(error):
    """Return 404 on kilink not found"""
    # error.message comes from the backend exception; echo it to the client.
    logger.debug(error.message)
    return jsonify({'message': error.message}), 404
@app.errorhandler(backend.KilinkDataTooBigError)
def handle_content_data_too_big_error(error):
    """Return 413 on content data too big"""
    # 413 = Request Entity Too Large; message comes from the backend.
    logger.debug(error.message)
    return jsonify({'message': error.message}), 413
@babel.localeselector
def get_locale():
    """Return the best matched language supported."""
    # Negotiates against the request's Accept-Language header.
    return request.accept_languages.best_match(LANGUAGES.keys())
# accesory pages
@app.route('/about')
@measure("about")
def about():
    """Show the about page."""
    # Static template; no dynamic data needed.
    return render_template('_about.html')
@app.route('/tools')
@measure("tools")
def tools():
    """Show the tools page."""
    # Static template; no dynamic data needed.
    return render_template('_tools.html')
@app.route('/version')
@measure("version")
def version():
    """Show the project version, very very simple, just for developers/admin help."""
    # NOTE(review): kilinkbackend is only bound inside the __main__ block
    # below; this view (like the API views) assumes that startup path ran.
    return kilinkbackend.get_version()
# views
@app.route('/')
@app.route('/<linkode_id>')
@app.route('/<linkode_id>/<revno>')
@app.route('/l/<linkode_id>')
@app.route('/l/<linkode_id>/<revno>')
@measure("index")
def index(linkode_id=None, revno=None):
    """The base page."""
    # Every URL variant serves the same template; linkode_id/revno are
    # resolved client-side via the API.
    return render_template('_new.html')
# API
@app.route('/api/1/linkodes/', methods=['POST'])
@crossdomain(origin='*')
@measure("api.create")
def api_create():
    """Create a kilink."""
    content = request.form['content']
    text_type = request.form.get('text_type', "")
    logger.debug("API create start; type=%r size=%d", text_type, len(content))
    try:
        created = kilinkbackend.create_kilink(content, text_type)
    except backend.KilinkDataTooBigError:
        # Oversized content: empty 413 response.
        logger.debug("Content data too big; on creation")
        return make_response(), 413
    payload = jsonify(linkode_id=created.linkode_id, revno=created.linkode_id)
    response = make_response(payload)
    response.headers['Location'] = 'http://%s/%s' % (config["server_host"], created.linkode_id)
    logger.debug("API create done; linkode_id=%s", created.linkode_id)
    return response, 201
@app.route('/api/1/linkodes/<linkode_id>', methods=['POST'])
@crossdomain(origin='*')
@measure("api.update")
def api_update(linkode_id):
    """Update a kilink."""
    content = request.form['content']
    parent = request.form['parent']
    text_type = request.form['text_type']
    logger.debug("API update start; linkode_id=%r parent=%r type=%r size=%d",
                 linkode_id, parent, text_type, len(content))
    try:
        updated = kilinkbackend.update_kilink(parent, content, text_type)
    except backend.KilinkNotFoundError:
        # Unknown parent revision: empty 404 response.
        logger.debug("API update done; linkode_id %r not found", linkode_id)
        return make_response(), 404
    except backend.KilinkDataTooBigError:
        # Oversized content: empty 413 response.
        logger.debug("Content data too big.; linkode_id %r", linkode_id)
        return make_response(), 413
    logger.debug("API update done; linkode_id=%r", updated.linkode_id)
    response = make_response(jsonify(revno=updated.linkode_id))
    response.headers['Location'] = 'http://%s/%s' % (config["server_host"], updated.linkode_id)
    return response, 201
@app.route('/api/1/linkodes/<linkode_id>/<revno>', methods=['GET'])
@app.route('/api/1/linkodes/<linkode_id>', methods=['GET'])
@crossdomain(origin='*')
@measure("api.get")
def api_get(linkode_id, revno=None):
    """Get the kilink and revno content"""
    logger.debug("API get; linkode_id=%r revno=%r", linkode_id, revno)
    if revno is not None:
        # the linkode_id to get the info from is the second token
        linkode_id = revno
    record = kilinkbackend.get_kilink(linkode_id)
    # get the tree
    tree, nodeq = kilinkbackend.build_tree(linkode_id)
    logger.debug("API get done; type=%r size=%d len_tree=%d",
                 record.text_type, len(record.content), nodeq)
    return jsonify(content=record.content, text_type=record.text_type,
                   tree=tree, timestamp=record.timestamp)
if __name__ == "__main__":
    # load config
    config.load_file("configs/development.yaml")
    # log setup
    handlers = loghelper.setup_logging(config['log_directory'], verbose=True)
    for h in handlers:
        app.logger.addHandler(h)
        h.setLevel(logging.DEBUG)
    app.logger.setLevel(logging.DEBUG)
    # set up the backend
    # NOTE(review): kilinkbackend is a module global consumed by the views
    # above, so the views only work when started through this path.
    engine = create_engine(config["db_engine"], echo=True)
    kilinkbackend = backend.KilinkBackend(engine)
    app.run(debug=True, host='0.0.0.0')
``` |
[
{
"content": "Replicate the source code:\n```python\n#!/usr/bin/env python\n\"\"\"\nConnect to a bugzilla xml-rpc.cgi and download all the things.\nThis exports products, bugs, comments and bug history to a \"bugzilla.json\"\noutput file which can in turn be used to quickly import things to a different\nformat.... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\n#!/usr/bin/env python\n\"\"\"\nConnect to a bugzilla xml-rpc.cgi and download all the things.\nThis exports products, bugs, comments and bug history to a \"bugzilla.json\"\noutput file which can in turn be used to quickly import things to a di... | ```python
#!/usr/bin/env python
"""
Connect to a bugzilla xml-rpc.cgi and download all the things.
This exports products, bugs, comments and bug history to a "bugzilla.json"
output file which can in turn be used to quickly import things to a different
format.
"""
import json
import sys
import xmlrpc.client
# Edit these to your liking or in local_settings.py
# Highest bug id in Bugzilla. Any bug with a higher id will not be imported.
MAX_BUG_ID = 10000
# Export output file
XMLRPC_EXPORT_FILE = "bugzilla.json"
# List of bugs that will not be exported
XMLRPC_BLACKLIST = []
try:
    # Optional local overrides for the settings above.
    from local_settings import *
except ImportError:
    pass
class RPCEncoder(json.JSONEncoder):
    """JSON encoder that serializes xmlrpc DateTime objects as their
    textual timestamp."""

    def default(self, o):
        # DateTime keeps its timestamp string in .value; anything else
        # is deliberately unsupported.
        if not isinstance(o, xmlrpc.client.DateTime):
            raise NotImplementedError
        return o.value
def main():
    """Drive the full export: products, bugs, history, comments, users."""
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: %s [URL TO XML-RPC]\n" % (sys.argv[0]))
        exit(1)
    # Comment-author addresses collected along the way; used at the end
    # to fetch the matching user records.
    emails = set()
    print("Connecting to %r" % (sys.argv[1]))
    bugzilla = xmlrpc.client.ServerProxy(sys.argv[1])
    print("Exporting products")
    _products = bugzilla.Product.get(bugzilla.Product.get_selectable_products())["products"]
    products = {product["name"]: product for product in _products}
    print("Exporting bugs")
    # Probe every id up to MAX_BUG_ID except blacklisted ones; "permissive"
    # asks the server to skip missing ids instead of failing.
    valid_ids = filter(lambda i: i not in XMLRPC_BLACKLIST, range(1, MAX_BUG_ID))
    bugs = bugzilla.Bug.get({"ids": list(valid_ids), "permissive": True})["bugs"]
    # Narrow to the ids that actually exist on the server.
    valid_ids = [k["id"] for k in bugs]
    print("Exporting bug history")
    history = bugzilla.Bug.history({"ids": valid_ids})["bugs"]
    print("Exporting comments")
    _comments = bugzilla.Bug.comments({"ids": valid_ids})["bugs"]
    # god damn it bugzilla
    # (re-key by int in case the endpoint returns ids as strings -- the
    # int() conversion below suggests so; TODO confirm)
    comments = {int(id): _comments[id] for id in _comments}
    for histitem, bug in zip(history, bugs):
        assert histitem["id"] == bug["id"]
        bug["history"] = histitem["history"]
    # turn bugs into a dict
    bugs = {int(bug["id"]): bug for bug in bugs}
    # NOTE(review): the loop variable shadows the outer 'comments' dict;
    # it works because .items() is evaluated once, but is fragile.
    for id, comments in comments.items():
        comments = comments["comments"]
        for comment in comments:
            # Add to the list of users we want to export
            emails.add(comment["author"])
        bugs[id]["comments"] = comments
    # now move the bugs dict to the products
    for product in products.values():
        product["bugs"] = {}
    for id, bug in bugs.items():
        products[bug["product"]]["bugs"][id] = bug
    json_out = {"products": products}
    print("Exporting all users")
    users = bugzilla.User.get({"names": list(emails)})["users"]
    json_out["users"] = {user["name"]: user["real_name"] for user in users}
    with open(XMLRPC_EXPORT_FILE, "w") as f:
        f.write(json.dumps(json_out, cls=RPCEncoder))
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n#\n# Gramps - a GTK+/GNOME based genealogy program\n#\n# Copyright (C) 2000-2007 Donald N. Allingham\n# Copyright (C) 2009 Gary Burton\n# Copyright (C) 2011 Tim G L Lyons\n#\n# This program is free software; you can... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n#\n# Gramps - a GTK+/GNOME based genealogy program\n#\n# Copyright (C) 2000-2007 Donald N. Allingham\n# Copyright (C) 2009 Gary Burton\n# Copyright (C) 2011 Tim G L Lyons\n#\n# This program is free s... | ```python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
File and folder related utility functions
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import os
import sys
import locale
import shutil
import logging
LOG = logging.getLogger(".gen.utils.file")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..constfunc import win, mac
from ..const import TEMP_DIR, USER_HOME
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Filled as (folder, path separator, index, extension) by
# get_new_filename() below.
_NEW_NAME_PATTERN = '%s%sUntitled_%d.%s'
#-------------------------------------------------------------------------
#
# Functions
#
#-------------------------------------------------------------------------
def find_file( filename):
    """Return *filename* if it exists on disk, else ''.

    First checks the name as given; on failure retries the name encoded
    with the filesystem encoding, the locale's preferred encoding, UTF-8
    and ISO-8859-1.
    """
    # try the filename we got
    try:
        fname = filename
        if os.path.isfile( filename):
            return( filename)
    except:
        pass

    # Build list of alternate encodings
    encodings = set()
    #Darwin returns "mac roman" for preferredencoding, but since it
    #returns "UTF-8" for filesystemencoding, and that's first, this
    #works.
    for enc in [sys.getfilesystemencoding, locale.getpreferredencoding]:
        try:
            # Call the accessor: we want the encoding *name*, not the
            # function object (str.encode would reject the latter, so the
            # previous code never actually tried these encodings).
            encodings.add(enc())
        except:
            pass
    encodings.add('UTF-8')
    encodings.add('ISO-8859-1')
    for enc in encodings:
        try:
            fname = filename.encode(enc)
            if os.path.isfile( fname):
                return fname
        except:
            pass

    # not found
    return ''
def find_folder( filename):
    """Return *filename* if it is an existing directory, else ''.

    Falls back to checking the name encoded with several likely
    encodings before giving up.
    """
    # The name may already be usable as-is.
    try:
        if os.path.isdir(filename):
            return filename
    except:
        pass

    # Candidate encodings to retry with (duplicates removed below).
    try:
        candidates = [sys.getfilesystemencoding(),
                      locale.getpreferredencoding(),
                      'UTF-8', 'ISO-8859-1']
    except:
        candidates = [sys.getfilesystemencoding(), 'UTF-8', 'ISO-8859-1']
    for encoding in set(candidates):
        try:
            encoded = filename.encode(encoding)
            if os.path.isdir(encoded):
                return encoded
        except:
            pass

    # Nothing matched.
    return ''
def get_unicode_path_from_file_chooser(path):
    """
    Return the Unicode version of a path string.

    :type path: str
    :param path: The path to be converted to Unicode
    :rtype: unicode
    :returns: The Unicode version of path.
    """
    # Values that are not raw 'str' are assumed to be unicode already.
    if not isinstance(path, str):
        return path
    on_windows = win()
    try:
        if on_windows:
            # in windows filechooser returns officially utf-8, not filesystemencoding
            return unicode(path)
        return unicode(path, sys.getfilesystemencoding())
    except:
        LOG.warn("Problem encountered converting string: %s." % path)
        return unicode(path, sys.getfilesystemencoding(), errors='replace')
def get_unicode_path_from_env_var(path):
    """
    Return the Unicode version of a path string.

    :type path: str
    :param path: The path to be converted to Unicode
    :rtype: unicode
    :returns: The Unicode version of path.
    """
    # Values that are not raw 'str' are assumed to be unicode already.
    if not isinstance(path, str):
        return path
    on_windows = win()
    try:
        if on_windows:
            # In Windows path/filename returned from a environment variable is in filesystemencoding
            return unicode(path, sys.getfilesystemencoding())
        return unicode(path)
    except:
        LOG.warn("Problem encountered converting string: %s." % path)
        return unicode(path, sys.getfilesystemencoding(), errors='replace')
def get_new_filename(ext, folder='~/'):
    """Return the first unused '<folder><sep>Untitled_<n>.<ext>' path."""
    index = 1
    while True:
        candidate = os.path.expanduser(
            _NEW_NAME_PATTERN % (folder, os.path.sep, index, ext))
        if not os.path.isfile(candidate):
            return candidate
        index += 1
def get_empty_tempdir(dirname):
    """ Return path to TEMP_DIR/dirname, a guaranteed empty directory

    makes intervening directories if required
    fails if _file_ by that name already exists,
    or for inadequate permissions to delete dir/files or create dir(s)
    """
    dirpath = os.path.join(TEMP_DIR, dirname)
    # Wipe any previous contents so callers always start empty.
    if os.path.isdir(dirpath):
        shutil.rmtree(dirpath)
    os.makedirs(dirpath)
    return get_unicode_path_from_env_var(dirpath)
def rm_tempdir(path):
    """Remove a tempdir created with get_empty_tempdir"""
    # Refuse to delete anything outside the application's temp area.
    inside_temp_area = path.startswith(TEMP_DIR)
    if inside_temp_area and os.path.isdir(path):
        shutil.rmtree(path)
def relative_path(original, base):
    """
    Calculate the relative path from base to original, with base a directory,
    and original an absolute path
    On problems, original is returned unchanged
    """
    if not os.path.isdir(base):
        return original
    #original and base must be absolute paths
    if not os.path.isabs(base):
        return original
    if not os.path.isabs(original):
        return original
    original = os.path.normpath(original)
    base = os.path.normpath(base)

    # If the db_dir and obj_dir are on different drives (win only)
    # then there cannot be a relative path. Return original obj_path
    (base_drive, base) = os.path.splitdrive(base)
    (orig_drive, orig_name) = os.path.splitdrive(original)
    if base_drive.upper() != orig_drive.upper():
        return original

    # Starting from the filepath root, work out how much of the filepath is
    # shared by base and target.
    base_list = (base).split(os.sep)
    target_list = (orig_name).split(os.sep)
    # make sure '/home/person' and 'c:/home/person' both give
    #   list ['home', 'person']
    # list() so len() and slicing also work on Python 3, where filter()
    # returns a lazy iterator.
    base_list = list(filter(None, base_list))
    target_list = list(filter(None, target_list))
    i = -1
    for i in range(min(len(base_list), len(target_list))):
        # '!=' instead of the Python-2-only '<>' operator.
        if base_list[i] != target_list[i]: break
    else:
        #if break did not happen we are here at end, and add 1.
        i += 1
    rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:]
    return os.path.join(*rel_list)
def media_path(db):
    """
    Given a database, return the mediapath to use as basedir for media
    """
    configured = db.get_mediapath()
    if configured is not None:
        return configured
    # No media path configured: fall back to the user's home directory.
    return USER_HOME
def media_path_full(db, filename):
    """
    Given a database and a filename of a media, return the media filename
    is full form, eg 'graves/tomb.png' becomes '/home/me/genea/graves/tomb.png
    """
    # Absolute names are already fully qualified.
    if os.path.isabs(filename):
        return filename
    return os.path.join(media_path(db), filename)
def search_for(name):
    """Return 1 when executable *name* is found on the PATH, else 0."""
    # A quoted name keeps everything between the quotes; otherwise only
    # the first whitespace-separated token names the executable.
    if name.startswith( '"' ):
        name = name.split('"')[1]
    else:
        name = name.split()[0]
    on_windows = win()
    separator = ';' if on_windows else ':'
    for directory in os.environ['PATH'].split(separator):
        candidate = os.path.join(directory, name)
        if os.access(candidate, os.X_OK) and not os.path.isdir(candidate):
            return 1
    # On Windows the bare name (e.g. with an explicit path) also counts.
    if on_windows and os.access(name, os.X_OK) and not os.path.isdir(name):
        return 1
    return 0
def fix_encoding(value, errors='strict'):
    """Return *value* as unicode, decoding with the platform codeset.

    The errors argument specifies the response when the input string can't
    be converted according to the encoding's rules: 'strict' raises
    UnicodeDecodeError, 'replace' substitutes U+FFFD, 'ignore' leaves the
    offending character out of the result.
    """
    if isinstance(value, unicode):
        return value
    try:
        return unicode(value)
    except:
        try:
            if mac():
                codeset = locale.getlocale()[1]
            else:
                codeset = locale.getpreferredencoding()
        except:
            codeset = "UTF-8"
        return unicode(value, codeset, errors)
``` |
[
{
"content": "Here is the code content:\n```python\n# Copyright (C) 2013-2017 Roland Lutz\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at... | [
{
"content": "Here is the code content:\n<|memory_start|>```python\n# Copyright (C) 2013-2017 Roland Lutz\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the Li... | ```python
# Copyright (C) 2013-2017 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import xorn.storage
def setup():
    """Build a small four-revision object history for the storage tests.

    rev0 is empty; rev1 adds a line; rev2 adds a box and a circle; rev3
    replaces the line's data with a net and deletes the box.  Returns the
    four finalized revisions plus the three object handles.
    """
    # Kept as module globals so individual tests can compare these records
    # against what the revisions return.
    global line_data, box_data, circle_data, net_data
    rev0 = xorn.storage.Revision(None)
    assert rev0 is not None
    rev0.finalize()
    # first change
    rev1 = xorn.storage.Revision(rev0)
    assert rev1 is not None
    line_data = xorn.storage.Line()
    line_data.x = 0
    line_data.y = 1
    line_data.width = 3
    line_data.height = 2
    line_data.color = 3
    line_data.line.width = 1
    ob0 = rev1.add_object(line_data)
    assert ob0 is not None
    rev1.finalize()
    # second change
    rev2 = xorn.storage.Revision(rev1)
    assert rev2 is not None
    box_data = xorn.storage.Box()
    box_data.x = 1
    box_data.y = 1
    box_data.width = 2
    box_data.height = 2
    box_data.color = 3
    box_data.line.width = 1
    ob1a = rev2.add_object(box_data)
    assert ob1a is not None
    circle_data = xorn.storage.Circle()
    circle_data.x = -1
    circle_data.y = -1
    circle_data.radius = 2
    circle_data.color = 3
    circle_data.line.width = 1
    circle_data.fill.type = 1
    ob1b = rev2.add_object(circle_data)
    assert ob1b is not None
    rev2.finalize()
    # third change
    rev3 = xorn.storage.Revision(rev2)
    assert rev3 is not None
    net_data = xorn.storage.Net()
    net_data.x = 0
    net_data.y = 1
    net_data.width = 3
    net_data.height = 2
    net_data.color = 4
    # Reuse ob0's handle for the net data and drop the box.
    rev3.set_object_data(ob0, net_data)
    rev3.delete_object(ob1a)
    rev3.finalize()
    return rev0, rev1, rev2, rev3, ob0, ob1a, ob1b
``` |
[
{
"content": "Replicate the source code:\n```python\nimport json\nimport time\nimport re\nfrom collections import OrderedDict\n\nfrom doajtest.helpers import DoajTestCase\nfrom portality import models\nfrom portality.upgrade import do_upgrade\nfrom portality.lib.paths import rel2abs\n\ndef operation(journal):\n... | [
{
"content": "Replicate the source code:\n<|memory_start|>```python\nimport json\nimport time\nimport re\nfrom collections import OrderedDict\n\nfrom doajtest.helpers import DoajTestCase\nfrom portality import models\nfrom portality.upgrade import do_upgrade\nfrom portality.lib.paths import rel2abs\n\ndef opera... | ```python
import json
import time
import re
from collections import OrderedDict
from doajtest.helpers import DoajTestCase
from portality import models
from portality.upgrade import do_upgrade
from portality.lib.paths import rel2abs
def operation(journal):
    """Migration operation: retitle the given journal and save it."""
    fresh = models.Journal.pull(journal.id)
    bibjson = fresh.bibjson()
    bibjson.title = "Updated Title"
    fresh.save()
    return fresh
class TestUpgrade(DoajTestCase):
    """End-to-end check of the upgrade (migration) machinery."""
    def test_upgrade(self):
        # populate the index with some journals with title
        saved_journals = {}
        for i in range(5):
            j = models.Journal()
            j.set_in_doaj(True)
            bj = j.bibjson()
            bj.title = "Test Journal"
            bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
            bj.publisher = "Test Publisher {x}".format(x=i)
            bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
            j.save()
            saved_journals[j.id] = j.last_updated
        # and some journals whose ISSN matches what the migration targets
        for i in range(5):
            j = models.Journal()
            j.set_in_doaj(True)
            bj = j.bibjson()
            bj.add_identifier(bj.P_ISSN, "{x}000-0001".format(x=i))
            bj.title = "Journal to Change"
            bj.publisher = "Test Publisher {x}".format(x=i)
            bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
            j.save()
            saved_journals[j.id] = j.last_updated
        # make sure the last updated dates will be suitably different after migration
        time.sleep(1.5)
        path =rel2abs(__file__, ".", "resources", "migrate.json")
        with open(path) as f:
            instructions = json.loads(f.read(), object_pairs_hook=OrderedDict)
        do_upgrade(instructions,None)
        # Journals with a P-ISSN matching [0-4]000-0001 should have been
        # migrated; all others must be untouched.
        p = re.compile('[0-4]000-0001')
        for id in saved_journals:
            j = models.Journal.pull(id)
            bj = j.bibjson()
            pissn = bj.get_one_identifier(bj.P_ISSN)
            if not p.match(pissn):
                # Untouched: both title and last_updated preserved.
                assert bj.title == "Test Journal"
                assert j.last_updated == saved_journals[j.id]
            else:
                # Migrated: retitled and re-saved (new last_updated).
                assert bj.title == "Updated Title"
                assert not j.last_updated == saved_journals[j.id]
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\nimport pika\nimport sys\nimport signal\nimport json\nimport logging\nfrom optparse import OptionParser\nimport eblocal\n\ndef createApplication(command):\n res = eblocal.createApp(command[\"name\"], command[\"source\"])\n if re... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\nimport pika\nimport sys\nimport signal\nimport json\nimport logging\nfrom optparse import OptionParser\nimport eblocal\n\ndef createApplication(command):\n res = eblocal.createApp(command[\"name\"], command[\"sourc... | ```python
#!/usr/bin/env python
import pika
import sys
import signal
import json
import logging
from optparse import OptionParser
import eblocal
def createApplication(command):
    """Create the EB application and its environment; roll back on failure."""
    if eblocal.createApp(command["name"], command["source"]) is None:
        logging.error("Can't create application")
        return
    if eblocal.createEnv(command["name"], command["source"]) is None:
        # Environment creation failed: remove the half-created application.
        logging.error("Can't create application environment, deleting application")
        eblocal.deleteApp(command["name"])
        return
    logging.info("Application: "+command["name"]+" created")
def rebuildApplicationEnvironment(command):
    """Rebuild an existing environment, creating it from scratch if absent."""
    if eblocal.getEnv(command["name"]) is None:
        logging.error("No application environment present, creating")
        createApplication(command)
        return
    if eblocal.rebuildEnv(command["name"]) is None:
        logging.error("Can't rebuild environment")
def deleteApplication(command):
    """Delete the named application, logging when the backend refuses."""
    if eblocal.deleteApp(command["name"]) is None:
        logging.error("Can't delete application")
def deleteAgedApplication(command):
    """Delete the application only once its environment exceeds the age limit."""
    age = eblocal.getEnvAge(command["name"])
    if age is None:
        logging.error("Can't detect environment age")
        return
    if age < options.max_age:
        # Still young enough -- leave it alone.
        return
    logging.info("Environment age > "+str(options.max_age)+" hrs, deleting.")
    if eblocal.deleteApp(command["name"]) is None:
        logging.error("Can't delete application")
# Dispatch table mapping the "operation" field of incoming messages to
# their handler functions.
operations = dict()
operations['create'] = createApplication
operations['rebuild'] = rebuildApplicationEnvironment
operations['delete'] = deleteApplication
operations['deleteaged'] = deleteAgedApplication
def on_message(channel, method_frame, header_frame, body):
    """Handle one queue message: decode the JSON command and dispatch it.

    The message is always acknowledged, even when handling fails, so a
    malformed command cannot wedge the queue.
    """
    logging.debug(method_frame.delivery_tag)
    logging.debug(body)
    logging.debug(header_frame)
    try:
        command = json.loads(body)
        logging.info("Command: "+command["operation"]+" for: "+command["name"]+", source is: "+command["source"])
        if command["operation"] in operations:
            if options.run == "yes":
                logging.info("Run operation: "+command["operation"])
                operations[command["operation"]](command)
            else:
                logging.info("Simulate run operation: "+command["operation"])
    except Exception:
        # Narrowed from a bare 'except:' so system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are no longer swallowed here.
        logging.exception("Error while running command: "+str(sys.exc_info()[0]))
    channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def signal_handler(sig, frame):
    """Stop consuming and exit cleanly on a termination signal."""
    logging.info("Interrupted with: "+str(sig)+", exit now!")
    # channel/connection are the module-level pika objects created below.
    channel.stop_consuming()
    connection.close()
    sys.exit(0)
# Command-line options for the receiver.
parser = OptionParser()
parser.add_option("-r", "--run", type="string", help="if not set to \"yes\", do really nothing, just accept messages", dest="run", default="no")
parser.add_option("-q", "--queue", help="queue name", type="string", dest="queue", default="test")
parser.add_option("-l", "--log-level", help="log level", dest="log_level", type="int", default=1)
parser.add_option("-m", "--max-age", help="maximum application age in hours", dest="max_age", type="int", default=6)
(options, args) = parser.parse_args()
# Map the numeric option to a logging level.  Values below 2 (including 0
# or negatives, which previously left log_level unbound and crashed) fall
# back to INFO.
if options.log_level >= 2:
    log_level = logging.DEBUG
else:
    log_level = logging.INFO
logging.basicConfig(level=log_level)
logging.info("Start reciever on queue: "+options.queue)
# Exit gracefully on the usual termination signals.
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGQUIT, signal_handler)
# Connect to the local broker and bind our queue to the fanout exchange.
connection = pika.BlockingConnection()
channel = connection.channel()
channel.queue_declare(queue=options.queue)
channel.exchange_declare(exchange='commands', type='fanout')
channel.queue_bind(exchange='commands', queue=options.queue)
channel.basic_consume(on_message, queue=options.queue)
channel.start_consuming()
``` |
[
{
"content": "Repeat the code precisely:\n```python\n\"\"\"\n\n The MCMC fitting code used in Hilton et al. (2012), in a more general purpose form\n\n Copyright 2015 Matt Hilton (matt.hilton@mykolab.com)\n \n This file is part of fitScalingRelation.\n\n fitScalingRelation is free software: you ca... | [
{
"content": "Repeat the code precisely:\n<|memory_start|>```python\n\"\"\"\n\n The MCMC fitting code used in Hilton et al. (2012), in a more general purpose form\n\n Copyright 2015 Matt Hilton (matt.hilton@mykolab.com)\n \n This file is part of fitScalingRelation.\n\n fitScalingRelation is free ... | ```python
"""
The MCMC fitting code used in Hilton et al. (2012), in a more general purpose form
Copyright 2015 Matt Hilton (matt.hilton@mykolab.com)
This file is part of fitScalingRelation.
fitScalingRelation is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
fitScalingRelation is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with fitScalingRelation. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import math
import string
from astLib import *
import pylab as plt
import numpy as np
import astropy.table as atpy
import popen2
from scipy import stats
from scipy import special
from scipy import interpolate
from scipy import ndimage
import pyximport; pyximport.install()
import cythonScalingRelation as csr
import time
import pickle
import matplotlib
import IPython
np.random.seed()
plt.matplotlib.interactive(False)
# For some unknown reason, mathtext in matplotlib is behaving weirdly since Ubuntu 16.10 upgrade
#try:
#plt.matplotlib.rc('text', usetex=True)
#except:
#pass
#-------------------------------------------------------------------------------------------------------------
# Adopt Ed's cosmology
#astCalc.OMEGA_M0=0.27
#astCalc.OMEGA_L=0.73
#-------------------------------------------------------------------------------------------------------------
def ask_for(key):
    """Interactively prompt the user for the value of missing parameter `key`.

    The reply is passed through eval() so that numbers, lists, tuples etc.
    come back as Python objects; replies that fail to evaluate as a name
    (e.g. unquoted strings) are returned as the raw string instead.

    NOTE(review): eval() on user input is inherently unsafe with untrusted
    users; retained here for backwards compatibility with existing .par files.
    """
    # raw_input was renamed input in Python 3 - support both.
    try:
        promptFunc = raw_input
    except NameError:
        promptFunc = input
    s = promptFunc("ParametersDict: enter value for '%s': " % key)
    try:
        val = eval(s)
    except NameError:
        # allow people to enter unquoted strings
        val = s
    return val
class ParametersDict( dict ):
    """A dict of fit settings/parameters that interactively prompts (via ask_for)
    for any key that is accessed but missing, and that can round-trip itself to
    a simple 'key = value' text file.
    """

    def __getitem__( self, key ):
        # Ask interactively for missing parameters instead of raising KeyError.
        if key not in self:
            print("ParametersDict: parameter '%s' not found" % key)
            val = ask_for( key )
            print("ParametersDict: setting '%s' = %s" % (key, repr(val)))
            dict.__setitem__( self, key, val )
        return dict.__getitem__( self, key )

    def read_from_file( self, filename ):
        """Populate the dictionary from a 'key = value' text file.

        Blank lines and lines starting with '#' are skipped; trailing '#'
        comments are stripped. Values are eval()'d (unsafe for untrusted
        files - see XXX below).

        Raises:
            ValueError: if a non-comment line does not contain exactly one '='.
            Exception: if a value cannot be parsed.
        """
        f = open( filename )
        for line in f:
            line = line.strip()
            if len(line) == 0 or line[0] == '#':
                continue
            # Strip trailing comments
            line = line.split('#')[0]
            s = line.split('=')
            if len(s) != 2:
                # Previously this dropped into an interactive debugger and
                # called sys.exit(); raising is friendlier to callers.
                raise ValueError("Error parsing line: %s" % line)
            try:
                key = s[0].strip()
                val = eval(s[1].strip()) # XXX: make safer
            except:
                raise Exception("can't parse line: %s" % (line))
            self[key] = val
        f.close()

    def write_to_file( self, filename, mode = 'w' ):
        """Write the dictionary to filename as sorted 'key = repr(value)' lines."""
        f = open( filename, mode )
        # dict.keys() returns a view in Python 3, so sort via sorted() rather
        # than the old keys.sort() idiom.
        for key in sorted(self.keys()):
            f.write( "%s = %s\n" % (key, repr(self[key])) )
        f.close()

    def cmp( self, otherDict ):
        """Return the list of keys (from self) whose values are missing from,
        or differ in, otherDict.

        NOTE: the original implementation compared against the non-existent
        self.params, stopped at the first difference, and returned otherDict;
        it now compares self's own values, checks every key, and returns the
        list of differing keys.
        """
        diff = []
        for k in self.keys():
            try:
                if otherDict[k] != dict.__getitem__(self, k):
                    diff.append(k)
            except KeyError:
                diff.append(k)
        return diff
#-------------------------------------------------------------------------------------------------------------
def selectStartParsFromPriors(settingsDict):
    """Choose random starting values for the MCMC from the priors we're placing on the parameters.

    Fixed parameters take their '<v>0' value; free parameters are drawn uniformly
    from [prior_<v>_MIN, prior_<v>_MAX]. When 'swapAxes' is switched on, (A, B)
    are mapped into the swapped-axes parameterisation so that the same prior
    ranges can be used.
    """
    varNames = settingsDict['variables']
    startPars = np.zeros(len(varNames))
    for slot, name in enumerate(varNames):
        if settingsDict['%sFit' % (name)] == 'fixed':
            startPars[slot] = settingsDict['%s0' % (name)]
        else:
            lowerBound = settingsDict['prior_%s_MIN' % (name)]
            upperBound = settingsDict['prior_%s_MAX' % (name)]
            startPars[slot] = np.random.uniform(lowerBound, upperBound)
    # This makes sure that if we're testing by swapping axes, we can use the same prior ranges
    if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
        swappedB = 1.0 / startPars[1]
        swappedA = -startPars[0] / startPars[1]
        startPars[0] = swappedA
        startPars[1] = swappedB
    return startPars
#-------------------------------------------------------------------------------------------------------------
def getPPrior(pPars, settingsDict):
    """Gets prior probability for proposed parameter vector pPars.

    Returns 1.0 when every free parameter lies strictly inside its
    [prior_<v>_MIN, prior_<v>_MAX] range (fixed parameters always pass),
    and 0.0 otherwise.

    NOTE: when settingsDict['swapAxes'] == True, pPars is transformed back
    to the un-swapped (A, B) parameterisation in place, as a side effect.
    """
    variables=settingsDict['variables']
    # This makes sure that if we're testing by swapping axes, we can use the same prior ranges
    if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
        b=1.0/pPars[1]
        a=-pPars[0]/pPars[1]
        pPars[0]=a
        pPars[1]=b
    priors=np.zeros(len(variables))
    for i in range(len(variables)):
        v=variables[i]
        if pPars[i] > settingsDict['prior_%s_MIN' % (v)] and pPars[i] < settingsDict['prior_%s_MAX' % (v)]:
            priors[i]=1.0
        else:
            priors[i]=0.0
        # Fixed parameters must surely be within the priors...
        if settingsDict['%sFit' % (v)] == 'fixed':
            priors[i]=1.0
    # np.product was removed in NumPy 2.0 - np.prod is the supported spelling
    pPrior=np.prod(priors)
    return pPrior
#-------------------------------------------------------------------------------------------------------------
def byteSwapArr(arr):
    """FITS is big-endian, but cython likes native-endian arrays (little-endian for x86)... so, byteswap
    if we need.

    Returns the input array unchanged when it is already native/little endian.
    """
    if arr.dtype.byteorder == '>':
        # ndarray.newbyteorder was removed in NumPy 2.0; going via the dtype
        # works on both NumPy 1.x and 2.x and is equivalent.
        arr=arr.byteswap().view(arr.dtype.newbyteorder('='))
    return arr
#-------------------------------------------------------------------------------------------------------------
def sampleGetter(settingsDict, sampleDef, outDir):
    """Loads in catalogue in .fits table format, and add columns xToFit, yToFit, xErrToFit, yErrToFit,
    which are fed into the MCMCFit routine. Applies any asked for scalings and cuts according to the
    contents of settingsDict and sampleDef.

    settingsDict - parameters dictionary (column names, pivots, log10 / E(z) scaling switches etc.)
    sampleDef    - sample selection dictionary: keys ending '_MIN'/'_MAX' are range cuts on the
                   named column; other keys (except 'label', 'plotLabel') are equality cuts
    outDir       - output directory (currently only used by the commented-out diagnostic plots)

    Returns the cut table with added columns 'E(z)', 'xToFit', 'xErrToFitPlus', 'xErrToFitMinus',
    'yToFit', 'yErrToFitPlus', 'yErrToFitMinus' and 'detP'.

    NOTE: if xPivot/yPivot are given as "median", the measured median is written back into
    settingsDict as a side effect.
    """
    # Stuff we need from settings...
    xColumnName=settingsDict['xColumnName']
    xPlusErrColumnName=settingsDict['xPlusErrColumnName']
    xMinusErrColumnName=settingsDict['xMinusErrColumnName']
    yColumnName=settingsDict['yColumnName']
    yPlusErrColumnName=settingsDict['yPlusErrColumnName']
    yMinusErrColumnName=settingsDict['yMinusErrColumnName']
    xPivot=settingsDict['xPivot']
    yPivot=settingsDict['yPivot']
    xTakeLog10=settingsDict['xTakeLog10']
    yTakeLog10=settingsDict['yTakeLog10']
    redshiftColumnName=settingsDict['redshiftColumnName']
    xScaleFactor=settingsDict['xScaleFactor']
    yScaleFactor=settingsDict['yScaleFactor']
    yScaleFactorPower=settingsDict['yScaleFactorPower']
    newTab=atpy.Table().read(settingsDict['inFileName'])
    # Make a new table here with cuts applied
    # NOTE: we really need a better way of labelling constraints
    for key in sampleDef:
        if key not in ['label', 'plotLabel']:
            if key[-4:] == '_MIN':
                col=key[:-4]
                newTab=newTab[np.where(newTab[col] > sampleDef[key])]
            elif key[-4:] == '_MAX':
                col=key[:-4]
                newTab=newTab[np.where(newTab[col] < sampleDef[key])]
            else:
                if type(sampleDef[key]) != list:
                    newTab=newTab[np.where(newTab[key] == sampleDef[key])]
                else:
                    print "Need to add more sampleDef key handling code"
                    IPython.embed()
                    sys.exit()
    if len(newTab) == 0:
        print "Hmm... all objects cut? empty newTab"
        IPython.embed()
        sys.exit()
    # Value added useful columns
    Ez=[]
    for row in newTab:
        Ez.append(astCalc.Ez(row[redshiftColumnName]))
    newTab.add_column(atpy.Column(Ez, 'E(z)'))
    # Add columns we will fit to, scaling and applying log10 as necessary
    # We apply pivots here also (undo them, if necessary, elsewhere)
    stab=newTab
    # We should probably make this default
    if xPivot == "median":
        xPivot=np.median(newTab[xColumnName])
        settingsDict['xPivot']=xPivot
    if yPivot == "median":
        yPivot=np.median(newTab[yColumnName])
        settingsDict['yPivot']=yPivot
    if yScaleFactor == "E(z)":
        yScaling=np.power(stab["E(z)"], yScaleFactorPower)
    elif yScaleFactor == None:
        yScaling=np.ones(len(stab))
    else:
        raise Exception, "didn't understand yScaleFactor"
    if xTakeLog10 == True:
        # Errors become asymmetric in log space, so propagate plus/minus separately
        xToFit=np.log10(stab[xColumnName]/xPivot)
        xErrToFitPlus=np.log10((stab[xColumnName]+stab[xPlusErrColumnName])/xPivot)-xToFit
        xErrToFitMinus=xToFit-np.log10((stab[xColumnName]-stab[xMinusErrColumnName])/xPivot)
    else:
        xToFit=stab[xColumnName]
        xErrToFitPlus=stab[xPlusErrColumnName]
        xErrToFitMinus=stab[xMinusErrColumnName]
    if yTakeLog10 == True:
        yToFit=np.log10(yScaling*stab[yColumnName]/yPivot)
        yErrToFitPlus=np.log10(yScaling*(stab[yColumnName]+stab[yPlusErrColumnName])/yPivot)-yToFit
        yErrToFitMinus=yToFit-np.log10(yScaling*(stab[yColumnName]-stab[yMinusErrColumnName])/yPivot)
    else:
        yToFit=stab[yColumnName]
        yErrToFitPlus=stab[yPlusErrColumnName]
        yErrToFitMinus=stab[yMinusErrColumnName]
    # Swap
    if xToFit.dtype.byteorder == '>':
        xToFit=xToFit.byteswap().newbyteorder('=')
    stab.add_column(atpy.Column(xToFit, 'xToFit'))
    stab.add_column(atpy.Column(xErrToFitPlus, 'xErrToFitPlus'))
    stab.add_column(atpy.Column(xErrToFitMinus, 'xErrToFitMinus'))
    stab.add_column(atpy.Column(yToFit, 'yToFit'))
    stab.add_column(atpy.Column(yErrToFitPlus, 'yErrToFitPlus'))
    stab.add_column(atpy.Column(yErrToFitMinus, 'yErrToFitMinus'))
    # If we ever get around to fiddling with detection probabilities again, change this...
    if 'detPColumnName' in settingsDict.keys():
        if settingsDict['detPColumnName'] != 'detP':
            stab.add_column(atpy.Column(stab[settingsDict['detPColumnName']], 'detP'))
            #stab['detP']=np.ones(len(stab))
            #stab['detP']=stab['detP'].byteswap().newbyteorder()
            #IPython.embed()
            #sys.exit()
    else:
        # No detection probability column given: treat everything as detected
        stab.add_column(atpy.Column([1.0]*len(stab), 'detP'))
    if 'ignoreSelectionFunction' in settingsDict.keys() and settingsDict['ignoreSelectionFunction'] == True:
        stab['detP']=np.ones(len(stab))
    if settingsDict['symmetriseErrors'] == True:
        # Replace asymmetric errors with their mean in both directions
        xAvErr=(stab['xErrToFitPlus']+stab['xErrToFitMinus'])/2.0
        yAvErr=(stab['yErrToFitPlus']+stab['yErrToFitMinus'])/2.0
        stab['xErrToFitPlus']=xAvErr
        stab['xErrToFitMinus']=xAvErr
        stab['yErrToFitPlus']=yAvErr
        stab['yErrToFitMinus']=yAvErr
    # Histograms of redshift and x property distribution, one above the other
    # Fiddle with this later...
    #print "plots"
    #IPython.embed()
    #sys.exit()
    #fontDict={'size': 16}
    #cols=1
    #pylab.figure(figsize=(6, 8*cols))
    #pylab.subplots_adjust(0.1, 0.06, 0.97, 0.97, 0.03, 0.12)
    #pylab.subplot(2, 1, 1)
    #pylab.hist(stab['redshift'], bins = numpy.linspace(0.0, 1.5, 16), histtype = 'stepfilled', color =
    #'#A0A0A0', ec = '#A0A0A0')
    #pylab.xlabel("$z$", fontdict = fontDict)
    #pylab.ylabel("N", fontdict = fontDict)
    #pylab.ylim(0, 60)
    #pylab.subplot(2, 1, 2)
    #pylab.hist(stab['temp'], bins = numpy.linspace(0, 12, 13), histtype = 'stepfilled', color =
    #'#A0A0A0', ec = '#A0A0A0')
    #pylab.xlabel("$T$ (keV)", fontdict = fontDict)
    #pylab.ylabel("N", fontdict = fontDict)
    ##pylab.yticks(ylocs, [""]*len(ylabels))
    #pylab.ylim(0, 60)
    #pylab.savefig(outDir+os.path.sep+"zT_histograms.pdf")
    #pylab.close()
    return stab
#-------------------------------------------------------------------------------------------------------------
def MCMCFit(settingsDict, tab):
    """My attempt at fitting using MCMC and maximum likelihood.

    settingsDict = dictionary containing MCMC parameters and settings
    tab          = table from sampleGetter, with xToFit/yToFit (+ errors), 'E(z)' and 'detP' columns

    You can choose whether to use the likelihood for 'bisector' or 'orthogonal' fitting using the 'method' key.

    Returns a dictionary with the mean value and 68-percentile error for each fitted variable,
    plus the full chain ('pars'), convergence z-statistics ('zStatistic'), and 'AIC'/'AICc'
    model selection statistics.
    """
    # Can now swap axes for testing purposes
    if 'swapAxes' in settingsDict.keys():
        swapAxes=settingsDict['swapAxes']
    else:
        swapAxes=False
    print "... swapAxes = ", swapAxes
    # Choice of method
    method=settingsDict['method']
    if method == 'orthogonal':
        likelihood=csr.fastOrthogonalLikelihood
        variables=['A', 'B', 'C', 'S']
        numFreePars=4
    elif method == 'bisector':
        likelihood=csr.fastBisectorLikelihood
        variables=['A', 'B', 'C', 'Sx', 'Sy']
        numFreePars=5
    settingsDict['variables']=variables # A handy place to store this for cutting down code elsewhere
    # Per-variable Gaussian proposal scales, read from the .par file
    scales=[]
    for v in variables:
        scales.append(settingsDict['%sScale' % (v)])
    # Start by writing this in python, but calling the likelihood function in cython
    # MCMC parameters
    numSamples=settingsDict['numSamples'] # Total number of random steps over likelihood surface
    burnSamples=settingsDict['burnSamples'] # Throw away initial bunch of this many samples
    thinning=settingsDict['thinning'] # Keep only every ith sample - good in some ways, bad in others
    # Choice of evolution models
    if settingsDict['evoModel'] == '1+z':
        log10RedshiftEvo=np.log10(tab[settingsDict['redshiftColumnName']]+1)
    elif settingsDict['evoModel'] == 'E(z)':
        log10RedshiftEvo=np.log10(tab['E(z)'])
    else:
        raise Exception, "didn't understand evoModel '%s'" % (evoModel)
    #log10RedshiftEvo=np.array(log10RedshiftEvo, dtype = float)
    # To start with, we're going to use the same proposal distribution for everything
    # But later on we could dig out the correlated random numbers code to generate random parameter values that
    # satisfy the covariance we see between parameters, which would speed things up.
    cPars=selectStartParsFromPriors(settingsDict)
    #print "... starting values [A, B, C, S] = [%.2f, %.2f, %.2f, %.2f]" % (cA, cB, cC, cS)
    # Byte swapping festival to keep cython happy
    yToFit=byteSwapArr(tab['yToFit'])
    yErrToFitPlus=byteSwapArr(tab['yErrToFitPlus'])
    yErrToFitMinus=byteSwapArr(tab['yErrToFitMinus'])
    xToFit=byteSwapArr(tab['xToFit'])
    xErrToFitPlus=byteSwapArr(tab['xErrToFitPlus'])
    xErrToFitMinus=byteSwapArr(tab['xErrToFitMinus'])
    detP=byteSwapArr(tab['detP'])
    # Another thing... fix this later properly... but if everything isn't same data type, cython falls over
    yToFit=np.array(tab['yToFit'], dtype = np.float64)
    yErrToFitPlus=np.array(tab['yErrToFitPlus'], dtype = np.float64)
    yErrToFitMinus=np.array(tab['yErrToFitMinus'], dtype = np.float64)
    xToFit=np.array(tab['xToFit'], dtype = np.float64)
    xErrToFitPlus=np.array(tab['xErrToFitPlus'], dtype = np.float64)
    xErrToFitMinus=np.array(tab['xErrToFitMinus'], dtype = np.float64)
    log10RedshiftEvo=np.array(log10RedshiftEvo, dtype = np.float64)
    detP=np.array(tab['detP'], dtype = np.float64)
    # Evaluate the likelihood at the chain's starting position; when swapAxes
    # is on, x and y (and their errors) are simply fed in the other way around
    if swapAxes == False:
        try:
            cProb, probArray=likelihood(cPars, yToFit, yErrToFitPlus, yErrToFitMinus, xToFit, xErrToFitPlus,
                                        xErrToFitMinus, log10RedshiftEvo, detP)
        except:
            print "byte swapping problem?"
            IPython.embed()
            sys.exit()
    else:
        cProb, probArray=likelihood(cPars, xToFit, xErrToFitPlus, xErrToFitMinus, yToFit, yErrToFitPlus,
                                    yErrToFitMinus, log10RedshiftEvo, detP)
    if cProb == 0:
        raise Exception, "initial position in MCMC chain has zero probability - change initial values/fiddle with priors in .par file?"
    allPars=[] # == 'the Markov chain'
    likelihoods=[]
    # Metropolis-Hastings (actually just Metropolis since our candidate distribution is symmetric)
    for k in range(numSamples):
        # Progress update
        tenPercent=numSamples/10
        for j in range(0,11):
            if k == j*tenPercent:
                print "... "+str(j*10)+"% complete ..."
        pPars=makeProposal(cPars, scales, settingsDict)
        if swapAxes == False:
            pProb, probArray=likelihood(pPars, yToFit, yErrToFitPlus, yErrToFitMinus, xToFit, xErrToFitPlus,
                                        xErrToFitMinus, log10RedshiftEvo, detP)
        else:
            pProb, probArray=likelihood(pPars, xToFit, xErrToFitPlus, xErrToFitMinus, yToFit, yErrToFitPlus,
                                        yErrToFitMinus, log10RedshiftEvo, detP)
        if np.isinf(pProb) == True:
            print "Hmm - infinite probability?"
            IPython.embed()
            sys.exit()
        # Changed below because we're now dealing with log10 probabilities instead of the actual numbers
        alpha=pProb-cProb
        acceptProposal=False
        if alpha > 0:
            acceptProposal=True
        else:
            U=math.log10(np.random.uniform(0, 1))
            if U <= alpha:
                acceptProposal=True
        # Our prior is uniform, so we're really just using it to force the answer into a range
        # i.e. if it's not 1.0, then something has strayed out of the box.
        pPrior=getPPrior(pPars, settingsDict)
        if acceptProposal == True and pPrior == 1.0:
            cPars=pPars
            cProb=pProb
            # Only keep samples after burning in and also thin as we go along
            if k > burnSamples and k % thinning == 0:
                # If we want to plot the trace (i.e. to check mixing) then we want to store these always in some fashion
                # As it is, we're only keeping the ones that are drawn from the probability distributions
                allPars.append(cPars)
                likelihoods.append(pProb)
    allPars=np.array(allPars)
    likelihoods=np.array(likelihoods)
    # If we swap axes, it's just easier to transform back into a form we know
    if 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True:
        a=-allPars[:, 0]/allPars[:, 1]
        b=1.0/allPars[:, 1]
        allPars[:, 0]=a
        allPars[:, 1]=b
    # Geweke test to check if the chain has converged
    # If z < 2 then we're converged
    index10Percent=int(len(allPars)*0.1)
    index50Percent=int(len(allPars)*0.5)
    mean10Percent=allPars[:index10Percent].mean(axis = 0)
    mean50Percent=allPars[::-1][:index50Percent].mean(axis = 0)
    var10Percent=allPars[:index10Percent].var(axis = 0)
    var50Percent=allPars[::-1][:index50Percent].var(axis = 0)
    zStatistic=(mean10Percent-mean50Percent)/np.sqrt(var10Percent+var50Percent)
    zStatistic=np.nan_to_num(zStatistic)
    # Zap entries in here that are fixed (avoids round off or div 0 making them look large when we don't care)
    for i in range(len(variables)):
        v=variables[i]
        if settingsDict['%sFit' % (v)] == 'fixed':
            zStatistic[i]=0.0
            numFreePars=numFreePars-1
    # Max likelihood values are simply the mean of the values in the probability distribution
    # 1-sigma errors are similarly easy (could also use calc1SigmaError routine, but this is quicker)
    resultsDict={}
    for i in range(len(variables)):
        v=variables[i]
        resultsDict['%s' % (v)]=allPars[:, i].mean()
        resultsDict['%sErr' % (v)]=calc68Percentile(allPars[:, i])
    # Scott's translation of orthogonal scatter S into scatter in y-variable at fixed x-variable
    if method == 'orthogonal':
        s=allPars[:, 3]/np.cos(np.arctan(allPars[:, 1]))
        resultsDict['s']=s.mean()
        resultsDict['sErr']=calc68Percentile(s)
    # We have numFreePars above
    lnL=np.log(np.power(10, likelihoods))
    resultsDict['AIC']=2*numFreePars-2*lnL.max()
    resultsDict['AICc']=resultsDict['AIC']+(2*numFreePars*(numFreePars+1))/(float(len(tab))-numFreePars-1)
    resultsDict['pars']=allPars
    resultsDict['zStatistic']=zStatistic
    # chi-sq
    #yMod=(xToFit*resultsDict['B'])+resultsDict['A']+resultsDict['C']*log10RedshiftEvo
    #chiSq=np.sum(np.power(yToFit-yMod, 2)/np.power(yErrToFitPlus, 2))
    #resultsDict['chiSq']=chiSq
    #print "check chiSq"
    #IPython.embed()
    #sys.exit()
    return resultsDict
#-------------------------------------------------------------------------------------------------------------
def makeProposal(pars, scales, settingsDict):
    """Generates random set of parameters in format [A, B, C, S] for feeding into likelihood function.
    Proposal distributions are assumed Gaussian with scales [AScale, BScale, CScale, SScale].

    Fixed parameters are reset to their '<v>0' values after drawing; scatter
    terms (index 3 onwards) are forced positive.
    """
    axesSwapped = 'swapAxes' in settingsDict.keys() and settingsDict['swapAxes'] == True
    # When testing with swapped axes, transform (A, B) into the space in which
    # the proposal scales are defined (NOTE: this mutates pars in place, as before)
    if axesSwapped:
        invSlope = 1.0 / pars[1]
        invNorm = -pars[0] / pars[1]
        pars[0] = invNorm
        pars[1] = invSlope
    prop = np.random.normal(pars, scales)
    # ... and transform the draw back again
    if axesSwapped:
        invSlope = 1.0 / prop[1]
        invNorm = -prop[0] / prop[1]
        prop[0] = invNorm
        prop[1] = invSlope
    # Force scatters +ve
    prop[3:] = abs(prop[3:])
    # Clamp any fixed parameters back to their fixed values
    for slot, name in enumerate(['A', 'B', 'C']):
        if settingsDict['%sFit' % (name)] == 'fixed':
            prop[slot] = settingsDict['%s0' % (name)]
    if settingsDict['method'] == 'orthogonal':
        if settingsDict['SFit'] == 'fixed':
            prop[3] = settingsDict['S0']
    elif settingsDict['method'] == 'bisector':
        if settingsDict['SxFit'] == 'fixed':
            prop[3] = settingsDict['Sx0']
        if settingsDict['SyFit'] == 'fixed':
            prop[4] = settingsDict['Sy0']
    return prop
#-------------------------------------------------------------------------------------------------------------
def make1DProbDensityPlots(fitResults, settingsDict, outDir):
    """Makes 1D plots of probability density distributions.

    One panel per free parameter is drawn (fixed parameters are skipped),
    covering +/- 5 sigma around the mean value, and the multi-panel figure is
    written to <outDir>/1DProb_allPars.pdf.
    """
    sigmaScale=5.0
    bins=30
    variables=settingsDict['variables']
    axes=range(len(variables))
    # Individual plots
    #for v, a in zip(variables, axes):
        #if settingsDict['%sFit' % (v)] == 'free':
            #x=np.linspace(fitResults['%s' % (v)]-sigmaScale*fitResults['%sErr' % (v)],
                          #fitResults['%s' % (v)]+sigmaScale*fitResults['%sErr' % (v)], bins)
            #P1D=LTCythonMCMC.fast1DProbProjection(x, a, fitResults['pars'])
            #make1DPlot(x, P1D, '%s' % (v), '%s = %.3f $\pm$ %.3f' % (v, fitResults['%s' % (v)], fitResults['%sErr' % (v)]),
                       #outDir+os.path.sep+"1DProb_%s.pdf" % (v))
    # Make an uber plot with multiple panels
    # First count the number of free parameters = number of panels
    cols=0
    for v, a in zip(variables, axes):
        if settingsDict['%sFit' % (v)] == 'free':
            cols=cols+1
    plt.figure(figsize=(4.5*cols, 3.94))
    plt.subplots_adjust(0.02, 0.12, 0.98, 0.92, 0.1, 0.1)
    count=0
    for v, a in zip(variables, axes):
        if settingsDict['%sFit' % (v)] == 'free':
            count=count+1
            x=np.linspace(fitResults['%s' % (v)]-sigmaScale*fitResults['%sErr' % (v)],
                          fitResults['%s' % (v)]+sigmaScale*fitResults['%sErr' % (v)], bins)
            # Project the chain onto this parameter axis, normalised to peak = 1
            P1D=csr.fast1DProbProjection(x, a, fitResults['pars'])
            P1D=P1D/P1D.max()
            plt.subplot(1, cols, count)
            ax=plt.gca()
            y=P1D
            fitLabel='%s = %.3f $\pm$ %.3f' % (v, fitResults['%s' % (v)], fitResults['%sErr' % (v)])
            xLabel='%s' % (v)
            plt.plot(x, y, 'k-', label = fitLabel)
            plt.xlabel(xLabel, fontdict = {'size': 14})
            plt.ylabel("")
            plt.yticks([], [])
            ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
            plt.ylim(0, 1.2)
            leg=plt.legend(prop = {'size': 12})
            leg.draw_frame(False)
            plt.draw()
    plt.savefig(outDir+os.path.sep+"1DProb_allPars.pdf")
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def make1DPlot(x, y, xLabel, fitLabel, outFileName):
    """Draw a single 1D marginalised probability curve and write it to outFileName.

    x, y are the parameter grid and (relative) probability values; fitLabel is
    used as the legend entry and xLabel as the horizontal axis title.
    """
    plt.plot(x, y, label=fitLabel)
    plt.xlabel(xLabel)
    plt.ylabel("")
    plt.legend()
    plt.savefig(outFileName)
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeContourPlots(fitResults, outDir, sampleLabel):
    """This takes fit results and turns it into contour plots.

    fitResults  - dictionary from MCMCFit; needs 'A', 'B', 'C', 'S' (+ their Err values)
                  and the full chain under 'pars'
    outDir      - directory the contours_*.pdf plots and matching .fits probability
                  grids are written to
    sampleLabel - label inserted into the output file names

    Only parameter pairs where both 1-sigma errors are non-zero (i.e. both
    parameters were actually fitted) get a plot.
    """
    mlA, mlAErr=fitResults['A'], fitResults['AErr']
    mlB, mlBErr=fitResults['B'], fitResults['BErr']
    mlC, mlCErr=fitResults['C'], fitResults['CErr']
    mlS, mlSErr=fitResults['S'], fitResults['SErr']
    pars=fitResults['pars']
    # Make 2d contour plots of valid combinations, determined by if they have a non null 1 sigma error
    # Each grid runs from -5 to +7 sigma around the ML value, snapped to round numbers, with 81 steps
    As=np.linspace(mlA-5.0*mlAErr-math.fmod(mlA-5.0*mlAErr, 0.1), mlA+7.0*mlAErr-math.fmod(mlA+7.0*mlAErr, 0.1), 81)
    Bs=np.linspace(mlB-5.0*mlBErr-math.fmod(mlB-5.0*mlBErr, 0.1), mlB+7.0*mlBErr-math.fmod(mlB+7.0*mlBErr, 0.1), 81)
    Cs=np.linspace(mlC-5.0*mlCErr-math.fmod(mlC-5.0*mlCErr, 0.1), mlC+7.0*mlCErr-math.fmod(mlC+7.0*mlCErr, 0.1), 81)
    Ss=np.linspace(mlS-5.0*mlSErr-math.fmod(mlS-5.0*mlSErr, 0.05), mlS+7.0*mlSErr-math.fmod(mlS+7.0*mlSErr, 0.05), 81)
    if mlAErr > 0 and mlBErr > 0:
        outFileName=outDir+os.path.sep+"contours_AvB_"+sampleLabel+".pdf"
        PDist2D=csr.fast2DProbProjection(As, Bs, 0, 1, pars)
        astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
        probContourPlot(As, Bs, "A", "B", 0.1, 0.1, mlA, mlB, mlAErr, mlBErr, PDist2D, outFileName)
    if mlAErr > 0 and mlCErr > 0:
        outFileName=outDir+os.path.sep+"contours_AvC_"+sampleLabel+".pdf"
        PDist2D=csr.fast2DProbProjection(As, Cs, 0, 2, pars)
        probContourPlot(As, Cs, "A", "C", 0.1, 0.5, mlA, mlC, mlAErr, mlCErr, PDist2D, outFileName)
        astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
    if mlAErr > 0 and mlSErr > 0:
        outFileName=outDir+os.path.sep+"contours_AvS_"+sampleLabel+".pdf"
        PDist2D=csr.fast2DProbProjection(As, Ss, 0, 3, pars)
        probContourPlot(As, Ss, "A", "S", 0.1, 0.05, mlA, mlS, mlAErr, mlSErr, PDist2D, outFileName)
        astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
    if mlBErr > 0 and mlCErr > 0:
        outFileName=outDir+os.path.sep+"contours_BvC_"+sampleLabel+".pdf"
        PDist2D=csr.fast2DProbProjection(Bs, Cs, 1, 2, pars)
        probContourPlot(Bs, Cs, "B", "C", 0.1, 0.5, mlB, mlC, mlBErr, mlCErr, PDist2D, outFileName)
        astImages.saveFITS(outFileName.replace(".pdf", ".fits"), PDist2D, None)
#-------------------------------------------------------------------------------------------------------------
def probContourPlot(par1Values, par2Values, par1Label, par2Label, par1TickStep, par2TickStep, mlPar1, mlPar2,
                    mlPar1Err, mlPar2Err, PDist2D, outFileName):
    """Make a 2d contour plot of probability surface of given parameters.

    par1Values = values for parameter 1 (plotted on Y axis)
    par2Values = values for parameter 2 (plotted on X axis)
    par1Label = text label for Y axis
    par2Label = text label for X axis
    par1TickStep = tick step along Y axis
    par2TickStep = tick step along X axis
    mlPar1 = maximum likelihood value for parameter 1
    mlPar2 = maximum likelihood value for parameter 2
    mlPar1Err = 1d 1-sigma error in parameter 1
    mlPar2Err = 1d 1-sigma error in parameter 2
    PDist2D = 2d likelihood surface, made using fast2DProbProjection
    outFileName = output file; pass None to skip saving (the figure is closed either way)
    """
    # Splines map parameter values <-> pixel coordinates of PDist2D, so that
    # ticks and the ML point can be placed in pixel space
    tck1=interpolate.splrep(par1Values, np.arange(par1Values.shape[0]))
    par1TickLabels=np.arange(par1Values.min(), par1Values.max(), par1TickStep)
    par1TickIndices=interpolate.splev(par1TickLabels, tck1)
    plt.yticks(par1TickIndices, par1TickLabels)
    tck2=interpolate.splrep(par2Values, np.arange(par2Values.shape[0]))
    par2TickLabels=np.arange(par2Values.min(), par2Values.max(), par2TickStep)
    par2TickIndices=interpolate.splev(par2TickLabels, tck2)
    plt.xticks(par2TickIndices, par2TickLabels)
    # We have to smooth to get decent looking contours
    # Gaussian smoothing preserves the normalisation
    # NOTE: smoothing only needed if very fine grid
    PDist2D=ndimage.gaussian_filter(PDist2D, 1)
    # Work out where to put contours
    sigma1Level=calc2DProbThreshold(PDist2D, 0.683)
    sigma2Level=calc2DProbThreshold(PDist2D, 0.95)
    plt.contour(PDist2D, [sigma1Level, sigma2Level], colors = 'b')
    # Save plot - trim down area first (?) and add axes labels
    plt.plot(interpolate.splev(mlPar2, tck2), interpolate.splev(mlPar1, tck1), 'r*',
             label = "%s = %.2f $\pm$ %.2f, %s = %.2f $\pm$ %.2f" % (par1Label, mlPar1, mlPar1Err, par2Label, mlPar2, mlPar2Err))
    plt.legend(numpoints = 1)
    plt.xlabel(par2Label)
    plt.ylabel(par1Label)
    if outFileName != None:
        plt.savefig(outFileName)
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def calc1SigmaError(par1d, prob1d, mlParValue):
    """Calculates 1d 1-sigma error on a parameter (marginalised, is the word I'm looking for I think)
    relative to the maximum likelihood value.

    par1d      - grid of parameter values
    prob1d     - corresponding (not necessarily normalised) probability density
    mlParValue - maximum likelihood parameter value, taken as the centre of the interval

    Returns the half-width d such that the integral over [mlParValue-d, mlParValue+d]
    is closest to 0.683.

    NOTE: Now we're using MCMC, the regular calc68Percentile routine below works just fine, and is
    quicker than this.
    """
    # np.trapz was renamed np.trapezoid in NumPy 2.0; support both.
    trapz = getattr(np, 'trapezoid', None) or np.trapz
    norm = trapz(prob1d, par1d)
    prob1d = prob1d / norm
    tckPDist = interpolate.splrep(par1d, prob1d)
    target = 0.683  # 1 sigma
    # We need to work out how to choose sensible values here
    dRange = np.linspace(0.0, par1d.max() - mlParValue, 1000)
    bestDiff = 1e6
    dBest = 1e6
    # Brute-force search over interval half-widths for the one whose enclosed
    # probability is closest to the 1-sigma target
    for d in dRange:
        integrationRange = np.linspace(mlParValue - d, mlParValue + d, 1000)
        diff = abs(target - trapz(interpolate.splev(integrationRange, tckPDist), integrationRange))
        if diff < bestDiff:
            bestDiff = diff
            dBest = d
    return dBest
#-------------------------------------------------------------------------------------------------------------
def calc2DProbThreshold(PDist2D, probThresh):
    """Calculates threshold probability per pixel in PDist2D needed to draw confidence contours at
    e.g. the 1-sigma, 2-sigma level.

    Pixels are ranked from most to least probable; the returned level is the
    pixel value at which the cumulative probability is closest to probThresh.
    """
    descending = np.sort(PDist2D.flatten())[::-1]
    cumulative = np.cumsum(descending)
    # First index where the enclosed probability is closest to the target
    closest = np.argmin(np.abs(cumulative - probThresh))
    return descending[closest]
#------------------------------------------------------------------------------------------------------------
def calc68Percentile(arr):
    """Calculates the 68.3-percentile of |arr - median(arr)| (i.e. equivalent to 1-sigma error)
    from an array of samples.
    """
    res=np.sort(np.abs(arr-np.median(arr)))
    index=int(round(0.683*arr.shape[0]))
    # round() can push index up to len(res) for very small arrays (e.g. a
    # single sample), which previously crashed into an interactive debugger -
    # clamp to the last element instead.
    if index >= res.shape[0]:
        index=res.shape[0]-1
    return res[index]
#-------------------------------------------------------------------------------------------------------------
def makeScalingRelationPlot(sampleTab, fitResults, outDir, sampleDict, settingsDict):
    """Make a scaling relation plot.

    sampleTab    - table from sampleGetter for this sample
    fitResults   - dictionary from MCMCFit (uses 'A', 'B' + errors for the fit line and label)
    outDir       - directory the plot is written to
    sampleDict   - the dictionary defining the sample (e.g. min z, max z etc.)
    settingsDict - parameters dictionary (column names, plot ranges/labels, scalings)
    """
    # Stuff we need from settings...
    xColumnName=settingsDict['xColumnName']
    xPlusErrColumnName=settingsDict['xPlusErrColumnName']
    xMinusErrColumnName=settingsDict['xMinusErrColumnName']
    yColumnName=settingsDict['yColumnName']
    yPlusErrColumnName=settingsDict['yPlusErrColumnName']
    yMinusErrColumnName=settingsDict['yMinusErrColumnName']
    xPivot=settingsDict['xPivot']
    xTakeLog10=settingsDict['xTakeLog10']
    yTakeLog10=settingsDict['yTakeLog10']
    redshiftColumnName=settingsDict['redshiftColumnName']
    xScaleFactor=settingsDict['xScaleFactor']
    yScaleFactor=settingsDict['yScaleFactor']
    yScaleFactorPower=settingsDict['yScaleFactorPower']
    # The plot
    plt.figure(figsize=(10, 10))
    plt.axes([0.1, 0.1, 0.85, 0.85])
    # Apply the E(z)^power scaling to the plotted y values, if asked for
    if yScaleFactor != None:
        yPlot=np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yColumnName]
        yPlotErrs=np.array([np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yMinusErrColumnName],
                            np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yPlusErrColumnName]])
    else:
        yPlot=sampleTab[yColumnName]
        yPlotErrs=np.array([sampleTab[yMinusErrColumnName],
                            sampleTab[yPlusErrColumnName]])
    plt.errorbar(sampleTab[xColumnName], yPlot,
                 yerr = yPlotErrs,
                 xerr = np.array([sampleTab[xMinusErrColumnName],
                                  sampleTab[xPlusErrColumnName]]),
                 fmt = 'kD', mec = 'k', label = sampleDict['label']+" (N=%d)" % (len(sampleTab)))
    if xTakeLog10 == True and yTakeLog10 == True:
        plt.loglog()
    elif xTakeLog10 == True and yTakeLog10 == False:
        plt.semilogx()
    elif xTakeLog10 == False and yTakeLog10 == True:
        plt.semilogy()
    #cmdata=np.outer(np.linspace(0, 1, 10), np.linspace(0, 1, 10)) # to easily make a colorbar 0-1
    #cmim=plt.imshow(cmdata, cmap = "gray")
    #ax=plt.axes([0.1, 0.17, 0.85, 0.78])
    # Shade points by detection probability, unless every object has detP == 1
    if np.sum(np.equal(sampleTab['detP'], 1.0)) == len(sampleTab):
        shadeByDetP=False
    else:
        shadeByDetP=True
    if shadeByDetP == True:
        for row, pY in zip(sampleTab, yPlot):
            plt.plot(row[xColumnName], [pY], 'D', color = (row['detP'], row['detP'], row['detP']))
    # Overplot the best-fit relation across the requested x range
    plotRange=np.linspace(settingsDict['xPlotMin'], settingsDict['xPlotMax'], 100)
    if xTakeLog10 == True and yTakeLog10 == True:
        yFit=settingsDict['yPivot']*np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
    elif xTakeLog10 == False and yTakeLog10 == False:
        yFit=settingsDict['yPivot']*(fitResults['A']+fitResults['B']*(plotRange/xPivot))
    else:
        raise Exception, "add semilogx, semilogy fit line code"
    if xPivot != 1.0:
        fitLabel='%s (%s) = 10$^{%.2f \pm %.2f}$ (%s/%.1f %s)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], xPivot, settingsDict['xPlotLabelUnits'], fitResults['B'], fitResults['BErr'])
    else:
        fitLabel='%s (%s) = 10$^{%.2f \pm %.2f}$ (%s)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], fitResults['B'], fitResults['BErr'])
    yLabel="%s (%s)" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'])
    if settingsDict['yScaleFactor'] == "E(z)":
        fitLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+fitLabel
        yLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+yLabel
    plt.plot(plotRange, yFit, 'b--', label = fitLabel)
    ## Below is just diagnostic
    #if sampleLabel == 'REXCESS':
        #prattLabel='$L_{\sf X}$ (erg s$^{-1}$) = 10$^{44.85 \pm 0.06}$ ($T/5.0$ keV)$^{3.35 \pm 0.32}$'
        #prattLabel="$E^{-1}(z)$ "+prattLabel
        #prattLabel="P09: "+prattLabel
        #prattLX=np.power(10, 44.85)*np.power((plotRange/5.0), 3.35)
        #plt.plot(plotRange, prattLX, 'r:', label = prattLabel)
    #sample['plotLabel']=""
    plt.ylabel(yLabel, size = 16)
    plt.xlabel("%s (%s)" % (settingsDict['xPlotLabel'], settingsDict['xPlotLabelUnits']), size = 16)
    plt.xlim(settingsDict['xPlotMin'], settingsDict['xPlotMax'])
    plt.ylim(settingsDict['yPlotMin'], settingsDict['yPlotMax'])
    if settingsDict['showPlotLegend'] == True:
        leg=plt.legend(loc = 'upper left', prop = {'size': 16}, scatterpoints = 1, numpoints = 1)
        leg.draw_frame(False)
        plt.draw()
    ax=plt.gca()
    plt.text(0.95, 0.05, sampleDict['plotLabel'], ha = 'right', va = 'center', transform = ax.transAxes,
             fontdict = {"size": 16, "linespacing" : 1.2, 'family': 'serif'})
    outFileName=outDir+os.path.sep+"scalingRelation_%s_%s.pdf" % (yColumnName, xColumnName)
    plt.savefig(outFileName)
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeScalingRelationPlot_ABC(sampleTab, fitResults, outDir, sampleDict, settingsDict, mode = 'normal'):
    """Make a scaling relation plot with y values scaling by normalisation and z evolution.

    sampleTab = table for the sample (column access by name; must include an 'E(z)' column)
    fitResults = fit results dictionary ('A', 'B', 'C', their '...Err' entries, and
        'plotLabel_C' are used here)
    outDir = directory the output .pdf is written to
    sampleDict = the dictionary defining the sample (e.g. min z, max z etc.); only
        'plotLabel' is used here
    settingsDict = plot/fit settings (column names, axis labels and limits, xPivot,
        evoModel, yScaleFactor, yScaleFactorPower)
    mode = 'normal' for points colour-coded in three redshift bins, or 'PDetCoded'
        for a legacy detection-probability grey-scale plot (see NOTE in that branch)
    """
    # Stuff we need from settings...
    xColumnName=settingsDict['xColumnName']
    xPlusErrColumnName=settingsDict['xPlusErrColumnName']
    xMinusErrColumnName=settingsDict['xMinusErrColumnName']
    yColumnName=settingsDict['yColumnName']
    yPlusErrColumnName=settingsDict['yPlusErrColumnName']
    yMinusErrColumnName=settingsDict['yMinusErrColumnName']
    xPivot=settingsDict['xPivot']
    xTakeLog10=settingsDict['xTakeLog10']
    yTakeLog10=settingsDict['yTakeLog10']
    redshiftColumnName=settingsDict['redshiftColumnName']
    xScaleFactor=settingsDict['xScaleFactor']
    yScaleFactor=settingsDict['yScaleFactor']
    yScaleFactorPower=settingsDict['yScaleFactorPower']
    # The plot...
    # If a y scale factor is configured, plot E(z)^power * y, scaling the errors the same way
    if yScaleFactor != None:
        yPlot=np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yColumnName]
        yPlotErrs=np.array([np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yMinusErrColumnName],
                            np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yPlusErrColumnName]])
    else:
        yPlot=sampleTab[yColumnName]
        yPlotErrs=np.array([sampleTab[yMinusErrColumnName],
                            sampleTab[yPlusErrColumnName]])
    fitLabel='%s = 10$^{%.2f \pm %.2f}$ (%s/%d)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], xPivot, fitResults['B'], fitResults['BErr'])
    yLabel="%s (%s)" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'])
    # Divide out the fitted redshift evolution so points are shown in the z = 0 frame;
    # the applied factor is recorded in both the fit label and the y axis label
    if settingsDict['evoModel'] == '1+z':
        yPlot=np.power(sampleTab[redshiftColumnName]+1, -fitResults['C'])*yPlot
        yPlotErrs=np.power(sampleTab[redshiftColumnName]+1, -fitResults['C'])*yPlotErrs
        fitLabel=fitLabel+' (1+$z$)$^{%s}$' % (fitResults['plotLabel_C'])
        yLabel=yLabel.replace("(%s)" % (settingsDict['yPlotLabelUnits']), "(1+$z$)$^{%.1f}$ (%s)" % (-1*fitResults['C'], settingsDict['yPlotLabelUnits']))
    elif settingsDict['evoModel'] == 'E(z)':
        yPlot=np.power(sampleTab['E(z)'], -fitResults['C'])*yPlot
        yPlotErrs=np.power(sampleTab['E(z)'], -fitResults['C'])*yPlotErrs
        fitLabel=fitLabel+' $E(z)^{%s}$' % (fitResults['plotLabel_C'])
        yLabel=yLabel.replace("(%s)" % (settingsDict['yPlotLabelUnits']), "$E(z)^{%.1f}$ (%s)" % (-1*fitResults['C'], settingsDict['yPlotLabelUnits']))
    # Prefix the labels with the E(z)^power factor applied to the y values above
    if settingsDict['yScaleFactor'] == "E(z)":
        fitLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+fitLabel
        yLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+yLabel
    if mode == 'normal':
        plt.figure(figsize=(8, 8))
        ax=plt.axes([0.11, 0.1, 0.86, 0.85])
        # Fit line drawn well beyond the data range; the axis limits clip it later
        plotRange=np.linspace(0.1*sampleTab[xColumnName].min(), 10*sampleTab[xColumnName].max(), 100)
        yFit=np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
        plt.plot(plotRange, yFit, 'b--', label = fitLabel)
        outFileName=outDir+os.path.sep+"scalingRelation_%s_%s_ABC.pdf" % (settingsDict['yColumnName'], settingsDict['xColumnName'])
        # Old
        #plt.errorbar(sampleTab['temp'], plotLXs,
        #yerr = plotLXErrs,
        #xerr = np.array([sampleTab['temp_min'],
        #sampleTab['temp_max']]),
        #fmt = 'kD', mec = 'k', label = sampleLabel+" (N=%d)" % (len(sampleTab)))
        # New (coding by redshift)
        zBins=[[0.0, 0.25], [0.25, 0.5], [0.5, 1.5]]
        labels=["0.0 < $z$ < 0.25", "0.25 < $z$ < 0.5", "0.5 < $z$ < 1.5"]
        #colours=['k', [0.5, 0, 1], [1, 0.5, 0]]
        colours=['k', 'c', 'r']
        symbols=['D', 'o', '^']
        # One errorbar series per redshift bin; bins are low-exclusive, high-inclusive
        for zBin, col, s, l in zip(zBins, colours, symbols, labels):
            mask=np.logical_and(np.greater(sampleTab[redshiftColumnName], zBin[0]), np.less_equal(sampleTab[redshiftColumnName], zBin[1]))
            plt.errorbar(sampleTab[xColumnName][mask], yPlot[mask],
                         yerr = yPlotErrs[:, mask],
                         xerr = np.array([sampleTab[xMinusErrColumnName][mask],
                                          sampleTab[xPlusErrColumnName][mask]]),
                         fmt = s, ecolor = col, mfc = col, mec = col, label = l)
    elif mode == 'PDetCoded':
        # NOTE(review): legacy branch - 'pivotT' and 'plotLXs' are not defined anywhere
        # in this function, so this mode will raise NameError if exercised. Confirm
        # whether pivotT should be xPivot and where plotLXs was meant to come from.
        plotRange=np.linspace(0.1, 22.0, 100)
        fitLXs=np.power(10, fitResults['A'])*np.power((plotRange/pivotT), fitResults['B'])
        #fitLabel='$L_{\sf X}$ (erg s$^{-1}$) = 10$^{%.2f \pm %.2f}$ ($T/%.1f$ keV)$^{%.2f \pm %.2f}$ (1+$z$)$^{%.2f \pm %.2f}$' % (fitResults['A'], fitResults['AErr'], pivotT, fitResults['B'], fitResults['BErr'], fitResults['C'], fitResults['CErr'])
        plt.plot(plotRange, fitLXs, 'b--', label = fitLabel)
        outFileName=outDir+os.path.sep+"L-T_ABC_PDetCoded.pdf"
        plt.figure(figsize=(8, 8))
        # Tiny throwaway axes used only to make an image for the colorbar
        plt.axes([0.5, 0.5, 0.1, 0.1])
        cmdata=np.outer(np.linspace(0, 1, 10), np.linspace(0, 1, 10)) # to easily make a colorbar 0-1
        cmim=plt.imshow(cmdata, cmap = "gray")
        ax=plt.axes([0.1, 0.17, 0.85, 0.78])
        # Grey level of each point encodes its detection probability ('detP' column)
        for row, pLX in zip(sampleTab, plotLXs):
            plt.plot(row['temp'], [pLX], 'D', color = (row['detP'], row['detP'], row['detP']))
        cmax=plt.axes([0.1, 0.075, 0.85, 0.1], frameon=False)
        plt.xticks([], [])
        plt.yticks([], [])
        plt.colorbar(cmim, orientation = 'v', aspect = 40.0)
        plt.figtext(0.52, 0.03, "P$_{\sf det}$", va = 'center', ha = 'center')
        plt.axes(ax)
    else:
        # Python 2 raise syntax (this module is Python 2 throughout)
        raise Exception, "didn't understand mode"
    plt.loglog()
    plt.ylabel(yLabel, size = 16)
    plt.xlabel("%s (%s)" % (settingsDict['xPlotLabel'], settingsDict['xPlotLabelUnits']), size = 16)
    plt.xlim(settingsDict['xPlotMin'], settingsDict['xPlotMax'])
    plt.ylim(settingsDict['yPlotMin'], settingsDict['yPlotMax'])
    #leg=plt.legend(loc = 'upper left', prop = {'size': 16}, scatterpoints = 1, numpoints = 1)
    #leg.draw_frame(False)
    plt.draw()
    ax=plt.gca()
    plt.text(0.95, 0.05, sampleDict['plotLabel'], ha = 'right', va = 'center', transform = ax.transAxes,
             fontdict = {"size": 16, "linespacing" : 1.2, 'family': 'serif'})
    plt.savefig(outFileName)
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeScalingRelationPlots_sideBySide(sampleDefs, outDir, settingsDict):
    """Makes side by side subpanel plots of all the scaling relations in sampleDefs.

    sampleDefs = list of sample definition dictionaries; each needs 'stab' (data table),
        'fitResults', 'label' and 'plotLabel' entries
    outDir = directory the combined .pdf is written to
    settingsDict = settings shared by all panels (column names, plot labels and limits,
        xPivot, yPivot, yScaleFactor, yScaleFactorPower)
    """
    # Stuff we need from settings...
    xColumnName=settingsDict['xColumnName']
    xPlusErrColumnName=settingsDict['xPlusErrColumnName']
    xMinusErrColumnName=settingsDict['xMinusErrColumnName']
    yColumnName=settingsDict['yColumnName']
    yPlusErrColumnName=settingsDict['yPlusErrColumnName']
    yMinusErrColumnName=settingsDict['yMinusErrColumnName']
    xPivot=settingsDict['xPivot']
    xTakeLog10=settingsDict['xTakeLog10']
    yTakeLog10=settingsDict['yTakeLog10']
    redshiftColumnName=settingsDict['redshiftColumnName']
    xScaleFactor=settingsDict['xScaleFactor']
    yScaleFactor=settingsDict['yScaleFactor']
    yScaleFactorPower=settingsDict['yScaleFactorPower']
    # Make an uber plot with multiple panels
    # NOTE: add adjustable layout later...
    cols=len(sampleDefs)
    plt.figure(figsize=(6*cols, 6))
    plt.subplots_adjust(0.05, 0.1, 0.99, 0.99, 0.02, 0.02)
    count=0
    for s in sampleDefs:
        sampleTab=s['stab']
        fitResults=s['fitResults']
        count=count+1
        plt.subplot(1, cols, count)
        # Apply the E(z)^power scaling to y values and errors if configured
        if yScaleFactor != None:
            yPlot=np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yColumnName]
            yPlotErrs=np.array([np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yMinusErrColumnName],
                                np.power(sampleTab['E(z)'], yScaleFactorPower)*sampleTab[yPlusErrColumnName]])
        else:
            yPlot=sampleTab[yColumnName]
            yPlotErrs=np.array([sampleTab[yMinusErrColumnName],
                                sampleTab[yPlusErrColumnName]])
        plt.errorbar(sampleTab[xColumnName], yPlot,
                     yerr = yPlotErrs,
                     xerr = np.array([sampleTab[xMinusErrColumnName],
                                      sampleTab[xPlusErrColumnName]]),
                     fmt = 'kD', mec = 'k', label = s['label']+" (N=%d)" % (len(sampleTab)))
        plt.loglog()
        # Fit line over a generous range; clipped by the axis limits set below
        plotRange=np.linspace(0.1*sampleTab[xColumnName].min(), 10*sampleTab[xColumnName].max(), 100)
        # NOTE(review): unlike makeScalingRelationPlot_ABC, the fit line here is also
        # multiplied by settingsDict['yPivot'] - confirm this is intended.
        yFit=settingsDict['yPivot']*np.power(10, fitResults['A'])*np.power((plotRange/xPivot), fitResults['B'])
        fitLabel='%s (%s) = 10$^{%.2f \pm %.2f}$ (%s/%.1f %s)$^{%.2f \pm %.2f}$' % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'], fitResults['A'], fitResults['AErr'], settingsDict['xPlotLabel'], xPivot, settingsDict['xPlotLabelUnits'], fitResults['B'], fitResults['BErr'])
        yLabel="%s (%s)" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabelUnits'])
        # Prefix labels with the E(z)^power factor applied above
        if settingsDict['yScaleFactor'] == "E(z)":
            fitLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+fitLabel
            yLabel="$E^{%d}(z)$ " % (settingsDict['yScaleFactorPower'])+yLabel
        plt.plot(plotRange, yFit, 'b--', label = fitLabel)
        plt.ylabel(yLabel, size = 16)
        plt.xlabel("%s (%s)" % (settingsDict['xPlotLabel'], settingsDict['xPlotLabelUnits']), size = 16)
        ax=plt.gca()
        plt.text(0.95, 0.05, s['plotLabel'], ha = 'right', va = 'center', transform = ax.transAxes,
                 fontdict = {"size": 16, "linespacing" : 1.2, 'family': 'serif'})
        # Only the leftmost panel keeps its y tick labels; the rest are blanked
        if count > 1:
            ylocs, ylabels=plt.yticks()
            plt.ylabel("")
            plt.yticks(ylocs, [""]*len(ylabels))
        plt.xlim(settingsDict['xPlotMin'], settingsDict['xPlotMax'])
        plt.ylim(settingsDict['yPlotMin'], settingsDict['yPlotMax'])
    outFileName=outDir+os.path.sep+"scalingRelation_multiPlot_%s_%s.pdf" % (yColumnName, xColumnName)
    plt.savefig(outFileName)
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def makeRoundedPlotLabelStrings(fitResults, variables, numSigFig = 1):
    """Attach 'value \pm error' label strings to fitResults for use in plot legends.

    For each variable name in *variables* (in order), formats fitResults[name] and
    fitResults[name+'Err'] to a fixed number of decimal places and stores the result
    under fitResults['plotLabel_'+name]. Variables with a zero error are skipped.
    NOTE: despite the numSigFig parameter, true significant-figure rounding is
    currently disabled - fixed decimal places per variable slot are used instead.
    """
    # Fixed decimal places for each variable slot (not significant figures)
    decimalPlaces=[2, 2, 1, 3, 3]
    for varName, dp in zip(variables, decimalPlaces):
        errValue=fitResults['%sErr' % (varName)]
        if errValue == 0:
            # No error estimate - leave this variable without a plot label
            continue
        fmt="%."+str(dp)+"f"
        valStr=fmt % (fitResults['%s' % (varName)])
        errStr=fmt % (errValue)
        fitResults['plotLabel_%s' % (varName)]="%s \pm %s" % (valStr, errStr)
#-------------------------------------------------------------------------------------------------------------
def makeNormEvoPlot(stab, fitResults, outDir, settingsDict):
    """Makes plot of evolution of the normalisation.

    Plots each object's y value divided by the z = 0 best-fit relation against
    redshift, together with the fitted evolution model ((1+z)^C or E(z)^C, with a
    1-sigma shaded band) and a 'no evolution' reference line. Output is written to
    normEvo_<yColumn>_<xColumn>.pdf in outDir.

    stab = sample table (must include an 'E(z)' column plus the configured x, y and
        redshift columns)
    fitResults = fit results dictionary ('A', 'B', 'C', 'CErr' are used)
    outDir = output directory
    settingsDict = settings (column names, plot labels, xPivot, evoModel,
        yScaleFactor, yScaleFactorPower)
    """
    # Sample E(z) over the plotted redshift range for the model curves
    zs=np.linspace(0, 2.0, 100)
    Ez=[]
    for z in zs:
        Ez.append(astCalc.Ez(z))
    Ez=np.array(Ez)
    plt.figure(figsize=(8,6))
    plt.axes([0.13, 0.1, 0.85, 0.86])
    xColumnName=settingsDict['xColumnName']
    yColumnName=settingsDict['yColumnName']
    redshiftColumnName=settingsDict['redshiftColumnName']
    yLabel="%s / %s$_{Fit (z=0)}$" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabel'])
    # If we have applied E(z)^{some power}, we want to plot that expected scaling,
    # as well as a null line for no evolution
    if settingsDict['yScaleFactor'] == 'E(z)':
        dataNormalisation=((np.power(stab['E(z)'], settingsDict['yScaleFactorPower'])*stab[yColumnName])/np.power(stab[xColumnName]/settingsDict['xPivot'], fitResults['B']))/np.power(10, fitResults['A'])
        nullLine=np.power(Ez, settingsDict['yScaleFactorPower']) # because E(z)^{some power} is flat in this form, null line is not
        yScalingLine=np.ones(len(Ez)) # because we've scaled it out it's flat
        yLabel="($E^{-1}(z)$ %s) / %s$_{Fit (z=0)}$" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabel'])
    else:
        dataNormalisation=(stab[yColumnName]/np.power(stab[xColumnName]/settingsDict['xPivot'], fitResults['B']))/np.power(10, fitResults['A'])
        # NOTE(review): a zero line is invisible on the semilogy axes below and the
        # data normalisation is a ratio around 1 - confirm whether np.ones() was
        # intended here.
        nullLine=np.zeros(len(Ez))
        yScalingLine=None
        yLabel="%s / %s$_{Fit (z=0)}$" % (settingsDict['yPlotLabel'], settingsDict['yPlotLabel'])
    dataLabel='%s$_{Fit (z=0)}$ = (%s/%d)$^{%.2f}$ / 10$^{%.2f}$' % (settingsDict['yPlotLabel'], settingsDict['xPlotLabel'], settingsDict['xPivot'], fitResults['B'], fitResults['A'])
    if settingsDict['yScaleFactor'] == 'E(z)':
        # Look for fractions so the scaling line label reads e.g. E(z)^{-2/3}
        if settingsDict['yScaleFactorPower'] == -1:
            yScalingLineLabel='$E(z)$'
        elif abs(settingsDict['yScaleFactorPower']) == 2/3.0:
            yScalingLineLabel='$E(z)$'
            powerFactor=settingsDict['yScaleFactorPower']
            # Need to swap power, remember we scaled these out...
            if powerFactor > 0:
                yScalingLineLabel=yScalingLineLabel+"$^{-2/3}$"
            else:
                yScalingLineLabel=yScalingLineLabel+"$^{2/3}$"
        else:
            # Unhandled power: drop into a debug session rather than mislabel the plot
            print("yScalingLineLabel fraction handling?")
            IPython.embed()
            sys.exit()
    plt.plot(stab[redshiftColumnName], dataNormalisation, 'kD', label = dataLabel)
    # FIX: was 'np.any(yScalingLine) != None', which is always True (np.any returns a
    # bool, never None); with yScalingLine == None (non-E(z) branch above) that plotted
    # None and referenced the undefined yScalingLineLabel, raising NameError.
    if yScalingLine is not None:
        plt.plot(zs, yScalingLine, 'b--', label = yScalingLineLabel, lw = 2)
    plt.plot(zs, nullLine, 'g-.', label = 'no evolution', lw = 2)
    # Fitted evolution model plus the curves bounding its 1-sigma band
    if settingsDict['evoModel'] == '1+z':
        plt.plot(zs, np.power(1+zs, fitResults['C']), 'r', lw = 2, label = '(1+z)$^{%.2f \pm %.2f}$' % (fitResults['C'], fitResults['CErr']))
        shadedX=np.linspace(0, 2.0, 100)
        shadedYPlus=np.power(shadedX+1, fitResults['C']+fitResults['CErr'])
        shadedYMinus=np.power(shadedX+1, fitResults['C']-fitResults['CErr'])
    elif settingsDict['evoModel'] == 'E(z)':
        plt.plot(zs, np.power(Ez, fitResults['C']), 'r', lw = 2, label = '$E(z)^{%.2f \pm %.2f}$' % (fitResults['C'], fitResults['CErr']))
        shadedX=np.linspace(0, 2.0, len(Ez))
        shadedYPlus=np.power(Ez, fitResults['C']+fitResults['CErr'])
        shadedYMinus=np.power(Ez, fitResults['C']-fitResults['CErr'])
    # NOTE(review): any other evoModel value leaves shadedX/shadedYPlus/shadedYMinus
    # undefined and the fill below raises NameError - confirm only these two models occur.
    # Legend placement depends on the sign of the fitted evolution
    if fitResults['C'] < 0:
        loc="upper right"
    else:
        loc="lower left"
    leg=plt.legend(loc = loc, prop = {'size': 14}, numpoints = 1)
    leg.draw_frame(False)
    plt.draw()
    plt.xlabel("$z$", fontdict = {'size': 20})
    plt.ylabel(yLabel, fontdict = {'size': 20})
    # Closed polygon: +1 sigma curve out, -1 sigma curve back
    xs=shadedX.tolist()+shadedX[::-1].tolist()
    ys=shadedYPlus.tolist()+shadedYMinus[::-1].tolist()
    plt.fill(xs, ys, 'b', alpha=0.2, edgecolor='none', label = "None", lw = 0.1)
    plt.semilogy()
    #plt.loglog()
    plt.xlim(0, 1.6)
    plt.ylim(1e-2, 1e2)
    plt.savefig(outDir+os.path.sep+"normEvo_%s_%s.pdf" % (yColumnName, xColumnName))
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def makePaperContourPlots(fitResults, parDict, outDir):
    """Special case of plots, for 4 parameter fits, for the paper.

    Produces a triangle-style 4x4 grid of 2d probability contour plots for every
    pair of (A, B, C, S) plus the 1d marginalised distributions, written to
    2DProb_allPars.pdf in outDir. Returns None early if the fit used the bisector
    method (no 'S' key) or if any parameter has a zero 1-sigma error.

    fitResults = fit results dictionary; needs A/B/C/S, their '...Err' entries,
        the 'plotLabel_*' strings and the 'pars' samples used for the projections
    parDict = not referenced in this function (kept for interface compatibility -
        confirm with callers before removing)
    outDir = output directory
    """
    # Bisector-method fits carry no scatter parameter, so no surfaces to project
    if 'S' not in fitResults.keys():
        print "... using bisector method - 2D contour plots disabled ..."
        return None
    mlA, mlAErr=fitResults['A'], fitResults['AErr']
    mlB, mlBErr=fitResults['B'], fitResults['BErr']
    mlC, mlCErr=fitResults['C'], fitResults['CErr']
    mlS, mlSErr=fitResults['S'], fitResults['SErr']
    pars=fitResults['pars']
    # We only want to go on if we have a full set...
    if mlAErr == 0 or mlBErr == 0 or mlCErr == 0 or mlSErr == 0:
        return None
    plt.figure(figsize=(10, 10))
    plt.subplots_adjust(0.08, 0.07, 0.97, 0.97, 0.0, 0.0)
    # Make 2d contour plots of valid combinations, determined by if they have a non null 1 sigma error
    # NOTE: here steps have to be smaller than AStep, BStep, CStep, SStep below
    # NOTE: any strange numbers in here are fiddling to get non-overlapping plot labels
    # Parameter grids: +/- 5 sigma around the ML value, snapped via math.fmod so the
    # endpoints land on 'round' values that tick labels can use
    As=np.linspace(mlA-5.0*mlAErr-math.fmod(mlA-5.0*mlAErr, 0.1), mlA+5.0*mlAErr-math.fmod(mlA+5.0*mlAErr, 0.1), 81)
    Bs=np.linspace(mlB-5.0*mlBErr-math.fmod(mlB-5.0*mlBErr, 0.1), mlB+5.0*mlBErr-math.fmod(mlB+5.0*mlBErr, 0.1), 81)
    Cs=np.linspace(mlC-5.0*mlCErr-math.fmod(mlC-5.0*mlCErr, 0.1), mlC+5.0*mlCErr-math.fmod(mlC+5.0*mlCErr, 0.1), 81)
    Ss=np.linspace(mlS-5.0*mlSErr-math.fmod(mlS-5.0*mlSErr, 0.01), mlS+5.0*mlSErr-math.fmod(mlS+5.0*mlSErr, 0.01), 81)
    # Steps for tick label plotting adjustment
    AStep=0.2
    BStep=0.4
    CStep=1.0
    SStep=0.02
    # Bottom row
    # AB
    plt.subplot(4, 4, 15)
    PDist2D=csr.fast2DProbProjection(As, Bs, 0, 1, pars)
    probContourPlot_subPlot(As, Bs, "A", "B", AStep, BStep, mlA, mlB, mlAErr, mlBErr, PDist2D, noYLabels = True)
    # AC
    plt.subplot(4, 4, 14)
    PDist2D=csr.fast2DProbProjection(As, Cs, 0, 2, pars)
    probContourPlot_subPlot(As, Cs, "A", "C", AStep, CStep, mlA, mlC, mlAErr, mlCErr, PDist2D, noYLabels = True)
    # AS
    plt.subplot(4, 4, 13)
    PDist2D=csr.fast2DProbProjection(As, Ss, 0, 3, pars)
    probContourPlot_subPlot(As, Ss, "A", "S", AStep, SStep, mlA, mlS, mlAErr, mlSErr, PDist2D)
    # Middle row
    # BC
    plt.subplot(4, 4, 10)
    PDist2D=csr.fast2DProbProjection(Bs, Cs, 1, 2, pars)
    probContourPlot_subPlot(Bs, Cs, "B", "C", BStep, CStep, mlB, mlC, mlBErr, mlCErr, PDist2D, noXLabels = True, noYLabels = True)
    # BS
    plt.subplot(4, 4, 9)
    PDist2D=csr.fast2DProbProjection(Bs, Ss, 1, 3, pars)
    probContourPlot_subPlot(Bs, Ss, "B", "S", BStep, SStep, mlB, mlS, mlBErr, mlSErr, PDist2D, noXLabels = True)
    # Top row
    # CS
    plt.subplot(4, 4, 5)
    PDist2D=csr.fast2DProbProjection(Cs, Ss, 2, 3, pars)
    probContourPlot_subPlot(Cs, Ss, "C", "S", CStep, SStep, mlC, mlS, mlCErr, mlSErr, PDist2D, noXLabels = True)
    # 1D plots
    # S
    plt.subplot(4, 4, 1)
    PDist1D=csr.fast1DProbProjection(Ss, 3, pars)
    probPlot1D_subPlot(Ss, "S", SStep, mlS, mlSErr, PDist1D, fitResults['plotLabel_S'], noYLabels = True, noXLabels = True)
    # C
    plt.subplot(4, 4, 6)
    PDist1D=csr.fast1DProbProjection(Cs, 2, pars)
    probPlot1D_subPlot(Cs, "C", CStep, mlC, mlCErr, PDist1D, fitResults['plotLabel_C'], noYLabels = True, noXLabels = True)
    # B
    plt.subplot(4, 4, 11)
    PDist1D=csr.fast1DProbProjection(Bs, 1, pars)
    probPlot1D_subPlot(Bs, "B", BStep, mlB, mlBErr, PDist1D, fitResults['plotLabel_B'], noYLabels = True, noXLabels = True)
    # A
    plt.subplot(4, 4, 16)
    PDist1D=csr.fast1DProbProjection(As, 0, pars)
    probPlot1D_subPlot(As, "A", AStep, mlA, mlAErr, PDist1D, fitResults['plotLabel_A'], noYLabels = True, noXLabels = False)
    plt.savefig(outDir+os.path.sep+"2DProb_allPars.pdf")
    plt.close()
#-------------------------------------------------------------------------------------------------------------
def probPlot1D_subPlot(par1Values, par1Label, par1TickStep, mlPar1, mlPar1Err, PDist1D, resultLabel,
                       noXLabels = False, noYLabels = False):
    """Draw the 1d marginalised probability curve for one fit parameter into the
    currently selected subplot.

    par1Values = sampled values of the parameter (x axis)
    par1Label = text label for the parameter
    par1TickStep = spacing of the x axis ticks
    mlPar1 = maximum likelihood value of the parameter (not drawn; kept for interface
        symmetry with probContourPlot_subPlot)
    mlPar1Err = 1d 1-sigma error (not drawn; kept for interface symmetry)
    PDist1D = 1d marginalised probability for the parameter
    resultLabel = pre-formatted 'value \pm error' string shown in the legend
    """
    # Evenly spaced tick positions spanning the sampled parameter range
    tickPositions=np.arange(par1Values.min(), par1Values.max(), par1TickStep)
    plt.xticks(tickPositions, tickPositions)
    # Normalise the curve so its peak sits at 1
    PDist1D=PDist1D/PDist1D.max()
    ax=plt.gca()
    # Legend entry, e.g. "A = 1.23 $\pm$ 0.04"
    curveLabel='%s = %s' % (par1Label, resultLabel.replace("\pm", "$\pm$"))
    plt.plot(par1Values, PDist1D, 'k-', label = curveLabel)
    plt.ylabel("")
    plt.yticks([], [])
    #ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(6))
    plt.ylim(0, 1.2)
    legendHandle=plt.legend(loc = (0.0, 0.86), prop = {'size': 12})
    legendHandle.draw_frame(False)
    plt.draw()
    plt.xlabel(par1Label)
    # Optionally blank the tick labels (used when panels share an axis in the grid)
    if noYLabels:
        yTickLocs, yTickLabels=plt.yticks()
        plt.ylabel("")
        plt.yticks(yTickLocs, [""]*len(yTickLabels))
    if noXLabels:
        xTickLocs, xTickLabels=plt.xticks()
        plt.xlabel("")
        plt.xticks(xTickLocs, [""]*len(xTickLabels))
#-------------------------------------------------------------------------------------------------------------
def probContourPlot_subPlot(par1Values, par2Values, par1Label, par2Label, par1TickStep, par2TickStep, mlPar1, mlPar2,
                            mlPar1Err, mlPar2Err, PDist2D, noXLabels = False, noYLabels = False):
    """Make a 2d contour plot of probability surface of given parameters. Somewhat needless duplication of
    code, for makePaperContourPlots

    par1Values = values for parameter 1 (plotted on Y axis)
    par2Values = values for parameter 2 (plotted on X axis)
    par1Label = text label for Y axis
    par2Label = text label for X axis
    par1TickStep = tick step along Y axis
    par2TickStep = tick step along X axis
    mlPar1 = maximum likelihood value for parameter 1
    mlPar2 = maximum likelihood value for parameter 2
    mlPar1Err = 1d 1-sigma error in parameter 1
    mlPar2Err = 1d 1-sigma error in parameter 2
    PDist2D = 2d likelihood surface, made using fast2DProbProjection
    noXLabels / noYLabels = blank the tick labels on that axis (for shared-axis grids)
    """
    # PDist2D is contoured in index space, so map parameter values -> array indices
    # with a spline and place ticks/markers through that mapping
    tck1=interpolate.splrep(par1Values, np.arange(par1Values.shape[0]))
    par1TickLabels=np.arange(par1Values.min(), par1Values.max(), par1TickStep)
    par1TickIndices=interpolate.splev(par1TickLabels, tck1)
    plt.yticks(par1TickIndices, par1TickLabels)
    tck2=interpolate.splrep(par2Values, np.arange(par2Values.shape[0]))
    par2TickLabels=np.arange(par2Values.min(), par2Values.max(), par2TickStep)
    par2TickIndices=interpolate.splev(par2TickLabels, tck2)
    plt.xticks(par2TickIndices, par2TickLabels)
    # We have to smooth to get decent looking contours
    # Gaussian smoothing preserves the normalisation
    # NOTE: smoothing only needed if very fine grid
    PDist2D=ndimage.gaussian_filter(PDist2D, 1)
    # Work out where to put contours
    sigma1Level=calc2DProbThreshold(PDist2D, 0.683)
    sigma2Level=calc2DProbThreshold(PDist2D, 0.95)
    # Apparently, we need to switch the order in newer versions of matplotlib
    # NOTE(review): bare except with an interactive IPython.embed() + sys.exit() is a
    # debugging aid left in place - it will hang non-interactive runs if contouring
    # ever fails; consider narrowing to the specific matplotlib exception.
    try:
        plt.contour(PDist2D, [sigma2Level, sigma1Level], colors = 'k')
    except:
        print "contour problem"
        IPython.embed()
        sys.exit()
    # Save plot - trim down area first (?) and add axes labels
    # Star marks the maximum likelihood point, again mapped into index space
    plt.plot(interpolate.splev(mlPar2, tck2), interpolate.splev(mlPar1, tck1), 'k*',
             label = "%s = %.2f $\pm$ %.2f, %s = %.2f $\pm$ %.2f" % (par1Label, mlPar1, mlPar1Err, par2Label, mlPar2, mlPar2Err))
    #plt.legend(numpoints = 1)
    plt.xlabel(par2Label)
    plt.ylabel(par1Label)
    if noYLabels == True:
        ylocs, ylabels=plt.yticks()
        plt.ylabel("")
        plt.yticks(ylocs, [""]*len(ylabels))
    if noXLabels == True:
        xlocs, xlabels=plt.xticks()
        plt.xlabel("")
        plt.xticks(xlocs, [""]*len(xlabels))
``` |
[
{
"content": "Reconstruct the code file line-for-line, unmodified:\n```python\n# ==============================================================================\n# Copyright (C) 2011 Diego Duclos\n# Copyright (C) 2011-2018 Anton Vorobyov\n#\n# This file is part of Eos.\n#\n# Eos is free software: you can redistr... | [
{
"content": "Reconstruct the code file line-for-line, unmodified:\n<|memory_start|>```python\n# ==============================================================================\n# Copyright (C) 2011 Diego Duclos\n# Copyright (C) 2011-2018 Anton Vorobyov\n#\n# This file is part of Eos.\n#\n# Eos is free software:... | ```python
# ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from logging import getLogger
from eos.const.eos import EffectBuildStatus
from eos.const.eve import EffectId
from eos.eve_obj.effect import EffectFactory
from .modifier import make_hardpoint_modifiers
from .modifier import make_slot_modifiers
logger = getLogger(__name__)
def add_hardpoint_modifiers(effect):
if effect.modifiers:
msg = 'hardpoint amount bonus effect has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_hardpoint_modifiers()
effect.build_status = EffectBuildStatus.custom
def add_slot_modifiers(effect):
if effect.modifiers:
msg = 'slot amount bonus effect has modifiers, overwriting them'
logger.warning(msg)
effect.modifiers = make_slot_modifiers()
effect.build_status = EffectBuildStatus.custom
EffectFactory.register_instance_by_id(
add_hardpoint_modifiers,
EffectId.hardpoint_modifier_effect)
EffectFactory.register_instance_by_id(
add_slot_modifiers,
EffectId.slot_modifier)
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nfrom yambopy import *\nimport numpy as np\nimport shutil\nimport os\nfrom netCDF4 import Dataset\n\ndef abs2(x):\n return x.real**2 + x.imag**2\n\nclass YamboWFDB():\n def __init__(self,savedb,path=None,save='SAVE',filename='ns.w... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nfrom yambopy import *\nimport numpy as np\nimport shutil\nimport os\nfrom netCDF4 import Dataset\n\ndef abs2(x):\n return x.real**2 + x.imag**2\n\nclass YamboWFDB():\n def __init__(self,savedb,path=None,save='SAVE... | ```python
from yambopy import *
import numpy as np
import shutil
import os
from netCDF4 import Dataset
def abs2(x):
return x.real**2 + x.imag**2
class YamboWFDB():
def __init__(self,savedb,path=None,save='SAVE',filename='ns.wf'):
"""
load wavefunction from yambo
"""
if path is None:
self.path = save
else:
self.path = path+'/SAVE'
self.filename = filename
#take some data from savedb
self.savedb = savedb
self.wfcgrid = savedb.wfcgrid
self.gvectors = savedb.gvectors
self.kpoints = savedb.kpts_car
self.lat = savedb.lat
self.rlat = savedb.rlat
#read wf
self.read()
self.nkpoints, self.nspin, self.ng, self.nbands = self.wf.shape
def read(self):
path = self.path
filename = self.filename
wf = []
nk = 1
while True:
try:
fname = "%s/%s_fragments_%d_1"%(path,filename,nk)
database = Dataset(fname)
re = database.variables['WF_REAL_COMPONENTS_@_K%d_BAND_GRP_1'%nk][:]
im = database.variables['WF_IM_COMPONENTS_@_K%d_BAND_GRP_1'%nk][:]
a = re+1j*im
wf.append(a)
nk+=1
except:
if nk==1:
raise IOError('Could not read %s'%fname)
break
self.wf = np.array(wf)
self.nkpoints, self.nspin, self.ng, self.nbands = self.wf.shape
def get_wf_gvecs(self,kpoint=0):
"""
Get the indexes of teh wavefunctions
"""
#create array for fft
indexes = self.wfcgrid[kpoint]
indexes = indexes[indexes > 0] #remove componnents that do not belong
gvecs = self.gvectors[indexes]
return gvecs
def write(self,path):
"""
write the wavefunctions in new files
"""
if os.path.isdir(path): shutil.rmtree(path)
os.mkdir(path)
#copy all the files
oldpath = self.path
filename = self.filename
shutil.copyfile("%s/%s"%(oldpath,filename),"%s/%s"%(path,filename))
for nk in xrange(self.nkpoints):
fname = "%s_fragments_%d_1"%(filename,nk+1)
shutil.copyfile("%s/%s"%(oldpath,fname),"%s/%s"%(path,fname))
#edit with the new wfs
wf = self.wf
for nk in xrange(self.nkpoints):
fname = "%s_fragments_%d_1"%(filename,nk+1)
database = Dataset("%s/%s"%(path,fname),'r+')
database.variables['WF_REAL_COMPONENTS_@_K%d_BAND_GRP_1'%(nk+1)][:] = wf[nk].real
database.variables['WF_IM_COMPONENTS_@_K%d_BAND_GRP_1'%(nk+1)][:] = wf[nk].imag
db.close()
print 'new wavefunctions written in %s'%path
def __str__(self):
s = ""
s += "nkpoints: %4d\n"%self.nkpoints
s += "nspin: %4d\n"%self.nspin
s += "nbands: %4d\n"%self.nbands
s += "ng: %4d\n"%self.ng
return s
if __name__ == "__main__":
ywf = YamboWFDB(path='database')
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n# flake8: noqa\n# SKIP this file when reformatting.\n# The rest of this file was generated by South.\n\n# encoding: utf-8\nimport datetime\n\nfrom django.db import models\nfrom maasserver.enum import NODE_STATUS\nfrom south.db import db\nfrom s... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n# flake8: noqa\n# SKIP this file when reformatting.\n# The rest of this file was generated by South.\n\n# encoding: utf-8\nimport datetime\n\nfrom django.db import models\nfrom maasserver.enum import NODE_STATUS\nfrom south.db i... | ```python
# flake8: noqa
# SKIP this file when reformatting.
# The rest of this file was generated by South.
# encoding: utf-8
import datetime
from django.db import models
from maasserver.enum import NODE_STATUS
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Node.netboot'
db.add_column(u'maasserver_node', 'netboot', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
# Find all the allocated nodes with netboot=True.
allocated_nodes = orm['maasserver.node'].objects.filter(
status=NODE_STATUS.ALLOCATED, netboot=True)
# Set netboot=False on these nodes.
allocated_nodes.update(netboot=False)
def backwards(self, orm):
# Deleting field 'Node.netboot'
db.delete_column(u'maasserver_node', 'netboot')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maasserver.config': {
'Meta': {'object_name': 'Config'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('maasserver.fields.JSONObjectField', [], {'null': 'True'})
},
u'maasserver.filestorage': {
'Meta': {'object_name': 'FileStorage'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'filename': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'maasserver.macaddress': {
'Meta': {'object_name': 'MACAddress'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_address': ('maasserver.fields.MACAddressField', [], {'unique': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.node': {
'Meta': {'object_name': 'Node'},
'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}),
'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}),
'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-20250ca0-b8f4-11e1-afce-002215205ce8'", 'unique': 'True', 'max_length': '41'}),
'token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.nodegroup': {
'Meta': {'object_name': 'NodeGroup'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Token']", 'unique': 'True'}),
'broadcast_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_range_high': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'ip_range_low': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '80'}),
'router_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'subnet_mask': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'worker_ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'})
},
u'maasserver.sshkey': {
'Meta': {'unique_together': "((u'user', u'key'),)", 'object_name': 'SSHKey'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
u'maasserver.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'piston.consumer': {
'Meta': {'object_name': 'Consumer'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': "orm['auth.User']"})
},
'piston.token': {
'Meta': {'object_name': 'Token'},
'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['piston.Consumer']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1339989444L'}),
'token_type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': "orm['auth.User']"}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['maasserver']
``` |
[
{
"content": "```python\nfrom perfrunner.helpers import local\nfrom perfrunner.helpers.cbmonitor import timeit, with_stats\nfrom perfrunner.tests import PerfTest\n\n\nclass DCPThroughputTest(PerfTest):\n\n def _report_kpi(self, time_elapsed: float):\n self.reporter.post(\n *self.metrics.dcp... | [
{
"content": "<|memory_start|>```python\nfrom perfrunner.helpers import local\nfrom perfrunner.helpers.cbmonitor import timeit, with_stats\nfrom perfrunner.tests import PerfTest\n\n\nclass DCPThroughputTest(PerfTest):\n\n def _report_kpi(self, time_elapsed: float):\n self.reporter.post(\n *... | ```python
from perfrunner.helpers import local
from perfrunner.helpers.cbmonitor import timeit, with_stats
from perfrunner.tests import PerfTest
class DCPThroughputTest(PerfTest):

    """Measure DCP streaming throughput using the external dcptest tool."""

    def _report_kpi(self, time_elapsed: float):
        # Translate the elapsed streaming time into a throughput metric
        # and publish it to the reporting backend.
        kpi = self.metrics.dcp_throughput(time_elapsed)
        self.reporter.post(*kpi)

    @with_stats
    @timeit
    def access(self, *args):
        # Stream every loaded item from each target bucket; @timeit makes
        # the wall-clock duration this method's return value.
        creds = self.cluster_spec.rest_credentials
        for tgt in self.target_iterator:
            local.run_dcptest(
                host=tgt.node,
                username=creds[0],
                password=creds[1],
                bucket=tgt.bucket,
                num_items=self.test_config.load_settings.items,
                num_connections=self.test_config.dcp_settings.num_connections
            )

    def run(self):
        # Load data, make sure it is persisted, then time the DCP stream.
        self.load()
        self.wait_for_persistence()
        self.report_kpi(self.access())
class JavaDCPThroughputTest(DCPThroughputTest):

    """DCP throughput measured with the Java DCP client instead of dcptest."""

    def init_java_dcp_client(self):
        # Fetch and compile the Java DCP client before the timed phase.
        settings = self.test_config.java_dcp_settings
        local.clone_git_repo(repo=settings.repo, branch=settings.branch)
        local.build_java_dcp_client()

    @with_stats
    @timeit
    def access(self, *args):
        # Stream all items through the Java client; the duration is the result.
        for tgt in self.target_iterator:
            local.run_java_dcp_client(
                connection_string=tgt.connection_string,
                messages=self.test_config.load_settings.items,
                config_file=self.test_config.java_dcp_settings.config,
            )

    def run(self):
        self.init_java_dcp_client()
        super().run()
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n\"\"\"\nThis module contains dsolve() and different helper functions that it\nuses.\n\ndsolve() solves ordinary differential equations. See the docstring on\nthe various functions for their uses. Note that partial differential\n... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n\"\"\"\nThis module contains dsolve() and different helper functions that it\nuses.\n\ndsolve() solves ordinary differential equations. See the docstring on\nthe various functions for their uses. Note that partia... | ```python
"""
This module contains dsolve() and different helper functions that it
uses.
dsolve() solves ordinary differential equations. See the docstring on
the various functions for their uses. Note that partial differential
equations support is in pde.py. Note that ode_hint() functions have
docstrings describing their various methods, but they are intended for
internal use. Use dsolve(ode, func, hint=hint) to solve an ode using a
specific hint. See also the docstring on dsolve().
**Functions in this module**
These are the user functions in this module:
- dsolve() - Solves ODEs.
- classify_ode() - Classifies ODEs into possible hints for dsolve().
- checkodesol() - Checks if an equation is the solution to an ODE.
- ode_order() - Returns the order (degree) of an ODE.
- homogeneous_order() - Returns the homogeneous order of an
expression.
These are the non-solver helper functions that are for internal use.
The user should use the various options to dsolve() to obtain the
functionality provided by these functions:
- odesimp() - Does all forms of ODE simplification.
- ode_sol_simplicity() - A key function for comparing solutions by
simplicity.
- constantsimp() - Simplifies arbitrary constants.
- constant_renumber() - Renumber arbitrary constants
- _handle_Integral() - Evaluate unevaluated Integrals.
See also the docstrings of these functions.
**Solving methods currently implemented**
The following methods are implemented for solving ordinary differential
equations. See the docstrings of the various ode_hint() functions for
more information on each (run help(ode)):
- 1st order separable differential equations
- 1st order differential equations whose coefficients or dx and dy
are functions homogeneous of the same order.
- 1st order exact differential equations.
- 1st order linear differential equations
- 1st order Bernoulli differential equations.
- 2nd order Liouville differential equations.
- nth order linear homogeneous differential equation with constant
coefficients.
- nth order linear inhomogeneous differential equation with constant
coefficients using the method of undetermined coefficients.
- nth order linear inhomogeneous differential equation with constant
coefficients using the method of variation of parameters.
**Philosophy behind this module**
This module is designed to make it easy to add new ODE solving methods
without having to mess with the solving code for other methods. The
idea is that there is a classify_ode() function, which takes in an ODE
and tells you what hints, if any, will solve the ODE. It does this
without attempting to solve the ODE, so it is fast. Each solving method
is a hint, and it has its own function, named ode_hint. That function
takes in the ODE and any match expression gathered by classify_ode and
returns a solved result. If this result has any integrals in it, the
ode_hint function will return an unevaluated Integral class. dsolve(),
which is the user wrapper function around all of this, will then call
odesimp() on the result, which, among other things, will attempt to
solve the equation for the dependent variable (the function we are
solving for), simplify the arbitrary constants in the expression, and
evaluate any integrals, if the hint allows it.
**How to add new solution methods**
If you have an ODE that you want dsolve() to be able to solve, try to
avoid adding special case code here. Instead, try finding a general
method that will solve your ODE, as well as others. This way, the ode
module will become more robust, and unhindered by special case hacks.
Wolfram|Alpha and Maple's DETools[odeadvisor] function are two resources
you can use to classify a specific ODE. It is also better for a method
to work with an nth order ODE instead of only with specific orders, if
possible.
To add a new method, there are a few things that you need to do. First,
you need a hint name for your method. Try to name your hint so that it
is unambiguous with all other methods, including ones that may not be
implemented yet. If your method uses integrals, also include a
"hint_Integral" hint. If there is more than one way to solve ODEs with
your method, include a hint for each one, as well as a "hint_best" hint.
Your ode_hint_best() function should choose the best using min with
ode_sol_simplicity as the key argument. See
ode_1st_homogeneous_coeff_best(), for example. The function that uses
your method will be called ode_hint(), so the hint must only use
characters that are allowed in a Python function name (alphanumeric
characters and the underscore '_' character). Include a function for
every hint, except for "_Integral" hints (dsolve() takes care of those
automatically). Hint names should be all lowercase, unless a word is
commonly capitalized (such as Integral or Bernoulli). If you have a hint
that you do not want to run with "all_Integral" that doesn't have an
"_Integral" counterpart (such as a best hint that would defeat the
purpose of "all_Integral"), you will need to remove it manually in the
dsolve() code. See also the classify_ode() docstring for guidelines on
writing a hint name.
Determine *in general* how the solutions returned by your method
compare with other methods that can potentially solve the same ODEs.
Then, put your hints in the allhints tuple in the order that they should
be called. The ordering of this tuple determines which hints are
default. Note that exceptions are ok, because it is easy for the user to
choose individual hints with dsolve(). In general, "_Integral" variants
should go at the end of the list, and "_best" variants should go before
the various hints they apply to. For example, the
"undetermined_coefficients" hint comes before the
"variation_of_parameters" hint because, even though variation of
parameters is more general than undetermined coefficients, undetermined
coefficients generally returns cleaner results for the ODEs that it can
solve than variation of parameters does, and it does not require
integration, so it is much faster.
Next, you need to have a match expression or a function that matches the
type of the ODE, which you should put in classify_ode() (if the match
function is more than just a few lines, like
_undetermined_coefficients_match(), it should go outside of
classify_ode()). It should match the ODE without solving for it as much
as possible, so that classify_ode() remains fast and is not hindered by
bugs in solving code. Be sure to consider corner cases. For example, if
your solution method involves dividing by something, make sure you
exclude the case where that division will be 0.
In most cases, the matching of the ODE will also give you the various
parts that you need to solve it. You should put that in a dictionary
(.match() will do this for you), and add that as matching_hints['hint']
= matchdict in the relevant part of classify_ode. classify_ode will
then send this to dsolve(), which will send it to your function as the
match argument. Your function should be named ode_hint(eq, func, order,
match). If you need to send more information, put it in the match
dictionary. For example, if you had to substitute in a dummy variable
in classify_ode to match the ODE, you will need to pass it to your
function using the match dict to access it. You can access the
independent variable using func.args[0], and the dependent variable (the
function you are trying to solve for) as func.func. If, while trying to
solve the ODE, you find that you cannot, raise NotImplementedError.
dsolve() will catch this error with the "all" meta-hint, rather than
causing the whole routine to fail.
Add a docstring to your function that describes the method employed.
Like with anything else in SymPy, you will need to add a doctest to the
docstring, in addition to real tests in test_ode.py. Try to maintain
consistency with the other hint functions' docstrings. Add your method
to the list at the top of this docstring. Also, add your method to
ode.txt in the docs/src directory, so that the Sphinx docs will pull its
docstring into the main SymPy documentation. Be sure to make the Sphinx
documentation by running "make html" from within the doc directory to
verify that the docstring formats correctly.
If your solution method involves integrating, use C.Integral() instead
of integrate(). This allows the user to bypass hard/slow integration by
using the "_Integral" variant of your hint. In most cases, calling
.doit() will integrate your solution. If this is not the case, you will
need to write special code in _handle_Integral(). Arbitrary constants
should be symbols named C1, C2, and so on. All solution methods should
return an equality instance. If you need an arbitrary number of
arbitrary constants, you can use constants =
numbered_symbols(prefix='C', function=Symbol, start=1). If it is
possible to solve for the dependent function in a general way, do so.
Otherwise, do as best as you can, but do not call solve in your
ode_hint() function. odesimp() will attempt to solve the solution for
you, so you do not need to do that. Lastly, if your ODE has a common
simplification that can be applied to your solutions, you can add a
special case in odesimp() for it. For example, solutions returned from
the "1st_homogeneous_coeff" hints often have many log() terms, so
odesimp() calls logcombine() on them (it also helps to write the
arbitrary constant as log(C1) instead of C1 in this case). Also
consider common ways that you can rearrange your solution to have
constantsimp() take better advantage of it. It is better to put
simplification in odesimp() than in your method, because it can then be
turned off with the simplify flag in dsolve(). If you have any
extraneous simplification in your function, be sure to only run it using
"if match.get('simplify', True):", especially if it can be slow or if it
can reduce the domain of the solution.
Finally, as with every contribution to SymPy, your method will need to
be tested. Add a test for each method in test_ode.py. Follow the
conventions there, i.e., test the solver using dsolve(eq, f(x),
hint=your_hint), and also test the solution using checkodesol (you can
put these in a separate tests and skip/XFAIL if it runs too slow/doesn't
work). Be sure to call your hint specifically in dsolve, that way the
test won't be broken simply by the introduction of another matching
hint. If your method works for higher order (>1) ODEs, you will need to
run sol = constant_renumber(sol, 'C', 1, order), for each solution, where
order is the order of the ODE. This is because constant_renumber renumbers
the arbitrary constants by printing order, which is platform dependent.
Try to test every corner case of your solver, including a range of
orders if it is an nth order solver, but if your solver is slow, such as
if it involves hard integration, try to keep the test run time down.
Feel free to refactor existing hints to avoid duplicating code or
creating inconsistencies. If you can show that your method exactly
duplicates an existing method, including in the simplicity and speed of
obtaining the solutions, then you can remove the old, less general
method. The existing code is tested extensively in test_ode.py, so if
anything is broken, one of those tests will surely fail.
"""
from sympy.core import Add, Basic, C, S, Mul, Pow, oo
from sympy.core.function import Derivative, diff, expand_mul
from sympy.core.multidimensional import vectorize
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild
from sympy.core.sympify import sympify
from sympy.functions import cos, exp, im, log, re, sin, sign
from sympy.matrices import wronskian
from sympy.polys import RootsOf, discriminant, RootOf
from sympy.series import Order
from sympy.simplify import collect, logcombine, powsimp, separatevars, \
simplify, trigsimp
from sympy.solvers import solve
from sympy.utilities import numbered_symbols, all, any, make_list
from sympy.utilities.iterables import minkey
# This is a list of hints in the order that they should be applied. That means
# that, in general, hints earlier in the list should produce simpler results
# than those later for ODEs that fit both. This is just based on my own
# empirical observations, so if you find that *in general*, a hint later in
# the list is better than one before it, feel free to modify the list. Note
# however that you can easily override the hint used in dsolve() for a specific ODE
# (see the docstring). In general, "_Integral" hints should be grouped
# at the end of the list, unless there is a method that returns an unevaluatable
# integral most of the time (which should surely go near the end of the list
# anyway).
# "default", "all", "best", and "all_Integral" meta-hints should not be
# included in this list, but "_best" and "_Integral" hints should be included.
# Every implemented hint name, in preference order: dsolve()'s "default"
# meta-hint resolves to the first entry here that classify_ode() matches.
allhints = ("separable", "1st_exact", "1st_linear", "Bernoulli",
    "1st_homogeneous_coeff_best", "1st_homogeneous_coeff_subs_indep_div_dep",
    "1st_homogeneous_coeff_subs_dep_div_indep", "nth_linear_constant_coeff_homogeneous",
    "nth_linear_constant_coeff_undetermined_coefficients",
    "nth_linear_constant_coeff_variation_of_parameters",
    "Liouville", "separable_Integral", "1st_exact_Integral", "1st_linear_Integral",
    "Bernoulli_Integral", "1st_homogeneous_coeff_subs_indep_div_dep_Integral",
    "1st_homogeneous_coeff_subs_dep_div_indep_Integral",
    "nth_linear_constant_coeff_variation_of_parameters_Integral",
    "Liouville_Integral")
def dsolve(eq, func, hint="default", simplify=True, **kwargs):
    """
    Solves any (supported) kind of ordinary differential equation.
    **Usage**
        dsolve(eq, f(x), hint) -> Solve ordinary differential equation
        eq for function f(x), using method hint.
    **Details**
        ``eq`` can be any supported ordinary differential equation (see
            the ode docstring for supported methods). This can either
            be an Equality, or an expression, which is assumed to be
            equal to 0.
        ``f(x)`` is a function of one variable whose derivatives in that
            variable make up the ordinary differential equation eq.
        ``hint`` is the solving method that you want dsolve to use. Use
            classify_ode(eq, f(x)) to get all of the possible hints for
            an ODE. The default hint, 'default', will use whatever hint
            is returned first by classify_ode(). See Hints below for
            more options that you can use for hint.
        ``simplify`` enables simplification by odesimp(). See its
            docstring for more information. Turn this off, for example,
            to disable solving of solutions for func or simplification
            of arbitrary constants. It will still integrate with this
            hint. Note that the solution may contain more arbitrary
            constants than the order of the ODE with this option
            enabled.
    **Hints**
        Aside from the various solving methods, there are also some
        meta-hints that you can pass to dsolve():
        "default":
                This uses whatever hint is returned first by
                classify_ode(). This is the default argument to
                dsolve().
        "all":
                To make dsolve apply all relevant classification hints,
                use dsolve(ODE, func, hint="all"). This will return a
                dictionary of hint:solution terms. If a hint causes
                dsolve to raise NotImplementedError, value of that
                hint's key will be the exception object raised. The
                dictionary will also include some special keys:
                - order: The order of the ODE. See also ode_order().
                - best: The simplest hint; what would be returned by
                  "best" below.
                - best_hint: The hint that would produce the solution
                  given by 'best'. If more than one hint produces the
                  best solution, the first one in the tuple returned by
                  classify_ode() is chosen.
                - default: The solution that would be returned by
                  default. This is the one produced by the hint that
                  appears first in the tuple returned by classify_ode().
        "all_Integral":
                This is the same as "all", except if a hint also has a
                corresponding "_Integral" hint, it only returns the
                "_Integral" hint. This is useful if "all" causes
                dsolve() to hang because of a difficult or impossible
                integral. This meta-hint will also be much faster than
                "all", because integrate() is an expensive routine.
        "best":
                To have dsolve() try all methods and return the simplest
                one. This takes into account whether the solution is
                solvable in the function, whether it contains any
                Integral classes (i.e. unevaluatable integrals), and
                which one is the shortest in size.
        See also the classify_ode() docstring for more info on hints,
        and the ode docstring for a list of all supported hints.
    **Tips**
        - You can declare the derivative of an unknown function this way:
            >>> from sympy import Function, Derivative
            >>> from sympy.abc import x # x is the independent variable
            >>> f = Function("f")(x) # f is a function of x
            >>> # f_ will be the derivative of f with respect to x
            >>> f_ = Derivative(f, x)
        - See test_ode.py for many tests, which serves also as a set of
          examples for how to use dsolve().
        - dsolve always returns an Equality class (except for the case
          when the hint is "all" or "all_Integral"). If possible, it
          solves the solution explicitly for the function being solved
          for. Otherwise, it returns an implicit solution.
        - Arbitrary constants are symbols named C1, C2, and so on.
        - Because all solutions should be mathematically equivalent,
          some hints may return the exact same result for an ODE. Often,
          though, two different hints will return the same solution
          formatted differently. The two should be equivalent. Also
          note that sometimes the values of the arbitrary constants in
          two different solutions may not be the same, because one
          constant may have "absorbed" other constants into it.
        - Do help(ode.ode_hintname) to get more information on a
          specific hint, where hintname is the name of a hint without
          "_Integral".
    **Examples**
        >>> from sympy import Function, dsolve, Eq, Derivative, sin, cos
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> dsolve(Derivative(f(x),x,x)+9*f(x), f(x))
        f(x) == C1*sin(3*x) + C2*cos(3*x)
        >>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
        ... hint='separable')
        -log(1 - sin(f(x))**2)/2 == C1 + log(1 - sin(x)**2)/2
        >>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
        ... hint='1st_exact')
        f(x) == acos(C1/cos(x))
        >>> dsolve(sin(x)*cos(f(x)) + cos(x)*sin(f(x))*f(x).diff(x), f(x),
        ... hint='best')
        f(x) == acos(C1/cos(x))
        >>> # Note that even though separable is the default, 1st_exact produces
        >>> # a simpler result in this case.
    """
    # TODO: Implement initial conditions
    # See issue 1621. We first need a way to represent things like f'(0).
    # Normalize: fold an Equality into a single expression assumed == 0.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return dsolve(eq.lhs-eq.rhs, func, hint=hint, simplify=simplify, **kwargs)
        eq = eq.lhs
    # Magic that should only be used internally. Prevents classify_ode from
    # being called more than it needs to be by passing its results through
    # recursive calls.
    if kwargs.get('classify', True):
        hints = classify_ode(eq, func, dict=True)
    else:
        # Here is what all this means:
        #
        # hint: The hint method given to dsolve() by the user.
        # hints: The dictionary of hints that match the ODE, along with
        #        other information (including the internal pass-through magic).
        # default: The default hint to return, the first hint from allhints
        #          that matches the hint. This is obtained from classify_ode().
        # match: The hints dictionary contains a match dictionary for each hint
        #        (the parts of the ODE for solving). When going through the
        #        hints in "all", this holds the match string for the current
        #        hint.
        # order: The order of the ODE, as determined by ode_order().
        hints = kwargs.get('hint',
                           {'default': hint,
                            hint: kwargs['match'],
                            'order': kwargs['order']})
    if hints['order'] == 0:
        raise ValueError(str(eq) + " is not a differential equation in " + str(func))
    if not hints['default']:
        # classify_ode will set hints['default'] to None if no hints match.
        raise NotImplementedError("dsolve: Cannot solve " + str(eq))
    if hint == 'default':
        # Re-dispatch on the first matching hint, reusing the classification.
        return dsolve(eq, func, hint=hints['default'], simplify=simplify, classify=False,
        order=hints['order'], match=hints[hints['default']])
    elif hint in ('all', 'all_Integral', 'best'):
        # Meta-hints: solve with every matched hint and collect the results.
        retdict = {}
        failedhints = {}
        gethints = set(hints) - set(['order', 'default', 'ordered_hints'])
        if hint == 'all_Integral':
            # Drop each base hint that has an "_Integral" counterpart.
            for i in hints:
                if i[-9:] == '_Integral':
                    gethints.remove(i[:-9])
            # special case
            if "1st_homogeneous_coeff_best" in gethints:
                gethints.remove("1st_homogeneous_coeff_best")
        for i in gethints:
            try:
                sol = dsolve(eq, func, hint=i, simplify=simplify, classify=False,
                   order=hints['order'], match=hints[i])
            except NotImplementedError, detail: # except NotImplementedError as detail:
                failedhints[i] = detail
            else:
                retdict[i] = sol
        # Pick the simplest successful solution (see ode_sol_simplicity).
        retdict['best'] = minkey(retdict.values(), key=lambda x:
            ode_sol_simplicity(x, func, trysolving=not simplify))
        if hint == 'best':
            return retdict['best']
        for i in hints['ordered_hints']:
            if retdict['best'] == retdict.get(i, None):
                retdict['best_hint'] = i
                break
        retdict['default'] = hints['default']
        retdict['order'] = sympify(hints['order'])
        retdict.update(failedhints)
        return retdict
    elif hint not in allhints: # and hint not in ('default', 'ordered_hints'):
        raise ValueError("Hint not recognized: " + hint)
    elif hint not in hints:
        raise ValueError("ODE " + str(eq) + " does not match hint " + hint)
    elif hint[-9:] == '_Integral':
        # "_Integral" hints share the solver function of their base hint.
        solvefunc = globals()['ode_' + hint[:-9]]
    else:
        solvefunc = globals()['ode_' + hint] # convert the string into a function
    # odesimp() will attempt to integrate, if necessary, apply constantsimp(),
    # attempt to solve for func, and apply any other hint specific simplifications
    if simplify:
        return odesimp(solvefunc(eq, func, order=hints['order'],
            match=hints[hint]), func, hints['order'], hint)
    else:
        # We still want to integrate (you can disable it separately with the hint)
        r = hints[hint]
        r['simplify'] = False # Some hints can take advantage of this option
        return _handle_Integral(solvefunc(eq, func, order=hints['order'],
            match=hints[hint]), func, hints['order'], hint)
def classify_ode(eq, func, dict=False):
"""
Returns a tuple of possible dsolve() classifications for an ODE.
The tuple is ordered so that first item is the classification that
dsolve() uses to solve the ODE by default. In general,
    classifications near the beginning of the list will produce
    better solutions faster than those near the end, though there are
always exceptions. To make dsolve use a different classification,
use dsolve(ODE, func, hint=<classification>). See also the dsolve()
docstring for different meta-hints you can use.
If dict is true, classify_ode() will return a dictionary of
hint:match expression terms. This is intended for internal use by
dsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(ode.ode_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.ode.allhints or the sympy.ode docstring for a list of all
supported hints that can be returned from classify_ode.
**Notes on Hint Names**
*"_Integral"*
If a classification has "_Integral" at the end, it will return
the expression with an unevaluated Integral class in it. Note
that a hint may do this anyway if integrate() cannot do the
integral, though just using an "_Integral" will do so much
faster. Indeed, an "_Integral" hint will always be faster than
its corresponding hint without "_Integral" because integrate()
is an expensive routine. If dsolve() hangs, it is probably
because integrate() is hanging on a tough or impossible
integral. Try using an "_Integral" hint or "all_Integral" to
        get it to return something.
Note that some hints do not have "_Integral" counterparts. This
is because integrate() is not used in solving the ODE for those
method. For example, nth order linear homogeneous ODEs with
constant coefficients do not require integration to solve, so
there is no "nth_linear_homogeneous_constant_coeff_Integrate"
hint. You can easily evaluate any unevaluated Integrals in an
expression by doing expr.doit().
*Ordinals*
Some hints contain an ordinal such as "1st_linear". This is to
help differentiate them from other hints, as well as from other
methods that may not be implemented yet. If a hint has "nth" in
        it, such as the "nth_linear" hints, this means that the method
        applies to ODEs of any order.
*"indep" and "dep"*
Some hints contain the words "indep" or "dep". These reference
the independent variable and the dependent function,
respectively. For example, if an ODE is in terms of f(x), then
"indep" will refer to x and "dep" will refer to f.
*"subs"*
        If a hint has the word "subs" in it, it means that the ODE is
solved by substituting the expression given after the word
"subs" for a single dummy variable. This is usually in terms of
"indep" and "dep" as above. The substituted expression will be
written only in characters allowed for names of Python objects,
meaning operators will be spelled out. For example, indep/dep
will be written as indep_div_dep.
*"coeff"*
The word "coeff" in a hint refers to the coefficients of
something in the ODE, usually of the derivative terms. See the
docstring for the individual methods for more info (help(ode)).
This is contrast to "coefficients", as in
"undetermined_coefficients", which refers to the common name of
a method.
*"_best"*
Methods that have more than one fundamental way to solve will
have a hint for each sub-method and a "_best"
meta-classification. This will evaluate all hints and return the
best, using the same considerations as the normal "best"
meta-hint.
**Examples**
>>> from sympy import Function, classify_ode, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> classify_ode(Eq(f(x).diff(x), 0), f(x))
('separable', '1st_linear', '1st_homogeneous_coeff_best',
'1st_homogeneous_coeff_subs_indep_div_dep',
'1st_homogeneous_coeff_subs_dep_div_indep',
'nth_linear_constant_coeff_homogeneous', 'separable_Integral',
'1st_linear_Integral',
'1st_homogeneous_coeff_subs_indep_div_dep_Integral',
'1st_homogeneous_coeff_subs_dep_div_indep_Integral')
>>> classify_ode(f(x).diff(x, 2) + 3*f(x).diff(x) + 2*f(x) - 4, f(x))
('nth_linear_constant_coeff_undetermined_coefficients',
'nth_linear_constant_coeff_variation_of_parameters',
'nth_linear_constant_coeff_variation_of_parameters_Integral')
"""
from sympy import expand
if len(func.args) != 1:
raise ValueError("dsolve() and classify_ode() only work with functions " + \
"of one variable")
x = func.args[0]
f = func.func
y = Symbol('y', dummy=True)
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_ode(eq.lhs-eq.rhs, func)
eq = eq.lhs
order = ode_order(eq, f(x))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {"order": order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
df = f(x).diff(x)
a = Wild('a', exclude=[f(x)])
b = Wild('b', exclude=[f(x)])
c = Wild('c', exclude=[f(x)])
d = Wild('d', exclude=[df, f(x).diff(x, 2)])
e = Wild('e', exclude=[df])
k = Wild('k', exclude=[df])
n = Wild('n', exclude=[f(x)])
c1 = Wild('c1', exclude=[x])
eq = expand(eq)
# Precondition to try remove f(x) from highest order derivative
reduced_eq = None
if eq.is_Add:
deriv_coef = eq.coeff(f(x).diff(x, order))
if deriv_coef != 1:
r = deriv_coef.match(a*f(x)**c1)
if r and r[c1]:
den = f(x)**r[c1]
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
# Linear case: a(x)*y'+b(x)*y+c(x) == 0
if eq.is_Add:
ind, dep = reduced_eq.as_independent(f)
else:
u = Symbol('u', dummy=True)
ind, dep = (reduced_eq + u).as_independent(f)
ind, dep = [tmp.subs(u, 0) for tmp in [ind, dep]]
r = {a: dep.coeff(df, expand=False) or S.Zero, # if we get None for coeff, take 0
b: dep.coeff(f(x), expand=False) or S.Zero, # ditto
c: ind}
# double check f[a] since the preconditioning may have failed
if not r[a].has(f) and (r[a]*df + r[b]*f(x) + r[c]).expand() - reduced_eq == 0:
r['a'] = a
r['b'] = b
r['c'] = c
matching_hints["1st_linear"] = r
matching_hints["1st_linear_Integral"] = r
# Bernoulli case: a(x)*y'+b(x)*y+c(x)*y**n == 0
r = collect(reduced_eq, f(x), exact = True).match(a*df + b*f(x) + c*f(x)**n)
if r and r[c] != 0 and r[n] != 1: # See issue 1577
r['a'] = a
r['b'] = b
r['c'] = c
r['n'] = n
matching_hints["Bernoulli"] = r
matching_hints["Bernoulli_Integral"] = r
# Exact Differential Equation: P(x,y)+Q(x,y)*y'=0 where dP/dy == dQ/dx
# WITH NON-REDUCED FORM OF EQUATION
r = collect(eq, df, exact = True).match(d + e * df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x),y)
r[e] = r[e].subs(f(x),y)
if r[d] != 0 and simplify(r[d].diff(y)) == simplify(r[e].diff(x)):
matching_hints["1st_exact"] = r
matching_hints["1st_exact_Integral"] = r
# This match is used for several cases below; we now collect on
# f(x) so the matching works.
r = collect(reduced_eq, df, exact = True).match(d+e*df)
if r:
r['d'] = d
r['e'] = e
r['y'] = y
r[d] = r[d].subs(f(x),y)
r[e] = r[e].subs(f(x),y)
# Separable Case: y' == P(y)*Q(x)
r[d] = separatevars(r[d])
r[e] = separatevars(r[e])
# m1[coeff]*m1[x]*m1[y] + m2[coeff]*m2[x]*m2[y]*y'
m1 = separatevars(r[d], dict=True, symbols=(x, y))
m2 = separatevars(r[e], dict=True, symbols=(x, y))
if m1 and m2:
r1 = {'m1':m1, 'm2':m2, 'y':y}
matching_hints["separable"] = r1
matching_hints["separable_Integral"] = r1
# First order equation with homogeneous coefficients:
# dy/dx == F(y/x) or dy/dx == F(x/y)
ordera = homogeneous_order(r[d], x, y)
orderb = homogeneous_order(r[e], x, y)
if ordera == orderb and ordera != None:
# u1=y/x and u2=x/y
u1 = Symbol('u1', dummy=True)
u2 = Symbol('u2', dummy=True)
if simplify((r[d]+u1*r[e]).subs({x:1, y:u1})) != 0:
matching_hints["1st_homogeneous_coeff_subs_dep_div_indep"] = r
matching_hints["1st_homogeneous_coeff_subs_dep_div_indep_Integral"] = r
if simplify((r[e]+u2*r[d]).subs({x:u2, y:1})) != 0:
matching_hints["1st_homogeneous_coeff_subs_indep_div_dep"] = r
matching_hints["1st_homogeneous_coeff_subs_indep_div_dep_Integral"] = r
if matching_hints.has_key("1st_homogeneous_coeff_subs_dep_div_indep") \
and matching_hints.has_key("1st_homogeneous_coeff_subs_indep_div_dep"):
matching_hints["1st_homogeneous_coeff_best"] = r
if order == 2:
# Liouville ODE f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
# See Goldstein and Braun, "Advanced Methods for the Solution of
# Differential Equations", pg. 98
s = d*f(x).diff(x, 2) + e*df**2 + k*df
r = reduced_eq.match(s)
if r and r[d] != 0:
y = Symbol('y', dummy=True)
g = simplify(r[e]/r[d]).subs(f(x), y)
h = simplify(r[k]/r[d])
if h.has(f(x)) or g.has(x):
pass
else:
r = {'g':g, 'h':h, 'y':y}
matching_hints["Liouville"] = r
matching_hints["Liouville_Integral"] = r
if order > 0:
# nth order linear ODE
# a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y = F(x) = b
r = _nth_linear_match(reduced_eq, func, order)
# Constant coefficient case (a_i is constant for all i)
if r and not any(r[i].has(x) for i in r if i >= 0):
# Inhomogeneous case: F(x) is not identically 0
if r[-1]:
undetcoeff = _undetermined_coefficients_match(r[-1], x)
matching_hints["nth_linear_constant_coeff_variation_of_parameters"] = r
matching_hints["nth_linear_constant_coeff_variation_of_parameters" + \
"_Integral"] = r
if undetcoeff['test']:
r['trialset'] = undetcoeff['trialset']
matching_hints["nth_linear_constant_coeff_undetermined_" + \
"coefficients"] = r
# Homogeneous case: F(x) is identically 0
else:
matching_hints["nth_linear_constant_coeff_homogeneous"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so we need to make note of which
# hint would come first for dsolve(). In Python 3, this should be replaced
# with an ordered dictionary.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
@vectorize(0)
def odesimp(eq, func, order, hint):
    r"""
    Simplifies ODEs, including trying to solve for func and running
    constantsimp().

    It may use knowledge of the type of solution that the hint returns
    to apply additional simplifications.

    It also attempts to integrate any Integrals in the expression, if
    the hint is not an "_Integral" hint.

    This function should have no effect on expressions returned by
    dsolve(), as dsolve already calls odesimp(), but the individual hint
    functions do not call odesimp (because the dsolve() wrapper does).
    Therefore, this function is designed for mainly internal use.

    :param eq: the solution Equality returned by a hint function.
    :param func: the function solved for, e.g. f(x).
    :param order: the order of the ODE that eq solves.
    :param hint: the hint that produced eq; drives hint-specific cleanup.

    **Example**
    >>> from sympy import sin, symbols, dsolve, pprint, Function
    >>> from sympy.solvers.ode import odesimp
    >>> x , u2, C1= symbols('x u2 C1')
    >>> f = Function('f')
    >>> eq = dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), f(x),
    ... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral',
    ... simplify=False)
    >>> pprint(eq)
    x
    ----
    f(x)
    /
    |
    | /1 \
    | 1 + u2*sin|--|
    | \u2/ /f(x)\
    - | -------------------------- d(u2) + log|----| = 0
    | / /1 \\ \ C1 /
    | - |1 + u2*sin|--||*u2 + u2
    | \ \u2//
    |
    /
    <BLANKLINE>
    >> pprint(odesimp(eq, f(x), 1,
    ... hint='1st_homogeneous_coeff_subs_indep_div_dep'
    ... )) # (this is slow, so we skip)
    x
    --------- = C1
    /f(x)\
    tan|----|
    \2*x /
    """
    x = func.args[0]
    f = func.func
    C1 = Symbol('C1')

    # First, integrate, if the hint allows it.
    eq = _handle_Integral(eq, func, order, hint)

    # Second, clean up the arbitrary constants.
    # Right now, nth linear hints can put as many as 2*order constants in an
    # expression. If that number grows with another hint, the third argument
    # here should be raised accordingly, or constantsimp() rewritten to handle
    # an arbitrary number of constants.
    eq = constantsimp(eq, x, 2*order)

    # Lastly, now that we have cleaned up the expression, try solving for func.
    # When RootOf is implemented in solve(), we will want to return a RootOf
    # everytime instead of an Equality.
    # NOTE(review): the triple-quoted string below is a bare expression
    # statement (dead code kept from an earlier revision); it is never
    # executed.
    """
    if hint[:21] == "1st_homogeneous_coeff":
        eq = logcombine(eq, assume_pos_real=True)
        if eq.lhs.is_Function and eq.lhs.func is log and eq.rhs == 0:
            eq = Eq(eq.lhs.args[0]/C1,C1)
    """
    if eq.lhs == func and not eq.rhs.has(func):
        # The solution is already solved
        pass
    elif eq.rhs == func and not eq.lhs.has(func):
        # The solution is solved, but in reverse, so switch it
        eq = Eq(eq.rhs, eq.lhs)
    else:
        # The solution is not solved, so try to solve it
        try:
            eqsol = solve(eq, func)
            if eqsol == []:
                raise NotImplementedError
        except NotImplementedError:
            # Keep the implicit solution; wrap in a list so the hint-specific
            # post-processing below can treat both cases uniformly.
            eq = [eq]
        else:
            eq = [Eq(f(x), t) for t in eqsol]

        # Special handling for certain hints that we know will usually take a
        # certain form
        if hint[:21] == "1st_homogeneous_coeff":
            neweq = []
            for i in eq:
                # Solutions from this hint can almost always be logcombined
                newi = logcombine(i, assume_pos_real=True)
                if newi.lhs.is_Function and newi.lhs.func is log and newi.rhs == 0:
                    # log(C1*stuff) == 0 --> stuff == C1
                    # Note that this is a form of constant simplification.
                    # And also, the division of C1 relies on constantsimp()
                    # making it C1*stuff.
                    newi = Eq(newi.lhs.args[0]/C1,C1)
                neweq.append(newi)
            eq = neweq
        if len(eq) == 1:
            eq = eq[0] # We only want a list if there are multiple solutions

    if hint[:25] == "nth_linear_constant_coeff":
        # Collect terms to make the solution look nice.
        # This is also necessary for constantsimp to remove unnecessary terms
        # from the particular solution from variation of parameters
        #
        # collectterms is set as a module-level global by the nth linear
        # constant coefficient hint functions; each entry is a tuple
        # (i, reroot, imroot) describing one term of the homogeneous solution.
        global collectterms
        sol = eq.rhs
        sol = expand_mul(sol)
        for i, reroot, imroot in collectterms:
            sol = collect(sol, x**i*exp(reroot*x)*sin(abs(imroot)*x))
            sol = collect(sol, x**i*exp(reroot*x)*cos(imroot*x))
        for i, reroot, imroot in collectterms:
            sol = collect(sol, x**i*exp(reroot*x))
        # Clean up the global so a stale value cannot leak into a later call.
        del collectterms
        eq = Eq(f(x), sol)

    # We cleaned up the constants before solving to help the solve engine with
    # a simpler expression, but the solved expression could have introduced
    # things like -C1, so rerun constantsimp() one last time before returning.
    eq = constant_renumber(constantsimp(eq, x, 2*order), 'C', 1, 2*order)
    return eq
@vectorize(2)
def checkodesol(ode, func, sol, order='auto', solve_for_func=True):
    """
    Substitutes sol for func in ode and checks that the result is 0.

    This only works when func is one function, like f(x). sol can be a
    single solution or a list of solutions. Either way, each solution
    must be an Equality instance (e.g., Eq(f(x), C1*cos(x) +
    C2*sin(x))). If it is a list of solutions, it will return a list of
    the checkodesol() result for each solution.

    It tries the following methods, in order, until it finds zero
    equivalence:
    1. Substitute the solution for f in the original equation. This
    only works if the ode is solved for f. It will attempt to solve
    it first unless solve_for_func == False
    2. Take n derivatives of the solution, where n is the order of
    ode, and check to see if that is equal to the solution. This
    only works on exact odes.
    3. Take the 1st, 2nd, ..., nth derivatives of the solution, each
    time solving for the derivative of f of that order (this will
    always be possible because f is a linear operator). Then back
    substitute each derivative into ode in reverse order.

    This function returns a tuple. The first item in the tuple is True
    if the substitution results in 0, and False otherwise. The second
    item in the tuple is what the substitution results in. It should
    always be 0 if the first item is True. Note that sometimes this
    function will False, but with an expression that is identically
    equal to 0, instead of returning True. This is because simplify()
    cannot reduce the expression to 0. If an expression returned by
    this function vanishes identically, then sol really is a solution to
    ode.

    If this function seems to hang, it is probably because of a hard
    simplification.

    To use this function to test, test the first item of the tuple.

    **Examples**
    >>> from sympy import Eq, Function, checkodesol, symbols
    >>> x, C1 = symbols('x C1')
    >>> f = Function('f')
    >>> checkodesol(f(x).diff(x), f(x), Eq(f(x), C1))
    (True, 0)
    >>> assert checkodesol(f(x).diff(x), f(x), Eq(f(x), C1))[0]
    >>> assert not checkodesol(f(x).diff(x), f(x), Eq(f(x), x))[0]
    >>> checkodesol(f(x).diff(x, 2), f(x), Eq(f(x), x**2))
    (False, 2)
    """
    if not func.is_Function or len(func.args) != 1:
        raise ValueError("func must be a function of one variable, not " + str(func))
    x = func.args[0]
    # s doubles as the loop flag ("not yet verified") and the residual of the
    # last attempted check; testnum selects which strategy to try next.
    s = True
    testnum = 0
    if not isinstance(ode, Equality):
        ode = Eq(ode, 0)
    if not isinstance(sol, Equality):
        raise ValueError("sol must be an Equality, got " + str(sol))
    if order == 'auto':
        order = ode_order(ode, func)
    if solve_for_func and not (sol.lhs == func and not sol.rhs.has(func)) and not \
        (sol.rhs == func and not sol.lhs.has(func)):
        # sol is not solved for func; try to solve it first, because
        # strategy 1 below only works on a solved solution.
        try:
            solved = solve(sol, func)
            if solved == []:
                raise NotImplementedError
        except NotImplementedError:
            pass
        else:
            if len(solved) == 1:
                result = checkodesol(ode, func, Eq(func, solved[0]), \
                    order=order, solve_for_func=False)
            else:
                result = checkodesol(ode, func, [Eq(func, t) for t in solved],
                    order=order, solve_for_func=False)
            return result

    while s:
        if testnum == 0:
            # First pass, try substituting a solved solution directly into the
            # ode. This has the highest chance of succeeding.
            ode_diff = ode.lhs - ode.rhs
            if sol.lhs == func:
                s = ode_diff.subs(func, sol.rhs)
            elif sol.rhs == func:
                s = ode_diff.subs(func, sol.lhs)
            else:
                testnum += 1
                continue
            ss = simplify(s)
            if ss:
                # with the new numer_denom in power.py, if we do a simple
                # expansion then testnum == 0 verifies all solutions.
                s = ss.expand()
            else:
                s = 0
            testnum += 1
        elif testnum == 1:
            # Second pass. If we cannot substitute f, try seeing if the nth
            # derivative is equal, this will only work for odes that are exact,
            # by definition.
            s = simplify(trigsimp(diff(sol.lhs, x, order) - diff(sol.rhs, x, order)) - \
                trigsimp(ode.lhs) + trigsimp(ode.rhs))
            # s2 = simplify(diff(sol.lhs, x, order) - diff(sol.rhs, x, order) - \
            #     ode.lhs + ode.rhs)
            testnum += 1
        elif testnum == 2:
            # Third pass. Try solving for df/dx and substituting that into the
            # ode. Thanks to Chris Smith for suggesting this method. Many of
            # the comments below are his too.
            # The method:
            # - Take each of 1..n derivatives of the solution.
            # - Solve each nth derivative for d^(n)f/dx^(n)
            #   (the differential of that order)
            # - Back substitute into the ode in decreasing order
            #   (i.e., n, n-1, ...)
            # - Check the result for zero equivalence
            if sol.lhs == func and not sol.rhs.has(func):
                diffsols = {0:sol.rhs}
            elif sol.rhs == func and not sol.lhs.has(func):
                diffsols = {0:sol.lhs}
            else:
                diffsols = {}
            sol = sol.lhs - sol.rhs
            for i in range(1, order + 1):
                # Differentiation is a linear operator, so there should always
                # be 1 solution. Nonetheless, we test just to make sure.
                # We only need to solve once. After that, we will automatically
                # have the solution to the differential in the order we want.
                if i == 1:
                    ds = sol.diff(x)
                    try:
                        sdf = solve(ds,func.diff(x, i))
                        if len(sdf) != 1:
                            raise NotImplementedError
                    except NotImplementedError:
                        testnum += 1
                        break
                    else:
                        diffsols[i] = sdf[0]
                else:
                    # This is what the solution says df/dx should be.
                    diffsols[i] = diffsols[i - 1].diff(x)
            # Make sure the above didn't fail.
            if testnum > 2:
                continue
            else:
                # Substitute it into ode to check for self consistency.
                lhs, rhs = ode.lhs, ode.rhs
                for i in range(order, -1, -1):
                    # BUGFIX: dict.has_key() does not exist on Python 3; the
                    # equivalent membership test works on Python 2 as well.
                    if i == 0 and 0 not in diffsols:
                        # We can only substitute f(x) if the solution was
                        # solved for f(x).
                        break
                    lhs = lhs.subs(func.diff(x, i), diffsols[i])
                    rhs = rhs.subs(func.diff(x, i), diffsols[i])
                    ode_or_bool = Eq(lhs,rhs)
                    if isinstance(ode_or_bool, bool):
                        if ode_or_bool:
                            lhs = rhs = S.Zero
                    else:
                        ode_or_bool = simplify(ode_or_bool)
                        lhs = ode_or_bool.lhs
                        rhs = ode_or_bool.rhs
                # No sense in overworking simplify--just prove the numerator
                # goes to zero.
                s = simplify(trigsimp((lhs-rhs).as_numer_denom()[0]))
                testnum += 1
        else:
            break
    if not s:
        return (True, s)
    elif s is True: # The code above never was able to change s
        raise NotImplementedError("Unable to test if " + str(sol) + \
            " is a solution to " + str(ode) + ".")
    else:
        return (False, s)
def ode_sol_simplicity(sol, func, trysolving=True):
    """
    Return an extended integer measuring how simple a solution to an
    ODE is.

    The ranking, from most simple to least simple, is:

    - sol is solved for func.
    - sol is not solved for func, but can be if passed to solve (e.g.,
      a solution returned by dsolve(ode, func, simplify=False)).
    - If sol is not solved for func, base the result on the length of
      sol, as computed by len(str(sol)).
    - If sol has any unevaluated Integrals, it is automatically
      considered less simple than any of the above.

    The return value is an integer such that if solution A is simpler
    than solution B by the above metric, then ode_sol_simplicity(sola,
    func) < ode_sol_simplicity(solb, func).

    The numbers currently returned are listed below, but only the
    ordering is guaranteed if the heuristic is ever improved:

    sol solved for func                        -2
    sol not solved for func but can be         -1
    sol is not solved or solvable for func     len(str(sol))
    sol contains an Integral                   oo

    oo here means the SymPy infinity, which compares greater than any
    integer.

    If you already know solve() cannot solve sol, pass trysolving=False
    to skip that step, which is the only potentially slow one. For
    example, dsolve with the simplify=False flag should do this.

    If sol is a list of solutions and the worst solution in the list
    returns oo, this returns oo; otherwise it returns len(str(sol)),
    that is, the length of the string representation of the whole list.

    **Examples**
    This function is designed to be passed to min as the key argument,
    such as min(listofsolutions, key=lambda i: ode_sol_simplicity(i, f(x))).
    Note that as long as SymPy supports Python 2.4, you must use the minkey()
    function in sympy/utilities/iterables.py to emulate this behavior.
    >>> from sympy import symbols, Function, Eq, tan, cos, sqrt, Integral
    >>> from sympy.solvers.ode import ode_sol_simplicity
    >>> from sympy.utilities.iterables import minkey
    >>> x, C1 = symbols('x C1')
    >>> f = Function('f')
    >>> ode_sol_simplicity(Eq(f(x), C1*x**2), f(x))
    -2
    >>> ode_sol_simplicity(Eq(x**2 + f(x), C1), f(x))
    -1
    >>> ode_sol_simplicity(Eq(f(x), C1*Integral(2*x, x)), f(x))
    oo
    >>> # This is from dsolve(x*f(x).diff(x) - f(x) - x*sin(f(x)/x), \
    >>> # f(x), hint='1st_homogeneous_coeff_subs_indep_div_dep')
    >>> eq1 = Eq(x/tan(f(x)/(2*x)), C1)
    >>> # This is from the same ode with the
    >>> # '1st_homogeneous_coeff_subs_dep_div_indep' hint.
    >>> eq2 = Eq(x*sqrt(1 + cos(f(x)/x))/sqrt(-1 + cos(f(x)/x)), C1)
    >>> ode_sol_simplicity(eq1, f(x))
    23
    >>> minkey([eq1, eq2], key=lambda i: ode_sol_simplicity(i, f(x)))
    x/tan(f(x)/(2*x)) == C1
    """
    #TODO: write examples
    # Cheap checks come first; the solve() call at the end is the only
    # potentially slow step.

    # A collection of solutions: oo if any member rates oo, otherwise the
    # printed length of the whole collection.
    if type(sol) in (list, tuple):
        for member in sol:
            if ode_sol_simplicity(member, func, trysolving=trysolving) == oo:
                return oo
        return len(str(sol))

    # Any unevaluated Integral makes the solution maximally complex.
    if sol.has(C.Integral):
        return oo

    # Already solved for func on one side? (When RootOf is implemented in
    # solve(), a RootOf solution should probably rank between a normal
    # solution and an unsolvable expression.)
    lhs_is_func = sol.lhs == func and not sol.rhs.has(func)
    rhs_is_func = sol.rhs == func and not sol.lhs.has(func)
    if lhs_is_func or rhs_is_func:
        return -2

    # Not already solved, but maybe solve() can do it for us.
    if trysolving:
        try:
            candidates = solve(sol, func)
            if candidates == []:
                raise NotImplementedError
        except NotImplementedError:
            pass
        else:
            return -1

    # Fall back to a naive length-of-string heuristic. This may favor
    # combined fractions (no duplicate denominators) and may slightly favor
    # expressions with fewer additions and subtractions, as those are
    # separated by spaces by the printer.
    # Additional ideas for simplicity heuristics are welcome, like maybe
    # checking if an equation has a larger domain, or if constantsimp has
    # introduced arbitrary constants numbered higher than the order of a
    # given ode that sol is a solution of.
    return len(str(sol))
@vectorize(0)
def constantsimp(expr, independentsymbol, endnumber, startnumber=1,
    symbolname='C'):
    """
    Simplifies an expression with arbitrary constants in it.

    This function is written specifically to work with dsolve(), and is
    not intended for general use.

    Simplification is done by "absorbing" the arbitrary constants in to
    other arbitrary constants, numbers, and symbols that they are not
    independent of.

    The symbols must all have the same name with numbers after it, for
    example, C1, C2, C3. The symbolname here would be 'C', the
    startnumber would be 1, and the end number would be 3. If the
    arbitrary constants are independent of the variable x, then the
    independent symbol would be x. There is no need to specify the
    dependent function, such as f(x), because it already has the
    independent symbol, x, in it.

    Because terms are "absorbed" into arbitrary constants and because
    constants are renumbered after simplifying, the arbitrary constants
    in expr are not necessarily equal to the ones of the same name in
    the returned result.

    If two or more arbitrary constants are added, multiplied, or raised
    to the power of each other, they are first absorbed together into a
    single arbitrary constant. Then the new constant is combined into
    other terms if necessary.

    Absorption is done naively. constantsimp() does not attempt to
    expand or simplify the expression first to obtain better absorption.
    So for example, exp(C1)*exp(x) will be simplified to C1*exp(x), but
    exp(C1 + x) will be left alone.

    Use constant_renumber() to renumber constants after simplification.
    Without using that function, simplified constants may end up
    having any numbering to them.

    In rare cases, a single constant can be "simplified" into two
    constants. Every differential equation solution should have as many
    arbitrary constants as the order of the differential equation. The
    result here will be technically correct, but it may, for example,
    have C1 and C2 in an expression, when C1 is actually equal to C2.
    Use your discretion in such situations, and also take advantage of
    the ability to use hints in dsolve().

    **Examples**
    >>> from sympy import symbols
    >>> from sympy.solvers.ode import constantsimp
    >>> C1, C2, C3, x, y = symbols('C1 C2 C3 x y')
    >>> constantsimp(2*C1*x, x, 3)
    C1*x
    >>> constantsimp(C1 + 2 + x + y, x, 3)
    C1 + x
    >>> constantsimp(C1*C2 + 2 + x + y + C3*x, x, 3)
    C2 + x + C3*x
    """
    # This function works recursively. The idea is that, for Mul,
    # Add, Pow, and Function, if the class has a constant in it, then
    # we can simplify it, which we do by recursing down and
    # simplifying up. Otherwise, we can skip that part of the
    # expression.
    from sympy.utilities import any
    constantsymbols = [Symbol(symbolname+"%d" % t) for t in range(startnumber,
        endnumber + 1)]
    x = independentsymbol

    if isinstance(expr, Equality):
        # For now, only treat the special case where one side of the equation
        # is a constant
        if expr.lhs in constantsymbols:
            # Fold the constant into the other side, simplify, then split the
            # constant back out so the Eq keeps its constant-on-one-side shape.
            return Eq(expr.lhs, constantsimp(expr.rhs + expr.lhs, x, endnumber,
                startnumber, symbolname) - expr.lhs)
            # this could break if expr.lhs is absorbed into another constant,
            # but for now, the only solutions that return Eq's with a constant
            # on one side are first order. At any rate, it will still be
            # technically correct. The expression will just have too many
            # constants in it
        elif expr.rhs in constantsymbols:
            return Eq(constantsimp(expr.lhs + expr.rhs, x, endnumber,
                startnumber, symbolname) - expr.rhs, expr.rhs)
        else:
            return Eq(constantsimp(expr.lhs, x, endnumber, startnumber,
                symbolname), constantsimp(expr.rhs, x, endnumber,
                startnumber, symbolname))

    if type(expr) not in (Mul, Add, Pow) and not expr.is_Function:
        # We don't know how to handle other classes
        # This also serves as the base case for the recursion
        return expr
    elif not any(expr.has(t) for t in constantsymbols):
        # No arbitrary constant anywhere in this subtree: nothing to do.
        return expr
    else:
        # hasconst : an arbitrary constant appears among expr's args
        # newconst : the constant that will absorb the dropped terms
        # isPowExp : the constant is the *exponent* of a Pow (order matters)
        # reeval   : recursion turned a subexpression into a bare constant,
        #            so the rebuilt expression must be simplified again
        newargs = []
        hasconst = False
        isPowExp = False
        reeval = False
        for i in expr.args:
            if i not in constantsymbols:
                newargs.append(i)
            else:
                newconst = i
                hasconst = True
                if expr.is_Pow and i == expr.exp:
                    isPowExp = True

        for i in range(len(newargs)):
            isimp = constantsimp(newargs[i], x, endnumber, startnumber,
                symbolname)
            if isimp in constantsymbols:
                reeval = True
                hasconst = True
                newconst = isimp
                # Here i is an *index*; for a Pow, args are (base, exp), so
                # index 1 is the exponent.
                if expr.is_Pow and i == 1:
                    isPowExp = True
            newargs[i] = isimp
        if hasconst:
            # Drop the args the constant can absorb (those free of x), then
            # put the constant back in the right position.
            newargs = [i for i in newargs if i.has(x)]
            if isPowExp:
                newargs = newargs + [newconst] # Order matters in this case
            else:
                newargs = [newconst] + newargs
        if expr.is_Pow and len(newargs) == 1:
            # Pow needs exactly two args; pad with exponent 1.
            newargs.append(S.One)
        if expr.is_Function:
            if (len(newargs) == 0 or hasconst and len(newargs) == 1):
                # The whole function call collapses into the constant.
                return newconst
            else:
                newfuncargs = [constantsimp(t, x, endnumber, startnumber,
                    symbolname) for t in expr.args]
                return expr.new(*newfuncargs)
        else:
            newexpr = expr.new(*newargs)
            if reeval:
                return constantsimp(newexpr, x, endnumber, startnumber,
                    symbolname)
            else:
                return newexpr
@vectorize(0)
def constant_renumber(expr, symbolname, startnumber, endnumber):
    """
    Renumber arbitrary constants in expr.

    This is a simple function that goes through and renumbers any Symbol
    with a name in the form symbolname + num where num is in the range
    from startnumber to endnumber.

    Symbols are renumbered based on Basic._compare_pretty, so they
    should be numbered roughly in the order that they appear in the
    final, printed expression. Note that this ordering is based in part
    on hashes, so it can produce different results on different
    machines.

    The structure of this function is very similar to that of
    constantsimp().

    **Example**
    >>> from sympy import symbols, Eq, pprint
    >>> from sympy.solvers.ode import constant_renumber
    >>> x, C1, C2, C3 = symbols('x C1 C2 C3')
    >>> pprint(C2 + C1*x + C3*x**2)
    2
    C2 + C1*x + C3*x
    >>> pprint(constant_renumber(C2 + C1*x + C3*x**2, 'C', 1, 3))
    2
    C1 + C2*x + C3*x
    """
    # Counter for the next constant number to hand out. It is module-level
    # (global) so its value survives across the recursive helper's calls.
    global newstartnumber
    newstartnumber = 1

    def _constant_renumber(expr, symbolname, startnumber, endnumber):
        """
        We need to have an internal recursive function so that
        newstartnumber maintains its values throughout recursive calls.
        """
        from sympy.utilities import any
        constantsymbols = [Symbol(symbolname+"%d" % t) for t in range(startnumber,
            endnumber + 1)]
        global newstartnumber
        if isinstance(expr, Equality):
            # Renumber each side of the equation independently.
            return Eq(_constant_renumber(expr.lhs, symbolname, startnumber, endnumber),
                _constant_renumber(expr.rhs, symbolname, startnumber, endnumber))
        if type(expr) not in (Mul, Add, Pow) and not expr.is_Function and\
            not any(expr.has(t) for t in constantsymbols):
            # Base case, as above. We better hope there aren't constants inside
            # of some other class, because they won't be renumbered.
            return expr
        elif expr in constantsymbols:
            # Renumbering happens here
            newconst = Symbol(symbolname + str(newstartnumber))
            newstartnumber += 1
            return newconst
        else:
            if expr.is_Function or expr.is_Pow:
                # Argument order is significant for Function/Pow, so recurse
                # without sorting.
                return expr.new(*[_constant_renumber(x, symbolname, startnumber,
                    endnumber) for x in expr.args])
            else:
                # Commutative Add/Mul: sort the args by printed order first so
                # numbering follows the printed expression.
                # NOTE(review): this is a Python 2 cmp-style sort; on Python 3
                # it would need key=cmp_to_key(Basic._compare_pretty).
                sortedargs = list(expr.args)
                sortedargs.sort(Basic._compare_pretty)
                return expr.new(*[_constant_renumber(x, symbolname, startnumber,
                    endnumber) for x in sortedargs])
    return _constant_renumber(expr, symbolname, startnumber, endnumber)
def _handle_Integral(expr, func, order, hint):
    """
    Convert a solution containing unevaluated Integrals into an actual
    solution.

    For most hints this is simply expr.doit(); hints ending in
    "_Integral" are returned untouched by design, and "1st_exact" needs
    special post-processing of the integrated result.
    """
    x = func.args[0]
    f = func.func

    if hint == "1st_exact":
        # ode_1st_exact() stashed its dummy variables in the module-level
        # exactvars dict; pick them up and clean the dict up afterwards.
        global exactvars
        x0 = exactvars['x0']
        y0 = exactvars['y0']
        y = exactvars['y']
        tmpsol = expr.lhs.doit()
        assert tmpsol.is_Add
        # Keep only the terms free of the dummy lower limits x0 and y0.
        sol = 0
        for term in tmpsol.args:
            if x0 not in term and y0 not in term:
                sol += term
        assert sol != 0
        sol = Eq(sol.subs(y, f(x)), expr.rhs)  # expr.rhs == C1
        del exactvars
        return sol

    if hint == "1st_exact_Integral":
        # FIXME: We still need to back substitute y
        # y = exactvars['y']
        # sol = expr.subs(y, f(x))
        # For now, we are going to have to return an expression with f(x)
        # replaced with y. Substituting results in the y's in the second
        # integral becoming f(x), which prevents the integral from being
        # evaluatable. For example, Integral(cos(f(x)), (x, x0, x)). If there
        # were a way to do inert substitution, that could maybe be used here
        # instead.
        del exactvars
        return expr

    if hint == "nth_linear_constant_coeff_homogeneous":
        # No Integrals are ever produced here; return as-is.
        return expr

    if not hint.endswith("_Integral"):
        return expr.doit()

    # "_Integral" hints deliberately leave the Integrals unevaluated.
    return expr
def ode_order(expr, func):
    """
    Returns the order of a given ODE with respect to func.

    This function is implemented recursively.

    **Examples**
    >>> from sympy import Function, ode_order
    >>> from sympy.abc import x
    >>> f, g = map(Function, ['f', 'g'])
    >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +
    ... f(x).diff(x), f(x))
    2
    >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))
    2
    >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))
    3
    """
    # Wild that matches any expression NOT containing func; used to detect
    # func-free subexpressions.
    a = Wild('a', exclude=[func])
    order = 0
    if isinstance(expr, Derivative) and expr.args[0] == func:
        # A bare derivative of func: its order is the number of
        # differentiation symbols.
        order = len(expr.symbols)
    else:
        for arg in expr.args:
            if isinstance(arg, Derivative) and arg.args[0] == func:
                order = max(order, len(arg.symbols))
            elif expr.match(a):
                # NOTE(review): this matches the *whole* expr, not arg, so it
                # is invariant across the loop; it only succeeds when expr is
                # entirely free of func (in which case order is 0 anyway).
                # Looks like arg.match(a) may have been intended — confirm
                # before changing, since that would alter behavior.
                order = 0
            else :
                # arg contains func but is neither a Derivative of it nor
                # func-free: recurse into its arguments.
                for arg1 in arg.args:
                    order = max(order, ode_order(arg1, func))
    return order
# FIXME: replace the general solution in the docstring with
# dsolve(equation, hint='1st_exact_Integral'). You will need to be able
# to have assumptions on P and Q that dP/dy = dQ/dx.
def ode_1st_exact(eq, func, order, match):
    r"""
    Solves 1st order exact ordinary differential equations.

    A 1st order differential equation P(x, y)dx + Q(x, y)dy = 0 is
    called exact when it is the total differential of some function
    F(x, y), i.e. when P(x, y) = dF/dx and Q(x, y) = dF/dy (d here
    denoting the partial derivative). It can be shown that a necessary
    and sufficient condition for a first order ODE to be exact is that
    dP/dy = dQ/dx, in which case the solution is as given below::

    >>> from sympy import Function, Eq, Integral, symbols, pprint
    >>> x, y, t, x0, y0, C1= symbols('x y t x0 y0 C1')
    >>> P, Q, F= map(Function, ['P', 'Q', 'F'])
    >>> pprint(Eq(Eq(F(x, y), Integral(P(t, y), (t, x0, x)) +
    ... Integral(Q(x0, t), (t, y0, y))), C1))
    x y
    / /
    | |
    F(x, y) = | P(t, y) dt + | Q(x0, t) dt = C1
    | |
    / /
    x0 y0

    Here the first partials of P and Q must exist and be continuous in
    a simply connected region.

    A note: SymPy currently has no way to represent inert substitution
    on an expression, so the hint '1st_exact_Integral' will return an
    integral with dy. This is supposed to represent the function that
    you are solving for.

    **Example**
    >>> from sympy import Function, dsolve, cos, sin
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> dsolve(cos(f(x)) - (x*sin(f(x)) - f(x)**2)*f(x).diff(x),
    ... f(x), hint='1st_exact')
    x*cos(f(x)) + f(x)**3/3 == C1

    **References**
    - http://en.wikipedia.org/wiki/Exact_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
    Dover 1963, pp. 73

    # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match  # matchdict for d + e*diff(f(x), x)
    C1 = Symbol('C1')
    x0 = Symbol('x0', dummy=True)
    y0 = Symbol('y0', dummy=True)
    # Stash the dummy variables in a module-level dict: this is the only way
    # to pass them on to _handle_Integral(), which post-processes the result.
    global exactvars
    exactvars = {'y0':y0, 'x0':x0, 'y':r['y']}
    # If we ever get a Constant class, x0 and y0 should be constants, I think
    q_part = C.Integral(r[r['e']].subs(x,x0),(r['y'],y0,f(x)))
    p_part = C.Integral(r[r['d']],(x,x0,x))
    return Eq(q_part + p_part, C1)
def ode_1st_homogeneous_coeff_best(eq, func, order, match):
    r"""
    Returns the best solution to an ODE from the two hints
    '1st_homogeneous_coeff_subs_dep_div_indep' and
    '1st_homogeneous_coeff_subs_indep_div_dep'.

    "Best" is as determined by ode_sol_simplicity().

    See the ode_1st_homogeneous_coeff_subs_indep_div_dep() and
    ode_1st_homogeneous_coeff_subs_dep_div_indep() docstrings for more
    information on these hints. Note that there is no
    '1st_homogeneous_coeff_best_Integral' hint.

    **Example**
    ::
    >>> from sympy import Function, dsolve, pprint
    >>> from sympy.abc import x
    >>> f = Function('f')
    >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
    ... hint='1st_homogeneous_coeff_best'))
    ___________
    / 2
    / 3*x
    / 1 + ----- *f(x) = C1
    3 / 2
    \/ f (x)

    **References**
    - http://en.wikipedia.org/wiki/Homogeneous_differential_equation
    - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
    Dover 1963, pp. 59

    # indirect doctest
    """
    # Both substitutions u1 = y/x and u2 = x/y solve the equation, but they
    # produce different integrals, so compute both candidates and keep
    # whichever turns out simpler.
    sol_indep = ode_1st_homogeneous_coeff_subs_indep_div_dep(eq,
        func, order, match)
    sol_dep = ode_1st_homogeneous_coeff_subs_dep_div_indep(eq,
        func, order, match)
    simplify = match.get('simplify', True)
    if simplify:
        sol_indep = odesimp(sol_indep, func, order,
            "1st_homogeneous_coeff_subs_indep_div_dep")
        sol_dep = odesimp(sol_dep, func, order,
            "1st_homogeneous_coeff_subs_dep_div_indep")
    # minkey() emulates min(..., key=...) for Python 2.4 compatibility.
    return minkey([sol_indep, sol_dep],
        key=lambda candidate: ode_sol_simplicity(candidate, func,
            trysolving=not simplify))
def ode_1st_homogeneous_coeff_subs_dep_div_indep(eq, func, order, match):
    r"""
    Solves a 1st order differential equation with homogeneous coefficients
    using the substitution
    u1 = <dependent variable>/<independent variable>.
    This is a differential equation P(x, y) + Q(x, y)dy/dx = 0, that P
    and Q are homogeneous of the same order.  A function F(x, y) is
    homogeneous of order n if F(xt, yt) = t**n*F(x, y).  Equivalently,
    F(x, y) can be rewritten as G(y/x) or H(x/y).  See also the
    docstring of homogeneous_order().
    If the coefficients P and Q in the differential equation above are
    homogeneous functions of the same order, then it can be shown that
    the substitution y = u1*x (u1 = y/x) will turn the differential
    equation into an equation separable in the variables x and u.  if
    h(u1) is the function that results from making the substitution
    u1 = f(x)/x on P(x, f(x)) and g(u2) is the function that results
    from the substitution on Q(x, f(x)) in the differential equation
    P(x, f(x)) + Q(x, f(x))*diff(f(x), x) = 0, then the general solution
    is::
        >>> from sympy import Function, dsolve, pprint
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = g(f(x)/x) + h(f(x)/x)*f(x).diff(x)
        >>> pprint(genform)
        d        /f(x)\    /f(x)\
        --(f(x))*h|----| + g|----|
        dx        \ x  /    \ x  /
        >>> pprint(dsolve(genform, f(x),
        ... hint='1st_homogeneous_coeff_subs_dep_div_indep_Integral'))
           f(x)
           ----
            x
             /
            |
            |       -h(u1)
        -   |  ---------------- d(u1) + log(C1*x) = 0
            |  u1*h(u1) + g(u1)
            |
            /
    Where u1*h(u1) + g(u1) != 0 and x != 0.
    See also the docstrings of ode_1st_homogeneous_coeff_best() and
    ode_1st_homogeneous_coeff_subs_indep_div_dep().
    **Example**
    ::
        >>> from sympy import Function, dsolve
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
        ... hint='1st_homogeneous_coeff_subs_dep_div_indep'))
                ________________
               /           3
              /  3*f(x)   f (x)
        x*   /   ------ + -----  = C1
        3   /      x         3
          \/                x
    **References**
        - http://en.wikipedia.org/wiki/Homogeneous_differential_equation
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 59
        # indirect doctest
    """
    x = func.args[0]
    f = func.func
    u1 = Symbol('u1', dummy=True)  # u1 == f(x)/x
    r = match  # d+e*diff(f(x),x)
    C1 = Symbol('C1')
    # Unevaluated integral in u1 from the separable form; upper bound is
    # the substitution u1 = f(x)/x.  (Renamed from 'int', which shadowed
    # the builtin.)
    integral = C.Integral((-r[r['e']]/(r[r['d']]+u1*r[r['e']])).subs({x: 1, r['y']: u1}),
        (u1, None, f(x)/x))
    # Fold log(C1) into the solution; assume_pos_real lets logcombine merge
    # the logarithms.
    sol = logcombine(Eq(log(x), integral + log(C1)), assume_pos_real=True)
    return sol
def ode_1st_homogeneous_coeff_subs_indep_div_dep(eq, func, order, match):
    r"""
    Solves a 1st order differential equation with homogeneous coefficients
    using the substitution
    u2 = <independent variable>/<dependent variable>.
    This is a differential equation P(x, y) + Q(x, y)dy/dx = 0, that P
    and Q are homogeneous of the same order.  A function F(x, y) is
    homogeneous of order n if F(xt, yt) = t**n*F(x, y).  Equivalently,
    F(x, y) can be rewritten as G(y/x) or H(x/y).  See also the
    docstring of homogeneous_order().
    If the coefficients P and Q in the differential equation above are
    homogeneous functions of the same order, then it can be shown that
    the substitution x = u2*y (u2 = x/y) will turn the differential
    equation into an equation separable in the variables y and u2.  if
    h(u2) is the function that results from making the substitution
    u2 = x/f(x) on P(x, f(x)) and g(u2) is the function that results
    from the substitution on Q(x, f(x)) in the differential equation
    P(x, f(x)) + Q(x, f(x))*diff(f(x), x) = 0, then the general solution
    is:
        >>> from sympy import Function, dsolve, pprint
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = g(x/f(x)) + h(x/f(x))*f(x).diff(x)
        >>> pprint(genform)
        d        /  x \    /  x \
        --(f(x))*h|----| + g|----|
        dx        \f(x)/    \f(x)/
        >>> pprint(dsolve(genform, f(x),
        ... hint='1st_homogeneous_coeff_subs_indep_div_dep_Integral'))
                 x
                ----
                f(x)
                  /
                 |
                 |       -g(u2)
                 |  ---------------- d(u2)
                 |  u2*g(u2) + h(u2)
                 |
                /
        f(x) = C1*e
    Where u2*g(u2) + h(u2) != 0 and f(x) != 0.
    See also the docstrings of ode_1st_homogeneous_coeff_best() and
    ode_1st_homogeneous_coeff_subs_dep_div_indep().
    **Example**
        >>> from sympy import Function, pprint
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x),
        ... hint='1st_homogeneous_coeff_subs_indep_div_dep'))
               ___________
              /         2
             /       3*x
            /   1 + ----- *f(x) = C1
        3  /         2
        \/          f (x)
    **References**
        - http://en.wikipedia.org/wiki/Homogeneous_differential_equation
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 59
        # indirect doctest
    """
    x = func.args[0]
    f = func.func
    u2 = Symbol('u2', dummy=True)  # u2 == x/f(x)
    r = match  # d+e*diff(f(x),x)
    C1 = Symbol('C1')
    # Unevaluated integral in u2 from the separable form; upper bound is
    # the substitution u2 = x/f(x).  (Renamed from 'int', which shadowed
    # the builtin.)
    integral = C.Integral((-r[r['d']]/(r[r['e']]+u2*r[r['d']])).subs({x: u2, r['y']: 1}),
        (u2, None, x/f(x)))
    # Fold log(C1) into the solution; assume_pos_real lets logcombine merge
    # the logarithms.
    sol = logcombine(Eq(log(f(x)), integral + log(C1)), assume_pos_real=True)
    return sol
# XXX: Should this function maybe go somewhere else?
def homogeneous_order(eq, *symbols):
    """
    Returns the order n if g is homogeneous and None if it is not
    homogeneous.
    Determines if a function is homogeneous and if so of what order.
    A function f(x,y,...) is homogeneous of order n if
    f(t*x,t*y,t*...) == t**n*f(x,y,...).  The function is implemented recursively.
    If the function is of two variables, F(x, y), then f being
    homogeneous of any order is equivalent to being able to rewrite
    F(x, y) as G(x/y) or H(y/x).  This fact is used to solve 1st order
    ordinary differential equations whose coefficients are homogeneous
    of the same order (see the docstrings of
    ode.ode_1st_homogeneous_coeff_subs_indep_div_dep() and
    ode.ode_1st_homogeneous_coeff_subs_dep_div_indep()).
    Symbols can be functions, but every argument of the function must be
    a symbol, and the arguments of the function that appear in the
    expression must match those given in the list of symbols.  If a
    declared function appears with different arguments than given in the
    list of symbols, None is returned.
    **Examples**
        >>> from sympy import Function, homogeneous_order, sqrt
        >>> from sympy.abc import x, y
        >>> f = Function('f')
        >>> homogeneous_order(f(x), f(x)) == None
        True
        >>> homogeneous_order(f(x,y), f(y, x), x, y) == None
        True
        >>> homogeneous_order(f(x), f(x), x)
        1
        >>> homogeneous_order(x**2*f(x)/sqrt(x**2+f(x)**2), x, f(x))
        2
        >>> homogeneous_order(x**2+f(x), x, f(x)) == None
        True
    """
    # Combine logarithms first so that _homogeneous_order sees e.g.
    # log(x) + log(y) as log(x*y); otherwise it could repeatedly undo
    # what _homogeneous_order takes apart.
    if eq.has(log):
        eq = logcombine(eq, assume_pos_real=True)
    return _homogeneous_order(eq, *symbols)
def _homogeneous_order(eq, *symbols):
    """
    The real work for homogeneous_order.
    This runs as a separate function call so that logcombine doesn't
    endlessly put back together what homogeneous_order is trying to take
    apart.

    Returns the homogeneous order of eq as a sympified number, or None
    if eq is not homogeneous in the given symbols.
    """
    if not symbols:
        raise ValueError, "homogeneous_order: no symbols were given."
    # n collects candidate orders found by the branches below; the
    # function only succeeds if exactly one order is found.
    n = set()
    # Replace all functions with dummy variables
    for i in symbols:
        if i.is_Function:
            # Every argument of a declared function must itself be one of
            # the given symbols, otherwise eq cannot be homogeneous.
            if not all([j in symbols for j in i.args]):
                return None
            else:
                # Swap the applied function for a fresh dummy symbol and
                # track the dummy in place of the function.
                dummyvar = numbered_symbols(prefix='d', dummy=True).next()
                eq = eq.subs(i, dummyvar)
                symbols = list(symbols)
                symbols.remove(i)
                symbols.append(dummyvar)
                symbols = tuple(symbols)
    # The following are not supported
    if eq.has(Order) or eq.has(Derivative):
        return None
    # These are all constants
    if type(eq) in (int, float) or eq.is_Number or eq.is_Integer or \
    eq.is_Rational or eq.is_NumberSymbol or eq.is_Real:
        # A constant is homogeneous of order 0.
        return sympify(0)
    # Break the equation into additive parts
    if eq.is_Add:
        # All additive terms must share one homogeneous order.
        s = set()
        for i in eq.args:
            s.add(_homogeneous_order(i, *symbols))
        if len(s) != 1:
            return None
        else:
            n = s
    if eq.is_Pow:
        # Only numeric exponents are supported; the order scales by the
        # exponent.
        if not eq.exp.is_number:
            return None
        o = _homogeneous_order(eq.base, *symbols)
        if o == None:
            return None
        else:
            n.add(sympify(o*eq.exp))
    t = Symbol('t', dummy=True, positive=True)  # It is sufficient that t > 0
    r = Wild('r', exclude=[t])
    a = Wild('a', exclude=[t])
    # Scale every symbol by t and try to match the result to r*t**a; a is
    # then the homogeneous order.
    eqs = eq.subs(dict(zip(symbols, (t*i for i in symbols))))
    if eqs.is_Mul:
        if t not in eqs:
            # t cancelled entirely, so the product has order 0.
            n.add(sympify(0))
        else:
            m = eqs.match(r*t**a)
            if m:
                n.add(sympify(m[a]))
            else:
                # Fall back to summing the orders of the multiplicative
                # factors.
                s = 0
                for i in eq.args:
                    o = _homogeneous_order(i, *symbols)
                    if o == None:
                        return None
                    else:
                        s += o
                n.add(sympify(s))
    if eq.is_Function:
        if eq.func is log:
            # The only possibility to pull a t out of a function is a power in
            # a logarithm.  This is very likely due to calling of logcombine().
            args = make_list(eq.args[0], Mul)
            if all(i.is_Pow for i in args):
                base = 1
                expos = set()
                for pow in args:
                    # Normalize the sign of each exponent so that e.g.
                    # log(x**2/y**2) yields a single exponent 2.
                    if sign(pow.exp).is_negative:
                        s = -1
                    else:
                        s = 1
                    expos.add(s*pow.exp)
                    base *= pow.base**s
                if len(expos) != 1:
                    return None
                else:
                    return _homogeneous_order(expos.pop()*log(base), *symbols)
            else:
                # log of anything else must have order-0 argument.
                if _homogeneous_order(eq.args[0], *symbols) == 0:
                    return sympify(0)
                else:
                    return None
        else:
            # A general function is homogeneous (of order 0) only if its
            # argument is homogeneous of order 0.
            if _homogeneous_order(eq.args[0], *symbols) == 0:
                return sympify(0)
            else:
                return None
    # Succeed only when exactly one candidate order was collected.
    if len(n) != 1 or n == None:
        return None
    else:
        return n.pop()
    return None
def ode_1st_linear(eq, func, order, match):
    r"""
    Solves 1st order linear differential equations.
    These are differential equations of the form dy/dx + P(x)*y = Q(x).
    These kinds of differential equations can be solved in a general
    way.  The integrating factor exp(Integral(P(x), x)) will turn the
    equation into a separable equation.  The general solution is::
        >>> from sympy import Function, dsolve, Eq, pprint, diff, sin
        >>> from sympy.abc import x
        >>> f, P, Q = map(Function, ['f', 'P', 'Q'])
        >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x))
        >>> pprint(genform)
                    d
        P(x)*f(x) + --(f(x)) = Q(x)
                    dx
        >>> pprint(dsolve(genform, f(x), hint='1st_linear_Integral'))
               /       /                   \
               |      |                    |
               |      |          /         |      /
               |      |         |          |     |
               |      |         | P(x) dx  |   - | P(x) dx
               |      |         |          |     |
               |      |        /           |    /
        f(x) = |C1 +  | Q(x)*e           dx|*e
               |      |                    |
               \     /                     /
    **Example**
        >>> f = Function('f')
        >>> pprint(dsolve(Eq(x*diff(f(x), x) - f(x), x**2*sin(x)),
        ... f(x), '1st_linear'))
        f(x) = x*(C1 - cos(x))
    **References**
        - http://en.wikipedia.org/wiki/Linear_differential_equation#First_order_equation
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 92
        # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match  # a*diff(f(x),x) + b*f(x) + c
    C1 = Symbol('C1')
    # Integrating factor exp(Integral(P, x)) with P = b/a.
    t = exp(C.Integral(r[r['b']]/r[r['a']], x))
    # Integral(mu*Q, x) with Q = -c/a (the match stores the equation as
    # a*f' + b*f + c = 0).
    tt = C.Integral(t*(-r[r['c']]/r[r['a']]), x)
    return Eq(f(x), (tt + C1)/t)
def ode_Bernoulli(eq, func, order, match):
    r"""
    Solves Bernoulli differential equations.
    These are equations of the form dy/dx + P(x)*y = Q(x)*y**n, n != 1.
    The substitution w = 1/y**(1-n) will transform an equation of this
    form into one that is linear (see the docstring of
    ode_1st_linear()).  The general solution is::
        >>> from sympy import Function, dsolve, Eq, pprint
        >>> from sympy.abc import x, n
        >>> f, P, Q = map(Function, ['f', 'P', 'Q'])
        >>> genform = Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)**n)
        >>> pprint(genform)
                    d                n
        P(x)*f(x) + --(f(x)) = f (x)*Q(x)
                    dx
        >>> pprint(dsolve(genform, f(x), hint='Bernoulli_Integral')) #doctest: +SKIP
                                                                              1
                                                                             ----
                                                                            1 - n
               //                /                            \            \
               ||               |                             |            |
               ||               |            /                |  /         |
               ||               |           |                 | |          |
               ||               | (1 - n)*  | P(x) dx         | (-1 + n)*  | P(x) dx|
               ||               |           |                 | |          |
               ||               |          /                  |/           |
        f(x) = ||C1 + (-1 + n)* | -Q(x)*e            dx|*e                 |
               ||               |                             |            |
               \\               /                             /            /
    Note that when n = 1, then the equation is separable (see the
    docstring of ode_separable()).
        >>> pprint(dsolve(Eq(f(x).diff(x) + P(x)*f(x), Q(x)*f(x)), f(x),
        ... hint='separable_Integral'))
         f(x)
           /
          |                /
          |  1            |
          |  - dy = C1 +  | (-P(x) + Q(x)) dx
          |  y            |
          |              /
         /
    **Example**
        >>> from sympy import Function, dsolve, Eq, pprint, log
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(Eq(x*f(x).diff(x) + f(x), log(x)*f(x)**2),
        ... f(x), hint='Bernoulli'))
                        1
        f(x) = -------------------
                 /  log(x)   1\
               x*|C1 + ------ + -|
                 \    x      x/
    **References**
        - http://en.wikipedia.org/wiki/Bernoulli_differential_equation
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 95
        # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match  # a*diff(f(x),x) + b*f(x) + c*f(x)**n, n != 1
    C1 = Symbol('C1')
    n = r[r['n']]
    # Integrating factor for the 1st-order linear ODE obtained from the
    # substitution w = f(x)**(1 - n).
    factor = exp((1 - n)*C.Integral(r[r['b']]/r[r['a']], x))
    # Particular part of the linear solution for w.
    particular = (n - 1)*C.Integral(factor*r[r['c']]/r[r['a']], x)
    # Undo the substitution: f(x) = w**(1/(1 - n)).
    return Eq(f(x), ((particular + C1)/factor)**(1/(1 - n)))
def ode_Liouville(eq, func, order, match):
    r"""
    Solves 2nd order Liouville differential equations.
    The general form of a Liouville ODE is
    d^2y/dx^2 + g(y)*(dy/dx)**2 + h(x)*dy/dx.  The general solution is::
        >>> from sympy import Function, dsolve, Eq, pprint, diff
        >>> from sympy.abc import x
        >>> f, g, h = map(Function, ['f', 'g', 'h'])
        >>> genform = Eq(diff(f(x),x,x) + g(f(x))*diff(f(x),x)**2 +
        ... h(x)*diff(f(x),x), 0)
        >>> pprint(genform)
                  2                  2
        d                 d          d
        --(f(x)) *g(f(x)) + --(f(x))*h(x) + -----(f(x)) = 0
        dx                  dx              dx dx
        >>> pprint(dsolve(genform, f(x), hint='Liouville_Integral'))
                                  f(x)
                  /               /
                 |               |
                 |     /         |     /
                 |    |          |    |
                 |  - | h(x) dx  |    | g(y) dy
                 |    |          |    |
                 |   /           |   /
        C1 + C2* | e            dx + |  e           dy = 0
                 |               |
                /               /
    **Example**
    ::
        >>> from sympy import Function, dsolve, Eq, pprint
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(diff(f(x), x, x) + diff(f(x), x)**2/f(x) +
        ... diff(f(x), x)/x, f(x), hint='Liouville'))
                   ________________           ________________
        [f(x) = -\/ C1 + C2*log(x) , f(x) = \/ C1 + C2*log(x) ]
    **References**
        - Goldstein and Braun, "Advanced Methods for the Solution of
          Differential Equations", pp. 98
        - http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
        # indirect doctest
    """
    # Liouville ODE f(x).diff(x, 2) + g(f(x))*(f(x).diff(x))**2 + h(x)*f(x).diff(x)
    # See Goldstein and Braun, "Advanced Methods for the Solution of
    # Differential Equations", pg. 98, as well as
    # http://www.maplesoft.com/support/help/view.aspx?path=odeadvisor/Liouville
    x = func.args[0]
    f = func.func
    r = match  # f(x).diff(x, 2) + g*f(x).diff(x)**2 + h*f(x).diff(x)
    y = r['y']
    C1 = Symbol('C1')
    C2 = Symbol('C2')
    # Integral over y of exp(Integral(g, y)), evaluated up to f(x).
    # (Renamed from 'int', which shadowed the builtin.)
    integral = C.Integral(exp(C.Integral(r['g'], y)), (y, None, f(x)))
    sol = Eq(integral + C1*C.Integral(exp(-C.Integral(r['h'], x)), x) + C2, 0)
    return sol
def _nth_linear_match(eq, func, order):
    """
    Matches a differential equation to the linear form:
    a_n(x)y^(n) + ... + a_1(x)y' + a_0(x)y + B(x) = 0
    Returns a dict of order:coeff terms, where order is the order of the
    derivative on each term, and coeff is the coefficient of that
    derivative.  The key -1 holds the function B(x).  Returns None if
    the ode is not linear.  This function assumes that func has already
    been checked to be good.
    **Examples**
        >>> from sympy import Function, cos, sin
        >>> from sympy.abc import x
        >>> from sympy.solvers.ode import _nth_linear_match
        >>> f = Function('f')
        >>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
        ... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
        ... sin(x), f(x), 3)
        {1: 2 + cos(x), 0: -1, -1: x - sin(x), 2: x, 3: 1}
        >>> _nth_linear_match(f(x).diff(x, 3) + 2*f(x).diff(x) +
        ... x*f(x).diff(x, 2) + cos(x)*f(x).diff(x) + x - f(x) -
        ... sin(f(x)), f(x), 3) == None
        True
    """
    from sympy import S
    x = func.args[0]
    xset = set([x])
    # One slot per derivative order; slot -1 accumulates the B(x) part.
    coeffs = {}
    for n in range(-1, order + 1):
        coeffs[n] = S.Zero
    for term in make_list(eq, Add):
        if not term.has(func):
            # Terms free of func form the inhomogeneity B(x).
            coeffs[-1] += term
            continue
        c, dep = term.as_independent(func)
        # The dependent factor must be func itself or a pure derivative
        # of func with respect to x only; anything else is nonlinear.
        is_x_derivative = isinstance(dep, Derivative) and set(dep.symbols) == xset
        if not (is_x_derivative or dep == func):
            return None
        coeffs[len(dep.args[1:])] += c
    return coeffs
def ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match, returns='sol'):
    """
    Solves an nth order linear homogeneous differential equation with
    constant coefficients.
    This is an equation of the form a_n*f(x)^(n) + a_(n-1)*f(x)^(n-1) +
    ... + a1*f'(x) + a0*f(x) = 0
    These equations can be solved in a general manner, by taking the
    roots of the characteristic equation a_n*m**n + a_(n-1)*m**(n-1) +
    ... + a1*m + a0 = 0.  The solution will then be the sum of
    Cn*x**i*exp(r*x) terms, for each  where Cn is an arbitrary constant,
    r is a root of the characteristic equation and i is is one of each
    from 0 to the multiplicity of the root - 1 (for example, a root 3 of
    multiplicity 2 would create the terms C1*exp(3*x) + C2*x*exp(3*x)).
    The exponential is usually expanded for complex roots using Euler's
    equation exp(I*x) = cos(x) + I*sin(x).  Complex roots always come in
    conjugate pars in polynomials with real coefficients, so the two
    roots will be represented (after simplifying the constants) as
    exp(a*x)*(C1*cos(b*x) + C2*sin(b*x)).
    If SymPy cannot find exact roots to the characteristic equation, a
    RootOf instance will be return in its stead.
        >>> from sympy import Function, dsolve, Eq
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> dsolve(f(x).diff(x, 5) + 10*f(x).diff(x) - 2*f(x), f(x),
        ... hint='nth_linear_constant_coeff_homogeneous')
        ... # doctest: +NORMALIZE_WHITESPACE
        f(x) == C1*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=0)) + \
        C2*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=1)) + \
        C3*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=2)) + \
        C4*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=3)) + \
        C5*exp(x*RootOf(_m**5 + 10*_m - 2, _m, domain='ZZ', index=4))
    Note that because this method does not involve integration, there is
    no 'nth_linear_constant_coeff_homogeneous_Integral' hint.
    The following is for internal use:
    - returns = 'sol' returns the solution to the ODE.
    - returns = 'list' returns a list of linearly independent
      solutions, for use with non homogeneous solution methods like
      variation of parameters and undetermined coefficients.  Note that,
      though the solutions should be linearly independent, this function
      does not explicitly check that.  You can do "assert
      simplify(wronskian(sollist)) != 0" to check for linear independence.
      Also, "assert len(sollist) == order" will need to pass.
    - returns = 'both', return a dictionary {'sol':solution to ODE,
      'list': list of linearly independent solutions}.
    **Example**
        >>> from sympy import Function, dsolve, pprint
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(f(x).diff(x, 4) + 2*f(x).diff(x, 3) -
        ... 2*f(x).diff(x, 2) - 6*f(x).diff(x) + 5*f(x), f(x),
        ... hint='nth_linear_constant_coeff_homogeneous'))
                            x                            -2*x
        f(x) = (C1 + C2*x)*e  + (C3*sin(x) + C4*cos(x))*e
    **References**
        - http://en.wikipedia.org/wiki/Linear_differential_equation
          section: Nonhomogeneous_equation_with_constant_coefficients
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 211
        # indirect doctest
    """
    x = func.args[0]
    f = func.func
    r = match
    # A generator of constants
    constants = numbered_symbols(prefix='C', function=Symbol, start=1)
    # First, set up characteristic equation.
    m = Symbol('m', dummy=True)
    chareq = S.Zero
    for i in r.keys():
        # Skip the string keys and the -1 (inhomogeneous) slot; the
        # non-negative integer keys are the derivative-order coefficients.
        if type(i) == str or i < 0:
            pass
        else:
            chareq += r[i]*m**i
    chareqroots = RootsOf(chareq, m)
    charroots_exact = list(chareqroots.exact_roots())
    charroots_formal = list(chareqroots.formal_roots())
    if charroots_formal and discriminant(chareq, m) == 0:
        # If Poly cannot find the roots explicitly, we can only return
        # an expression in terms of RootOf's if we know the roots
        # are not repeated.  We use the fact that a polynomial has
        # repeated roots iff its discriminant == 0.
        # Ideally, RootOf would cancel out roots from charroots_exact, so
        # we check the discriminant of only the unknown part of the chareq.
        # See issue 1557.
        raise NotImplementedError("Cannot find all of the roots of " + \
        "characteristic equation " + str(chareq) + ", which has " + \
        "repeated roots.")
    # Create a dict root: multiplicity or charroots
    charroots = {}
    for i in charroots_exact + charroots_formal:
        if i in charroots:
            charroots[i] += 1
        else:
            charroots[i] = 1
    gsol = S(0)
    # We need keep track of terms so we can run collect() at the end.
    # This is necessary for constantsimp to work properly.
    global collectterms
    collectterms = []
    for root, multiplicity in charroots.items():
        for i in range(multiplicity):
            if isinstance(root, RootOf):
                gsol += exp(root*x)*constants.next()
                assert multiplicity == 1
                collectterms = [(0, root, 0)] + collectterms
            else:
                reroot = re(root)
                imroot = im(root)
                gsol += x**i*exp(reroot*x)*(constants.next()*sin(abs(imroot)*x) \
                + constants.next()*cos(imroot*x))
                # This ordering is important
                collectterms = [(i, reroot, imroot)] + collectterms
    if returns == 'sol':
        return Eq(f(x), gsol)
    # BUG FIX: this was `returns in ('list' 'both')`, i.e. a substring test
    # against the single string 'listboth' (implicit string concatenation),
    # which would also accept junk like 'stb'.  Use a real tuple.
    elif returns in ('list', 'both'):
        # Create a list of (hopefully) linearly independent solutions
        gensols = []
        # Keep track of when to use sin or cos for nonzero imroot
        for i, reroot, imroot in collectterms:
            if imroot == 0:
                gensols.append(x**i*exp(reroot*x))
            else:
                if x**i*exp(reroot*x)*sin(abs(imroot)*x) in gensols:
                    gensols.append(x**i*exp(reroot*x)*cos(imroot*x))
                else:
                    gensols.append(x**i*exp(reroot*x)*sin(abs(imroot)*x))
        if returns == 'list':
            return gensols
        else:
            return {'sol': Eq(f(x), gsol), 'list': gensols}
    else:
        raise ValueError('Unknown value for key "returns".')
def ode_nth_linear_constant_coeff_undetermined_coefficients(eq, func, order, match):
    r"""
    Solves an nth order linear differential equation with constant
    coefficients using the method of undetermined coefficients.
    This method works on differential equations of the form a_n*f(x)^(n)
    + a_(n-1)*f(x)^(n-1) + ... + a1*f'(x) + a0*f(x) = P(x), where P(x)
    is a function that has a finite number of linearly independent
    derivatives.
    Functions that fit this requirement are finite sums functions of the
    form a*x**i*exp(b*x)*sin(c*x + d) or a*x**i*exp(b*x)*cos(c*x + d),
    where i is a non-negative integer and a, b, c, and d are constants.
    For example any polynomial in x, functions like x**2*exp(2*x),
    x*sin(x), and exp(x)*cos(x) can all be used.  Products of sin's and
    cos's have a finite number of derivatives, because they can be
    expanded into sin(a*x) and cos(b*x) terms.  However, SymPy currently
    cannot do that expansion, so you will need to manually rewrite the
    expression in terms of the above to use this method.  So, for example,
    you will need to manually convert sin(x)**2 into (1 + cos(2*x))/2 to
    properly apply the method of undetermined coefficients on it.
    This method works by creating a trial function from the expression
    and all of its linear independent derivatives and substituting them
    into the original ODE.  The coefficients for each term will be a
    system of linear equations, which are be solved for and substituted,
    giving the solution.  If any of the trial functions are linearly
    dependent on the solution to the homogeneous equation, they are
    multiplied by sufficient x to make them linearly independent.
    **Example**
        >>> from sympy import Function, dsolve, pprint, exp, cos
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(f(x).diff(x, 2) + 2*f(x).diff(x) + f(x) -
        ... 4*exp(-x)*x**2 + cos(2*x), f(x),
        ... hint='nth_linear_constant_coeff_undetermined_coefficients'))
                                              /             4\
                 4*sin(2*x)   3*cos(2*x)     |            x | -x
        f(x) = - ---------- + ---------- +   |C1 + C2*x + --|*e
                    25            25         \            3 /
    **References**
        - http://en.wikipedia.org/wiki/Method_of_undetermined_coefficients
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 221
        # indirect doctest
    """
    # Solve the homogeneous part first; its solution list and general
    # solution are needed by the undetermined-coefficients helper.
    homogeneous = ode_nth_linear_constant_coeff_homogeneous(
        eq, func, order, match, returns='both')
    match.update(homogeneous)
    return _solve_undetermined_coefficients(eq, func, order, match)
def _solve_undetermined_coefficients(eq, func, order, match):
    """
    Helper function for the method of undetermined coefficients.
    See the ode_nth_linear_constant_coeff_undetermined_coefficients()
    docstring for more information on this method.
    match should be a dictionary that has the following keys:
    'list' - A list of solutions to the homogeneous equation, such as
    the list returned by
    ode_nth_linear_constant_coeff_homogeneous(returns='list')
    'sol' - The general solution, such as the solution returned by
    ode_nth_linear_constant_coeff_homogeneous(returns='sol')
    'trialset' - The set of trial functions as returned by
    _undetermined_coefficients_match()['trialset']

    Returns Eq(f(x), <general solution> + <particular solution>).
    """
    x = func.args[0]
    f = func.func
    r = match
    # Generator of undetermined coefficients a0, a1, ...
    coeffs = numbered_symbols('a', dummy=True)
    coefflist = []
    gensols = r['list']
    gsol = r['sol']
    trialset = r['trialset']
    notneedset = set([])
    newtrialset = set([])
    # collectterms is populated by ode_nth_linear_constant_coeff_homogeneous
    # (each entry is (i, reroot, imroot) for one homogeneous term); it must
    # have been called before this function.
    global collectterms
    if len(gensols) != order:
        raise NotImplementedError("Cannot find " + str(order) + \
        " solutions to the homogeneous equation nessesary to apply " + \
        "undetermined coefficients to " + str(eq) + " (number of terms != order)")
    usedsin = set([])
    mult = 0  # The multiplicity of the root
    getmult = True
    for i, reroot, imroot in collectterms:
        # collectterms lists each root's powers in descending i, so a new
        # root starts whenever the previous entry had i == 0.
        if getmult:
            mult = i + 1
            getmult = False
        if i == 0:
            getmult = True
        if imroot:
            # Alternate between sin and cos
            if (i, reroot) in usedsin:
                check = x**i*exp(reroot*x)*cos(imroot*x)
            else:
                check = x**i*exp(reroot*x)*sin(abs(imroot)*x)
                usedsin.add((i, reroot))
        else:
            check = x**i*exp(reroot*x)
        if check in trialset:
            # If an element of the trial function is already part of the homogeneous
            # solution, we need to multiply by sufficient x to make it linearly
            # independent.  We also don't need to bother checking for the coefficients
            # on those elements, since we already know it will be 0.
            while True:
                if check*x**mult in trialset:
                    mult += 1
                else:
                    break
            trialset.add(check*x**mult)
            notneedset.add(check)
    newtrialset = trialset - notneedset
    # Build the trial function: one fresh coefficient per trial term.
    trialfunc = 0
    for i in newtrialset:
        c = coeffs.next()
        coefflist.append(c)
        trialfunc += c*i
    # Substitute the trial function into the ODE and collect coefficients.
    eqs = eq.subs(f(x), trialfunc)
    coeffsdict = dict(zip(trialset, [0]*(len(trialset) + 1)))
    # XXX: Replace this with as_Add when Mateusz's Polys branch gets merged in
    # The else clause actually should never be run unless the ode is only one
    # term, in which case it must be a derivative term and so will be inhomogeneous
    eqs = expand_mul(eqs)
    for i in make_list(eqs, Add):
        # Split each additive term into its x-dependent part (the trial
        # term) and its coefficient.
        s = separatevars(i, dict=True, symbols=[x])
        coeffsdict[s[x]] += s['coeff']
    # Each collected coefficient must vanish; solve the linear system.
    coeffvals = solve(coeffsdict.values(), coefflist)
    if not coeffvals:
        raise NotImplementedError("Could not solve " + str(eq) + " using the " + \
        " method of undetermined coefficients (unable to solve for coefficients).")
    psol = trialfunc.subs(coeffvals)
    return Eq(f(x), gsol.rhs + psol)
def _undetermined_coefficients_match(expr, x):
    """
    Returns a trial function match if undetermined coefficients can be
    applied to expr, and None otherwise.
    A trial expression can be found for an expression for use with the
    method of undetermined coefficients if the expression is an
    additive/multiplicative combination of constants, polynomials in x
    (the independent variable of expr), sin(a*x + b), cos(a*x + b), and
    exp(a*x) terms (in other words, it has a finite number of linearly
    independent derivatives).
    Note that you may still need to multiply each term returned here by
    sufficient x to make it linearly independent with the solutions to
    the homogeneous equation.
    This is intended for internal use by undetermined_coefficients
    hints.
    SymPy currently has no way to convert sin(x)**n*cos(y)**m into a sum
    of only sin(a*x) and cos(b*x) terms, so these are not implemented.
    So, for example, you will need to manually convert sin(x)**2 into
    (1 + cos(2*x))/2 to properly apply the method of undetermined
    coefficients on it.
    **Example**
        >>> from sympy import log, exp
        >>> from sympy.solvers.ode import _undetermined_coefficients_match
        >>> from sympy.abc import x
        >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x)
        {'test': True, 'trialset': set([x*exp(x), exp(x), exp(-x)])}
        >>> _undetermined_coefficients_match(log(x), x)
        {'test': False}
    """
    from sympy import S
    a = Wild('a', exclude=[x])
    b = Wild('b', exclude=[x])
    expr = powsimp(expr, combine='exp')  # exp(x)*exp(2*x + 1) => exp(3*x + 1)
    retdict = {}
    def _test_term(expr, x):
        """
        Test if expr fits the proper form for undetermined coefficients.
        """
        if expr.is_Add:
            return all([_test_term(i, x) for i in expr.args])
        elif expr.is_Mul:
            if expr.has(sin) or expr.has(cos):
                foundtrig = False
                # Make sure that there is only one trig function in the args.
                # See the docstring.
                for i in expr.args:
                    if i.has(sin) or i.has(cos):
                        if foundtrig:
                            return False
                        else:
                            foundtrig = True
            return all([_test_term(i, x) for i in expr.args])
        elif expr.is_Function:
            if expr.func in (sin, cos, exp):
                if expr.args[0].match(a*x + b):
                    return True
                else:
                    return False
            else:
                return False
        elif expr.is_Pow and expr.base.is_Symbol and expr.exp.is_Integer and \
        expr.exp >= 0:
            return True
        elif expr.is_Pow and expr.base.is_number:
            if expr.exp.match(a*x + b):
                return True
            else:
                return False
        elif expr.is_Symbol or expr.is_Number:
            return True
        else:
            return False
    def _get_trial_set(expr, x, exprs=None):
        """
        Returns a set of trial terms for undetermined coefficients.
        The idea behind undetermined coefficients is that the terms
        expression repeat themselves after a finite number of
        derivatives, except for the coefficients (they are linearly
        dependent).  So if we collect these, we should have the terms of
        our trial function.
        """
        # Use a None sentinel instead of a mutable default argument so
        # each top-level call starts from a fresh accumulator.
        if exprs is None:
            exprs = set()
        def _remove_coefficient(expr, x):
            """
            Returns the expression without a coefficient.
            Similar to expr.as_independent(x)[1], except it only works
            multiplicatively.
            """
            # I was using the below match, but it doesn't always put all of the
            # coefficient in c.  c.f.  2**x*6*exp(x)*log(2)
            # The below code is probably cleaner anyway.
            # c = Wild('c', exclude=[x])
            # t = Wild('t')
            # r = expr.match(c*t)
            term = S.One
            if expr.is_Mul:
                for i in expr.args:
                    if i.has(x):
                        term *= i
            elif expr.has(x):
                term = expr
            return term
        expr = expand_mul(expr)
        if expr.is_Add:
            for term in expr.args:
                if _remove_coefficient(term, x) in exprs:
                    pass
                else:
                    exprs.add(_remove_coefficient(term, x))
                    exprs = exprs.union(_get_trial_set(term, x, exprs))
        else:
            term = _remove_coefficient(expr, x)
            tmpset = exprs.union(set([term]))
            oldset = set([])
            while tmpset != oldset:
                # If you get stuck in this loop, then _test_term is probably broken
                oldset = tmpset.copy()
                expr = expr.diff(x)
                term = _remove_coefficient(expr, x)
                if term.is_Add:
                    tmpset = tmpset.union(_get_trial_set(term, x, tmpset))
                else:
                    tmpset.add(term)
            exprs = tmpset
        return exprs
    retdict['test'] = _test_term(expr, x)
    if retdict['test']:
        # Try to generate a list of trial solutions that will have the undetermined
        # coefficients.  Note that if any of these are not linearly independent
        # with any of the solutions to the homogeneous equation, then they will
        # need to be multiplied by sufficient x to make them so.  This function
        # DOES NOT do that (it doesn't even look at the homogeneous equation).
        retdict['trialset'] = _get_trial_set(expr, x)
    return retdict
def ode_nth_linear_constant_coeff_variation_of_parameters(eq, func, order, match):
    r"""
    Solves an nth order linear differential equation with constant
    coefficients using the method of variation of parameters.
    This method works on any differential equations of the form
    f(x)^(n) + a_(n-1)*f(x)^(n-1) + ... + a1*f'(x) + a0*f(x) = P(x).
    This method works by assuming that the particular solution takes the
    form Sum(c_i(x)*y_i(x), (x, 1, n)), where y_i is the ith solution to
    the homogeneous equation.  The solution is then solved using
    Wronskian's and Cramer's Rule.  The particular solution is given by
    Sum(Integral(W_i(x)/W(x), x)*y_i(x), (x, 1, n)), where W(x) is the
    Wronskian of the fundamental system (the system of n linearly
    independent solutions to the homogeneous equation), and W_i(x) is
    the Wronskian of the fundamental system with the ith column replaced
    with [0, 0, ..., 0, P(x)].
    This method is general enough to solve any nth order inhomogeneous
    linear differential equation with constant coefficients, but
    sometimes SymPy cannot simplify the Wronskian well enough to
    integrate it.  If this method hangs, try using the
    'nth_linear_constant_coeff_variation_of_parameters_Integral' hint
    and simplifying the integrals manually.  Also, prefer using
    'nth_linear_constant_coeff_undetermined_coefficients' when it
    applies, because it doesn't use integration, making it faster and
    more reliable.
    Warning, using simplify=False with
    'nth_linear_constant_coeff_variation_of_parameters' in dsolve()
    may cause it to hang, because it will not attempt to simplify
    the Wronskian before integrating.  It is recommended that you only
    use simplify=False with
    'nth_linear_constant_coeff_variation_of_parameters_Integral' for
    this method, especially if the solution to the homogeneous
    equation has trigonometric functions in it.
    **Example**
        >>> from sympy import Function, dsolve, pprint, exp, log
        >>> from sympy.abc import x
        >>> f = Function('f')
        >>> pprint(dsolve(f(x).diff(x, 3) - 3*f(x).diff(x, 2) +
        ... 3*f(x).diff(x) - f(x) - exp(x)*log(x), f(x),
        ... hint='nth_linear_constant_coeff_variation_of_parameters'))
               /             3 /11   log(x)\       2\  x
        f(x) = |C1 + C2*x - x *|-- - ------| + C3*x |*e
               \               \36     6   /        /
    **References**
        - http://en.wikipedia.org/wiki/Variation_of_parameters
        - http://planetmath.org/encyclopedia/VariationOfParameters.html
        - M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
          Dover 1963, pp. 233
        # indirect doctest
    """
    # Solve the homogeneous part first; its solution list and general
    # solution are needed by the variation-of-parameters helper.
    gensol = ode_nth_linear_constant_coeff_homogeneous(eq, func, order, match,
        returns='both')
    match.update(gensol)
    return _solve_variation_of_parameters(eq, func, order, match)
def _solve_variation_of_parameters(eq, func, order, match):
"""
Helper function for the method of variation of parameters.
See the ode_nth_linear_constant_coeff_undetermined_coefficients()
docstring for more information on this method.
match should be a dictionary that has the following keys:
'list' - A list of solutions to the homogeneous equation, such as
the list returned by
ode_nth_linear_constant_coeff_homogeneous(returns='list')
'sol' - The general solution, such as the solution returned by
ode_nth_linear_constant_coeff_homogeneous(returns='sol')
"""
x = func.args[0]
f = func.func
r = match
psol = 0
gensols = r['list']
gsol = r['sol']
wr = wronskian(gensols, x)
if r.get('simplify', True):
wr = simplify(wr) # We need much better simplification for some ODEs.
# See issue 1563, for example.
# To reduce commonly occuring sin(x)**2 + cos(x)**2 to 1
wr = trigsimp(wr, deep=True, recursive=True)
if not wr:
# The wronskian will be 0 iff the solutions are not linearly independent.
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"variation of parameters to " + str(eq) + " (Wronskian == 0)")
if len(gensols) != order:
raise NotImplementedError("Cannot find " + str(order) + \
" solutions to the homogeneous equation nessesary to apply " + \
"variation of parameters to " + str(eq) + " (number of terms != order)")
negoneterm = (-1)**(order)
for i in gensols:
psol += negoneterm*C.Integral(wronskian(filter(lambda x: x != i, \
gensols), x)*r[-1]/wr, x)*i/r[order]
negoneterm *= -1
if r.get('simplify', True):
psol = simplify(psol)
psol = trigsimp(psol, deep=True)
return Eq(f(x), gsol.rhs + psol)
def ode_separable(eq, func, order, match):
r"""
Solves separable 1st order differential equations.
This is any differential equation that can be written as
P(y)*dy/dx = Q(x). The solution can then just be found by
rearranging terms and integrating:
Integral(P(y), y) = Integral(Q(x), x). This hint uses separatevars()
as its back end, so if a separable equation is not caught by this
solver, it is most likely the fault of that function. seperatevars()
is smart enough to do most expansion and factoring necessary to
convert a separable equation F(x, y) into the proper form P(x)*Q(y).
The general solution is::
>>> from sympy import Function, dsolve, Eq, pprint
>>> from sympy.abc import x
>>> a, b, c, d, f = map(Function, ['a', 'b', 'c', 'd', 'f'])
>>> genform = Eq(a(x)*b(f(x))*f(x).diff(x), c(x)*d(f(x)))
>>> pprint(genform)
d
--(f(x))*a(x)*b(f(x)) = c(x)*d(f(x))
dx
>>> pprint(dsolve(genform, f(x), hint='separable_Integral'))
f(x)
/ /
| |
| b(y) | c(x)
| ---- dy = C1 + | ---- dx
| d(y) | a(x)
| |
/ /
**Example**
::
>>> from sympy import Function, dsolve, Eq
>>> from sympy.abc import x
>>> f = Function('f')
>>> pprint(dsolve(Eq(f(x)*f(x).diff(x) + x, 3*x*f(x)**2), f(x),
... hint='separable'))
/ 2 \ 2
-log\1 - 3*f (x)/ x
----------------- = C1 - --
6 2
**Reference**
- M. Tenenbaum & H. Pollard, "Ordinary Differential Equations",
Dover 1963, pp. 52
# indirect doctest
"""
x = func.args[0]
f = func.func
C1 = Symbol('C1')
r = match # {'m1':m1, 'm2':m2, 'y':y}
return Eq(C.Integral(r['m2']['coeff']*r['m2'][r['y']]/r['m1'][r['y']],
(r['y'], None, f(x))), C.Integral(-r['m1']['coeff']*r['m1'][x]/
r['m2'][x], x)+C1)
``` |
[
{
"content": "Repeat the code exactly as the original, including blank lines:\n```python\n# coding: utf-8\n\n\"\"\"\n Qc API\n\n Qc API # noqa: E501\n\n The version of the OpenAPI document: 3.0.0\n Contact: cloudsupport@telestream.net\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nf... | [
{
"content": "Repeat the code exactly as the original, including blank lines:\n<|memory_start|>```python\n# coding: utf-8\n\n\"\"\"\n Qc API\n\n Qc API # noqa: E501\n\n The version of the OpenAPI document: 3.0.0\n Contact: cloudsupport@telestream.net\n Generated by: https://openapi-generator.tec... | ```python
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.kag_size_test import KagSizeTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestKagSizeTest(unittest.TestCase):
"""KagSizeTest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test KagSizeTest
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.kag_size_test.KagSizeTest() # noqa: E501
if include_optional :
return KagSizeTest(
size = 56,
reject_on_error = True,
checked = True
)
else :
return KagSizeTest(
)
def testKagSizeTest(self):
"""Test KagSizeTest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
``` |
[
{
"content": "Repeat the code precisely as written (spacing intact):\n```python\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks, returnValue, Deferred\nfrom twisted.internet.task import deferLater\nfrom twisted.internet.threads import blockingCallFromThread\n\nfrom Trib... | [
{
"content": "Repeat the code precisely as written (spacing intact):\n<|memory_start|>```python\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import inlineCallbacks, returnValue, Deferred\nfrom twisted.internet.task import deferLater\nfrom twisted.internet.threads import blockingCallFromThr... | ```python
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
from twisted.internet.task import deferLater
from twisted.internet.threads import blockingCallFromThread
from Tribler.Test.Community.Trustchain.test_community import BaseTestTrustChainCommunity
from Tribler.Test.Community.Trustchain.test_trustchain_utilities import TrustChainTestCase
from Tribler.community.triblerchain.block import TriblerChainBlock
from Tribler.community.triblerchain.community import TriblerChainCommunity, PendingBytes, TriblerChainCommunityCrawler
from Tribler.community.trustchain.community import HALF_BLOCK, CRAWL
from Tribler.community.tunnel.routing import Circuit
from Tribler.dispersy.requestcache import IntroductionRequestCache
from Tribler.dispersy.tests.dispersytestclass import DispersyTestFunc
from Tribler.dispersy.util import blocking_call_on_reactor_thread
class TestPendingBytes(TrustChainTestCase):
"""
This class contains tests for the PendingBytes object
"""
def test_add_pending_bytes(self):
"""
Test adding to pending bytes
"""
pending_bytes = PendingBytes(20, 30)
self.assertTrue(pending_bytes.add(20, 30))
self.assertFalse(pending_bytes.add(-100, -100))
class TestTriblerChainCommunity(BaseTestTrustChainCommunity):
"""
Class that tests the TriblerChainCommunity on an integration level.
"""
@staticmethod
def set_expectation(node, req, up, down):
node.community.pending_bytes[req.community.my_member.public_key] = PendingBytes(down, up)
@blocking_call_on_reactor_thread
@inlineCallbacks
def create_nodes(self, *args, **kwargs):
nodes = yield DispersyTestFunc.create_nodes(self, *args, community_class=TriblerChainCommunity,
memory_database=False, **kwargs)
for outer in nodes:
for inner in nodes:
if outer != inner:
outer.send_identity(inner)
returnValue(nodes)
@blocking_call_on_reactor_thread
@inlineCallbacks
def test_cleanup_pending_bytes(self):
"""
Test cleaning of pending bytes
"""
node, = yield self.create_nodes(1)
node.community.pending_bytes['a'] = 1234
self.assertIn('a', node.community.pending_bytes)
node.community.cleanup_pending('a')
self.assertNotIn('a', node.community.pending_bytes)
@blocking_call_on_reactor_thread
@inlineCallbacks
def test_on_tunnel_remove(self):
"""
Test the on_tunnel_remove handler function for a circuit
"""
# Arrange
node, other = yield self.create_nodes(2)
tunnel_node = Circuit(long(0), 0)
tunnel_other = Circuit(long(0), 0)
tunnel_node.bytes_up = tunnel_other.bytes_down = 12 * 1024 * 1024
tunnel_node.bytes_down = tunnel_other.bytes_up = 14 * 1024 * 1024
# Act
node.call(node.community.on_tunnel_remove, None, None, tunnel_node, self._create_target(node, other))
other.call(other.community.on_tunnel_remove, None, None, tunnel_other, self._create_target(other, node))
yield deferLater(reactor, 5.1, lambda: None)
# Assert
_, signature_request = node.receive_message(names=[HALF_BLOCK]).next()
node.give_message(signature_request, other)
yield deferLater(reactor, 0.1, lambda: None)
_, signature_request = other.receive_message(names=[HALF_BLOCK]).next()
other.give_message(signature_request, node)
yield deferLater(reactor, 0.1, lambda: None)
self.assertBlocksInDatabase(node, 2)
self.assertBlocksInDatabase(other, 2)
self.assertBlocksAreEqual(node, other)
@blocking_call_on_reactor_thread
@inlineCallbacks
def test_on_tunnel_remove_small(self):
"""
Test the on_tunnel_remove handler function for a circuit
"""
# Arrange
node, other = yield self.create_nodes(2)
tunnel_node = Circuit(long(0), 0)
tunnel_other = Circuit(long(0), 0)
tunnel_node.bytes_up = tunnel_other.bytes_down = 1024
tunnel_node.bytes_down = tunnel_other.bytes_up = 2 * 1024
# Act
node.call(node.community.on_tunnel_remove, None, None, tunnel_node, self._create_target(node, other))
other.call(other.community.on_tunnel_remove, None, None, tunnel_other, self._create_target(other, node))
yield deferLater(reactor, 5.1, lambda: None)
# Assert
with self.assertRaises(StopIteration):
self.assertFalse(node.receive_message(names=[HALF_BLOCK]).next())
with self.assertRaises(StopIteration):
self.assertFalse(other.receive_message(names=[HALF_BLOCK]).next())
self.assertBlocksInDatabase(node, 0)
self.assertBlocksInDatabase(other, 0)
@blocking_call_on_reactor_thread
@inlineCallbacks
def test_on_tunnel_remove_append_pending(self):
"""
Test the on_tunnel_remove handler function for a circuit
"""
# Arrange
node, other = yield self.create_nodes(2)
tunnel_node = Circuit(long(0), 0)
tunnel_node.bytes_up = 12 * 1024 * 1024
tunnel_node.bytes_down = 14 * 1024 * 1024
# Act
node.call(node.community.on_tunnel_remove, None, None, tunnel_node, self._create_target(node, other))
node.call(node.community.on_tunnel_remove, None, None, tunnel_node, self._create_target(node, other))
yield deferLater(reactor, 5.1, lambda: None)
self.assertEqual(node.community.pending_bytes[other.community.my_member.public_key].up, 2*tunnel_node.bytes_up)
self.assertEqual(node.community.pending_bytes[other.community.my_member.public_key].down,
2*tunnel_node.bytes_down)
def test_receive_request_invalid(self):
"""
Test the community to receive a request message.
"""
# Arrange
node, other = self.create_nodes(2)
target_other = self._create_target(node, other)
TestTriblerChainCommunity.set_expectation(other, node, 10, 5)
transaction = {"up": 10, "down": 5}
node.call(node.community.sign_block, target_other, other.my_member.public_key, transaction)
_, block_req = other.receive_message(names=[HALF_BLOCK]).next()
# Act
# construct faked block
block = block_req.payload.block
block.transaction["up"] += 10
block.transaction["total_up"] = block.transaction["up"]
block_req = node.community.get_meta_message(HALF_BLOCK).impl(
authentication=tuple(),
distribution=(node.community.claim_global_time(),),
destination=(target_other,),
payload=(block,))
other.give_message(block_req, node)
# Assert
self.assertBlocksInDatabase(other, 0)
self.assertBlocksInDatabase(node, 1)
with self.assertRaises(StopIteration):
# No signature responses, or crawl requests should have been sent
node.receive_message(names=[HALF_BLOCK, CRAWL]).next()
def test_receive_request_twice(self):
"""
Test the community to receive a request message twice.
"""
# Arrange
node, other = self.create_nodes(2)
target_other = self._create_target(node, other)
transaction = {"up": 10, "down": 5}
TestTriblerChainCommunity.set_expectation(node, other, 50, 50)
TestTriblerChainCommunity.set_expectation(other, node, 50, 50)
TestTriblerChainCommunity.create_block(node, other, target_other, transaction)
# construct faked block
block = node.call(node.community.persistence.get_latest, node.my_member.public_key)
block_req = node.community.get_meta_message(HALF_BLOCK).impl(
authentication=tuple(),
distribution=(node.community.claim_global_time(),),
destination=(target_other,),
payload=(block,))
other.give_message(block_req, node)
# Assert
self.assertBlocksInDatabase(other, 2)
self.assertBlocksInDatabase(node, 2)
with self.assertRaises(StopIteration):
# No signature responses, or crawl requests should have been sent
node.receive_message(names=[HALF_BLOCK, CRAWL]).next()
def test_receive_request_too_much(self):
"""
Test the community to receive a request that claims more than we are prepared to sign
"""
# Arrange
node, other = self.create_nodes(2)
target_other = self._create_target(node, other)
TestTriblerChainCommunity.set_expectation(other, node, 3, 3)
transaction = {"up": 10, "down": 5}
node.call(node.community.sign_block, target_other, other.my_member.public_key, transaction)
# Act
other.give_message(other.receive_message(names=[HALF_BLOCK]).next()[1], node)
# Assert
self.assertBlocksInDatabase(other, 1)
self.assertBlocksInDatabase(node, 1)
with self.assertRaises(StopIteration):
# No signature responses, or crawl requests should have been sent
node.receive_message(names=[HALF_BLOCK, CRAWL]).next()
def test_receive_request_unknown_pend(self):
"""
Test the community to receive a request that claims about a peer we know nothing about
"""
# Arrange
node, other = self.create_nodes(2)
target_other = self._create_target(node, other)
transaction = {"up": 10, "down": 5}
node.call(node.community.sign_block, target_other, other.my_member.public_key, transaction)
# Act
other.give_message(other.receive_message(names=[HALF_BLOCK]).next()[1], node)
# Assert
self.assertBlocksInDatabase(other, 1)
self.assertBlocksInDatabase(node, 1)
with self.assertRaises(StopIteration):
# No signature responses, or crawl requests should have been sent
node.receive_message(names=[HALF_BLOCK, CRAWL]).next()
def test_block_values(self):
"""
If a block is created between two nodes both
should have the correct total_up and total_down of the signature request.
"""
# Arrange
node, other = self.create_nodes(2)
TestTriblerChainCommunity.set_expectation(node, other, 50, 50)
TestTriblerChainCommunity.set_expectation(other, node, 50, 50)
transaction = {"up": 10, "down": 5}
# Act
TestTriblerChainCommunity.create_block(node, other, self._create_target(node, other), transaction)
# Assert
block = node.call(TriblerChainBlock.create, transaction, node.community.persistence,
node.community.my_member.public_key)
self.assertEqual(20, block.transaction["total_up"])
self.assertEqual(10, block.transaction["total_down"])
block = other.call(TriblerChainBlock.create, transaction, other.community.persistence,
other.community.my_member.public_key)
self.assertEqual(15, block.transaction["total_up"])
self.assertEqual(15, block.transaction["total_down"])
def test_block_values_after_request(self):
"""
After a request is sent, a node should update its totals.
"""
# Arrange
node, other = self.create_nodes(2)
transaction = {"up": 10, "down": 5}
node.call(node.community.sign_block, self._create_target(node, other), other.my_member.public_key, transaction)
# Assert
block = node.call(TriblerChainBlock.create, transaction, node.community.persistence,
node.community.my_member.public_key)
self.assertEqual(20, block.transaction["total_up"])
self.assertEqual(10, block.transaction["total_down"])
def test_crawler_on_introduction_received(self):
"""
Test the crawler takes a step when an introduction is made by the walker
"""
# Arrange
TriblerChainCommunityCrawler.CrawlerDelay = 10000000
crawler = DispersyTestFunc.create_nodes(self, 1, community_class=TriblerChainCommunityCrawler,
memory_database=False)[0]
node, = self.create_nodes(1)
node._community.cancel_pending_task("take fast steps")
node._community.cancel_pending_task("take step")
node._community.cancel_pending_task("start_walking")
target_node_from_crawler = self._create_target(node, crawler)
# when we call on_introduction request it is going to forward the argument to it's super implementation.
# Dispersy will error if it does not expect this, and the target code will not be tested. So we pick at
# dispersy's brains to make it accept the intro response.
intro_request_info = crawler.call(IntroductionRequestCache, crawler.community, None)
intro_response = node.create_introduction_response(target_node_from_crawler, node.lan_address, node.wan_address,
node.lan_address, node.wan_address,
u"unknown", False, intro_request_info.number)
intro_response._candidate = target_node_from_crawler
crawler.community.request_cache._identifiers[
crawler.community.request_cache._create_identifier(intro_request_info.number, u"introduction-request")
] = intro_request_info
# and we don't actually want to send the crawl request since the counter party is fake, just count if it is run
counter = [0]
def on_crawl_request(cand, pk, sequence_number=None):
# Ignore live edge request
if sequence_number != -1:
counter[0] += 1
crawler.community.send_crawl_request = on_crawl_request
# Act
crawler.call(crawler.community.on_introduction_response, [intro_response])
# Assert
self.assertEqual(counter[0], 1)
def test_get_statistics_no_blocks(self):
"""
Test the get_statistics method where last block is none
"""
node, = self.create_nodes(1)
statistics = node.community.get_statistics()
assert isinstance(statistics, dict), type(statistics)
assert len(statistics) > 0
def test_get_statistics_with_previous_block(self):
"""
Test the get_statistics method where a last block exists
"""
# Arrange
node, other = self.create_nodes(2)
transaction = {"up": 10, "down": 5}
TestTriblerChainCommunity.create_block(node, other, self._create_target(node, other), transaction)
# Get statistics
statistics = node.community.get_statistics()
assert isinstance(statistics, dict), type(statistics)
assert len(statistics) > 0
def test_get_statistics_for_not_self(self):
"""
Test the get_statistics method where a last block exists
"""
# Arrange
node, other = self.create_nodes(2)
transaction = {"up": 10, "down": 5}
TestTriblerChainCommunity.create_block(node, other, self._create_target(node, other), transaction)
# Get statistics
statistics = node.community.get_statistics(public_key=other.community.my_member.public_key)
assert isinstance(statistics, dict), type(statistics)
assert len(statistics) > 0
def test_get_trust(self):
"""
Test that the trust nodes have for each other is the upload + the download total of all blocks.
"""
# Arrange
node, other = self.create_nodes(2)
transaction = {'up': 10, 'down': 5, 'total_up': 10, 'total_down': 5}
TestTriblerChainCommunity.create_block(node, other, self._create_target(node, other), transaction)
TestTriblerChainCommunity.create_block(other, node, self._create_target(other, node), transaction)
# Get statistics
node_trust = blockingCallFromThread(reactor, node.community.get_trust, other.community.my_member)
other_trust = blockingCallFromThread(reactor, other.community.get_trust, node.community.my_member)
self.assertEqual(node_trust, 15)
self.assertEqual(other_trust, 15)
def test_get_default_trust(self):
"""
Test that the trust between nodes without blocks is 1.
"""
# Arrange
node, other = self.create_nodes(2)
# Get statistics
node_trust = blockingCallFromThread(reactor, node.community.get_trust, other.community.my_member)
other_trust = blockingCallFromThread(reactor, other.community.get_trust, node.community.my_member)
self.assertEqual(node_trust, 1)
self.assertEqual(other_trust, 1)
def test_get_node_empty(self):
"""
Check whether get_node returns the correct node if no past data is given.
"""
node, = self.create_nodes(1)
self.assertEqual({"total_up": 3, "total_down": 5, "total_neighbors": 2},
node.community.get_node("test", [], 3, 5, 2))
def test_get_node_maximum(self):
"""
Check whether get_node returns the maximum of total_up and total_down.
"""
node, = self.create_nodes(1)
nodes = {"test": {"total_up": 1, "total_down": 10, "total_neighbors": 2}}
self.assertEqual({"total_up": 3, "total_down": 10, "total_neighbors": 2},
node.community.get_node("test", nodes, 3, 5, 1))
def test_get_node_request_total_traffic(self):
"""
Check whether get_node requires a total_traffic method if no total_up and total_down is given.
"""
node, = self.create_nodes(1)
node.community.persistence.total_traffic = lambda _: [5, 6, 2]
self.assertEqual({"total_up": 5, "total_down": 6, "total_neighbors": 2},
node.community.get_node('74657374', []))
def test_format_edges(self):
"""
Verify whether format_edges returns the correct nodes and edges
"""
node, = self.create_nodes(1)
edge_list = [
# [pk_a, pk_b, a->b, b->a, a_up, a_down, a_neighbors]
("aa", "bb", 10, 15, 10, 15, 1),
("bb", "cc", 8, 3, 23, 13, 2)
]
node.community.persistence.total_traffic = lambda pk: (0, 0, 1)
expected_nodes = {
"aa": {"total_up": 10, "total_down": 15, "total_neighbors": 1},
"bb": {"total_up": 23, "total_down": 13, "total_neighbors": 2},
"cc": {"total_up": 0, "total_down": 0, "total_neighbors": 1}
}
expected_edges = {
"aa": [("bb", 10, 15)],
"bb": [("aa", 15, 10), ("cc", 8, 3)],
"cc": [("bb", 3, 8)]
}
nodes, edges = node.community.format_edges(edge_list, "aa")
self.assertDictEqual(nodes, expected_nodes)
self.assertDictEqual(expected_edges, edges)
def test_build_graph_no_edges(self):
"""
Verify whether get_graph returns a correct result if no edges are present.
"""
node, = self.create_nodes(1)
nodes = {
"aa": {"total_up": 0, "total_down": 0, "total_neighbors": 0}
}
edges = {}
expected_nodes = [
{"public_key": "aa", "total_up": 0, "total_down": 0, "total_neighbors": 0, "score": 0.5}
]
expected_edges = []
actual_nodes, actual_edges = node.community.build_graph((nodes, edges), "aa", 2, 0, [])
self.assertListEqual(expected_nodes, actual_nodes)
self.assertListEqual(expected_edges, actual_edges)
def test_build_graph(self):
"""
Verify whether get_graph returns a correct list of nodes and edges
"""
node, = self.create_nodes(1)
nodes = {
"aa": {"total_up": 0, "total_down": 0, "total_neighbors": 2},
"bb": {"total_up": 1, "total_down": 1, "total_neighbors": 5},
"cc": {"total_up": 2, "total_down": 2, "total_neighbors": 2},
"dd": {"total_up": 3, "total_down": 3, "total_neighbors": 1},
"ee": {"total_up": 4, "total_down": 4, "total_neighbors": 1},
"ff": {"total_up": 5, "total_down": 5, "total_neighbors": 1}
}
edges = {
"aa": [("bb", 0, 0), ("cc", 0, 0), ("ff", 0, 0)],
"bb": [("aa", 0, 0), ("cc", 0, 0), ("ee", 0, 0), ("ff", 0, 0), ("dd", 0, 0)],
"cc": [("aa", 0, 0), ("bb", 0, 0)],
}
expected_nodes = [
{"public_key": "aa", "total_up": 0, "total_down": 0, "total_neighbors": 2, "score": 0.5},
{"public_key": "bb", "total_up": 1, "total_down": 1, "total_neighbors": 5, "score": 0.5},
{"public_key": "cc", "total_up": 2, "total_down": 2, "total_neighbors": 2, "score": 0.5},
]
expected_edges = [
{"from": "aa", "to": "bb", "amount": 0},
{"from": "bb", "to": "aa", "amount": 0},
{"from": "aa", "to": "cc", "amount": 0},
{"from": "cc", "to": "aa", "amount": 0},
{"from": "bb", "to": "cc", "amount": 0},
{"from": "cc", "to": "bb", "amount": 0},
]
actual_nodes, actual_edges = node.community.build_graph((nodes, edges), "aa", 1, 1, ["cc"])
self.assertItemsEqual(expected_nodes, actual_nodes)
self.assertItemsEqual(expected_edges, actual_edges)
def test_get_graph_circular(self):
"""
Verify whether get_graph returns a correct list of nodes and edges when of circular form
"""
node, = self.create_nodes(1)
nodes = {
"aa": {"total_up": 0, "total_down": 0, "total_neighbors": 2},
"bb": {"total_up": 1, "total_down": 1, "total_neighbors": 5},
"cc": {"total_up": 2, "total_down": 2, "total_neighbors": 2},
}
edges = {
"aa": [("bb", 0, 0), ("cc", 0, 0)],
"bb": [("aa", 0, 0), ("cc", 0, 0)],
"cc": [("aa", 0, 0), ("bb", 0, 0)]
}
expected_nodes = [
{"public_key": "aa", "total_up": 0, "total_down": 0, "total_neighbors": 2, "score": 0.5},
{"public_key": "bb", "total_up": 1, "total_down": 1, "total_neighbors": 5, "score": 0.5},
{"public_key": "cc", "total_up": 2, "total_down": 2, "total_neighbors": 2, "score": 0.5},
]
expected_edges = [
{"from": "aa", "to": "bb", "amount": 0},
{"from": "bb", "to": "aa", "amount": 0},
{"from": "aa", "to": "cc", "amount": 0},
{"from": "cc", "to": "aa", "amount": 0},
{"from": "bb", "to": "cc", "amount": 0},
{"from": "cc", "to": "bb", "amount": 0},
]
def verify_result((actual_nodes, actual_edges)):
self.assertItemsEqual(expected_nodes, actual_nodes)
self.assertItemsEqual(expected_edges, actual_edges)
node, = self.create_nodes(1)
node.community.persistence.get_graph_edges = lambda _1, _2: Deferred()
node.community.format_edges = lambda _1, _2: (nodes, edges)
d = node.community.get_graph("aa", 1, 2, [])
d.addCallback(verify_result)
d.callback("test")
return d
def test_get_graph(self):
"""
Verify whether the get_graph method adds the two callbacks correctly
"""
test_result = "test_1"
test_public_key = "test_2"
test_neighbor_level = "test_3"
test_max_neighbors = "test_4"
test_mandatory_nodes = "test_5"
test_nodes_edges = ("test_6", "test_7")
test_final_result = "test_8"
def mock_format(result, public_key):
self.assertEqual(result, test_result)
self.assertEqual(public_key, test_public_key)
return test_nodes_edges
def mock_build((nodes, edges), public_key, neighbor_level, max_neighbors, mandatory_nodes):
self.assertEqual(nodes, test_nodes_edges[0])
self.assertEqual(edges, test_nodes_edges[1])
self.assertEqual(public_key, test_public_key)
self.assertEqual(neighbor_level, test_neighbor_level)
self.assertEqual(max_neighbors, test_max_neighbors)
self.assertEqual(mandatory_nodes, test_mandatory_nodes)
return test_final_result
node, = self.create_nodes(1)
node.community.persistence.get_graph_edges = lambda _1, _2: Deferred()
node.community.format_edges = mock_format
node.community.build_graph = mock_build
d = node.community.get_graph(test_public_key, test_neighbor_level, test_max_neighbors, test_mandatory_nodes)
d.addCallback(self.assertEqual, test_final_result)
d.callback(test_result)
return d
``` |
[
{
"content": "Recreate the entire code block with identical formatting:\n```python\n\"\"\"\nParse a GTO object\n\"\"\"\n\nimport os\nimport sys\nimport argparse\n\nfrom roblib import bcolors\nimport json\n\n\ndef list_keys(gto, verbose=False):\n \"\"\"\n List the primary keys in the patric file\n :para... | [
{
"content": "Recreate the entire code block with identical formatting:\n<|memory_start|>```python\n\"\"\"\nParse a GTO object\n\"\"\"\n\nimport os\nimport sys\nimport argparse\n\nfrom roblib import bcolors\nimport json\n\n\ndef list_keys(gto, verbose=False):\n \"\"\"\n List the primary keys in the patric... | ```python
"""
Parse a GTO object
"""
import os
import sys
import argparse
from roblib import bcolors
import json
def list_keys(gto, verbose=False):
"""
List the primary keys in the patric file
:param gto: the json gto
:param verbose: more output
:return:
"""
print("{}".format("\n".join(gto.keys())))
def dump_json(gto, k, verbose=False):
"""
Print out the json representation of some data
:param gto: the json gto
:param k: the key to dump (none for everything)
:param verbose: more output
:return:
"""
if k:
if k in gto:
print(json.dumps(gto[k], indent=4))
else:
sys.stderr.write(f"{bcolors.RED}ERROR: {k} not found.{bcolors.ENDC}\n")
else:
print(json.dumps(gto, indent=4))
def feature_tbl(gto, verbose=False):
"""
Print a tab separated feature table
:param gto: the json gto
:param verbose: more output
:return:
"""
for peg in gto['features']:
if 'location' not in peg:
sys.stderr.write(f"{bcolors.RED}Error: no location found\n{bcolors.PINK}{peg}{bcolors.ENDC}\n")
continue
locs = []
for l in peg['location']:
start = int(l[1])
if l[2] == '+':
stop = (start + int(l[3])) - 1
elif l[2] == '-':
start = (start - int(l[3])) + 1
stop = int(l[1])
else:
sys.stderr.write(f"{bcolors.RED}Error: Don't know location l[2]\n{bcolors.ENDC}")
continue
locs.append(f"{l[0]} {start} - {stop} ({l[2]})")
data = [
peg['id'],
peg['function'],
"; ".join(locs)
]
print("\t".join(data))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot a heatmap")
parser.add_argument('-f', help='gto file', required=True)
parser.add_argument('-l', help='list the primary keys and exit', action='store_true')
parser.add_argument('-d', help='dump some part of the json object', action='store_true')
parser.add_argument('-p', help='print protein feature table', action='store_true')
parser.add_argument('-k', help='json primary key (e.g. for dumping, etc)')
parser.add_argument('-o', help='output file')
parser.add_argument('-v', help='verbose output', action='store_true')
args = parser.parse_args()
gto = json.load(open(args.f, 'r'))
if args.l:
list_keys(gto, args.v)
sys.exit(0)
if args.d:
dump_json(gto, args.k, args.v)
sys.exit(0)
if args.p:
feature_tbl(gto, args.v)
sys.exit(0)
sys.stderr.write(f"{bcolors.RED}ERROR: You did not specify a command to run{bcolors.ENDC}\n")
``` |
[
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n```python\nvar.nexus_allowAllDigitNames = True # put it somewhere else\nvar.doCheckForDuplicateSequences = False\n\nt = var.trees[0]\na = var.alignments[0]\nt.data = Data()\n\nt.model.dump()\n\nprint('\\nAfter optimizing,... | [
{
"content": "Replicate the code snippet exactly, without paraphrasing or reformatting:\n<|memory_start|>```python\nvar.nexus_allowAllDigitNames = True # put it somewhere else\nvar.doCheckForDuplicateSequences = False\n\nt = var.trees[0]\na = var.alignments[0]\nt.data = Data()\n\nt.model.dump()\n\nprint('\\nA... | ```python
var.nexus_allowAllDigitNames = True # put it somewhere else
var.doCheckForDuplicateSequences = False
t = var.trees[0]
a = var.alignments[0]
t.data = Data()
t.model.dump()
print('\nAfter optimizing, the composition of the model for the non-root nodes is:')
print(t.model.parts[0].comps[0].val)
print('...and the composition of the root model is:')
print(t.model.parts[0].comps[1].val)
t.write()
func.reseedCRandomizer(os.getpid())
# The char "symbols", AAs in this case, are available as a.symbols; that is why
# I gave a name to var.alignments[0]. Also available as
# d.parts[partNum].symbols, so d.parts[0].symbols are also 'arndcqeghilkmfpstwyv'
print(a.symbols)
counts = [0] * 2
for rep in range(100):
ancSt = t.ancestralStateDraw()
for i in range(2):
ch = a.symbols[i] # '01'
cnt = ancSt.count(ch)
counts[i] += cnt
mySum = float(sum(counts))
print("\nsymbol optimized draws")
for i in range(2):
print(" %s %.5f %.4f" % (a.symbols[i], t.model.parts[0].comps[1].val[i], counts[i]/mySum))
#calculate predicted OGT according to Zeldovich
f_ivywrel = 0
f_ivywrel = t.model.parts[0].comps[1].val[1]
print("F(IVYWREL) = " + str(f_ivywrel))
print("T_opt estimate according to Zeldovich: " + str(937.0*float(f_ivywrel) - 335.0))
``` |
[
{
"content": "Here is the script:\n```python\n\"\"\"\nAs mat2_p2D.py, but the boundary conditions are different.\nHere, u=0 on x=0 and u=1 on x=1, while du/dn=0 on y=0 and y=1.\nThis yields a solution u(x,y)=x, regardless of the values of k.\n\"\"\"\n\nfrom dolfin import *\nimport sys, math, numpy\n\nmesh = Uni... | [
{
"content": "Here is the script:\n<|memory_start|>```python\n\"\"\"\nAs mat2_p2D.py, but the boundary conditions are different.\nHere, u=0 on x=0 and u=1 on x=1, while du/dn=0 on y=0 and y=1.\nThis yields a solution u(x,y)=x, regardless of the values of k.\n\"\"\"\n\nfrom dolfin import *\nimport sys, math, num... | ```python
"""
As mat2_p2D.py, but the boundary conditions are different.
Here, u=0 on x=0 and u=1 on x=1, while du/dn=0 on y=0 and y=1.
This yields a solution u(x,y)=x, regardless of the values of k.
"""
from dolfin import *
import sys, math, numpy
mesh = UnitSquareMesh(4, 6)
# Define a MeshFunction over two subdomains
subdomains = MeshFunction('size_t', mesh, 2)
class Omega0(SubDomain):
def inside(self, x, on_boundary):
return True if x[1] <= 0.5 else False
class Omega1(SubDomain):
def inside(self, x, on_boundary):
return True if x[1] >= 0.5 else False
# Mark subdomains with numbers 0 and 1
subdomain0 = Omega0()
subdomain0.mark(subdomains, 0)
subdomain1 = Omega1()
subdomain1.mark(subdomains, 1)
V0 = FunctionSpace(mesh, 'DG', 0)
k = Function(V0)
print 'mesh:', mesh
print 'subdomains:', subdomains
print 'k:', k
# Loop over all cell numbers, find corresponding
# subdomain number and fill cell value in k
k_values = [1.5, 50] # values of k in the two subdomains
for cell_no in range(len(subdomains.array())):
subdomain_no = subdomains.array()[cell_no]
k.vector()[cell_no] = k_values[subdomain_no]
# Much more efficient vectorized code
# (subdomains.array() has elements of type uint32, which
# must be transformed to plain int for numpy.choose to work)
help = numpy.asarray(subdomains.array(), dtype=numpy.int32)
k.vector()[:] = numpy.choose(help, k_values)
print 'k degree of freedoms:', k.vector().array()
#plot(subdomains, title='subdomains')
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define Dirichlet conditions for x=0 boundary
u_L = Constant(0)
class LeftBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0]) < tol
Gamma_0 = DirichletBC(V, u_L, LeftBoundary())
# Define Dirichlet conditions for x=1 boundary
u_R = Constant(1)
class RightBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0] - 1) < tol
Gamma_1 = DirichletBC(V, u_R, RightBoundary())
bcs = [Gamma_0, Gamma_1]
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(0)
a = k*inner(nabla_grad(u), nabla_grad(v))*dx
L = f*v*dx
# Compute solution
u = Function(V)
solve(a == L, u, bcs)
# Dump solution to the screen
u_nodal_values = u.vector()
u_array = u_nodal_values.array()
coor = mesh.coordinates()
for i in range(len(u_array)):
print 'u(%8g,%8g) = %g' % (coor[i][0], coor[i][1], u_array[i])
#interactive()
``` |
[
{
"content": "Output the full code verbatim (no extra comments):\n```python\nfrom ctypescrypto import pyver\nfrom ctypescrypto.oid import Oid\nfrom ctypescrypto.ec import create\nfrom base64 import b16decode\nfrom subprocess import Popen, PIPE\nimport unittest\n\ndef dump_key(key):\n \"\"\" Convert key into ... | [
{
"content": "Output the full code verbatim (no extra comments):\n<|memory_start|>```python\nfrom ctypescrypto import pyver\nfrom ctypescrypto.oid import Oid\nfrom ctypescrypto.ec import create\nfrom base64 import b16decode\nfrom subprocess import Popen, PIPE\nimport unittest\n\ndef dump_key(key):\n \"\"\" C... | ```python
from ctypescrypto import pyver
from ctypescrypto.oid import Oid
from ctypescrypto.ec import create
from base64 import b16decode
from subprocess import Popen, PIPE
import unittest
def dump_key(key):
""" Convert key into printable form using openssl utility
Used to compare keys which can be stored in different
format by different OpenSSL versions
"""
return Popen(["openssl","pkey","-text","-noout"],stdin=PIPE,stdout=PIPE).communicate(key)[0]
def dump_pub_key(key):
""" Convert key into printable form using openssl utility
Used to compare keys which can be stored in different
format by different OpenSSL versions
"""
return Popen(["openssl","pkey","-text_pub","-noout"],stdin=PIPE,stdout=PIPE).communicate(key)[0]
class TestEcCreation(unittest.TestCase):
ec1priv=b"""-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgKnG6neqZvB98EEuuxnHs
fv+L/5abuNNG20wzUqRpncOhRANCAARWKXWeUZ6WiCKZ2kHx87jmJyx0G3ZB1iQC
+Gp2AJYswbQPhGPigKolzIbZYfwnn7QOca6N8QDhPAn3QQK8trZI
-----END PRIVATE KEY-----
"""
bigkey=b"""-----BEGIN PRIVATE KEY-----
MIGEAgEAMBAGByqGSM49AgEGBSuBBAAKBG0wawIBAQQgAAAAAAAAAAAAAAAAAAAA
AUVRIxlQt1/EQC2hcy/Jvr6hRANCAASRZsKJufkF5V+ePfn2nX81a0oiCV+JT0cV
cUqktWYGr/GB65Zr5Ky1z/nha2bYCb6U4hTwbJP9CRCZr5hJklXn
-----END PRIVATE KEY-----
"""
def test_keyone(self):
key=create(Oid("secp256k1"),b16decode("2A71BA9DEA99BC1F7C104BAEC671EC7EFF8BFF969BB8D346DB4C3352A4699DC3",True))
out=key.exportpriv()
if pyver > 2:
out=out.encode("ascii")
self.assertEqual(dump_key(out),dump_key(self.ec1priv))
if pyver == 2:
self.assertEqual(str(key),dump_pub_key(self.ec1priv))
else:
self.assertEqual(str(key).encode("ascii"),dump_pub_key(self.ec1priv))
def test_bignum(self):
keyval=b'\xff'*32
key=create(Oid("secp256k1"),keyval)
keyblob = key.exportpriv()
if pyver > 2:
keyblob = keyblob.encode("ascii")
self.assertEqual(dump_key(keyblob),dump_key(self.bigkey))
keyblob2 = str(key)
if pyver > 2:
keyblob2 = keyblob2.encode('ascii')
self.assertEqual(keyblob2,dump_pub_key(self.bigkey))
if __name__ == "__main__":
unittest.main()
``` |
[
{
"content": "Here is some code:\n```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code gen... | [
{
"content": "Here is some code:\n<|memory_start|>```python\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information... | ```python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class DisksOperations(object):
"""DisksOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2018-04-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-04-01"
self.config = config
def _create_or_update_initial(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(disk, 'Disk')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Disk', response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk
operation.
:type disk: ~azure.mgmt.compute.v2018_04_01.models.Disk
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Disk or
ClientRawResponse<Disk> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.Disk]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.Disk]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def _update_initial(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(disk, 'DiskUpdate')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Disk', response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def update(
self, resource_group_name, disk_name, disk, custom_headers=None, raw=False, polling=True, **operation_config):
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk
operation.
:type disk: ~azure.mgmt.compute.v2018_04_01.models.DiskUpdate
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns Disk or
ClientRawResponse<Disk> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.Disk]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.Disk]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def get(
self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Disk or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2018_04_01.models.Disk or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Disk', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def _delete_initial(
self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, disk_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'}
def list_by_resource_group(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Disk
:rtype:
~azure.mgmt.compute.v2018_04_01.models.DiskPaged[~azure.mgmt.compute.v2018_04_01.models.Disk]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'}
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Lists all the disks under a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of Disk
:rtype:
~azure.mgmt.compute.v2018_04_01.models.DiskPaged[~azure.mgmt.compute.v2018_04_01.models.Disk]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.DiskPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.DiskPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'}
def _grant_access_initial(
self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, **operation_config):
grant_access_data = models.GrantAccessData(access=access, duration_in_seconds=duration_in_seconds)
# Construct URL
url = self.grant_access.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(grant_access_data, 'GrantAccessData')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def grant_access(
self, resource_group_name, disk_name, access, duration_in_seconds, custom_headers=None, raw=False, polling=True, **operation_config):
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param access: Possible values include: 'None', 'Read'
:type access: str or
~azure.mgmt.compute.v2018_04_01.models.AccessLevel
:param duration_in_seconds: Time duration in seconds until the SAS
access expires.
:type duration_in_seconds: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns AccessUri or
ClientRawResponse<AccessUri> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.AccessUri]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.AccessUri]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
access=access,
duration_in_seconds=duration_in_seconds,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('AccessUri', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'}
def _revoke_access_initial(
self, resource_group_name, disk_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.revoke_access.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'diskName': self._serialize.url("disk_name", disk_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 202]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def revoke_access(
self, resource_group_name, disk_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created.
The name can't be changed after the disk is created. Supported
characters for the name are a-z, A-Z, 0-9 and _. The maximum name
length is 80 characters.
:type disk_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns OperationStatusResponse
or ClientRawResponse<OperationStatusResponse> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.compute.v2018_04_01.models.OperationStatusResponse]]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
raw_result = self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'}
``` |
[
{
"content": "Here is a code file:\n```python\nimport tensorflow as tf\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport math\n\nnp.random.seed(1234)\nrandom.seed(1234)\n\nplt.switch_backend(\"TkAgg\")\n\ndef plotScatter(points, color):\n xs = [x[0] for x in point... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport tensorflow as tf\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nimport math\n\nnp.random.seed(1234)\nrandom.seed(1234)\n\nplt.switch_backend(\"TkAgg\")\n\ndef plotScatter(points, color):\n xs = [x[0... | ```python
import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
import math
np.random.seed(1234)
random.seed(1234)
plt.switch_backend("TkAgg")
def plotScatter(points, color):
xs = [x[0] for x in points]
ys = [y[1] for y in points]
plt.scatter(xs, ys, c=color)
def plot_weights(weights, center, color):
plot_centroid(center)
n = np.array([weights[0] * center[0] + weights[1] * center[1],
-weights[0],
-weights[1]])
byas = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]
plt.plot([-1.0, 1.0], [-1*Xcoef + byas, Xcoef + byas], '{}-'.format(color))
print("B: " + str(byas))
print("XCoef: " + str(Xcoef))
def plot_centroid(centroid):
plt.plot(centroid[0], centroid[1], markersize=10, marker='x', color='g', mew=5)
def plot_incorrect(point):
plt.plot(point[0], point[1], markersize=5, marker='x', color='r', mew=5)
def generateChevronData():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x >= y and x <= -y:
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_split_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x < 25 and x > -25 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_clumps():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, int(totalPoints/2.0)):
x = random.randint(xBounds[0], 0)
y = random.randint(yBounds[0], 0)
if -x - 30 < y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
for i in range(0, int(totalPoints/2.0)):
x = random.randint(0, xBounds[1])
y = random.randint(0, yBounds[1])
if -x + 30 > y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
return np.array(points), np.array(targets)
def generate_rectangle_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if np.abs(x) < 15 and np.abs(y) < 15 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def sigmoid(phi):
return 1.0/(1.0 + tf.exp(-phi))
points, out = generate_rectangle_data()#generateChevronData()#generate_clumps()#generate_split_data()#
in_size = 2
out_size = 1
num_centroids = 1
num_outputs = 1
inputs = tf.placeholder('float64', [in_size])
targets = tf.placeholder('float64', [out_size])
centroids = tf.Variable(np.random.uniform(low=-1.0, high=1.0, size=(num_centroids, in_size)))
betas = tf.Variable(np.repeat(1.0, num_centroids))
hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_centroids, in_size)))
output_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_outputs, num_centroids + 1)))
input_by_plane = lambda x: tf.subtract(inputs, x)
transformed_by_points = tf.map_fn(input_by_plane, centroids)
# Peform Computation
prob = tf.reduce_sum(tf.multiply(transformed_by_points, hidden_weights), 1)
square_diff = lambda c: tf.reduce_sum(tf.pow(tf.subtract(inputs, c), 2.0))
g = tf.exp(-1.0 * tf.multiply(betas, tf.map_fn(square_diff, centroids)))
hidden_out = sigmoid(tf.multiply(g, prob))#tf.add(0.5 * (1 - g), tf.multiply(g, prob))
#gated = tf.multiply(g, prob)
#hidden_out = sigmoid(gated)
hidden_out_prime = tf.concat([[1.0], hidden_out], 0)
output = sigmoid(tf.matmul(tf.transpose(tf.expand_dims(hidden_out_prime, 1)), tf.transpose(output_weights)))
errors = tf.pow(tf.subtract(tf.expand_dims(targets, 1), output), 2.0)
error = tf.reduce_sum(errors)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
clip_op_betas = tf.assign(betas, tf.clip_by_value(betas, 0, np.infty))
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
for e in range(10000):
for d in range(len(points)):
session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})
session.run(clip_op_betas)
if e % 10 == 0:
err = 0
for d in range(len(points)):
err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})
#print(session.run(prob, feed_dict={inputs: points[d], targets: [out[d]]}))
#print(session.run(g, feed_dict={inputs: points[d], targets: [out[d]]}))
print(err)
print(session.run(betas))
incorrect = []
for d in range(len(points)):
o = session.run(output, feed_dict={inputs: points[d], targets: [out[d]]})
if not int(round(o[0,0])) == out[d]:
incorrect.append(points[d])
centroids = session.run(centroids)
betas = session.run(betas)
boundarys = session.run(hidden_weights)
# Plot points on graph
c1 = []
c2 = []
for i in range(0, len(points)):
if out[i] == 0:
c1.append(points[i])
else:
c2.append(points[i])
print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
plotScatter(c1,'y')
plotScatter(c2, 'b')
for centroid in centroids:
plot_centroid(centroid)
for i in range(len(boundarys)):
plot_weights(boundarys[i], centroids[i], 'g')
#for plane in boundarys:
# plot_weights(boundarys, 'g')
for point in incorrect:
plot_incorrect(point)
#plot_weights(final_gate, 'g')
plt.gca().set_aspect('equal')
plt.xlim(xmin=-1.5, xmax=1.5)
plt.ylim(ymin=-1.5, ymax=1.5)
plt.show()
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2015-10-05\n@author: Zhixiong Zhao\n\"\"\"\n\nimport __init__\nfrom HHFormat import *\nimport molecule.HHMolecule \nimport molecule.HHAtom\nimport molecule.HHResidue\nimport molecule.HHBond\nimport geometry.HHPoi... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2015-10-05\n@author: Zhixiong Zhao\n\"\"\"\n\nimport __init__\nfrom HHFormat import *\nimport molecule.HHMolecule \nimport molecule.HHAtom\nimport molecule.HHResidue\nimport molecule.HHBond\nimpor... | ```python
# -*- coding: utf-8 -*-
"""
Created on 2015-10-05
@author: Zhixiong Zhao
"""
import __init__
from HHFormat import *
import molecule.HHMolecule
import molecule.HHAtom
import molecule.HHResidue
import molecule.HHBond
import geometry.HHPoint
Mol=molecule.HHMolecule.Molecule
Atom=molecule.HHAtom.Atom
Res=molecule.HHResidue.Residue
Bond=molecule.HHBond.Bond
Point=geometry.HHPoint.Point
class MOL2(FileFormator):
extension=['mol2'];
def CreateAtomLine(self, atom, lenatom=4, lenres=3):
output=atom.index.rjust(lenatom)+" "+atom.name.ljust(5)
output+=("%.4f" % atom.coordinates.x).rjust(11) + ("%.4f" % atom.coordinates.y).rjust(11)+ ("%.4f" % atom.coordinates.z).rjust(11)+ ' '
output+=atom.atype.ljust(6)+str(atom.resid).rjust(lenres)+ ' ' + atom.resname.ljust(6)+ atom.pcharge.rjust(9)+ os.linesep
return output
def CreateBondline(bond,lenbond=4):
output=bond.index.rjust(lenbond)+" "+bond.idx_bgn.rjust(lenbond)+" "+\
bond.idx_end.rjust(lenbond)+" "+bond.btype.lower().ljust(lenbond)+ os.linesep
return output
def WriteObj(self,obj):
if (isinstance(obj,Atom)):
self.write(CreateAtomLine(obj))
elif(isinstance(obj,Res) or isinstance(obj,Mol)):
for atom in obj.atoms:
self.write(CreateAtomLine(atom))
elif(isinstance(obj,Bond)):
self.write(CreateBondline(obj));
else:
self.write(str(obj));
def ReadAtomLine(self, Line):
items=Line.split()
atom=Atom()
atom.index = int(items[0])
atom.atomid = int(items[0])
atom.name = items[1]
atom.coordinates = Point(float(items[2]), float(items[3]), float(items[4]))
atom.atype=items[5]
#sybyl type
#atom.element_name=atom.atype[0:2].strip('.').strip()
atom.element_name=atom.DeduceElementFromName(atom.name);
if len(items)==9:
atom.resid = int(items[6])
atom.resname = items[7]
atom.charge = items[8]
return atom;
def ReadBondLine(self, Line):
items=Line.split()
bond=Bond()
bond.index = int(items[0])
bond.idx_bgn = int(items[1])
bond.idx_bgn = int(items[2])
bond.btype = items[3]
return bond;
def WriteMolFile(self,mol,filename):
self.open(filename,'w');
self.write("@<TRIPOS>MOLECULE\n")
self.write(mol.name+'\n')
self.write("%5d %5d %5d %5d %5d \n", mol.GetNumAtom(), mol.GetNumBond(), mol.GetNumFrag(), 0, 0);
self.write("@<TRIPOS>ATOM\n");
self.WriteObj(mol);
self.write("@<TRIPOS>BOND\n");
def ReadMolFile(self, filename):
self.open(filename,'r');
findmol=False;
findatom=False;
findbond=False;
nextmol=False;
mols=[]
mol=None
for line in self.handle:
if (line[:17] == "@<TRIPOS>MOLECULE"):
findmol=True;
findatom=False;
findbond=False;
if (nextmol):
mols.append(mol)
nextmol=False;
mol=Mol()
continue;
if (line[:13] == "@<TRIPOS>ATOM"):
findatom=True;
findmol=False;
nextmol=True;
continue;
if (line[:13] == "@<TRIPOS>BOND"):
findatom=False;
findbond=True;
continue;
if (findbond and line[:9]=="@<TRIPOS>"):
findbond=False;
continue;
if (findatom):
atom=self.ReadAtomLine(line);
atom.mol=mol;
mol.atoms.append();
if (findbond):
bond=self.ReadBondLine(line);
bond.mol=mol;
bond.SetAtomsFromIdx()
mol.bonds.append(bond);
mols.append(mol);
self.close();
if (len(mols)==1):return mols[0];
elif (len(mols)>1):return mols;
elif (len(mols)==0):return None;
if __name__=="__main__":
mr=MOL2()
a=mr.ReadMolFile("test.mol2");
print a
print a.atoms[0]
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2009-2017, 2021 Rocky Bernstein <rocky@gnu.org>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either ver... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2009-2017, 2021 Rocky Bernstein <rocky@gnu.org>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Founda... | ```python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2017, 2021 Rocky Bernstein <rocky@gnu.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Subsidiary routines used to "pack" and "unpack" TCP messages. """
TCP_MAX_PACKET = 8192 # Largest size for a recv
LOG_MAX_MSG = 4 # int(log(TCP_MAX_PACKET)
def pack_msg(msg):
fmt = "%%0%dd" % LOG_MAX_MSG # A funny way of writing: '%04d'
byte_msg = bytes(msg, "UTF-8")
byte_fmt = bytes(fmt % len(byte_msg), "UTF-8")
return byte_fmt + byte_msg
def unpack_msg(buf):
if len(buf) == 0:
# Fake a quit
return "", bytes("q".encode("utf-8"))
length = int(buf[0:LOG_MAX_MSG])
data = buf[LOG_MAX_MSG : LOG_MAX_MSG + length]
buf = buf[LOG_MAX_MSG + length :]
return buf, data
# Demo
if __name__ == "__main__":
print(unpack_msg(pack_msg("Hello, there!"))[1])
# assert unpack_msg(pack_msg(msg))[1] == msg
pass
``` |
[
{
"content": "```python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom kombu.async.http.curl import READ, WRITE, CurlClient\n\nfrom kombu.tests.case import (\n HubCase, Mock, call, patch, case_requires, set_module_symbol,\n)\n\n\n@case_requires('pycurl')\nclass test_CurlClient(HubCa... | [
{
"content": "<|memory_start|>```python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nfrom kombu.async.http.curl import READ, WRITE, CurlClient\n\nfrom kombu.tests.case import (\n HubCase, Mock, call, patch, case_requires, set_module_symbol,\n)\n\n\n@case_requires('pycurl')\nclass test_... | ```python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from kombu.async.http.curl import READ, WRITE, CurlClient
from kombu.tests.case import (
HubCase, Mock, call, patch, case_requires, set_module_symbol,
)
@case_requires('pycurl')
class test_CurlClient(HubCase):
class Client(CurlClient):
Curl = Mock(name='Curl')
def test_when_pycurl_missing(self):
with set_module_symbol('kombu.async.http.curl', 'pycurl', None):
with self.assertRaises(ImportError):
self.Client()
def test_max_clients_set(self):
x = self.Client(max_clients=303)
self.assertEqual(x.max_clients, 303)
def test_init(self):
with patch('kombu.async.http.curl.pycurl') as _pycurl:
x = self.Client()
self.assertIsNotNone(x._multi)
self.assertIsNotNone(x._pending)
self.assertIsNotNone(x._free_list)
self.assertIsNotNone(x._fds)
self.assertEqual(
x._socket_action, x._multi.socket_action,
)
self.assertEqual(len(x._curls), x.max_clients)
self.assertTrue(x._timeout_check_tref)
x._multi.setopt.assert_has_calls([
call(_pycurl.M_TIMERFUNCTION, x._set_timeout),
call(_pycurl.M_SOCKETFUNCTION, x._handle_socket),
])
def test_close(self):
with patch('kombu.async.http.curl.pycurl'):
x = self.Client()
x._timeout_check_tref = Mock(name='timeout_check_tref')
x.close()
x._timeout_check_tref.cancel.assert_called_with()
for _curl in x._curls:
_curl.close.assert_called_with()
x._multi.close.assert_called_with()
def test_add_request(self):
with patch('kombu.async.http.curl.pycurl'):
x = self.Client()
x._process_queue = Mock(name='_process_queue')
x._set_timeout = Mock(name='_set_timeout')
request = Mock(name='request')
x.add_request(request)
self.assertIn(request, x._pending)
x._process_queue.assert_called_with()
x._set_timeout.assert_called_with(0)
def test_handle_socket(self):
with patch('kombu.async.http.curl.pycurl') as _pycurl:
hub = Mock(name='hub')
x = self.Client(hub)
fd = Mock(name='fd1')
# POLL_REMOVE
x._fds[fd] = fd
x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)
hub.remove.assert_called_with(fd)
self.assertNotIn(fd, x._fds)
x._handle_socket(_pycurl.POLL_REMOVE, fd, x._multi, None, _pycurl)
# POLL_IN
hub = x.hub = Mock(name='hub')
fds = [fd, Mock(name='fd2'), Mock(name='fd3')]
x._fds = {f: f for f in fds}
x._handle_socket(_pycurl.POLL_IN, fd, x._multi, None, _pycurl)
hub.remove.assert_has_calls([call(fd)])
hub.add_reader.assert_called_with(fd, x.on_readable, fd)
self.assertEqual(x._fds[fd], READ)
# POLL_OUT
hub = x.hub = Mock(name='hub')
x._handle_socket(_pycurl.POLL_OUT, fd, x._multi, None, _pycurl)
hub.add_writer.assert_called_with(fd, x.on_writable, fd)
self.assertEqual(x._fds[fd], WRITE)
# POLL_INOUT
hub = x.hub = Mock(name='hub')
x._handle_socket(_pycurl.POLL_INOUT, fd, x._multi, None, _pycurl)
hub.add_reader.assert_called_with(fd, x.on_readable, fd)
hub.add_writer.assert_called_with(fd, x.on_writable, fd)
self.assertEqual(x._fds[fd], READ | WRITE)
# UNKNOWN EVENT
hub = x.hub = Mock(name='hub')
x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)
# FD NOT IN FDS
hub = x.hub = Mock(name='hub')
x._fds.clear()
x._handle_socket(0xff3f, fd, x._multi, None, _pycurl)
self.assertFalse(hub.remove.called)
def test_set_timeout(self):
x = self.Client()
x._set_timeout(100)
def test_timeout_check(self):
with patch('kombu.async.http.curl.pycurl') as _pycurl:
x = self.Client()
x._process_pending_requests = Mock(name='process_pending')
x._multi.socket_all.return_value = 333, 1
_pycurl.error = KeyError
x._timeout_check(_pycurl=_pycurl)
x._multi.socket_all.return_value = None
x._multi.socket_all.side_effect = _pycurl.error(333)
x._timeout_check(_pycurl=_pycurl)
def test_on_readable_on_writeable(self):
with patch('kombu.async.http.curl.pycurl') as _pycurl:
x = self.Client()
x._on_event = Mock(name='on_event')
fd = Mock(name='fd')
x.on_readable(fd, _pycurl=_pycurl)
x._on_event.assert_called_with(fd, _pycurl.CSELECT_IN)
x.on_writable(fd, _pycurl=_pycurl)
x._on_event.assert_called_with(fd, _pycurl.CSELECT_OUT)
``` |
[
{
"content": "Reconstruct the code exactly:\n```python\n#!/usr/bin/env python\n##\n# Copyright (c) 2012-2014 Apple Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the... | [
{
"content": "Reconstruct the code exactly:\n<|memory_start|>```python\n#!/usr/bin/env python\n##\n# Copyright (c) 2012-2014 Apple Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obta... | ```python
#!/usr/bin/env python
##
# Copyright (c) 2012-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from calendarserver.tools.cmdline import utilityMain, WorkerService
from argparse import ArgumentParser
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks
from twext.who.idirectory import RecordType
import time
log = Logger()
class DisplayAPNSubscriptions(WorkerService):
users = []
def doWork(self):
rootResource = self.rootResource()
directory = rootResource.getDirectory()
return displayAPNSubscriptions(self.store, directory, rootResource,
self.users)
def main():
parser = ArgumentParser(description='Display Apple Push Notification subscriptions')
parser.add_argument('-f', '--config', dest='configFileName', metavar='CONFIGFILE', help='caldavd.plist configuration file path')
parser.add_argument('-d', '--debug', action='store_true', help='show debug logging')
parser.add_argument('user', help='one or more users to display', nargs='+') # Required
args = parser.parse_args()
DisplayAPNSubscriptions.users = args.user
utilityMain(
args.configFileName,
DisplayAPNSubscriptions,
verbose=args.debug,
)
@inlineCallbacks
def displayAPNSubscriptions(store, directory, root, users):
for user in users:
print
record = yield directory.recordWithShortName(RecordType.user, user)
if record is not None:
print("User %s (%s)..." % (user, record.uid))
txn = store.newTransaction(label="Display APN Subscriptions")
subscriptions = (yield txn.apnSubscriptionsBySubscriber(record.uid))
(yield txn.commit())
if subscriptions:
byKey = {}
for token, key, timestamp, userAgent, ipAddr in subscriptions:
byKey.setdefault(key, []).append((token, timestamp, userAgent, ipAddr))
for key, tokens in byKey.iteritems():
print
protocol, _ignore_host, path = key.strip("/").split("/", 2)
resource = {
"CalDAV": "calendar",
"CardDAV": "addressbook",
}[protocol]
if "/" in path:
uid, collection = path.split("/")
else:
uid = path
collection = None
record = yield directory.recordWithUID(uid)
user = record.shortNames[0]
if collection:
print("...is subscribed to a share from %s's %s home" % (user, resource),)
else:
print("...is subscribed to %s's %s home" % (user, resource),)
# print(" (key: %s)\n" % (key,))
print("with %d device(s):" % (len(tokens),))
for token, timestamp, userAgent, ipAddr in tokens:
print(" %s\n '%s' from %s\n %s" % (
token, userAgent, ipAddr,
time.strftime(
"on %a, %d %b %Y at %H:%M:%S %z(%Z)",
time.localtime(timestamp)
)
))
else:
print(" ...is not subscribed to anything.")
else:
print("User %s not found" % (user,))
``` |
[
{
"content": "Here is a code file:\n```python\nimport sys\nimport types\n\nfrom rpython.flowspace.model import Constant\nfrom rpython.flowspace.operation import op\nfrom rpython.annotator import description, model as annmodel\nfrom rpython.rlib.objectmodel import UnboxedValue\nfrom rpython.tool.pairtype import ... | [
{
"content": "Here is a code file:\n<|memory_start|>```python\nimport sys\nimport types\n\nfrom rpython.flowspace.model import Constant\nfrom rpython.flowspace.operation import op\nfrom rpython.annotator import description, model as annmodel\nfrom rpython.rlib.objectmodel import UnboxedValue\nfrom rpython.tool.... | ```python
import sys
import types
from rpython.flowspace.model import Constant
from rpython.flowspace.operation import op
from rpython.annotator import description, model as annmodel
from rpython.rlib.objectmodel import UnboxedValue
from rpython.tool.pairtype import pairtype, pair
from rpython.tool.identity_dict import identity_dict
from rpython.tool.flattenrec import FlattenRecursion
from rpython.rtyper.extregistry import ExtRegistryEntry
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lltype import (
Ptr, Struct, GcStruct, malloc, cast_pointer, castable, nullptr,
RuntimeTypeInfo, getRuntimeTypeInfo, typeOf, Void, FuncType, Bool, Signed,
functionptr)
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.rmodel import (
Repr, getgcflavor, inputconst, warning, mangle)
class FieldListAccessor(object):
def initialize(self, TYPE, fields):
assert type(fields) is dict
self.TYPE = TYPE
self.fields = fields
for x in fields.itervalues():
assert isinstance(x, ImmutableRanking)
def all_immutable_fields(self):
result = set()
for key, value in self.fields.iteritems():
if value in (IR_IMMUTABLE, IR_IMMUTABLE_ARRAY):
result.add(key)
return result
def __repr__(self):
return '<FieldListAccessor for %s>' % getattr(self, 'TYPE', '?')
class ImmutableRanking(object):
def __init__(self, name, is_immutable):
self.name = name
self.is_immutable = is_immutable
def __nonzero__(self):
return self.is_immutable
def __repr__(self):
return '<%s>' % self.name
IR_MUTABLE = ImmutableRanking('mutable', False)
IR_IMMUTABLE = ImmutableRanking('immutable', True)
IR_IMMUTABLE_ARRAY = ImmutableRanking('immutable_array', True)
IR_QUASIIMMUTABLE = ImmutableRanking('quasiimmutable', False)
IR_QUASIIMMUTABLE_ARRAY = ImmutableRanking('quasiimmutable_array', False)
class ImmutableConflictError(Exception):
"""Raised when the _immutable_ or _immutable_fields_ hints are
not consistent across a class hierarchy."""
def getclassrepr(rtyper, classdef):
if classdef is None:
return rtyper.rootclass_repr
result = classdef.repr
if result is None:
result = classdef.repr = ClassRepr(rtyper, classdef)
rtyper.add_pendingsetup(result)
return result
def getinstancerepr(rtyper, classdef, default_flavor='gc'):
if classdef is None:
flavor = default_flavor
else:
flavor = getgcflavor(classdef)
try:
result = rtyper.instance_reprs[classdef, flavor]
except KeyError:
result = buildinstancerepr(rtyper, classdef, gcflavor=flavor)
rtyper.instance_reprs[classdef, flavor] = result
rtyper.add_pendingsetup(result)
return result
def buildinstancerepr(rtyper, classdef, gcflavor='gc'):
from rpython.rtyper.rvirtualizable import VirtualizableInstanceRepr
if classdef is None:
unboxed = []
virtualizable = False
else:
unboxed = [subdef for subdef in classdef.getallsubdefs() if
subdef.classdesc.pyobj is not None and
issubclass(subdef.classdesc.pyobj, UnboxedValue)]
virtualizable = classdef.classdesc.read_attribute(
'_virtualizable_', Constant(False)).value
config = rtyper.annotator.translator.config
usetagging = len(unboxed) != 0 and config.translation.taggedpointers
if virtualizable:
assert len(unboxed) == 0
assert gcflavor == 'gc'
return VirtualizableInstanceRepr(rtyper, classdef)
elif usetagging:
# the UnboxedValue class and its parent classes need a
# special repr for their instances
if len(unboxed) != 1:
raise TyperError("%r has several UnboxedValue subclasses" % (
classdef,))
assert gcflavor == 'gc'
from rpython.rtyper.lltypesystem import rtagged
return rtagged.TaggedInstanceRepr(rtyper, classdef, unboxed[0])
else:
return InstanceRepr(rtyper, classdef, gcflavor)
class MissingRTypeAttribute(TyperError):
pass
# ____________________________________________________________
#
# There is one "vtable" per user class, with the following structure:
# A root class "object" has:
#
# struct object_vtable {
# // struct object_vtable* parenttypeptr; not used any more
# RuntimeTypeInfo * rtti;
# Signed subclassrange_min; //this is also the id of the class itself
# Signed subclassrange_max;
# RPyString * name;
# struct object * instantiate();
# }
#
# Every other class X, with parent Y, has the structure:
#
# struct vtable_X {
# struct vtable_Y super; // inlined
# ... // extra class attributes
# }
# The type of the instances is:
#
# struct object { // for the root class
# struct object_vtable* typeptr;
# }
#
# struct X {
# struct Y super; // inlined
# ... // extra instance attributes
# }
#
# there's also a nongcobject
OBJECT_VTABLE = lltype.ForwardReference()
CLASSTYPE = Ptr(OBJECT_VTABLE)
OBJECT = GcStruct('object', ('typeptr', CLASSTYPE),
hints={'immutable': True, 'shouldntbenull': True,
'typeptr': True},
rtti=True)
OBJECTPTR = Ptr(OBJECT)
OBJECT_VTABLE.become(Struct('object_vtable',
#('parenttypeptr', CLASSTYPE),
('subclassrange_min', Signed),
('subclassrange_max', Signed),
('rtti', Ptr(RuntimeTypeInfo)),
('name', Ptr(rstr.STR)),
('hash', Signed),
('instantiate', Ptr(FuncType([], OBJECTPTR))),
hints={'immutable': True}))
# non-gc case
NONGCOBJECT = Struct('nongcobject', ('typeptr', CLASSTYPE))
NONGCOBJECTPTR = Ptr(NONGCOBJECT)
OBJECT_BY_FLAVOR = {'gc': OBJECT, 'raw': NONGCOBJECT}
LLFLAVOR = {'gc': 'gc', 'raw': 'raw', 'stack': 'raw'}
def cast_vtable_to_typeptr(vtable):
while typeOf(vtable).TO != OBJECT_VTABLE:
vtable = vtable.super
return vtable
def alloc_array_name(name):
return rstr.string_repr.convert_const(name)
class ClassRepr(Repr):
def __init__(self, rtyper, classdef):
self.rtyper = rtyper
self.classdef = classdef
self.vtable_type = lltype.ForwardReference()
self.lowleveltype = Ptr(self.vtable_type)
def __repr__(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return '<ClassRepr for %s>' % (clsname,)
def compact_repr(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return 'ClassR %s' % (clsname,)
def convert_desc(self, desc):
subclassdef = desc.getuniqueclassdef()
if self.classdef is not None:
if self.classdef.commonbase(subclassdef) != self.classdef:
raise TyperError("not a subclass of %r: %r" % (
self.classdef.name, desc))
r_subclass = getclassrepr(self.rtyper, subclassdef)
return r_subclass.getruntime(self.lowleveltype)
def convert_const(self, value):
if not isinstance(value, (type, types.ClassType)):
raise TyperError("not a class: %r" % (value,))
bk = self.rtyper.annotator.bookkeeper
return self.convert_desc(bk.getdesc(value))
def prepare_method(self, s_value):
# special-casing for methods:
# if s_value is SomePBC([MethodDescs...])
# return a PBC representing the underlying functions
if (isinstance(s_value, annmodel.SomePBC) and
s_value.getKind() == description.MethodDesc):
s_value = self.classdef.lookup_filter(s_value)
funcdescs = [mdesc.funcdesc for mdesc in s_value.descriptions]
return annmodel.SomePBC(funcdescs)
return None # not a method
def get_ll_eq_function(self):
return None
def _setup_repr(self):
# NOTE: don't store mutable objects like the dicts below on 'self'
# before they are fully built, to avoid strange bugs in case
# of recursion where other code would uses these
# partially-initialized dicts.
clsfields = {}
pbcfields = {}
allmethods = {}
# class attributes
llfields = []
for name, attrdef in self.classdef.attrs.items():
if attrdef.readonly:
s_value = attrdef.s_value
s_unboundmethod = self.prepare_method(s_value)
if s_unboundmethod is not None:
allmethods[name] = True
s_value = s_unboundmethod
r = self.rtyper.getrepr(s_value)
mangled_name = 'cls_' + name
clsfields[name] = mangled_name, r
llfields.append((mangled_name, r.lowleveltype))
# attributes showing up in getattrs done on the class as a PBC
extra_access_sets = self.classdef.extra_access_sets
for access_set, (attr, counter) in extra_access_sets.items():
r = self.rtyper.getrepr(access_set.s_value)
mangled_name = mangle('pbc%d' % counter, attr)
pbcfields[access_set, attr] = mangled_name, r
llfields.append((mangled_name, r.lowleveltype))
llfields.sort()
llfields.sort(key=attr_reverse_size)
#
self.rbase = getclassrepr(self.rtyper, self.classdef.basedef)
self.rbase.setup()
kwds = {'hints': {'immutable': True}}
vtable_type = Struct('%s_vtable' % self.classdef.name,
('super', self.rbase.vtable_type),
*llfields, **kwds)
self.vtable_type.become(vtable_type)
allmethods.update(self.rbase.allmethods)
self.clsfields = clsfields
self.pbcfields = pbcfields
self.allmethods = allmethods
self.vtable = None
def getvtable(self):
"""Return a ptr to the vtable of this type."""
if self.vtable is None:
self.init_vtable()
return cast_vtable_to_typeptr(self.vtable)
    def getruntime(self, expected_type):
        # the run-time value representing a class is its vtable pointer
        assert expected_type == CLASSTYPE
        return self.getvtable()

    def init_vtable(self):
        """Create the actual vtable"""
        self.vtable = malloc(self.vtable_type, immortal=True)
        vtable_part = self.vtable
        r_parentcls = self
        # each level of the hierarchy fills its own nested slice of the
        # vtable struct; descend through the 'super' sub-structs
        while r_parentcls.classdef is not None:
            self.setup_vtable(vtable_part, r_parentcls)
            vtable_part = vtable_part.super
            r_parentcls = r_parentcls.rbase
        self.fill_vtable_root(vtable_part)
    def setup_vtable(self, vtable, r_parentcls):
        """Initialize the vtable portion corresponding to 'r_parentcls'."""
        # setup class attributes: for each attribute name at the level
        # of 'r_parentcls', look up its value in the class
        def assign(mangled_name, value):
            # NOTE: 'r' is read via late binding from the enclosing loops
            # below; each loop rebinds it before calling assign()
            if (isinstance(value, Constant) and
                isinstance(value.value, staticmethod)):
                value = Constant(value.value.__get__(42))  # staticmethod => bare function
            llvalue = r.convert_desc_or_const(value)
            setattr(vtable, mangled_name, llvalue)
        for fldname in r_parentcls.clsfields:
            mangled_name, r = r_parentcls.clsfields[fldname]
            if r.lowleveltype is Void:
                continue
            value = self.classdef.classdesc.read_attribute(fldname, None)
            if value is not None:
                assign(mangled_name, value)
        # extra PBC attributes
        for (access_set, attr), (mangled_name, r) in r_parentcls.pbcfields.items():
            if self.classdef.classdesc not in access_set.descs:
                continue  # only for the classes in the same pbc access set
            if r.lowleveltype is Void:
                continue
            attrvalue = self.classdef.classdesc.read_attribute(attr, None)
            if attrvalue is not None:
                assign(mangled_name, attrvalue)
    def fill_vtable_root(self, vtable):
        """Initialize the head of the vtable."""
        vtable.hash = hash(self)
        # initialize the 'subclassrange_*' and 'name' fields
        if self.classdef is not None:
            #vtable.parenttypeptr = self.rbase.getvtable()
            vtable.subclassrange_min = self.classdef.minid
            vtable.subclassrange_max = self.classdef.maxid
        else:  # for the root class: every id is in range
            vtable.subclassrange_min = 0
            vtable.subclassrange_max = sys.maxint
        rinstance = getinstancerepr(self.rtyper, self.classdef)
        rinstance.setup()
        if rinstance.gcflavor == 'gc':
            vtable.rtti = getRuntimeTypeInfo(rinstance.object_type)
        # human-readable class name, stored as a low-level array
        if self.classdef is None:
            name = 'object'
        else:
            name = self.classdef.shortname
        vtable.name = alloc_array_name(name)
        if hasattr(self.classdef, 'my_instantiate_graph'):
            graph = self.classdef.my_instantiate_graph
            vtable.instantiate = self.rtyper.getcallable(graph)
        #else: the classdef was created recently, so no instantiate()
        #      could reach it
    def fromtypeptr(self, vcls, llops):
        """Return the type pointer cast to self's vtable type."""
        self.setup()
        castable(self.lowleveltype, vcls.concretetype)  # sanity check
        return llops.genop('cast_pointer', [vcls],
                           resulttype=self.lowleveltype)
    # alias: a class-type variable is also a type pointer
    fromclasstype = fromtypeptr
def getclsfield(self, vcls, attr, llops):
"""Read the given attribute of 'vcls'."""
if attr in self.clsfields:
mangled_name, r = self.clsfields[attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
return llops.genop('getfield', [v_vtable, cname], resulttype=r)
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getclsfield(vcls, attr, llops)
def setclsfield(self, vcls, attr, vvalue, llops):
"""Write the given attribute of 'vcls'."""
if attr in self.clsfields:
mangled_name, r = self.clsfields[attr]
v_vtable = self.fromtypeptr(vcls, llops)
cname = inputconst(Void, mangled_name)
llops.genop('setfield', [v_vtable, cname, vvalue])
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
self.rbase.setclsfield(vcls, attr, vvalue, llops)
    def getpbcfield(self, vcls, access_set, attr, llops):
        # read a PBC attribute (mangled 'pbc%d' field) out of the vtable
        if (access_set, attr) not in self.pbcfields:
            raise TyperError("internal error: missing PBC field")
        mangled_name, r = self.pbcfields[access_set, attr]
        v_vtable = self.fromtypeptr(vcls, llops)
        cname = inputconst(Void, mangled_name)
        return llops.genop('getfield', [v_vtable, cname], resulttype=r)
    def rtype_issubtype(self, hop):
        # Lower 'issubclass(cls1, cls2)'.  When cls2 is a compile-time
        # constant, its subclass-id range is inlined as constants.
        class_repr = get_type_repr(self.rtyper)
        v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
        if isinstance(v_cls2, Constant):
            cls2 = v_cls2.value
            minid = hop.inputconst(Signed, cls2.subclassrange_min)
            maxid = hop.inputconst(Signed, cls2.subclassrange_max)
            return hop.gendirectcall(ll_issubclass_const, v_cls1, minid,
                                     maxid)
        else:
            # NOTE(review): this second inputargs() call looks redundant --
            # the same args were already converted above; confirm before
            # removing.
            v_cls1, v_cls2 = hop.inputargs(class_repr, class_repr)
            return hop.gendirectcall(ll_issubclass, v_cls1, v_cls2)
class RootClassRepr(ClassRepr):
    """ClassRepr for the root of the class hierarchy"""
    # the root has no classdef; its vtable is the plain OBJECT_VTABLE
    classdef = None

    def __init__(self, rtyper):
        self.rtyper = rtyper
        self.vtable_type = OBJECT_VTABLE
        self.lowleveltype = Ptr(self.vtable_type)

    def _setup_repr(self):
        # no class attributes and no PBC fields at the root
        self.clsfields = {}
        self.pbcfields = {}
        self.allmethods = {}
        self.vtable = None

    def init_vtable(self):
        # only the common OBJECT_VTABLE head needs filling
        self.vtable = malloc(self.vtable_type, immortal=True)
        self.fill_vtable_root(self.vtable)
def get_type_repr(rtyper):
    """All 'type' values share one repr: the cached root-class repr."""
    return rtyper.rootclass_repr
# ____________________________________________________________

class __extend__(annmodel.SomeInstance):
    # annotation -> repr hook: instances are represented per classdef
    def rtyper_makerepr(self, rtyper):
        return getinstancerepr(rtyper, self.classdef)
    def rtyper_makekey(self):
        # reprs are cached under the (annotation class, classdef) key
        return self.__class__, self.classdef

class __extend__(annmodel.SomeType):
    # annotation -> repr hook: all 'type' values share one repr
    def rtyper_makerepr(self, rtyper):
        return get_type_repr(rtyper)
    def rtyper_makekey(self):
        return self.__class__,
class InstanceRepr(Repr):
    # Low-level representation of instances of 'classdef': a (GC or raw)
    # pointer to a struct that inlines its parent's struct as 'super'.
    def __init__(self, rtyper, classdef, gcflavor='gc'):
        self.rtyper = rtyper
        self.classdef = classdef
        if classdef is None:
            # root level: the common OBJECT header itself
            self.object_type = OBJECT_BY_FLAVOR[LLFLAVOR[gcflavor]]
        else:
            # a forward reference, resolved later in _setup_repr()
            ForwardRef = lltype.FORWARDREF_BY_FLAVOR[LLFLAVOR[gcflavor]]
            self.object_type = ForwardRef()
        # cache of low-level structs for prebuilt Python instances
        self.iprebuiltinstances = identity_dict()
        self.lowleveltype = Ptr(self.object_type)
        self.gcflavor = gcflavor
    def _setup_repr(self, llfields=None, hints=None, adtmeths=None):
        """Compute the instance struct layout for this class level."""
        # NOTE: don't store mutable objects like the dicts below on 'self'
        # before they are fully built, to avoid strange bugs in case
        # of recursion where other code would use these
        # partially-initialized dicts.
        if self.classdef is None:
            self.immutable_field_set = set()
        self.rclass = getclassrepr(self.rtyper, self.classdef)
        fields = {}
        allinstancefields = {}
        if self.classdef is None:
            # root level: only the hidden '__class__' (typeptr) field
            fields['__class__'] = 'typeptr', get_type_repr(self.rtyper)
        else:
            # instance attributes
            attrs = self.classdef.attrs.items()
            attrs.sort()
            myllfields = []
            for name, attrdef in attrs:
                if not attrdef.readonly:
                    r = self.rtyper.getrepr(attrdef.s_value)
                    mangled_name = 'inst_' + name
                    fields[name] = mangled_name, r
                    myllfields.append((mangled_name, r.lowleveltype))
            # name-sorted above; now (stably) order by decreasing size to
            # minimize padding holes in the C struct
            myllfields.sort(key=attr_reverse_size)
            if llfields is None:
                llfields = myllfields
            else:
                llfields = llfields + myllfields
            self.rbase = getinstancerepr(self.rtyper, self.classdef.basedef,
                                         self.gcflavor)
            self.rbase.setup()
            MkStruct = lltype.STRUCT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
            if adtmeths is None:
                adtmeths = {}
            if hints is None:
                hints = {}
            hints = self._check_for_immutable_hints(hints)
            kwds = {}
            if self.gcflavor == 'gc':
                kwds['rtti'] = True
            # quasi-immutable fields get an extra 'mutate_*' slot (JIT)
            for name, attrdef in attrs:
                if not attrdef.readonly and self.is_quasi_immutable(name):
                    llfields.append(('mutate_' + name, OBJECTPTR))
            object_type = MkStruct(self.classdef.name,
                                   ('super', self.rbase.object_type),
                                   hints=hints,
                                   adtmeths=adtmeths,
                                   *llfields,
                                   **kwds)
            self.object_type.become(object_type)
            allinstancefields.update(self.rbase.allinstancefields)
        allinstancefields.update(fields)
        self.fields = fields
        self.allinstancefields = allinstancefields
    def _check_for_immutable_hints(self, hints):
        """Validate _immutable_ / _immutable_fields_ class declarations.

        Returns a (possibly copied) 'hints' dict extended with the
        'immutable' flag and/or an 'immutable_fields' accessor.
        """
        loc = self.classdef.classdesc.lookup('_immutable_')
        if loc is not None:
            if loc is not self.classdef.classdesc:
                # _immutable_ must be re-declared on every subclass
                raise ImmutableConflictError(
                    "class %r inherits from its parent _immutable_=True, "
                    "so it should also declare _immutable_=True" % (
                    self.classdef,))
            if loc.classdict.get('_immutable_').value is not True:
                raise TyperError(
                    "class %r: _immutable_ = something else than True" % (
                    self.classdef,))
            hints = hints.copy()
            hints['immutable'] = True
        self.immutable_field_set = set()  # unless overwritten below
        if self.classdef.classdesc.lookup('_immutable_fields_') is not None:
            hints = hints.copy()
            immutable_fields = self.classdef.classdesc.classdict.get(
                '_immutable_fields_')
            if immutable_fields is not None:
                self.immutable_field_set = set(immutable_fields.value)
            # the accessor's field list is filled in later, by
            # _setup_immutable_field_list()
            accessor = FieldListAccessor()
            hints['immutable_fields'] = accessor
        return hints
def __repr__(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return '<InstanceRepr for %s>' % (clsname,)
def compact_repr(self):
if self.classdef is None:
clsname = 'object'
else:
clsname = self.classdef.name
return 'InstanceR %s' % (clsname,)
    def _setup_repr_final(self):
        # Finalization pass: immutability checks, destructor/RTTI wiring,
        # and registration of the vtable <-> low-level type mapping.
        self._setup_immutable_field_list()
        self._check_for_immutable_conflicts()
        if self.gcflavor == 'gc':
            if (self.classdef is not None and
                self.classdef.classdesc.lookup('__del__') is not None):
                # build a destructor pointer out of the __del__ graph
                s_func = self.classdef.classdesc.s_read_attribute('__del__')
                source_desc = self.classdef.classdesc.lookup('__del__')
                source_classdef = source_desc.getclassdef(None)
                source_repr = getinstancerepr(self.rtyper, source_classdef)
                assert len(s_func.descriptions) == 1
                funcdesc, = s_func.descriptions
                graph = funcdesc.getuniquegraph()
                self.check_graph_of_del_does_not_call_too_much(graph)
                FUNCTYPE = FuncType([Ptr(source_repr.object_type)], Void)
                destrptr = functionptr(FUNCTYPE, graph.name,
                                       graph=graph,
                                       _callable=graph.func)
            else:
                destrptr = None
            OBJECT = OBJECT_BY_FLAVOR[LLFLAVOR[self.gcflavor]]
            self.rtyper.attachRuntimeTypeInfoFunc(self.object_type,
                                                  ll_runtime_type_info,
                                                  OBJECT, destrptr)
        vtable = self.rclass.getvtable()
        self.rtyper.set_type_for_typeptr(vtable, self.lowleveltype.TO)
    def _setup_immutable_field_list(self):
        # Fill the FieldListAccessor (created in _check_for_immutable_hints)
        # with the union of the immutable-field declarations of this class
        # and all of its parents.  Idempotent: skipped once 'fields' is set.
        hints = self.object_type._hints
        if "immutable_fields" in hints:
            accessor = hints["immutable_fields"]
            if not hasattr(accessor, 'fields'):
                immutable_fields = set()
                rbase = self
                while rbase.classdef is not None:
                    immutable_fields.update(rbase.immutable_field_set)
                    rbase = rbase.rbase
                self._parse_field_list(immutable_fields, accessor, hints)
    def _parse_field_list(self, fields, accessor, hints):
        """Decode the _immutable_fields_ suffix syntax into ranks.

        'name'     -> IR_IMMUTABLE            'name[*]'  -> IR_IMMUTABLE_ARRAY
        'name?'    -> IR_QUASIIMMUTABLE       'name?[*]' -> IR_QUASIIMMUTABLE_ARRAY
        """
        ranking = {}
        for name in fields:
            quasi = False
            if name.endswith('?[*]'):   # a quasi-immutable field pointing to
                name = name[:-4]        # an immutable array
                rank = IR_QUASIIMMUTABLE_ARRAY
                quasi = True
            elif name.endswith('[*]'):  # for virtualizables' lists
                name = name[:-3]
                rank = IR_IMMUTABLE_ARRAY
            elif name.endswith('?'):    # a quasi-immutable field
                name = name[:-1]
                rank = IR_QUASIIMMUTABLE
                quasi = True
            else:                       # a regular immutable/green field
                rank = IR_IMMUTABLE
            try:
                mangled_name, r = self._get_field(name)
            except KeyError:
                # field not actually present at this level: ignore it
                continue
            if quasi and hints.get("immutable"):
                raise TyperError(
                    "can't have _immutable_ = True and a quasi-immutable field "
                    "%s in class %s" % (name, self.classdef))
            ranking[mangled_name] = rank
        accessor.initialize(self.object_type, ranking)
        return ranking
    def _check_for_immutable_conflicts(self):
        # check for conflicts, i.e. a field that is defined normally as
        # mutable in some parent class but that is now declared immutable
        is_self_immutable = "immutable" in self.object_type._hints
        base = self
        while base.classdef is not None:
            base = base.rbase
            for fieldname in base.fields:
                try:
                    mangled, r = base._get_field(fieldname)
                except KeyError:
                    continue
                if r.lowleveltype == Void:
                    continue
                base._setup_immutable_field_list()
                if base.object_type._immutable_field(mangled):
                    continue
                # 'fieldname' is a mutable, non-Void field in the parent
                if is_self_immutable:
                    raise ImmutableConflictError(
                        "class %r has _immutable_=True, but parent class %r "
                        "defines (at least) the mutable field %r" %
                        (self, base, fieldname))
                if (fieldname in self.immutable_field_set or
                    (fieldname + '?') in self.immutable_field_set):
                    raise ImmutableConflictError(
                        "field %r is defined mutable in class %r, but "
                        "listed in _immutable_fields_ in subclass %r" %
                        (fieldname, base, self))
    def hook_access_field(self, vinst, cname, llops, flags):
        pass  # for virtualizables; see rvirtualizable.py

    def hook_setfield(self, vinst, fieldname, llops):
        # writing a quasi-immutable field must invalidate JIT code that
        # constant-folded its old value
        if self.is_quasi_immutable(fieldname):
            c_fieldname = inputconst(Void, 'mutate_' + fieldname)
            llops.genop('jit_force_quasi_immutable', [vinst, c_fieldname])

    def is_quasi_immutable(self, fieldname):
        # True if 'fieldname' is declared quasi-immutable ('?' or '?[*]'
        # suffix) at this level or in any parent class
        search1 = fieldname + '?'
        search2 = fieldname + '?[*]'
        rbase = self
        while rbase.classdef is not None:
            if (search1 in rbase.immutable_field_set or
                search2 in rbase.immutable_field_set):
                return True
            rbase = rbase.rbase
        return False
    def new_instance(self, llops, classcallhop=None, nonmovable=False):
        """Build a new instance, without calling __init__."""
        flavor = self.gcflavor
        flags = {'flavor': flavor}
        if nonmovable:
            flags['nonmovable'] = True
        ctype = inputconst(Void, self.object_type)
        cflags = inputconst(Void, flags)
        vlist = [ctype, cflags]
        vptr = llops.genop('malloc', vlist,
                           resulttype=Ptr(self.object_type))
        # set the new object's '__class__' (typeptr) first
        ctypeptr = inputconst(CLASSTYPE, self.rclass.getvtable())
        self.setfield(vptr, '__class__', ctypeptr, llops)
        # initialize instance attributes from their defaults from the class
        if self.classdef is not None:
            flds = self.allinstancefields.keys()
            flds.sort()
            for fldname in flds:
                if fldname == '__class__':
                    continue
                mangled_name, r = self.allinstancefields[fldname]
                if r.lowleveltype is Void:
                    continue
                value = self.classdef.classdesc.read_attribute(fldname, None)
                if value is not None:
                    ll_value = r.convert_desc_or_const(value)
                    # don't write NULL GC pointers: we know that the malloc
                    # done above initialized at least the GC Ptr fields to
                    # NULL already, and that's true for all our GCs
                    if (isinstance(r.lowleveltype, Ptr) and
                        r.lowleveltype.TO._gckind == 'gc' and
                        not ll_value):
                        continue
                    cvalue = inputconst(r.lowleveltype, ll_value)
                    self.setfield(vptr, fldname, cvalue, llops,
                                  flags={'access_directly': True})
        return vptr
    def convert_const(self, value):
        """Convert the prebuilt Python instance 'value' to a low-level ptr."""
        if value is None:
            return self.null_instance()
        if isinstance(value, types.MethodType):
            value = value.im_self  # bound method -> instance
        bk = self.rtyper.annotator.bookkeeper
        try:
            classdef = bk.getuniqueclassdef(value.__class__)
        except KeyError:
            raise TyperError("no classdef: %r" % (value.__class__,))
        if classdef != self.classdef:
            # if the class does not match exactly, check that 'value' is an
            # instance of a subclass and delegate to that InstanceRepr
            if classdef.commonbase(self.classdef) != self.classdef:
                raise TyperError("not an instance of %r: %r" % (
                    self.classdef.name, value))
            rinstance = getinstancerepr(self.rtyper, classdef)
            result = rinstance.convert_const(value)
            return self.upcast(result)
        # common case
        return self.convert_const_exact(value)
    def convert_const_exact(self, value):
        # cache exactly one low-level struct per prebuilt Python instance
        try:
            return self.iprebuiltinstances[value]
        except KeyError:
            self.setup()
            result = self.create_instance()
            # insert into the cache before initializing, so that cyclic
            # references to 'value' find the struct being built
            self.iprebuiltinstances[value] = result
            self.initialize_prebuilt_instance(value, self.classdef, result)
            return result

    def get_reusable_prebuilt_instance(self):
        "Get a dummy prebuilt instance. Multiple calls reuse the same one."
        try:
            return self._reusable_prebuilt_instance
        except AttributeError:
            self.setup()
            result = self.create_instance()
            self._reusable_prebuilt_instance = result
            self.initialize_prebuilt_data(Ellipsis, self.classdef, result)
            return result

    # serializes recursive initialization of prebuilt instances
    _initialize_data_flattenrec = FlattenRecursion()

    def initialize_prebuilt_instance(self, value, classdef, result):
        # must fill in the hash cache before the other ones
        # (see test_circular_hash_initialization)
        self.initialize_prebuilt_hash(value, result)
        self._initialize_data_flattenrec(self.initialize_prebuilt_data,
                                         value, classdef, result)
    def get_ll_hash_function(self):
        # instances hash by identity (see ll_inst_hash)
        return ll_inst_hash
    get_ll_fasthash_function = get_ll_hash_function

    def rtype_type(self, hop):
        # lower 'type(x)': constant-fold if possible, else read '__class__'
        if hop.s_result.is_constant():
            return hop.inputconst(hop.r_result, hop.s_result.const)
        instance_repr = self.common_repr()
        vinst, = hop.inputargs(instance_repr)
        if hop.args_s[0].can_be_none():
            # the argument may be None: use the NULL-checking helper
            return hop.gendirectcall(ll_inst_type, vinst)
        else:
            return instance_repr.getfield(vinst, '__class__', hop.llops)
    def rtype_getattr(self, hop):
        """Lower 'getattr(instance, constant_attr_name)'."""
        if hop.s_result.is_constant():
            return hop.inputconst(hop.r_result, hop.s_result.const)
        attr = hop.args_s[1].const
        vinst, vattr = hop.inputargs(self, Void)
        if attr == '__class__' and hop.r_result.lowleveltype is Void:
            # special case for when the result of '.__class__' is a constant
            [desc] = hop.s_result.descriptions
            return hop.inputconst(Void, desc.pyobj)
        if attr in self.allinstancefields:
            return self.getfield(vinst, attr, hop.llops,
                                 flags=hop.args_s[0].flags)
        elif attr in self.rclass.allmethods:
            # special case for methods: represented as their 'self' only
            # (see MethodsPBCRepr)
            return hop.r_result.get_method_from_instance(self, vinst,
                                                         hop.llops)
        else:
            # class attribute: read it out of the vtable
            vcls = self.getfield(vinst, '__class__', hop.llops)
            return self.rclass.getclsfield(vcls, attr, hop.llops)
    def rtype_setattr(self, hop):
        """Lower 'setattr(instance, constant_attr_name, value)'."""
        attr = hop.args_s[1].const
        r_value = self.getfieldrepr(attr)
        vinst, vattr, vvalue = hop.inputargs(self, Void, r_value)
        self.setfield(vinst, attr, vvalue, hop.llops,
                      flags=hop.args_s[0].flags)

    def rtype_bool(self, hop):
        # the truth value of an instance is "pointer is not NULL"
        vinst, = hop.inputargs(self)
        return hop.genop('ptr_nonzero', [vinst], resulttype=Bool)
    def ll_str(self, i):  # doesn't work for non-gc classes!
        """Build the default str() of an instance out of the rstr
        prefix/infix/suffix parts, the class name, and a hex uid."""
        from rpython.rtyper.lltypesystem.ll_str import ll_int2hex
        from rpython.rlib.rarithmetic import r_uint
        if not i:
            return rstr.null_str
        instance = cast_pointer(OBJECTPTR, i)
        # Two choices: the first gives a fast answer but it can change
        # (typically only once) during the life of the object.
        #uid = r_uint(cast_ptr_to_int(i))
        uid = r_uint(llop.gc_id(lltype.Signed, i))
        #
        res = rstr.instance_str_prefix
        res = rstr.ll_strconcat(res, instance.typeptr.name)
        res = rstr.ll_strconcat(res, rstr.instance_str_infix)
        res = rstr.ll_strconcat(res, ll_int2hex(uid, False))
        res = rstr.ll_strconcat(res, rstr.instance_str_suffix)
        return res
    def get_ll_eq_function(self):
        return None  # defaults to compare by identity ('==' on pointers)

    def can_ll_be_null(self, s_value):
        # the low-level pointer can be NULL iff the annotation allows None
        return s_value.can_be_none()
    def check_graph_of_del_does_not_call_too_much(self, graph):
        # RPython-level __del__() methods should not do "too much".
        # In the PyPy Python interpreter, they usually do simple things
        # like file.__del__() closing the file descriptor; or if they
        # want to do more like call an app-level __del__() method, they
        # enqueue the object instead, and the actual call is done later.
        #
        # Here, as a quick way to check "not doing too much", we check
        # that from no RPython-level __del__() method we can reach a
        # JitDriver.
        #
        # XXX wrong complexity, but good enough because the set of
        # reachable graphs should be small
        callgraph = self.rtyper.annotator.translator.callgraph.values()
        # fixed-point iteration: grow 'seen' (callee -> caller, for the
        # error trace) until no new reachable graph is found
        seen = {graph: None}
        while True:
            oldlength = len(seen)
            for caller, callee in callgraph:
                if caller in seen and callee not in seen:
                    func = getattr(callee, 'func', None)
                    if getattr(func, '_dont_reach_me_in_del_', False):
                        lst = [str(callee)]
                        g = caller
                        while g:
                            lst.append(str(g))
                            g = seen.get(g)
                        lst.append('')
                        raise TyperError("the RPython-level __del__() method "
                                         "in %r calls:%s" %
                                         (graph, '\n\t'.join(lst[::-1])))
                    if getattr(func, '_cannot_really_call_random_things_',
                               False):
                        continue
                    seen[callee] = caller
            if len(seen) == oldlength:
                break
    def common_repr(self):  # -> object or nongcobject reprs
        # the most general instance repr of the same gc flavor
        return getinstancerepr(self.rtyper, None, self.gcflavor)

    def _get_field(self, attr):
        # raises KeyError if 'attr' is not an instance field at this level
        return self.fields[attr]

    def null_instance(self):
        # the NULL pointer of this instance type
        return nullptr(self.object_type)

    def upcast(self, result):
        # cast a subclass instance pointer to this (parent) repr's type
        return cast_pointer(self.lowleveltype, result)

    def create_instance(self):
        # allocate an empty, immortal prebuilt instance struct
        return malloc(self.object_type, flavor=self.gcflavor, immortal=True)
    def initialize_prebuilt_data(self, value, classdef, result):
        # Copy the attribute values of the prebuilt Python instance 'value'
        # into the low-level struct 'result', one class level at a time.
        if self.classdef is not None:
            # recursively build the parent part of the instance
            self.rbase.initialize_prebuilt_data(value, classdef, result.super)
            # then add instance attributes from this level
            for name, (mangled_name, r) in self.fields.items():
                if r.lowleveltype is Void:
                    llattrvalue = None
                else:
                    try:
                        attrvalue = getattr(value, name)
                    except AttributeError:
                        # fall back to the class-level default value
                        attrvalue = self.classdef.classdesc.read_attribute(
                            name, None)
                        if attrvalue is None:
                            # Ellipsis from get_reusable_prebuilt_instance()
                            #if value is not Ellipsis:
                                #warning("prebuilt instance %r has no "
                                #        "attribute %r" % (value, name))
                            llattrvalue = r.lowleveltype._defl()
                        else:
                            llattrvalue = r.convert_desc_or_const(attrvalue)
                    else:
                        llattrvalue = r.convert_const(attrvalue)
                setattr(result, mangled_name, llattrvalue)
        else:
            # OBJECT part
            rclass = getclassrepr(self.rtyper, classdef)
            result.typeptr = rclass.getvtable()
    def initialize_prebuilt_hash(self, value, result):
        # if the prebuilt instance pre-declared its identity hash, seed
        # the low-level object with it
        llattrvalue = getattr(value, '__precomputed_identity_hash', None)
        if llattrvalue is not None:
            lltype.init_identity_hash(result, llattrvalue)
def getfieldrepr(self, attr):
"""Return the repr used for the given attribute."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
return r
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getfieldrepr(attr)
def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
"""Read the given attribute (or __class__ for the type) of 'vinst'."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
cname = inputconst(Void, mangled_name)
if force_cast:
vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
self.hook_access_field(vinst, cname, llops, flags)
return llops.genop('getfield', [vinst, cname], resulttype=r)
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
return self.rbase.getfield(vinst, attr, llops, force_cast=True,
flags=flags)
def setfield(self, vinst, attr, vvalue, llops, force_cast=False,
flags={}):
"""Write the given attribute (or __class__ for the type) of 'vinst'."""
if attr in self.fields:
mangled_name, r = self.fields[attr]
cname = inputconst(Void, mangled_name)
if force_cast:
vinst = llops.genop('cast_pointer', [vinst], resulttype=self)
self.hook_access_field(vinst, cname, llops, flags)
self.hook_setfield(vinst, attr, llops)
llops.genop('setfield', [vinst, cname, vvalue])
else:
if self.classdef is None:
raise MissingRTypeAttribute(attr)
self.rbase.setfield(vinst, attr, vvalue, llops, force_cast=True,
flags=flags)
    def rtype_isinstance(self, hop):
        # Lower 'isinstance(obj, cls)'.  For a constant 'cls', use a pair
        # of helpers specialized on its subclass-id range.
        class_repr = get_type_repr(hop.rtyper)
        instance_repr = self.common_repr()
        v_obj, v_cls = hop.inputargs(instance_repr, class_repr)
        if isinstance(v_cls, Constant):
            cls = v_cls.value
            llf, llf_nonnull = make_ll_isinstance(self.rtyper, cls)
            # skip the NULL check when the annotator proved non-None
            if hop.args_s[0].can_be_None:
                return hop.gendirectcall(llf, v_obj)
            else:
                return hop.gendirectcall(llf_nonnull, v_obj)
        else:
            return hop.gendirectcall(ll_isinstance, v_obj, v_cls)
class __extend__(pairtype(InstanceRepr, InstanceRepr)):
    # conversions and comparisons between two instance reprs
    # (NB: py2-only tuple parameters, as used by the pairtype machinery)
    def convert_from_to((r_ins1, r_ins2), v, llops):
        # which is a subclass of which?
        if r_ins1.classdef is None or r_ins2.classdef is None:
            basedef = None
        else:
            basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
        if basedef == r_ins2.classdef:
            # r_ins1 is an instance of the subclass: converting to parent
            v = llops.genop('cast_pointer', [v],
                            resulttype=r_ins2.lowleveltype)
            return v
        elif basedef == r_ins1.classdef:
            # r_ins2 is an instance of the subclass: potentially unsafe
            # casting, but we do it anyway (e.g. the annotator produces
            # such casts after a successful isinstance() check)
            v = llops.genop('cast_pointer', [v],
                            resulttype=r_ins2.lowleveltype)
            return v
        else:
            return NotImplemented

    def rtype_is_((r_ins1, r_ins2), hop):
        if r_ins1.gcflavor != r_ins2.gcflavor:
            # obscure logic, the is can be true only if both are None
            v_ins1, v_ins2 = hop.inputargs(
                r_ins1.common_repr(), r_ins2.common_repr())
            return hop.gendirectcall(ll_both_none, v_ins1, v_ins2)
        if r_ins1.classdef is None or r_ins2.classdef is None:
            basedef = None
        else:
            basedef = r_ins1.classdef.commonbase(r_ins2.classdef)
        # compare both sides converted to their common base repr
        r_ins = getinstancerepr(r_ins1.rtyper, basedef, r_ins1.gcflavor)
        return pairtype(Repr, Repr).rtype_is_(pair(r_ins, r_ins), hop)

    rtype_eq = rtype_is_

    def rtype_ne(rpair, hop):
        # '!=' is the negation of '=='
        v = rpair.rtype_eq(hop)
        return hop.genop("bool_not", [v], resulttype=Bool)
# ____________________________________________________________
def rtype_new_instance(rtyper, classdef, llops, classcallhop=None,
                       nonmovable=False):
    # module-level entry point: allocate a fresh instance of 'classdef'
    # without calling __init__ (see InstanceRepr.new_instance)
    rinstance = getinstancerepr(rtyper, classdef)
    return rinstance.new_instance(llops, classcallhop, nonmovable=nonmovable)
def ll_inst_hash(ins):
    """Identity-based hash of an instance; the NULL pointer hashes to 0."""
    if ins:
        return lltype.identityhash(ins)
    return 0  # for None
_missing = object()  # sentinel: lets callers pass default=None explicitly
def fishllattr(inst, name, default=_missing):
    """Fetch low-level field 'inst_<name>' from 'inst' or a super part.

    Debugging helper: walks the 'super' chain until the mangled field is
    found; returns 'default' if given, else raises AttributeError.
    """
    p = widest = lltype.normalizeptr(inst)
    while True:
        try:
            return getattr(p, 'inst_' + name)
        except AttributeError:
            pass
        try:
            p = p.super
        except AttributeError:
            break
    if default is _missing:
        raise AttributeError("%s has no field %s" %
                             (lltype.typeOf(widest), name))
    return default
def attr_reverse_size(field):
    # This is used to sort the instance or class attributes by decreasing
    # "likely size", as reported by rffi.sizeof(), to minimize padding
    # holes in C.  Fields should first be sorted by name, just to minimize
    # randomness, and then (stably) sorted by 'attr_reverse_size'.
    #
    # 'field' is a (mangled_name, TYPE) pair; the explicit unpacking below
    # replaces the Python-2-only tuple parameter syntax (PEP 3113).
    _, T = field
    if T is lltype.Void:
        return None
    from rpython.rtyper.lltypesystem.rffi import sizeof
    try:
        return -sizeof(T)
    except Exception:  # types that rffi cannot size
        return None
# ____________________________________________________________
#
# Low-level implementation of operations on classes and instances
# doesn't work for non-gc stuff!
def ll_cast_to_object(obj):
    # view any instance pointer as the common OBJECT pointer
    return cast_pointer(OBJECTPTR, obj)

# doesn't work for non-gc stuff!
def ll_type(obj):
    # run-time type of an instance: the typeptr in its OBJECT header
    return cast_pointer(OBJECTPTR, obj).typeptr

def ll_issubclass(subcls, cls):
    # True when subcls' subclass id falls inside cls' id range
    # (bounds convention per llop.int_between)
    return llop.int_between(Bool,
                            cls.subclassrange_min,
                            subcls.subclassrange_min,
                            cls.subclassrange_max)

def ll_issubclass_const(subcls, minid, maxid):
    # same check with the range of the constant class inlined
    return llop.int_between(Bool, minid, subcls.subclassrange_min, maxid)
def ll_isinstance(obj, cls):
    """isinstance at run time; obj should be cast to OBJECT or NONGCOBJECT."""
    if not obj:
        # NULL is not an instance of anything
        return False
    return ll_issubclass(obj.typeptr, cls)
def make_ll_isinstance(rtyper, cls):
    """Build (and cache per 'cls._obj') isinstance helpers specialized
    for the constant class 'cls'.

    Returns (ll_isinstance_const, ll_isinstance_const_nonnull); the first
    variant additionally accepts a NULL 'obj'.
    """
    try:
        return rtyper.isinstance_helpers[cls._obj]
    except KeyError:
        minid = cls.subclassrange_min
        maxid = cls.subclassrange_max
        if maxid - minid > 1:
            # 'cls' has subclasses: test the subclass-id range.
            # (was: 'minid.number_with_subclasses()', which is not a method
            # of the plain integer 'minid' and would raise AttributeError;
            # a leaf class covers exactly one id, i.e. maxid == minid + 1)
            def ll_isinstance_const_nonnull(obj):
                objid = obj.typeptr.subclassrange_min
                return llop.int_between(Bool, minid, objid, maxid)
        else:
            # leaf class: a direct vtable-pointer comparison is enough
            def ll_isinstance_const_nonnull(obj):
                return obj.typeptr == cls
        def ll_isinstance_const(obj):
            if not obj:
                return False
            return ll_isinstance_const_nonnull(obj)
        result = (ll_isinstance_const, ll_isinstance_const_nonnull)
        rtyper.isinstance_helpers[cls._obj] = result
        return result
def ll_runtime_type_info(obj):
    # GC support: fetch the rtti stored in the object's vtable
    return obj.typeptr.rtti
def ll_inst_type(obj):
    """Run-time type(obj), where type(None) is the NULL typeptr."""
    if not obj:
        # type(None) -> NULL (for now)
        return nullptr(typeOf(obj).TO.typeptr.TO)
    return obj.typeptr
def ll_both_none(ins1, ins2):
    """Helper for 'is' across gc flavors: only True when both are NULL."""
    if ins1:
        return False
    return not ins2
# ____________________________________________________________
def feedllattr(inst, name, llvalue):
    """Store 'llvalue' into low-level field 'inst_<name>' of 'inst'.

    Debugging helper, mirror of fishllattr(): walks the 'super' chain
    until the mangled field exists; raises AttributeError otherwise.
    """
    p = widest = lltype.normalizeptr(inst)
    while True:
        try:
            return setattr(p, 'inst_' + name, llvalue)
        except AttributeError:
            pass
        try:
            p = p.super
        except AttributeError:
            break
    raise AttributeError("%s has no field %s" % (lltype.typeOf(widest),
                                                 name))
def declare_type_for_typeptr(vtable, TYPE):
    """Hack for custom low-level-only 'subclasses' of OBJECT:
    call this somewhere annotated, in order to declare that it is
    of the given TYPE and has got the corresponding vtable."""

class Entry(ExtRegistryEntry):
    # registers declare_type_for_typeptr() with the annotator/rtyper;
    # the function body above is intentionally empty
    _about_ = declare_type_for_typeptr
    def compute_result_annotation(self, s_vtable, s_TYPE):
        # both arguments must be translation-time constants
        assert s_vtable.is_constant()
        assert s_TYPE.is_constant()
        return annmodel.s_None
    def specialize_call(self, hop):
        vtable = hop.args_v[0].value
        TYPE = hop.args_v[1].value
        assert lltype.typeOf(vtable) == CLASSTYPE
        assert isinstance(TYPE, GcStruct)
        assert lltype._castdepth(TYPE, OBJECT) > 0
        # record the mapping; the call itself lowers to a Void constant
        hop.rtyper.set_type_for_typeptr(vtable, TYPE)
        hop.exception_cannot_occur()
        return hop.inputconst(lltype.Void, None)
``` |
[
{
"content": "Return the code unaltered:\n```python\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport erpnext\nimport json\nimport itertools\nfrom frappe import msgpri... | [
{
"content": "Return the code unaltered:\n<|memory_start|>```python\n# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nimport erpnext\nimport json\nimport itertools\nfrom frap... | ```python
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import erpnext
import json
import itertools
from frappe import msgprint, _
from frappe.utils import (cstr, flt, cint, getdate, now_datetime, formatdate,
strip, get_timestamp, random_string)
from frappe.website.website_generator import WebsiteGenerator
from erpnext.setup.doctype.item_group.item_group import invalidate_cache_for, get_parent_item_groups
from frappe.website.render import clear_cache
from frappe.website.doctype.website_slideshow.website_slideshow import get_slideshow
from erpnext.controllers.item_variant import (get_variant, copy_attributes_to_variant,
make_variant_item_code, validate_item_variant_attributes, ItemVariantExistsError)
# Item-specific ValidationError; per the name, presumably raised by the
# reorder-level validation (definition site not shown here)
class DuplicateReorderRows(frappe.ValidationError): pass
class Item(WebsiteGenerator):
	# website-generator config: items are published when 'show_in_website'
	# is set, rendered with the item.html template and never cached
	website = frappe._dict(
		page_title_field = "item_name",
		condition_field = "show_in_website",
		template = "templates/generators/item.html",
		no_cache = 1
	)
	def onload(self):
		# push volatile info to the client: whether stock ledger entries
		# exist, and (for fixed assets) whether a submitted Asset exists
		super(Item, self).onload()
		self.set_onload('sle_exists', self.check_if_sle_exists())
		if self.is_fixed_asset:
			asset = frappe.db.get_all("Asset", filters={"item_code": self.name, "docstatus": 1}, limit=1)
			self.set_onload("asset_exists", True if asset else False)
	def autoname(self):
		# Naming Series mode: variants derive their code from the template
		# item's name; plain items get a series-generated code.  In any
		# other mode the user must supply the item code explicitly.
		if frappe.db.get_default("item_naming_by")=="Naming Series":
			if self.variant_of:
				if not self.item_code:
					template_item_name = frappe.db.get_value("Item", self.variant_of, "item_name")
					self.item_code = make_variant_item_code(self.variant_of, template_item_name, self)
			else:
				from frappe.model.naming import make_autoname
				self.item_code = make_autoname(self.naming_series+'.#####')
		elif not self.item_code:
			msgprint(_("Item Code is mandatory because Item is not automatically numbered"), raise_exception=1)
		self.item_code = strip(self.item_code)
		# the document name is the item code itself
		self.name = self.item_code
	def before_insert(self):
		# default the description to the item name, and mark the item
		# for publishing on the Hub
		if not self.description:
			self.description = self.item_name
		self.publish_in_hub = 1

	def after_insert(self):
		'''set opening stock and item price'''
		if self.standard_rate:
			self.add_price()
		if self.opening_stock:
			self.set_opening_stock()
	def validate(self):
		"""Central validation: normalize name/description fallbacks, run
		the field-specific checks, and snapshot old group values so
		on_update can detect changes."""
		super(Item, self).validate()
		if not self.item_name:
			self.item_name = self.item_code
		if not self.description:
			self.description = self.item_name
		self.validate_uom()
		self.add_default_uom_in_conversion_factor_table()
		self.validate_conversion_factor()
		self.validate_item_type()
		self.check_for_active_boms()
		self.fill_customer_code()
		self.check_item_tax()
		self.validate_barcode()
		self.cant_change()
		self.validate_warehouse_for_reorder()
		self.update_item_desc()
		# any edit invalidates the previous Hub sync
		self.synced_with_hub = 0
		self.validate_has_variants()
		self.validate_attributes()
		self.validate_variant_attributes()
		self.validate_website_image()
		self.make_thumbnail()
		self.validate_fixed_asset()
		if not self.get("__islocal"):
			# remember the stored group assignments for change detection
			self.old_item_group = frappe.db.get_value(self.doctype, self.name, "item_group")
			self.old_website_item_groups = frappe.db.sql_list("""select item_group
				from `tabWebsite Item Group`
				where parentfield='website_item_groups' and parenttype='Item' and parent=%s""", self.name)
	def on_update(self):
		# keep website cache, item prices, and variant templates in sync
		invalidate_cache_for_item(self)
		self.validate_name_with_item_group()
		self.update_item_price()
		self.update_template_item()

	def add_price(self, price_list=None):
		'''Add a new price'''
		# default to the selling price list from Selling Settings, or fall
		# back to the standard selling price list
		if not price_list:
			price_list = (frappe.db.get_single_value('Selling Settings', 'selling_price_list')
				or frappe.db.get_value('Price List', _('Standard Selling')))
		if price_list:
			item_price = frappe.get_doc({
				"doctype": "Item Price",
				"price_list": price_list,
				"item_code": self.name,
				"currency": erpnext.get_default_currency(),
				"price_list_rate": self.standard_rate
			})
			item_price.insert()
def set_opening_stock(self):
    '''set opening stock'''
    # Serialised / batched items cannot get an automatic opening entry.
    if not self.is_stock_item or self.has_serial_no or self.has_batch_no:
        return
    # Valuation rate defaults to the standard rate when not given explicitly.
    if not self.valuation_rate and self.standard_rate:
        self.valuation_rate = self.standard_rate
    if not self.valuation_rate:
        frappe.throw(_("Valuation Rate is mandatory if Opening Stock entered"))
    from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry
    # default warehouse, or Stores
    default_warehouse = (frappe.db.get_single_value('Stock Settings', 'default_warehouse')
        or frappe.db.get_value('Warehouse', {'warehouse_name': _('Stores')}))
    if default_warehouse:
        stock_entry = make_stock_entry(item_code=self.name, target=default_warehouse,
            qty=self.opening_stock, rate=self.valuation_rate)
        stock_entry.add_comment("Comment", _("Opening Stock"))
def make_route(self):
    """Build the website route under the item group's route (no-op if already set)."""
    if self.route:
        return
    group_route = cstr(frappe.db.get_value('Item Group', self.item_group, 'route'))
    # Random suffix keeps slugs unique for identically-named items.
    slug = self.scrub(self.item_name + '-' + random_string(5))
    return group_route + '/' + slug
def get_parents(self, context):
    """Set breadcrumb parents on the web-page context from the item's group."""
    item_group, route = frappe.db.get_value('Item Group', self.item_group, ['name', 'route'])
    context.parents = [{'name': route, 'label': item_group}]
def validate_website_image(self):
    """Validate if the website image is a public file"""
    auto_set_website_image = False
    # Fall back to the main image when no website image is set explicitly.
    if not self.website_image and self.image:
        auto_set_website_image = True
        self.website_image = self.image
    if not self.website_image:
        return
    # find if website image url exists as public
    file_doc = frappe.get_all("File", filters={
            "file_url": self.website_image
        }, fields=["name", "is_private"], order_by="is_private asc", limit_page_length=1)
    if file_doc:
        file_doc = file_doc[0]
    # Warn only when the user set the image explicitly; auto-set images are
    # dropped silently.
    if not file_doc:
        if not auto_set_website_image:
            frappe.msgprint(_("Website Image {0} attached to Item {1} cannot be found")
                .format(self.website_image, self.name))
        self.website_image = None
    elif file_doc.is_private:
        if not auto_set_website_image:
            frappe.msgprint(_("Website Image should be a public file or website URL"))
        self.website_image = None
def make_thumbnail(self):
    """Make a thumbnail of `website_image`"""
    import requests.exceptions
    # If the website image changed on an existing doc, force regeneration.
    if not self.is_new() and self.website_image != frappe.db.get_value(self.doctype, self.name, "website_image"):
        self.thumbnail = None
    if self.website_image and not self.thumbnail:
        file_doc = None
        try:
            file_doc = frappe.get_doc("File", {
                "file_url": self.website_image,
                "attached_to_doctype": "Item",
                "attached_to_name": self.name
            })
        except frappe.DoesNotExistError:
            pass
            # cleanup
            # NOTE(review): pops the "not found" message pushed by get_doc;
            # assumes message_log is non-empty here — TODO confirm.
            frappe.local.message_log.pop()
        except requests.exceptions.HTTPError:
            frappe.msgprint(_("Warning: Invalid attachment {0}").format(self.website_image))
            self.website_image = None
        except requests.exceptions.SSLError:
            frappe.msgprint(_("Warning: Invalid SSL certificate on attachment {0}").format(self.website_image))
            self.website_image = None
        # for CSV import
        if self.website_image and not file_doc:
            try:
                file_doc = frappe.get_doc({
                    "doctype": "File",
                    "file_url": self.website_image,
                    "attached_to_doctype": "Item",
                    "attached_to_name": self.name
                }).insert()
            except IOError:
                self.website_image = None
        if file_doc:
            if not file_doc.thumbnail_url:
                file_doc.make_thumbnail()
            self.thumbnail = file_doc.thumbnail_url
def validate_fixed_asset(self):
    """A fixed-asset item must be non-stock and must carry an asset category."""
    if not self.is_fixed_asset:
        return
    if self.is_stock_item:
        frappe.throw(_("Fixed Asset Item must be a non-stock item."))
    if not self.asset_category:
        frappe.throw(_("Asset Category is mandatory for Fixed Asset item"))
def get_context(self, context):
    """Build the website page context: search bar, breadcrumbs, variants, attributes."""
    context.show_search=True
    context.search_link = '/product_search'
    context.parent_groups = get_parent_item_groups(self.item_group) + \
        [{"name": self.name}]
    self.set_variant_context(context)
    self.set_attribute_context(context)
    self.set_disabled_attributes(context)
    self.get_parents(context)
    return context
def set_variant_context(self, context):
    """For template items: load website-visible variants and merge the selected
    variant's web fields into the page context."""
    if self.has_variants:
        context.no_cache = True
        # load variants
        # also used in set_attribute_context
        context.variants = frappe.get_all("Item",
            filters={"variant_of": self.name, "show_variant_in_website": 1},
            order_by="name asc")
        variant = frappe.form_dict.variant
        if not variant and context.variants:
            # the case when the item is opened for the first time from its list
            variant = context.variants[0]
        if variant:
            context.variant = frappe.get_doc("Item", variant)
            # Variant values override the template's web fields.
            for fieldname in ("website_image", "web_long_description", "description",
                    "website_specifications"):
                if context.variant.get(fieldname):
                    value = context.variant.get(fieldname)
                    if isinstance(value, list):
                        # Child-table rows must be plain dicts for the template engine.
                        value = [d.as_dict() for d in value]
                    context[fieldname] = value
    if self.slideshow:
        # Prefer the selected variant's slideshow when it has one.
        if context.variant and context.variant.slideshow:
            context.update(get_slideshow(context.variant))
        else:
            context.update(get_slideshow(self))
def set_attribute_context(self, context):
    """Collect available and currently-selected attribute values across variants
    for the item web page."""
    if self.has_variants:
        attribute_values_available = {}
        context.attribute_values = {}
        context.selected_attributes = {}
        # load attributes
        for v in context.variants:
            v.attributes = frappe.get_all("Item Variant Attribute",
                fields=["attribute", "attribute_value"], filters={"parent": v.name})
            for attr in v.attributes:
                values = attribute_values_available.setdefault(attr.attribute, [])
                if attr.attribute_value not in values:
                    values.append(attr.attribute_value)
                # Remember the selected variant's own values.
                if v.name==context.variant.name:
                    context.selected_attributes[attr.attribute] = attr.attribute_value
        # filter attributes, order based on attribute table
        for attr in self.attributes:
            values = context.attribute_values.setdefault(attr.attribute, [])
            # Numeric attributes sort numerically; others follow the defined sequence.
            if cint(frappe.db.get_value("Item Attribute", attr.attribute, "numeric_values")):
                for val in sorted(attribute_values_available.get(attr.attribute, []), key=flt):
                    values.append(val)
            else:
                # get list of values defined (for sequence)
                for attr_value in frappe.db.get_all("Item Attribute Value",
                        fields=["attribute_value"], filters={"parent": attr.attribute}, order_by="idx asc"):
                    if attr_value.attribute_value in attribute_values_available.get(attr.attribute, []):
                        values.append(attr_value.attribute_value)
        context.variant_info = json.dumps(context.variants)
def set_disabled_attributes(self, context):
    """Disable selection options of attribute combinations that do not result in a variant"""
    if not self.attributes or not self.has_variants:
        return
    context.disabled_attributes = {}
    attributes = [attr.attribute for attr in self.attributes]
    def find_variant(combination):
        # Return True when `combination` is a subset of some variant's values.
        for variant in context.variants:
            if len(variant.attributes) < len(attributes):
                continue
            if "combination" not in variant:
                # Cache the variant's values ordered like self.attributes.
                ref_combination = []
                for attr in variant.attributes:
                    idx = attributes.index(attr.attribute)
                    ref_combination.insert(idx, attr.attribute_value)
                variant["combination"] = ref_combination
            if not (set(combination) - set(variant["combination"])):
                # check if the combination is a subset of a variant combination
                # eg. [Blue, 0.5] is a possible combination if exists [Blue, Large, 0.5]
                return True
    for i, attr in enumerate(self.attributes):
        # The first attribute is always fully selectable.
        if i==0:
            continue
        combination_source = []
        # loop through previous attributes
        for prev_attr in self.attributes[:i]:
            combination_source.append([context.selected_attributes.get(prev_attr.attribute)])
        combination_source.append(context.attribute_values[attr.attribute])
        # Disable each candidate value that yields no existing variant.
        for combination in itertools.product(*combination_source):
            if not find_variant(combination):
                context.disabled_attributes.setdefault(attr.attribute, []).append(combination[-1])
def add_default_uom_in_conversion_factor_table(self):
    """Ensure the stock UOM appears in the conversion table with factor 1,
    and drop every other row claiming a conversion factor of 1."""
    existing_uoms = [row.uom for row in self.get("uoms")]
    if self.stock_uom not in existing_uoms:
        row = self.append('uoms', {})
        row.uom = self.stock_uom
        row.conversion_factor = 1
    # A factor of 1 is reserved for the stock UOM itself.
    redundant = [row for row in self.get("uoms")
        if row.conversion_factor == 1 and row.uom != self.stock_uom]
    for row in redundant:
        self.remove(row)
def update_template_tables(self):
    """Copy item taxes (always) and re-order levels (only if empty) from the template."""
    template = frappe.get_doc("Item", self.variant_of)
    # Taxes are always inherited from the template.
    for tax in template.get("taxes"):
        self.append("taxes", {"tax_type": tax.tax_type, "tax_rate": tax.tax_rate})
    # Re-order levels are copied only when the variant defines none of its own.
    if not self.get("reorder_levels"):
        fields = ("warehouse", "warehouse_reorder_level",
            "warehouse_reorder_qty", "material_request_type")
        for level in template.get("reorder_levels"):
            self.append("reorder_levels", {key: level.get(key) for key in fields})
def validate_conversion_factor(self):
    """Reject duplicate UOM rows and a non-1 factor on the stock UOM."""
    seen = []
    for row in self.get('uoms'):
        uom = cstr(row.uom)
        if uom in seen:
            frappe.throw(_("Unit of Measure {0} has been entered more than once in Conversion Factor Table").format(row.uom))
        seen.append(uom)
        # The stock UOM converts to itself, so its factor must be exactly 1.
        if row.uom and uom == cstr(self.stock_uom) and flt(row.conversion_factor) != 1:
            frappe.throw(_("Conversion factor for default Unit of Measure must be 1 in row {0}").format(row.idx))
def validate_item_type(self):
    """Serial-number tracking requires a stock item; clear the series when unused."""
    if self.has_serial_no == 1 and self.is_stock_item == 0:
        msgprint(_("'Has Serial No' can not be 'Yes' for non-stock item"), raise_exception=1)
    if self.serial_no_series and self.has_serial_no == 0:
        self.serial_no_series = None
def check_for_active_boms(self):
    """The default BOM must belong to this item or to its template."""
    if not self.default_bom:
        return
    bom_item = frappe.db.get_value("BOM", self.default_bom, "item")
    if bom_item not in (self.name, self.variant_of):
        frappe.throw(_("Default BOM ({0}) must be active for this item or its template").format(bom_item))
def fill_customer_code(self):
    """ Append all the customer codes and insert into "customer_code" field of item table """
    self.customer_code = ','.join(d.ref_code for d in self.get('customer_items'))
def check_item_tax(self):
    """Check whether Tax Rate is not entered twice for same Tax Type"""
    seen = []
    for row in self.get('taxes'):
        if not row.tax_type:
            continue
        account_type = frappe.db.get_value("Account", row.tax_type, "account_type")
        # Only tax-like or P&L accounts may appear in the item tax table.
        if account_type not in ['Tax', 'Chargeable', 'Income Account', 'Expense Account']:
            frappe.throw(_("Item Tax Row {0} must have account of type Tax or Income or Expense or Chargeable").format(row.idx))
        elif row.tax_type in seen:
            frappe.throw(_("{0} entered twice in Item Tax").format(row.tax_type))
        else:
            seen.append(row.tax_type)
def validate_barcode(self):
    """Barcodes must be unique across all items."""
    if self.barcode:
        duplicate = frappe.db.sql("""select name from tabItem where barcode = %s
            and name != %s""", (self.barcode, self.name))
        if duplicate:
            frappe.throw(_("Barcode {0} already used in Item {1}").format(self.barcode, duplicate[0][0]))
def cant_change(self):
    """Block changes to stock-critical flags once transactions exist for the item."""
    if not self.get("__islocal"):
        to_check = ("has_serial_no", "is_stock_item",
            "valuation_method", "has_batch_no", "is_fixed_asset")
        vals = frappe.db.get_value("Item", self.name, to_check, as_dict=True)
        # An unset valuation method falls back to the global default (FIFO).
        if not vals.get('valuation_method') and self.get('valuation_method'):
            vals['valuation_method'] = frappe.db.get_single_value("Stock Settings", "valuation_method") or "FIFO"
        if vals:
            for key in to_check:
                if cstr(self.get(key)) != cstr(vals.get(key)):
                    # NOTE(review): `break` (not `continue`) stops checking the
                    # remaining keys after the first allowed change — verify
                    # this is intended.
                    if not self.check_if_linked_document_exists(key):
                        break # no linked document, allowed
                    else:
                        frappe.throw(_("As there are existing transactions against item {0}, you can not change the value of {1}").format(self.name, frappe.bold(self.meta.get_label(key))))
        # "Is Fixed Asset" cannot be cleared while a submitted Asset exists.
        if vals and not self.is_fixed_asset and self.is_fixed_asset != vals.is_fixed_asset:
            asset = frappe.db.get_all("Asset", filters={"item_code": self.name, "docstatus": 1}, limit=1)
            if asset:
                frappe.throw(_('"Is Fixed Asset" cannot be unchecked, as Asset record exists against the item'))
def check_if_linked_document_exists(self, key):
    """Return True when a submitted stock/sales/purchase document references
    this item (so the field *key* must not change); otherwise fall through
    (implicitly returning None, which is falsy).

    NOTE(review): the trailing reorder-level validation looks misplaced inside
    a boolean predicate — kept here for behavioural compatibility.
    """
    linked_doctypes = ["Delivery Note Item", "Sales Invoice Item", "Purchase Receipt Item",
        "Purchase Invoice Item", "Stock Entry Detail", "Stock Reconciliation Item"]
    # For "Is Stock Item", following doctypes is important
    # because reserved_qty, ordered_qty and requested_qty updated from these doctypes
    if key == "is_stock_item":
        linked_doctypes += ["Sales Order Item", "Purchase Order Item", "Material Request Item"]
    # The Production Order lookup does not depend on the loop variable;
    # query it once instead of once per linked doctype.
    if frappe.db.get_value("Production Order",
            filters={"production_item": self.name, "docstatus": 1}):
        return True
    for doctype in linked_doctypes:
        if frappe.db.get_value(doctype, filters={"item_code": self.name, "docstatus": 1}):
            return True
    for d in self.get("reorder_levels"):
        if d.warehouse_reorder_level and not d.warehouse_reorder_qty:
            frappe.throw(_("Row #{0}: Please set reorder quantity").format(d.idx))
def validate_warehouse_for_reorder(self):
    """Each warehouse may appear at most once in the re-order levels table."""
    warehouse = []
    for i in self.get("reorder_levels"):
        if i.get("warehouse") and i.get("warehouse") not in warehouse:
            warehouse += [i.get("warehouse")]
        else:
            # NOTE(review): a row with NO warehouse set also lands here and is
            # reported as a duplicate — verify this is the intended message.
            frappe.throw(_("Row {0}: An Reorder entry already exists for this warehouse {1}")
                .format(i.idx, i.warehouse), DuplicateReorderRows)
def check_if_sle_exists(self):
    """Return 'exists' when any Stock Ledger Entry references this item, else 'not exists'."""
    records = frappe.db.sql("""select name from `tabStock Ledger Entry`
        where item_code = %s""", self.name)
    return 'exists' if records else 'not exists'
def validate_name_with_item_group(self):
    """Item and Item Group names must not collide."""
    # causes problem with tree build
    if frappe.db.exists("Item Group", self.name):
        frappe.throw(_("An Item Group exists with same name, please change the item name or rename the item group"))
def update_item_price(self):
    """Keep the denormalised item_name/description on Item Price rows in sync."""
    frappe.db.sql("""update `tabItem Price` set item_name=%s,
        item_description=%s, modified=NOW() where item_code=%s""",
        (self.item_name, self.description, self.name))
def on_trash(self):
    """Delete dependent bins, prices and variant items along with this item."""
    super(Item, self).on_trash()
    # NOTE(review): uses self.item_code here but self.name below — normally
    # identical (name is set from item_code in autoname); confirm before relying on it.
    frappe.db.sql("""delete from tabBin where item_code=%s""", self.item_code)
    frappe.db.sql("delete from `tabItem Price` where item_code=%s", self.name)
    for variant_of in frappe.get_all("Item", filters={"variant_of": self.name}):
        frappe.delete_doc("Item", variant_of.name)
def before_rename(self, old_name, new_name, merge=False):
    """Pre-rename hook: keep item_name in sync and, on merge, ensure the target
    exists and shares all stock-critical properties."""
    # When item_name mirrored the old code, rename it too.
    if self.item_name==old_name:
        frappe.db.set_value("Item", old_name, "item_name", new_name)
    if merge:
        # Validate properties before merging
        if not frappe.db.exists("Item", new_name):
            frappe.throw(_("Item {0} does not exist").format(new_name))
        field_list = ["stock_uom", "is_stock_item", "has_serial_no", "has_batch_no"]
        new_properties = [cstr(d) for d in frappe.db.get_value("Item", new_name, field_list)]
        if new_properties != [cstr(self.get(fld)) for fld in field_list]:
            frappe.throw(_("To merge, following properties must be same for both items")
                + ": \n" + ", ".join([self.meta.get_label(fld) for fld in field_list]))
def after_rename(self, old_name, new_name, merge):
    """Post-rename hook: refresh caches, keep item_code == name, and on merge
    repair rates and bins; always re-key stored item-wise tax JSON."""
    if self.route:
        invalidate_cache_for_item(self)
        clear_cache(self.route)
    # item_code always mirrors the document name.
    frappe.db.set_value("Item", new_name, "item_code", new_name)
    if merge:
        self.set_last_purchase_rate(new_name)
        self.recalculate_bin_qty(new_name)
    # Re-key item-wise tax detail JSON from the old to the new item name.
    for dt in ("Sales Taxes and Charges", "Purchase Taxes and Charges"):
        for d in frappe.db.sql("""select name, item_wise_tax_detail from `tab{0}`
                where ifnull(item_wise_tax_detail, '') != ''""".format(dt), as_dict=1):
            item_wise_tax_detail = json.loads(d.item_wise_tax_detail)
            if old_name in item_wise_tax_detail:
                item_wise_tax_detail[new_name] = item_wise_tax_detail[old_name]
                item_wise_tax_detail.pop(old_name)
                frappe.db.set_value(dt, d.name, "item_wise_tax_detail",
                    json.dumps(item_wise_tax_detail), update_modified=False)
def set_last_purchase_rate(self, new_name):
    """Refresh last_purchase_rate on the renamed item from its purchase history."""
    rate = get_last_purchase_details(new_name).get("base_rate", 0)
    frappe.db.set_value("Item", new_name, "last_purchase_rate", rate)
def recalculate_bin_qty(self, new_name):
    """Rebuild all Bin rows for the merged item by reposting stock per warehouse."""
    from erpnext.stock.stock_balance import repost_stock
    frappe.db.auto_commit_on_many_writes = 1
    # Temporarily allow negative stock so intermediate repost states don't fail.
    existing_allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
    frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
    repost_stock_for_warehouses = frappe.db.sql_list("""select distinct warehouse
        from tabBin where item_code=%s""", new_name)
    # Delete all existing bins to avoid duplicate bins for the same item and warehouse
    frappe.db.sql("delete from `tabBin` where item_code=%s", new_name)
    for warehouse in repost_stock_for_warehouses:
        repost_stock(new_name, warehouse)
    # Restore the original setting.
    frappe.db.set_value("Stock Settings", None, "allow_negative_stock", existing_allow_negative_stock)
    frappe.db.auto_commit_on_many_writes = 0
def copy_specification_from_item_group(self):
    """Replace website specifications with those defined on the item group."""
    self.set("website_specifications", [])
    if not self.item_group:
        return
    specs = frappe.db.get_values("Item Website Specification",
        {"parent": self.item_group}, ["label", "description"])
    for label, description in specs:
        row = self.append("website_specifications")
        row.label = label
        row.description = description
def update_item_desc(self):
    """Propagate a changed description to open (docstatus < 2) BOM records."""
    # NOTE(review): looks up a BOM by the item's name as a cheap change guard —
    # verify BOM naming matches item names in this installation.
    if frappe.db.get_value('BOM',self.name, 'description') != self.description:
        frappe.db.sql("""update `tabBOM` set description = %s where item = %s and docstatus < 2""",(self.description, self.name))
        frappe.db.sql("""update `tabBOM Item` set description = %s where
            item_code = %s and docstatus < 2""",(self.description, self.name))
        frappe.db.sql("""update `tabBOM Explosion Item` set description = %s where
            item_code = %s and docstatus < 2""",(self.description, self.name))
def update_template_item(self):
    """Set Show in Website for Template Item if True for its Variant"""
    if self.variant_of and self.show_in_website:
        # A variant is shown through its template, never directly.
        self.show_variant_in_website = 1
        self.show_in_website = 0
    if self.show_variant_in_website:
        # show template
        template_item = frappe.get_doc("Item", self.variant_of)
        if not template_item.show_in_website:
            template_item.show_in_website = 1
            template_item.flags.ignore_permissions = True
            template_item.save()
def validate_has_variants(self):
    """Block clearing 'Has Variants' while variant items still exist."""
    if self.has_variants or not frappe.db.get_value("Item", self.name, "has_variants"):
        return
    if frappe.db.exists("Item", {"variant_of": self.name}):
        frappe.throw(_("Item has variants."))
def validate_uom(self):
    """The stock UOM may only change when no ledger/bin disagrees; variants
    must share the template's stock UOM."""
    if not self.get("__islocal"):
        check_stock_uom_with_bin(self.name, self.stock_uom)
    if self.has_variants:
        # Variants inherit the template's stock UOM, so check their bins too.
        for d in frappe.db.get_all("Item", filters= {"variant_of": self.name}):
            check_stock_uom_with_bin(d.name, self.stock_uom)
    if self.variant_of:
        template_uom = frappe.db.get_value("Item", self.variant_of, "stock_uom")
        if template_uom != self.stock_uom:
            frappe.throw(_("Default Unit of Measure for Variant '{0}' must be same as in Template '{1}'")
                .format(self.stock_uom, template_uom))
def validate_attributes(self):
    """For attribute-based templates/variants, require a non-empty,
    duplicate-free attribute table."""
    if (self.has_variants or self.variant_of) and self.variant_based_on=='Item Attribute':
        attributes = []
        if not self.attributes:
            frappe.throw(_("Attribute table is mandatory"))
        for d in self.attributes:
            if d.attribute in attributes:
                # Bug fix: format AFTER the translation lookup — formatting
                # inside _() meant the substituted string never matched the
                # translation catalog.
                frappe.throw(_("Attribute {0} selected multiple times in Attributes Table").format(d.attribute))
            else:
                attributes.append(d.attribute)
def validate_variant_attributes(self):
    """Every attribute row needs a value, and the full combination must be
    unique among the template's variants."""
    if self.variant_of and self.variant_based_on=='Item Attribute':
        args = {}
        for d in self.attributes:
            if not d.attribute_value:
                frappe.throw(_("Please specify Attribute Value for attribute {0}").format(d.attribute))
            args[d.attribute] = d.attribute_value
        # A different variant with the exact same attribute values is a duplicate.
        variant = get_variant(self.variant_of, args, self.name)
        if variant:
            frappe.throw(_("Item variant {0} exists with same attributes")
                .format(variant), ItemVariantExistsError)
        validate_item_variant_attributes(self, args)
def get_timeline_data(doctype, name):
    '''returns timeline data based on stock ledger entry

    Maps {timestamp: count of Stock Ledger Entries on that posting date}
    for the last year, for the heatmap on the item page.
    '''
    out = {}
    items = dict(frappe.db.sql('''select posting_date, count(*)
        from `tabStock Ledger Entry` where item_code=%s
        and posting_date > date_sub(curdate(), interval 1 year)
        group by posting_date''', name))
    # dict.items() works on both Python 2 and 3; iteritems() is Python-2-only
    # and breaks under a Python 3 runtime.
    for date, count in items.items():
        timestamp = get_timestamp(date)
        out[timestamp] = count
    return out
def validate_end_of_life(item_code, end_of_life=None, disabled=None, verbose=1):
    """Report (and by default raise) when the item is past end-of-life or disabled."""
    # Fetch missing values from the database in one call.
    if (not end_of_life) or (disabled is None):
        end_of_life, disabled = frappe.db.get_value("Item", item_code, ["end_of_life", "disabled"])
    past_eol = (end_of_life and end_of_life != "0000-00-00"
        and getdate(end_of_life) <= now_datetime().date())
    if past_eol:
        _msgprint(_("Item {0} has reached its end of life on {1}").format(
            item_code, formatdate(end_of_life)), verbose)
    if disabled:
        _msgprint(_("Item {0} is disabled").format(item_code), verbose)
def validate_is_stock_item(item_code, is_stock_item=None, verbose=1):
    """Report (and by default raise) unless the item is flagged as a stock item."""
    if not is_stock_item:
        is_stock_item = frappe.db.get_value("Item", item_code, "is_stock_item")
    if is_stock_item != 1:
        _msgprint(_("Item {0} is not a stock Item").format(item_code), verbose)
def validate_cancelled_item(item_code, docstatus=None, verbose=1):
    """Report (and by default raise) when the item document is cancelled (docstatus 2)."""
    if docstatus is None:
        docstatus = frappe.db.get_value("Item", item_code, "docstatus")
    if docstatus == 2:
        _msgprint(_("Item {0} is cancelled").format(item_code), verbose)
def _msgprint(msg, verbose):
    """Raise *msg* as a ValidationError; when verbose, also show it to the user.

    Args:
        msg: translated message text.
        verbose: truthy -> msgprint(..., raise_exception=True); falsy -> plain raise.
    """
    if verbose:
        msgprint(msg, raise_exception=True)
    else:
        # Bug fix: `raise X, msg` is Python-2-only syntax; the call form is
        # equivalent on Python 2 and also valid on Python 3.
        raise frappe.ValidationError(msg)
def get_last_purchase_details(item_code, doc_name=None, conversion_rate=1.0):
    """returns last purchase details in stock uom

    Looks at the most recent submitted Purchase Order and Purchase Receipt for
    *item_code* (excluding *doc_name*), picks the newer of the two, and returns
    rates normalised to the stock UOM via the row's conversion factor.
    Returns an empty frappe._dict when there is no purchase history.
    """
    # get last purchase order item details
    last_purchase_order = frappe.db.sql("""\
        select po.name, po.transaction_date, po.conversion_rate,
            po_item.conversion_factor, po_item.base_price_list_rate,
            po_item.discount_percentage, po_item.base_rate
        from `tabPurchase Order` po, `tabPurchase Order Item` po_item
        where po.docstatus = 1 and po_item.item_code = %s and po.name != %s and
            po.name = po_item.parent
        order by po.transaction_date desc, po.name desc
        limit 1""", (item_code, cstr(doc_name)), as_dict=1)
    # get last purchase receipt item details
    last_purchase_receipt = frappe.db.sql("""\
        select pr.name, pr.posting_date, pr.posting_time, pr.conversion_rate,
            pr_item.conversion_factor, pr_item.base_price_list_rate, pr_item.discount_percentage,
            pr_item.base_rate
        from `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item
        where pr.docstatus = 1 and pr_item.item_code = %s and pr.name != %s and
            pr.name = pr_item.parent
        order by pr.posting_date desc, pr.posting_time desc, pr.name desc
        limit 1""", (item_code, cstr(doc_name)), as_dict=1)
    # "1900-01-01" stands in for "no record" so the date comparison works.
    purchase_order_date = getdate(last_purchase_order and last_purchase_order[0].transaction_date \
        or "1900-01-01")
    purchase_receipt_date = getdate(last_purchase_receipt and \
        last_purchase_receipt[0].posting_date or "1900-01-01")
    # Prefer whichever document is newer.
    if (purchase_order_date > purchase_receipt_date) or \
            (last_purchase_order and not last_purchase_receipt):
        # use purchase order
        last_purchase = last_purchase_order[0]
        purchase_date = purchase_order_date
    elif (purchase_receipt_date > purchase_order_date) or \
            (last_purchase_receipt and not last_purchase_order):
        # use purchase receipt
        last_purchase = last_purchase_receipt[0]
        purchase_date = purchase_receipt_date
    else:
        return frappe._dict()
    # Normalise per-purchase-UOM rates to the stock UOM.
    conversion_factor = flt(last_purchase.conversion_factor)
    out = frappe._dict({
        "base_price_list_rate": flt(last_purchase.base_price_list_rate) / conversion_factor,
        "base_rate": flt(last_purchase.base_rate) / conversion_factor,
        "discount_percentage": flt(last_purchase.discount_percentage),
        "purchase_date": purchase_date
    })
    # Convert company-currency rates into the requested currency.
    conversion_rate = flt(conversion_rate) or 1.0
    out.update({
        "price_list_rate": out.base_price_list_rate / conversion_rate,
        "rate": out.base_rate / conversion_rate,
        "base_rate": out.base_rate
    })
    return out
def invalidate_cache_for_item(doc):
    """Invalidate website caches for the item's current, website and old item groups."""
    invalidate_cache_for(doc, doc.item_group)
    website_item_groups = list(set((doc.get("old_website_item_groups") or [])
        + [d.item_group for d in doc.get({"doctype":"Website Item Group"}) if d.item_group]))
    for item_group in website_item_groups:
        invalidate_cache_for(doc, item_group)
    # The previous group (captured during validate()) also needs invalidation.
    if doc.get("old_item_group") and doc.get("old_item_group") != doc.item_group:
        invalidate_cache_for(doc, doc.old_item_group)
def check_stock_uom_with_bin(item, stock_uom):
    """Allow a stock-UOM change only when ledger entries / active bins agree;
    matching idle bins are updated in place, otherwise the change is blocked."""
    if stock_uom == frappe.db.get_value("Item", item, "stock_uom"):
        return
    matched=True
    ref_uom = frappe.db.get_value("Stock Ledger Entry",
        {"item_code": item}, "stock_uom")
    if ref_uom:
        # Any existing ledger entry fixes the UOM permanently.
        if cstr(ref_uom) != cstr(stock_uom):
            matched = False
    else:
        bin_list = frappe.db.sql("select * from tabBin where item_code=%s", item, as_dict=1)
        for bin in bin_list:
            # A bin with pending quantity in another UOM blocks the change.
            if (bin.reserved_qty > 0 or bin.ordered_qty > 0 or bin.indented_qty > 0 \
                or bin.planned_qty > 0) and cstr(bin.stock_uom) != cstr(stock_uom):
                matched = False
                break
        # No ledger entries and no conflicting bins: update the bins in place.
        if matched and bin_list:
            frappe.db.sql("""update tabBin set stock_uom=%s where item_code=%s""", (stock_uom, item))
    if not matched:
        frappe.throw(_("Default Unit of Measure for Item {0} cannot be changed directly because you have already made some transaction(s) with another UOM. You will need to create a new Item to use a different Default UOM.").format(item))
``` |
[
{
"content": "Provide an exact copy of the source code:\n```python\n#-*- coding: utf-8 -*-\n\n###########################################################################\n## ##\n## Copyrights Etienne Chové <chove@crans.org> 2009 ... | [
{
"content": "Provide an exact copy of the source code:\n<|memory_start|>```python\n#-*- coding: utf-8 -*-\n\n###########################################################################\n## ##\n## Copyrights Etienne Chové <chove@crans.org> 20... | ```python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Etienne Chové <chove@crans.org> 2009 ##
## Copyrights Frédéric Rodrigo 2011 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from plugins.Plugin import Plugin
class TagRemove_Incompatibles(Plugin):
    """Osmose analyser plugin: reports OSM objects carrying mutually
    incompatible top-level tags (e.g. both highway and waterway)."""

    def init(self, logger):
        Plugin.init(self, logger)
        # Error class 900: generic tag conflict.
        self.errors[900] = { "item": 4030, "level": 1, "tag": ["tag", "fix:chair"], "desc": T_(u"Tag conflict") }
        # Each set lists tag keys of which at most ONE may appear on an object.
        self.CONFLICT = {}
        self.CONFLICT[0] = set(['aerialway', 'aeroway', 'amenity', 'highway', 'railway', 'waterway', 'landuse'])
        self.CONFLICT[1] = set(['aerialway', 'aeroway', 'amenity', 'highway', 'leisure', 'railway', 'natural'])
        self.CONFLICT[2] = set(['aerialway', 'aeroway', 'amenity', 'highway', 'leisure', 'railway', 'waterway', 'place'])
        self.CONFLICT[3] = set(['building', 'place'])
        self.CONFLICT[4] = set(['information', 'place'])

    def node(self, data, tags):
        # NOTE: mutates `tags` in place, removing keys whose combinations are
        # legitimate, BEFORE running the generic conflict check below.
        if tags.get('railway') in ('abandoned', 'tram', 'proposed', 'razed', 'construction'):
            del tags['railway']
        if tags.get('waterway') == 'dam':
            del tags['waterway']
        # A combined tram stop / bus stop is a valid pairing.
        if tags.get('railway') == 'tram_stop' and tags.get('highway') == 'bus_stop':
            del tags['railway']
            del tags['highway']
        # First conflicting key set wins; report all offending keys.
        for i in range(0, len(self.CONFLICT)):
            conflict = set(tags).intersection(self.CONFLICT[i])
            if len(conflict) > 1:
                return {"class": 900, "subclass": 1, "text": T_("Conflict between tags: %s", (", ".join(conflict)))}
        if tags.get('bridge') == 'yes' and tags.get('tunnel') == 'yes':
            return {"class": 900, "subclass": 2, "text": T_("Conflict between tags: 'bridge' and 'tunnel'")}
        if tags.get('highway') == 'crossing' and tags.get('crossing') == 'no':
            return {"class": 900, "subclass": 3, "text": T_("Conflict between tags: crossing=no must be used without a highway=crossing")}

    def way(self, data, tags, nds):
        # Ways share the node-level rules.
        return self.node(data, tags)

    def relation(self, data, tags, members):
        # Relations share the node-level rules.
        return self.node(data, tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
    def test(self):
        """Conflicting tag sets must raise error 900 on node/way/relation;
        legitimate combinations must pass silently."""
        a = TagRemove_Incompatibles(None)
        a.init(None)
        # Each of these combinations must be reported.
        for t in [{"aerialway": "yes", "aeroway": "yes"},
                  {"highway": "trunk", "railway": "rail"},
                  {"bridge": "yes", "tunnel": "yes"},
                  {"crossing": "no", "highway": "crossing"},
                 ]:
            self.check_err(a.node(None, t), t)
            self.check_err(a.way(None, t, None), t)
            self.check_err(a.relation(None, t, None), t)
        # These are valid and must not be flagged.
        for t in [{"aerialway": "yes"},
                  {"highway": "residential", "railway": "tram"},
                  {"highway": "bus_stop", "railway": "tram_stop"},
                  {"bridge": "yes", "tunnel": "no"},
                  {"waterway": "dam", "highway": "road"},
                 ]:
            assert not a.node(None, t), t
``` |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.